]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/iwm/if_iwm.c
Import mandoc 1.4.1rc2
[FreeBSD/FreeBSD.git] / sys / dev / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
168
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000      0
170 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000      10
171
172 /* lower blocks contain EEPROM image and calibration data */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000      (16 * 512 * sizeof(uint16_t)) /* 16 KB */
174 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000      (32 * 512 * sizeof(uint16_t)) /* 32 KB */
175
176 #define IWM7260_FW      "iwm7260fw"
177 #define IWM3160_FW      "iwm3160fw"
178 #define IWM7265_FW      "iwm7265fw"
179 #define IWM7265D_FW     "iwm7265Dfw"
180 #define IWM8000_FW      "iwm8000Cfw"
181
182 #define IWM_DEVICE_7000_COMMON                                          \
183         .device_family = IWM_DEVICE_FAMILY_7000,                        \
184         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,              \
185         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000,       \
186         .apmg_wake_up_wa = 1
187
188 const struct iwm_cfg iwm7260_cfg = {
189         .fw_name = IWM7260_FW,
190         IWM_DEVICE_7000_COMMON,
191         .host_interrupt_operation_mode = 1,
192 };
193
194 const struct iwm_cfg iwm3160_cfg = {
195         .fw_name = IWM3160_FW,
196         IWM_DEVICE_7000_COMMON,
197         .host_interrupt_operation_mode = 1,
198 };
199
200 const struct iwm_cfg iwm3165_cfg = {
201         /* XXX IWM7265D_FW doesn't seem to work properly yet */
202         .fw_name = IWM7265_FW,
203         IWM_DEVICE_7000_COMMON,
204         .host_interrupt_operation_mode = 0,
205 };
206
207 const struct iwm_cfg iwm7265_cfg = {
208         .fw_name = IWM7265_FW,
209         IWM_DEVICE_7000_COMMON,
210         .host_interrupt_operation_mode = 0,
211 };
212
213 const struct iwm_cfg iwm7265d_cfg = {
214         /* XXX IWM7265D_FW doesn't seem to work properly yet */
215         .fw_name = IWM7265_FW,
216         IWM_DEVICE_7000_COMMON,
217         .host_interrupt_operation_mode = 0,
218 };
219
220 #define IWM_DEVICE_8000_COMMON                                          \
221         .device_family = IWM_DEVICE_FAMILY_8000,                        \
222         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000,              \
223         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
224
225 const struct iwm_cfg iwm8260_cfg = {
226         .fw_name = IWM8000_FW,
227         IWM_DEVICE_8000_COMMON,
228         .host_interrupt_operation_mode = 0,
229 };
230
231 const uint8_t iwm_nvm_channels[] = {
232         /* 2.4 GHz */
233         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
234         /* 5 GHz */
235         36, 40, 44, 48, 52, 56, 60, 64,
236         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
237         149, 153, 157, 161, 165
238 };
239 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
240     "IWM_NUM_CHANNELS is too small");
241
242 const uint8_t iwm_nvm_channels_8000[] = {
243         /* 2.4 GHz */
244         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
245         /* 5 GHz */
246         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
247         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
248         149, 153, 157, 161, 165, 169, 173, 177, 181
249 };
250 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
251     "IWM_NUM_CHANNELS_8000 is too small");
252
253 #define IWM_NUM_2GHZ_CHANNELS   14
254 #define IWM_N_HW_ADDR_MASK      0xF
255
256 /*
257  * XXX For now, there's simply a fixed set of rate table entries
258  * that are populated.
259  */
260 const struct iwm_rate {
261         uint8_t rate;
262         uint8_t plcp;
263 } iwm_rates[] = {
264         {   2,  IWM_RATE_1M_PLCP  },
265         {   4,  IWM_RATE_2M_PLCP  },
266         {  11,  IWM_RATE_5M_PLCP  },
267         {  22,  IWM_RATE_11M_PLCP },
268         {  12,  IWM_RATE_6M_PLCP  },
269         {  18,  IWM_RATE_9M_PLCP  },
270         {  24,  IWM_RATE_12M_PLCP },
271         {  36,  IWM_RATE_18M_PLCP },
272         {  48,  IWM_RATE_24M_PLCP },
273         {  72,  IWM_RATE_36M_PLCP },
274         {  96,  IWM_RATE_48M_PLCP },
275         { 108,  IWM_RATE_54M_PLCP },
276 };
277 #define IWM_RIDX_CCK    0
278 #define IWM_RIDX_OFDM   4
279 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
280 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
281 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
282
283 struct iwm_nvm_section {
284         uint16_t length;
285         uint8_t *data;
286 };
287
288 #define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)
289
290 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
291 static int      iwm_firmware_store_section(struct iwm_softc *,
292                                            enum iwm_ucode_type,
293                                            const uint8_t *, size_t);
294 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
295 static void     iwm_fw_info_free(struct iwm_fw_info *);
296 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
297 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
298 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
299                                      bus_size_t, bus_size_t);
300 static void     iwm_dma_contig_free(struct iwm_dma_info *);
301 static int      iwm_alloc_fwmem(struct iwm_softc *);
302 static int      iwm_alloc_sched(struct iwm_softc *);
303 static int      iwm_alloc_kw(struct iwm_softc *);
304 static int      iwm_alloc_ict(struct iwm_softc *);
305 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
306 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
307 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
308 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
309                                   int);
310 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
311 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
312 static void     iwm_enable_interrupts(struct iwm_softc *);
313 static void     iwm_restore_interrupts(struct iwm_softc *);
314 static void     iwm_disable_interrupts(struct iwm_softc *);
315 static void     iwm_ict_reset(struct iwm_softc *);
316 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
317 static void     iwm_stop_device(struct iwm_softc *);
318 static void     iwm_mvm_nic_config(struct iwm_softc *);
319 static int      iwm_nic_rx_init(struct iwm_softc *);
320 static int      iwm_nic_tx_init(struct iwm_softc *);
321 static int      iwm_nic_init(struct iwm_softc *);
322 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
323 static int      iwm_post_alive(struct iwm_softc *);
324 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
325                                    uint16_t, uint8_t *, uint16_t *);
326 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
327                                      uint16_t *, uint32_t);
328 static uint32_t iwm_eeprom_channel_flags(uint16_t);
329 static void     iwm_add_channel_band(struct iwm_softc *,
330                     struct ieee80211_channel[], int, int *, int, size_t,
331                     const uint8_t[]);
332 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
333                     struct ieee80211_channel[]);
334 static struct iwm_nvm_data *
335         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
336                            const uint16_t *, const uint16_t *,
337                            const uint16_t *, const uint16_t *,
338                            const uint16_t *);
339 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
340 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
341                                                struct iwm_nvm_data *,
342                                                const uint16_t *,
343                                                const uint16_t *);
344 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
345                             const uint16_t *);
346 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
347 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
348                                   const uint16_t *);
349 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
350                                    const uint16_t *);
351 static void     iwm_set_radio_cfg(const struct iwm_softc *,
352                                   struct iwm_nvm_data *, uint32_t);
353 static struct iwm_nvm_data *
354         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
355 static int      iwm_nvm_init(struct iwm_softc *);
356 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
357                                        const uint8_t *, uint32_t);
358 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
359                                         const uint8_t *, uint32_t);
360 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
361 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
362                                            struct iwm_fw_sects *, int , int *);
363 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
364 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
365 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
366 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
367 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
368 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
369                                               enum iwm_ucode_type);
370 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
371 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
372 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
373 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
374                                             struct iwm_rx_phy_info *);
375 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
376                                       struct iwm_rx_packet *,
377                                       struct iwm_rx_data *);
378 static int      iwm_get_noise(struct iwm_softc *sc,
379                     const struct iwm_mvm_statistics_rx_non_phy *);
380 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
381                                    struct iwm_rx_data *);
382 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
383                                          struct iwm_rx_packet *,
384                                          struct iwm_node *);
385 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
386                                   struct iwm_rx_data *);
387 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
388 #if 0
389 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
390                                  uint16_t);
391 #endif
392 static const struct iwm_rate *
393         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
394                         struct mbuf *, struct iwm_tx_cmd *);
395 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
396                        struct ieee80211_node *, int);
397 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
398                              const struct ieee80211_bpf_params *);
399 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
400                                       uint32_t tfd_msk, uint32_t flags);
401 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
402                                                 struct iwm_mvm_add_sta_cmd_v7 *,
403                                                 int *);
404 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
405                                        int);
406 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
407 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
408 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
409                                            struct iwm_int_sta *,
410                                            const uint8_t *, uint16_t, uint16_t);
411 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
412 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
413 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
414 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
415 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
416 static struct ieee80211_node *
417                 iwm_node_alloc(struct ieee80211vap *,
418                                const uint8_t[IEEE80211_ADDR_LEN]);
419 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
420 static int      iwm_media_change(struct ifnet *);
421 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
422 static void     iwm_endscan_cb(void *, int);
423 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
424                                         struct iwm_sf_cfg_cmd *,
425                                         struct ieee80211_node *);
426 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
427 static int      iwm_send_bt_init_conf(struct iwm_softc *);
428 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
429 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
430 static int      iwm_init_hw(struct iwm_softc *);
431 static void     iwm_init(struct iwm_softc *);
432 static void     iwm_start(struct iwm_softc *);
433 static void     iwm_stop(struct iwm_softc *);
434 static void     iwm_watchdog(void *);
435 static void     iwm_parent(struct ieee80211com *);
436 #ifdef IWM_DEBUG
437 static const char *
438                 iwm_desc_lookup(uint32_t);
439 static void     iwm_nic_error(struct iwm_softc *);
440 static void     iwm_nic_umac_error(struct iwm_softc *);
441 #endif
442 static void     iwm_notif_intr(struct iwm_softc *);
443 static void     iwm_intr(void *);
444 static int      iwm_attach(device_t);
445 static int      iwm_is_valid_ether_addr(uint8_t *);
446 static void     iwm_preinit(void *);
447 static int      iwm_detach_local(struct iwm_softc *sc, int);
448 static void     iwm_init_task(void *);
449 static void     iwm_radiotap_attach(struct iwm_softc *);
450 static struct ieee80211vap *
451                 iwm_vap_create(struct ieee80211com *,
452                                const char [IFNAMSIZ], int,
453                                enum ieee80211_opmode, int,
454                                const uint8_t [IEEE80211_ADDR_LEN],
455                                const uint8_t [IEEE80211_ADDR_LEN]);
456 static void     iwm_vap_delete(struct ieee80211vap *);
457 static void     iwm_scan_start(struct ieee80211com *);
458 static void     iwm_scan_end(struct ieee80211com *);
459 static void     iwm_update_mcast(struct ieee80211com *);
460 static void     iwm_set_channel(struct ieee80211com *);
461 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
462 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
463 static int      iwm_detach(device_t);
464
465 /*
466  * Firmware parser.
467  */
468
469 static int
470 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
471 {
472         const struct iwm_fw_cscheme_list *l = (const void *)data;
473
474         if (dlen < sizeof(*l) ||
475             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
476                 return EINVAL;
477
478         /* we don't actually store anything for now, always use s/w crypto */
479
480         return 0;
481 }
482
483 static int
484 iwm_firmware_store_section(struct iwm_softc *sc,
485     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
486 {
487         struct iwm_fw_sects *fws;
488         struct iwm_fw_onesect *fwone;
489
490         if (type >= IWM_UCODE_TYPE_MAX)
491                 return EINVAL;
492         if (dlen < sizeof(uint32_t))
493                 return EINVAL;
494
495         fws = &sc->sc_fw.fw_sects[type];
496         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
497                 return EINVAL;
498
499         fwone = &fws->fw_sect[fws->fw_count];
500
501         /* first 32bit are device load offset */
502         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
503
504         /* rest is data */
505         fwone->fws_data = data + sizeof(uint32_t);
506         fwone->fws_len = dlen - sizeof(uint32_t);
507
508         fws->fw_count++;
509
510         return 0;
511 }
512
513 #define IWM_DEFAULT_SCAN_CHANNELS 40
514
515 /* iwlwifi: iwl-drv.c */
516 struct iwm_tlv_calib_data {
517         uint32_t ucode_type;
518         struct iwm_tlv_calib_ctrl calib;
519 } __packed;
520
521 static int
522 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
523 {
524         const struct iwm_tlv_calib_data *def_calib = data;
525         uint32_t ucode_type = le32toh(def_calib->ucode_type);
526
527         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
528                 device_printf(sc->sc_dev,
529                     "Wrong ucode_type %u for default "
530                     "calibration.\n", ucode_type);
531                 return EINVAL;
532         }
533
534         sc->sc_default_calib[ucode_type].flow_trigger =
535             def_calib->calib.flow_trigger;
536         sc->sc_default_calib[ucode_type].event_trigger =
537             def_calib->calib.event_trigger;
538
539         return 0;
540 }
541
542 static void
543 iwm_fw_info_free(struct iwm_fw_info *fw)
544 {
545         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
546         fw->fw_fp = NULL;
547         /* don't touch fw->fw_status */
548         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
549 }
550
551 static int
552 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
553 {
554         struct iwm_fw_info *fw = &sc->sc_fw;
555         const struct iwm_tlv_ucode_header *uhdr;
556         struct iwm_ucode_tlv tlv;
557         enum iwm_ucode_tlv_type tlv_type;
558         const struct firmware *fwp;
559         const uint8_t *data;
560         uint32_t usniffer_img;
561         uint32_t paging_mem_size;
562         int error = 0;
563         size_t len;
564
565         if (fw->fw_status == IWM_FW_STATUS_DONE &&
566             ucode_type != IWM_UCODE_INIT)
567                 return 0;
568
569         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
570                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
571         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
572
573         if (fw->fw_fp != NULL)
574                 iwm_fw_info_free(fw);
575
576         /*
577          * Load firmware into driver memory.
578          * fw_fp will be set.
579          */
580         IWM_UNLOCK(sc);
581         fwp = firmware_get(sc->cfg->fw_name);
582         IWM_LOCK(sc);
583         if (fwp == NULL) {
584                 device_printf(sc->sc_dev,
585                     "could not read firmware %s (error %d)\n",
586                     sc->cfg->fw_name, error);
587                 goto out;
588         }
589         fw->fw_fp = fwp;
590
591         /* (Re-)Initialize default values. */
592         sc->sc_capaflags = 0;
593         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
594         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
595         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
596
597         /*
598          * Parse firmware contents
599          */
600
601         uhdr = (const void *)fw->fw_fp->data;
602         if (*(const uint32_t *)fw->fw_fp->data != 0
603             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
604                 device_printf(sc->sc_dev, "invalid firmware %s\n",
605                     sc->cfg->fw_name);
606                 error = EINVAL;
607                 goto out;
608         }
609
610         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
611             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
612             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
613             IWM_UCODE_API(le32toh(uhdr->ver)));
614         data = uhdr->data;
615         len = fw->fw_fp->datasize - sizeof(*uhdr);
616
617         while (len >= sizeof(tlv)) {
618                 size_t tlv_len;
619                 const void *tlv_data;
620
621                 memcpy(&tlv, data, sizeof(tlv));
622                 tlv_len = le32toh(tlv.length);
623                 tlv_type = le32toh(tlv.type);
624
625                 len -= sizeof(tlv);
626                 data += sizeof(tlv);
627                 tlv_data = data;
628
629                 if (len < tlv_len) {
630                         device_printf(sc->sc_dev,
631                             "firmware too short: %zu bytes\n",
632                             len);
633                         error = EINVAL;
634                         goto parse_out;
635                 }
636
637                 switch ((int)tlv_type) {
638                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
639                         if (tlv_len < sizeof(uint32_t)) {
640                                 device_printf(sc->sc_dev,
641                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
642                                     __func__,
643                                     (int) tlv_len);
644                                 error = EINVAL;
645                                 goto parse_out;
646                         }
647                         sc->sc_capa_max_probe_len
648                             = le32toh(*(const uint32_t *)tlv_data);
649                         /* limit it to something sensible */
650                         if (sc->sc_capa_max_probe_len >
651                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
652                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
653                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
654                                     "ridiculous\n", __func__);
655                                 error = EINVAL;
656                                 goto parse_out;
657                         }
658                         break;
659                 case IWM_UCODE_TLV_PAN:
660                         if (tlv_len) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
663                                     __func__,
664                                     (int) tlv_len);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
669                         break;
670                 case IWM_UCODE_TLV_FLAGS:
671                         if (tlv_len < sizeof(uint32_t)) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
674                                     __func__,
675                                     (int) tlv_len);
676                                 error = EINVAL;
677                                 goto parse_out;
678                         }
679                         /*
680                          * Apparently there can be many flags, but Linux driver
681                          * parses only the first one, and so do we.
682                          *
683                          * XXX: why does this override IWM_UCODE_TLV_PAN?
684                          * Intentional or a bug?  Observations from
685                          * current firmware file:
686                          *  1) TLV_PAN is parsed first
687                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
688                          * ==> this resets TLV_PAN to itself... hnnnk
689                          */
690                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
691                         break;
692                 case IWM_UCODE_TLV_CSCHEME:
693                         if ((error = iwm_store_cscheme(sc,
694                             tlv_data, tlv_len)) != 0) {
695                                 device_printf(sc->sc_dev,
696                                     "%s: iwm_store_cscheme(): returned %d\n",
697                                     __func__,
698                                     error);
699                                 goto parse_out;
700                         }
701                         break;
702                 case IWM_UCODE_TLV_NUM_OF_CPU: {
703                         uint32_t num_cpu;
704                         if (tlv_len != sizeof(uint32_t)) {
705                                 device_printf(sc->sc_dev,
706                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
707                                     __func__,
708                                     (int) tlv_len);
709                                 error = EINVAL;
710                                 goto parse_out;
711                         }
712                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
713                         if (num_cpu < 1 || num_cpu > 2) {
714                                 device_printf(sc->sc_dev,
715                                     "%s: Driver supports only 1 or 2 CPUs\n",
716                                     __func__);
717                                 error = EINVAL;
718                                 goto parse_out;
719                         }
720                         break;
721                 }
722                 case IWM_UCODE_TLV_SEC_RT:
723                         if ((error = iwm_firmware_store_section(sc,
724                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
725                                 device_printf(sc->sc_dev,
726                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
727                                     __func__,
728                                     error);
729                                 goto parse_out;
730                         }
731                         break;
732                 case IWM_UCODE_TLV_SEC_INIT:
733                         if ((error = iwm_firmware_store_section(sc,
734                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
735                                 device_printf(sc->sc_dev,
736                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
737                                     __func__,
738                                     error);
739                                 goto parse_out;
740                         }
741                         break;
742                 case IWM_UCODE_TLV_SEC_WOWLAN:
743                         if ((error = iwm_firmware_store_section(sc,
744                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
745                                 device_printf(sc->sc_dev,
746                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
747                                     __func__,
748                                     error);
749                                 goto parse_out;
750                         }
751                         break;
752                 case IWM_UCODE_TLV_DEF_CALIB:
753                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
754                                 device_printf(sc->sc_dev,
755                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
756                                     __func__,
757                                     (int) tlv_len,
758                                     (int) sizeof(struct iwm_tlv_calib_data));
759                                 error = EINVAL;
760                                 goto parse_out;
761                         }
762                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
763                                 device_printf(sc->sc_dev,
764                                     "%s: iwm_set_default_calib() failed: %d\n",
765                                     __func__,
766                                     error);
767                                 goto parse_out;
768                         }
769                         break;
770                 case IWM_UCODE_TLV_PHY_SKU:
771                         if (tlv_len != sizeof(uint32_t)) {
772                                 error = EINVAL;
773                                 device_printf(sc->sc_dev,
774                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
775                                     __func__,
776                                     (int) tlv_len);
777                                 goto parse_out;
778                         }
779                         sc->sc_fw.phy_config =
780                             le32toh(*(const uint32_t *)tlv_data);
781                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
782                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
783                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
784                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
785                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
786                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
787                         break;
788
789                 case IWM_UCODE_TLV_API_CHANGES_SET: {
790                         const struct iwm_ucode_api *api;
791                         if (tlv_len != sizeof(*api)) {
792                                 error = EINVAL;
793                                 goto parse_out;
794                         }
795                         api = (const struct iwm_ucode_api *)tlv_data;
796                         /* Flags may exceed 32 bits in future firmware. */
797                         if (le32toh(api->api_index) > 0) {
798                                 device_printf(sc->sc_dev,
799                                     "unsupported API index %d\n",
800                                     le32toh(api->api_index));
801                                 goto parse_out;
802                         }
803                         sc->sc_ucode_api = le32toh(api->api_flags);
804                         break;
805                 }
806
807                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
808                         const struct iwm_ucode_capa *capa;
809                         int idx, i;
810                         if (tlv_len != sizeof(*capa)) {
811                                 error = EINVAL;
812                                 goto parse_out;
813                         }
814                         capa = (const struct iwm_ucode_capa *)tlv_data;
815                         idx = le32toh(capa->api_index);
816                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
817                                 device_printf(sc->sc_dev,
818                                     "unsupported API index %d\n", idx);
819                                 goto parse_out;
820                         }
821                         for (i = 0; i < 32; i++) {
822                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
823                                         continue;
824                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
825                         }
826                         break;
827                 }
828
829                 case 48: /* undocumented TLV */
830                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
831                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
832                         /* ignore, not used by current driver */
833                         break;
834
835                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
836                         if ((error = iwm_firmware_store_section(sc,
837                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
838                             tlv_len)) != 0)
839                                 goto parse_out;
840                         break;
841
842                 case IWM_UCODE_TLV_PAGING:
843                         if (tlv_len != sizeof(uint32_t)) {
844                                 error = EINVAL;
845                                 goto parse_out;
846                         }
847                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
848
849                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
850                             "%s: Paging: paging enabled (size = %u bytes)\n",
851                             __func__, paging_mem_size);
852                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
853                                 device_printf(sc->sc_dev,
854                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
855                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
856                                 error = EINVAL;
857                                 goto out;
858                         }
859                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
860                                 device_printf(sc->sc_dev,
861                                     "%s: Paging: image isn't multiple %u\n",
862                                     __func__, IWM_FW_PAGING_SIZE);
863                                 error = EINVAL;
864                                 goto out;
865                         }
866
867                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
868                             paging_mem_size;
869                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
870                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
871                             paging_mem_size;
872                         break;
873
874                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
875                         if (tlv_len != sizeof(uint32_t)) {
876                                 error = EINVAL;
877                                 goto parse_out;
878                         }
879                         sc->sc_capa_n_scan_channels =
880                           le32toh(*(const uint32_t *)tlv_data);
881                         break;
882
883                 case IWM_UCODE_TLV_FW_VERSION:
884                         if (tlv_len != sizeof(uint32_t) * 3) {
885                                 error = EINVAL;
886                                 goto parse_out;
887                         }
888                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
889                             "%d.%d.%d",
890                             le32toh(((const uint32_t *)tlv_data)[0]),
891                             le32toh(((const uint32_t *)tlv_data)[1]),
892                             le32toh(((const uint32_t *)tlv_data)[2]));
893                         break;
894
895                 case IWM_UCODE_TLV_FW_MEM_SEG:
896                         break;
897
898                 default:
899                         device_printf(sc->sc_dev,
900                             "%s: unknown firmware section %d, abort\n",
901                             __func__, tlv_type);
902                         error = EINVAL;
903                         goto parse_out;
904                 }
905
906                 len -= roundup(tlv_len, 4);
907                 data += roundup(tlv_len, 4);
908         }
909
910         KASSERT(error == 0, ("unhandled error"));
911
912  parse_out:
913         if (error) {
914                 device_printf(sc->sc_dev, "firmware parse error %d, "
915                     "section type %d\n", error, tlv_type);
916         }
917
918         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
919                 device_printf(sc->sc_dev,
920                     "device uses unsupported power ops\n");
921                 error = ENOTSUP;
922         }
923
924  out:
925         if (error) {
926                 fw->fw_status = IWM_FW_STATUS_NONE;
927                 if (fw->fw_fp != NULL)
928                         iwm_fw_info_free(fw);
929         } else
930                 fw->fw_status = IWM_FW_STATUS_DONE;
931         wakeup(&sc->sc_fw);
932
933         return error;
934 }
935
936 /*
937  * DMA resource routines
938  */
939
940 static void
941 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
942 {
943         if (error != 0)
944                 return;
945         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
946         *(bus_addr_t *)arg = segs[0].ds_addr;
947 }
948
949 static int
950 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
951     bus_size_t size, bus_size_t alignment)
952 {
953         int error;
954
955         dma->tag = NULL;
956         dma->map = NULL;
957         dma->size = size;
958         dma->vaddr = NULL;
959
960         error = bus_dma_tag_create(tag, alignment,
961             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
962             1, size, 0, NULL, NULL, &dma->tag);
963         if (error != 0)
964                 goto fail;
965
966         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
967             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
968         if (error != 0)
969                 goto fail;
970
971         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
972             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
973         if (error != 0) {
974                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
975                 dma->vaddr = NULL;
976                 goto fail;
977         }
978
979         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
980
981         return 0;
982
983 fail:
984         iwm_dma_contig_free(dma);
985
986         return error;
987 }
988
989 static void
990 iwm_dma_contig_free(struct iwm_dma_info *dma)
991 {
992         if (dma->vaddr != NULL) {
993                 bus_dmamap_sync(dma->tag, dma->map,
994                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
995                 bus_dmamap_unload(dma->tag, dma->map);
996                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
997                 dma->vaddr = NULL;
998         }
999         if (dma->tag != NULL) {
1000                 bus_dma_tag_destroy(dma->tag);
1001                 dma->tag = NULL;
1002         }
1003 }
1004
1005 /* fwmem is used to load firmware onto the card */
1006 static int
1007 iwm_alloc_fwmem(struct iwm_softc *sc)
1008 {
1009         /* Must be aligned on a 16-byte boundary. */
1010         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
1011             sc->sc_fwdmasegsz, 16);
1012 }
1013
1014 /* tx scheduler rings.  not used? */
1015 static int
1016 iwm_alloc_sched(struct iwm_softc *sc)
1017 {
1018         /* TX scheduler rings must be aligned on a 1KB boundary. */
1019         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1020             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1021 }
1022
1023 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
1024 static int
1025 iwm_alloc_kw(struct iwm_softc *sc)
1026 {
1027         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
1028 }
1029
1030 /* interrupt cause table */
1031 static int
1032 iwm_alloc_ict(struct iwm_softc *sc)
1033 {
1034         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1035             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
1036 }
1037
1038 static int
1039 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1040 {
1041         bus_size_t size;
1042         int i, error;
1043
1044         ring->cur = 0;
1045
1046         /* Allocate RX descriptors (256-byte aligned). */
1047         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1048         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1049         if (error != 0) {
1050                 device_printf(sc->sc_dev,
1051                     "could not allocate RX ring DMA memory\n");
1052                 goto fail;
1053         }
1054         ring->desc = ring->desc_dma.vaddr;
1055
1056         /* Allocate RX status area (16-byte aligned). */
1057         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1058             sizeof(*ring->stat), 16);
1059         if (error != 0) {
1060                 device_printf(sc->sc_dev,
1061                     "could not allocate RX status DMA memory\n");
1062                 goto fail;
1063         }
1064         ring->stat = ring->stat_dma.vaddr;
1065
1066         /* Create RX buffer DMA tag. */
1067         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1068             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1069             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1070         if (error != 0) {
1071                 device_printf(sc->sc_dev,
1072                     "%s: could not create RX buf DMA tag, error %d\n",
1073                     __func__, error);
1074                 goto fail;
1075         }
1076
1077         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1078         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1079         if (error != 0) {
1080                 device_printf(sc->sc_dev,
1081                     "%s: could not create RX buf DMA map, error %d\n",
1082                     __func__, error);
1083                 goto fail;
1084         }
1085         /*
1086          * Allocate and map RX buffers.
1087          */
1088         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1089                 struct iwm_rx_data *data = &ring->data[i];
1090                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1091                 if (error != 0) {
1092                         device_printf(sc->sc_dev,
1093                             "%s: could not create RX buf DMA map, error %d\n",
1094                             __func__, error);
1095                         goto fail;
1096                 }
1097                 data->m = NULL;
1098
1099                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1100                         goto fail;
1101                 }
1102         }
1103         return 0;
1104
1105 fail:   iwm_free_rx_ring(sc, ring);
1106         return error;
1107 }
1108
1109 static void
1110 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1111 {
1112         /* Reset the ring state */
1113         ring->cur = 0;
1114
1115         /*
1116          * The hw rx ring index in shared memory must also be cleared,
1117          * otherwise the discrepancy can cause reprocessing chaos.
1118          */
1119         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1120 }
1121
1122 static void
1123 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1124 {
1125         int i;
1126
1127         iwm_dma_contig_free(&ring->desc_dma);
1128         iwm_dma_contig_free(&ring->stat_dma);
1129
1130         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1131                 struct iwm_rx_data *data = &ring->data[i];
1132
1133                 if (data->m != NULL) {
1134                         bus_dmamap_sync(ring->data_dmat, data->map,
1135                             BUS_DMASYNC_POSTREAD);
1136                         bus_dmamap_unload(ring->data_dmat, data->map);
1137                         m_freem(data->m);
1138                         data->m = NULL;
1139                 }
1140                 if (data->map != NULL) {
1141                         bus_dmamap_destroy(ring->data_dmat, data->map);
1142                         data->map = NULL;
1143                 }
1144         }
1145         if (ring->spare_map != NULL) {
1146                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1147                 ring->spare_map = NULL;
1148         }
1149         if (ring->data_dmat != NULL) {
1150                 bus_dma_tag_destroy(ring->data_dmat);
1151                 ring->data_dmat = NULL;
1152         }
1153 }
1154
1155 static int
1156 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1157 {
1158         bus_addr_t paddr;
1159         bus_size_t size;
1160         size_t maxsize;
1161         int nsegments;
1162         int i, error;
1163
1164         ring->qid = qid;
1165         ring->queued = 0;
1166         ring->cur = 0;
1167
1168         /* Allocate TX descriptors (256-byte aligned). */
1169         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1170         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1171         if (error != 0) {
1172                 device_printf(sc->sc_dev,
1173                     "could not allocate TX ring DMA memory\n");
1174                 goto fail;
1175         }
1176         ring->desc = ring->desc_dma.vaddr;
1177
1178         /*
1179          * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1180          * to allocate commands space for other rings.
1181          */
1182         if (qid > IWM_MVM_CMD_QUEUE)
1183                 return 0;
1184
1185         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1186         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1187         if (error != 0) {
1188                 device_printf(sc->sc_dev,
1189                     "could not allocate TX cmd DMA memory\n");
1190                 goto fail;
1191         }
1192         ring->cmd = ring->cmd_dma.vaddr;
1193
1194         /* FW commands may require more mapped space than packets. */
1195         if (qid == IWM_MVM_CMD_QUEUE) {
1196                 maxsize = IWM_RBUF_SIZE;
1197                 nsegments = 1;
1198         } else {
1199                 maxsize = MCLBYTES;
1200                 nsegments = IWM_MAX_SCATTER - 2;
1201         }
1202
1203         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1204             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1205             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1206         if (error != 0) {
1207                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1208                 goto fail;
1209         }
1210
1211         paddr = ring->cmd_dma.paddr;
1212         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1213                 struct iwm_tx_data *data = &ring->data[i];
1214
1215                 data->cmd_paddr = paddr;
1216                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1217                     + offsetof(struct iwm_tx_cmd, scratch);
1218                 paddr += sizeof(struct iwm_device_cmd);
1219
1220                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1221                 if (error != 0) {
1222                         device_printf(sc->sc_dev,
1223                             "could not create TX buf DMA map\n");
1224                         goto fail;
1225                 }
1226         }
1227         KASSERT(paddr == ring->cmd_dma.paddr + size,
1228             ("invalid physical address"));
1229         return 0;
1230
1231 fail:   iwm_free_tx_ring(sc, ring);
1232         return error;
1233 }
1234
1235 static void
1236 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1237 {
1238         int i;
1239
1240         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241                 struct iwm_tx_data *data = &ring->data[i];
1242
1243                 if (data->m != NULL) {
1244                         bus_dmamap_sync(ring->data_dmat, data->map,
1245                             BUS_DMASYNC_POSTWRITE);
1246                         bus_dmamap_unload(ring->data_dmat, data->map);
1247                         m_freem(data->m);
1248                         data->m = NULL;
1249                 }
1250         }
1251         /* Clear TX descriptors. */
1252         memset(ring->desc, 0, ring->desc_dma.size);
1253         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1254             BUS_DMASYNC_PREWRITE);
1255         sc->qfullmsk &= ~(1 << ring->qid);
1256         ring->queued = 0;
1257         ring->cur = 0;
1258
1259         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1260                 iwm_pcie_clear_cmd_in_flight(sc);
1261 }
1262
1263 static void
1264 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1265 {
1266         int i;
1267
1268         iwm_dma_contig_free(&ring->desc_dma);
1269         iwm_dma_contig_free(&ring->cmd_dma);
1270
1271         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1272                 struct iwm_tx_data *data = &ring->data[i];
1273
1274                 if (data->m != NULL) {
1275                         bus_dmamap_sync(ring->data_dmat, data->map,
1276                             BUS_DMASYNC_POSTWRITE);
1277                         bus_dmamap_unload(ring->data_dmat, data->map);
1278                         m_freem(data->m);
1279                         data->m = NULL;
1280                 }
1281                 if (data->map != NULL) {
1282                         bus_dmamap_destroy(ring->data_dmat, data->map);
1283                         data->map = NULL;
1284                 }
1285         }
1286         if (ring->data_dmat != NULL) {
1287                 bus_dma_tag_destroy(ring->data_dmat);
1288                 ring->data_dmat = NULL;
1289         }
1290 }
1291
1292 /*
1293  * High-level hardware frobbing routines
1294  */
1295
1296 static void
1297 iwm_enable_interrupts(struct iwm_softc *sc)
1298 {
1299         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1300         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1301 }
1302
1303 static void
1304 iwm_restore_interrupts(struct iwm_softc *sc)
1305 {
1306         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1307 }
1308
1309 static void
1310 iwm_disable_interrupts(struct iwm_softc *sc)
1311 {
1312         /* disable interrupts */
1313         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1314
1315         /* acknowledge all interrupts */
1316         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1317         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1318 }
1319
1320 static void
1321 iwm_ict_reset(struct iwm_softc *sc)
1322 {
1323         iwm_disable_interrupts(sc);
1324
1325         /* Reset ICT table. */
1326         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1327         sc->ict_cur = 0;
1328
1329         /* Set physical address of ICT table (4KB aligned). */
1330         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1331             IWM_CSR_DRAM_INT_TBL_ENABLE
1332             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1333             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1334             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1335
1336         /* Switch to ICT interrupt mode in driver. */
1337         sc->sc_flags |= IWM_FLAG_USE_ICT;
1338
1339         /* Re-enable interrupts. */
1340         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1341         iwm_enable_interrupts(sc);
1342 }
1343
1344 /* iwlwifi pcie/trans.c */
1345
1346 /*
1347  * Since this .. hard-resets things, it's time to actually
1348  * mark the first vap (if any) as having no mac context.
1349  * It's annoying, but since the driver is potentially being
1350  * stop/start'ed whilst active (thanks openbsd port!) we
1351  * have to correctly track this.
1352  */
1353 static void
1354 iwm_stop_device(struct iwm_softc *sc)
1355 {
1356         struct ieee80211com *ic = &sc->sc_ic;
1357         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1358         int chnl, qid;
1359         uint32_t mask = 0;
1360
1361         /* tell the device to stop sending interrupts */
1362         iwm_disable_interrupts(sc);
1363
1364         /*
1365          * FreeBSD-local: mark the first vap as not-uploaded,
1366          * so the next transition through auth/assoc
1367          * will correctly populate the MAC context.
1368          */
1369         if (vap) {
1370                 struct iwm_vap *iv = IWM_VAP(vap);
1371                 iv->is_uploaded = 0;
1372         }
1373
1374         /* device going down, Stop using ICT table */
1375         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1376
1377         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1378
1379         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1380
1381         if (iwm_nic_lock(sc)) {
1382                 /* Stop each Tx DMA channel */
1383                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1384                         IWM_WRITE(sc,
1385                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1386                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1387                 }
1388
1389                 /* Wait for DMA channels to be idle */
1390                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1391                     5000)) {
1392                         device_printf(sc->sc_dev,
1393                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1394                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1395                 }
1396                 iwm_nic_unlock(sc);
1397         }
1398         iwm_pcie_rx_stop(sc);
1399
1400         /* Stop RX ring. */
1401         iwm_reset_rx_ring(sc, &sc->rxq);
1402
1403         /* Reset all TX rings. */
1404         for (qid = 0; qid < nitems(sc->txq); qid++)
1405                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1406
1407         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1408                 /* Power-down device's busmaster DMA clocks */
1409                 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1410                     IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1411                 DELAY(5);
1412         }
1413
1414         /* Make sure (redundant) we've released our request to stay awake */
1415         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1416             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1417
1418         /* Stop the device, and put it in low power state */
1419         iwm_apm_stop(sc);
1420
1421         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1422          * Clean again the interrupt here
1423          */
1424         iwm_disable_interrupts(sc);
1425         /* stop and reset the on-board processor */
1426         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1427
1428         /*
1429          * Even if we stop the HW, we still want the RF kill
1430          * interrupt
1431          */
1432         iwm_enable_rfkill_int(sc);
1433         iwm_check_rfkill(sc);
1434 }
1435
1436 /* iwlwifi: mvm/ops.c */
1437 static void
1438 iwm_mvm_nic_config(struct iwm_softc *sc)
1439 {
1440         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1441         uint32_t reg_val = 0;
1442         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1443
1444         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1445             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1446         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1447             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1448         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1449             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1450
1451         /* SKU control */
1452         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1453             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1454         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1455             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1456
1457         /* radio configuration */
1458         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1459         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1460         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1461
1462         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1463
1464         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1465             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1466             radio_cfg_step, radio_cfg_dash);
1467
1468         /*
1469          * W/A : NIC is stuck in a reset state after Early PCIe power off
1470          * (PCIe power is lost before PERST# is asserted), causing ME FW
1471          * to lose ownership and not being able to obtain it back.
1472          */
1473         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1474                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1475                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1476                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1477         }
1478 }
1479
1480 static int
1481 iwm_nic_rx_init(struct iwm_softc *sc)
1482 {
1483         /*
1484          * Initialize RX ring.  This is from the iwn driver.
1485          */
1486         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1487
1488         /* Stop Rx DMA */
1489         iwm_pcie_rx_stop(sc);
1490
1491         if (!iwm_nic_lock(sc))
1492                 return EBUSY;
1493
1494         /* reset and flush pointers */
1495         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1496         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1497         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1498         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1499
1500         /* Set physical address of RX ring (256-byte aligned). */
1501         IWM_WRITE(sc,
1502             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1503
1504         /* Set physical address of RX status (16-byte aligned). */
1505         IWM_WRITE(sc,
1506             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1507
1508         /* Enable RX. */
1509         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1510             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1511             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1512             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1513             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1514             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1515             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1516             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1517
1518         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1519
1520         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1521         if (sc->cfg->host_interrupt_operation_mode)
1522                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1523
1524         /*
1525          * Thus sayeth el jefe (iwlwifi) via a comment:
1526          *
1527          * This value should initially be 0 (before preparing any
1528          * RBs), should be 8 after preparing the first 8 RBs (for example)
1529          */
1530         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1531
1532         iwm_nic_unlock(sc);
1533
1534         return 0;
1535 }
1536
1537 static int
1538 iwm_nic_tx_init(struct iwm_softc *sc)
1539 {
1540         int qid;
1541
1542         if (!iwm_nic_lock(sc))
1543                 return EBUSY;
1544
1545         /* Deactivate TX scheduler. */
1546         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1547
1548         /* Set physical address of "keep warm" page (16-byte aligned). */
1549         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1550
1551         /* Initialize TX rings. */
1552         for (qid = 0; qid < nitems(sc->txq); qid++) {
1553                 struct iwm_tx_ring *txq = &sc->txq[qid];
1554
1555                 /* Set physical address of TX ring (256-byte aligned). */
1556                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1557                     txq->desc_dma.paddr >> 8);
1558                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1559                     "%s: loading ring %d descriptors (%p) at %lx\n",
1560                     __func__,
1561                     qid, txq->desc,
1562                     (unsigned long) (txq->desc_dma.paddr >> 8));
1563         }
1564
1565         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1566
1567         iwm_nic_unlock(sc);
1568
1569         return 0;
1570 }
1571
1572 static int
1573 iwm_nic_init(struct iwm_softc *sc)
1574 {
1575         int error;
1576
1577         iwm_apm_init(sc);
1578         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1579                 iwm_set_pwr(sc);
1580
1581         iwm_mvm_nic_config(sc);
1582
1583         if ((error = iwm_nic_rx_init(sc)) != 0)
1584                 return error;
1585
1586         /*
1587          * Ditto for TX, from iwn
1588          */
1589         if ((error = iwm_nic_tx_init(sc)) != 0)
1590                 return error;
1591
1592         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1593             "%s: shadow registers enabled\n", __func__);
1594         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1595
1596         return 0;
1597 }
1598
1599 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1600         IWM_MVM_TX_FIFO_VO,
1601         IWM_MVM_TX_FIFO_VI,
1602         IWM_MVM_TX_FIFO_BE,
1603         IWM_MVM_TX_FIFO_BK,
1604 };
1605
1606 static int
1607 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1608 {
1609         if (!iwm_nic_lock(sc)) {
1610                 device_printf(sc->sc_dev,
1611                     "%s: cannot enable txq %d\n",
1612                     __func__,
1613                     qid);
1614                 return EBUSY;
1615         }
1616
1617         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1618
1619         if (qid == IWM_MVM_CMD_QUEUE) {
1620                 /* unactivate before configuration */
1621                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1622                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1623                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1624
1625                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1626
1627                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1628
1629                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1630                 /* Set scheduler window size and frame limit. */
1631                 iwm_write_mem32(sc,
1632                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1633                     sizeof(uint32_t),
1634                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1635                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1636                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1637                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1638
1639                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1640                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1641                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1642                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1643                     IWM_SCD_QUEUE_STTS_REG_MSK);
1644         } else {
1645                 struct iwm_scd_txq_cfg_cmd cmd;
1646                 int error;
1647
1648                 iwm_nic_unlock(sc);
1649
1650                 memset(&cmd, 0, sizeof(cmd));
1651                 cmd.scd_queue = qid;
1652                 cmd.enable = 1;
1653                 cmd.sta_id = sta_id;
1654                 cmd.tx_fifo = fifo;
1655                 cmd.aggregate = 0;
1656                 cmd.window = IWM_FRAME_LIMIT;
1657
1658                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1659                     sizeof(cmd), &cmd);
1660                 if (error) {
1661                         device_printf(sc->sc_dev,
1662                             "cannot enable txq %d\n", qid);
1663                         return error;
1664                 }
1665
1666                 if (!iwm_nic_lock(sc))
1667                         return EBUSY;
1668         }
1669
1670         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1671             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1672
1673         iwm_nic_unlock(sc);
1674
1675         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1676             __func__, qid, fifo);
1677
1678         return 0;
1679 }
1680
1681 static int
1682 iwm_post_alive(struct iwm_softc *sc)
1683 {
1684         int nwords;
1685         int error, chnl;
1686         uint32_t base;
1687
1688         if (!iwm_nic_lock(sc))
1689                 return EBUSY;
1690
1691         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1692         if (sc->sched_base != base) {
1693                 device_printf(sc->sc_dev,
1694                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1695                     __func__, sc->sched_base, base);
1696         }
1697
1698         iwm_ict_reset(sc);
1699
1700         /* Clear TX scheduler state in SRAM. */
1701         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1702             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1703             / sizeof(uint32_t);
1704         error = iwm_write_mem(sc,
1705             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1706             NULL, nwords);
1707         if (error)
1708                 goto out;
1709
1710         /* Set physical address of TX scheduler rings (1KB aligned). */
1711         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1712
1713         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1714
1715         iwm_nic_unlock(sc);
1716
1717         /* enable command channel */
1718         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1719         if (error)
1720                 return error;
1721
1722         if (!iwm_nic_lock(sc))
1723                 return EBUSY;
1724
1725         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1726
1727         /* Enable DMA channels. */
1728         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1729                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1730                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1731                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1732         }
1733
1734         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1735             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1736
1737         /* Enable L1-Active */
1738         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1739                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1740                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1741         }
1742
1743  out:
1744         iwm_nic_unlock(sc);
1745         return error;
1746 }
1747
1748 /*
1749  * NVM read access and content parsing.  We do not support
1750  * external NVM or writing NVM.
1751  * iwlwifi/mvm/nvm.c
1752  */
1753
1754 /* Default NVM size to read */
1755 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1756
1757 #define IWM_NVM_WRITE_OPCODE 1
1758 #define IWM_NVM_READ_OPCODE 0
1759
1760 /* load nvm chunk response */
1761 enum {
1762         IWM_READ_NVM_CHUNK_SUCCEED = 0,
1763         IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1764 };
1765
1766 static int
1767 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1768         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1769 {
1770         struct iwm_nvm_access_cmd nvm_access_cmd = {
1771                 .offset = htole16(offset),
1772                 .length = htole16(length),
1773                 .type = htole16(section),
1774                 .op_code = IWM_NVM_READ_OPCODE,
1775         };
1776         struct iwm_nvm_access_resp *nvm_resp;
1777         struct iwm_rx_packet *pkt;
1778         struct iwm_host_cmd cmd = {
1779                 .id = IWM_NVM_ACCESS_CMD,
1780                 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1781                 .data = { &nvm_access_cmd, },
1782         };
1783         int ret, bytes_read, offset_read;
1784         uint8_t *resp_data;
1785
1786         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1787
1788         ret = iwm_send_cmd(sc, &cmd);
1789         if (ret) {
1790                 device_printf(sc->sc_dev,
1791                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1792                 return ret;
1793         }
1794
1795         pkt = cmd.resp_pkt;
1796
1797         /* Extract NVM response */
1798         nvm_resp = (void *)pkt->data;
1799         ret = le16toh(nvm_resp->status);
1800         bytes_read = le16toh(nvm_resp->length);
1801         offset_read = le16toh(nvm_resp->offset);
1802         resp_data = nvm_resp->data;
1803         if (ret) {
1804                 if ((offset != 0) &&
1805                     (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1806                         /*
1807                          * meaning of NOT_VALID_ADDRESS:
1808                          * driver try to read chunk from address that is
1809                          * multiple of 2K and got an error since addr is empty.
1810                          * meaning of (offset != 0): driver already
1811                          * read valid data from another chunk so this case
1812                          * is not an error.
1813                          */
1814                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1815                                     "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1816                                     offset);
1817                         *len = 0;
1818                         ret = 0;
1819                 } else {
1820                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1821                                     "NVM access command failed with status %d\n", ret);
1822                         ret = EIO;
1823                 }
1824                 goto exit;
1825         }
1826
1827         if (offset_read != offset) {
1828                 device_printf(sc->sc_dev,
1829                     "NVM ACCESS response with invalid offset %d\n",
1830                     offset_read);
1831                 ret = EINVAL;
1832                 goto exit;
1833         }
1834
1835         if (bytes_read > length) {
1836                 device_printf(sc->sc_dev,
1837                     "NVM ACCESS response with too much data "
1838                     "(%d bytes requested, %d bytes received)\n",
1839                     length, bytes_read);
1840                 ret = EINVAL;
1841                 goto exit;
1842         }
1843
1844         /* Write data to NVM */
1845         memcpy(data + offset, resp_data, bytes_read);
1846         *len = bytes_read;
1847
1848  exit:
1849         iwm_free_resp(sc, &cmd);
1850         return ret;
1851 }
1852
1853 /*
1854  * Reads an NVM section completely.
1855  * NICs prior to 7000 family don't have a real NVM, but just read
1856  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1857  * by uCode, we need to manually check in this case that we don't
1858  * overflow and try to read more than the EEPROM size.
1859  * For 7000 family NICs, we supply the maximal size we can read, and
1860  * the uCode fills the response with as much data as we can,
1861  * without overflowing, so no check is needed.
1862  */
1863 static int
1864 iwm_nvm_read_section(struct iwm_softc *sc,
1865         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1866 {
1867         uint16_t seglen, length, offset = 0;
1868         int ret;
1869
1870         /* Set nvm section read length */
1871         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1872
1873         seglen = length;
1874
1875         /* Read the NVM until exhausted (reading less than requested) */
1876         while (seglen == length) {
1877                 /* Check no memory assumptions fail and cause an overflow */
1878                 if ((size_read + offset + length) >
1879                     sc->cfg->eeprom_size) {
1880                         device_printf(sc->sc_dev,
1881                             "EEPROM size is too small for NVM\n");
1882                         return ENOBUFS;
1883                 }
1884
1885                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1886                 if (ret) {
1887                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1888                                     "Cannot read NVM from section %d offset %d, length %d\n",
1889                                     section, offset, length);
1890                         return ret;
1891                 }
1892                 offset += seglen;
1893         }
1894
1895         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1896                     "NVM section %d read completed\n", section);
1897         *len = offset;
1898         return 0;
1899 }
1900
1901 /*
1902  * BEGIN IWM_NVM_PARSE
1903  */
1904
1905 /* iwlwifi/iwl-nvm-parse.c */
1906
1907 /* NVM offsets (in words) definitions */
1908 enum iwm_nvm_offsets {
1909         /* NVM HW-Section offset (in words) definitions */
1910         IWM_HW_ADDR = 0x15,
1911
1912 /* NVM SW-Section offset (in words) definitions */
1913         IWM_NVM_SW_SECTION = 0x1C0,
1914         IWM_NVM_VERSION = 0,
1915         IWM_RADIO_CFG = 1,
1916         IWM_SKU = 2,
1917         IWM_N_HW_ADDRS = 3,
1918         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1919
1920 /* NVM calibration section offset (in words) definitions */
1921         IWM_NVM_CALIB_SECTION = 0x2B8,
1922         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1923 };
1924
1925 enum iwm_8000_nvm_offsets {
1926         /* NVM HW-Section offset (in words) definitions */
1927         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1928         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1929         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1930         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1931         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1932
1933         /* NVM SW-Section offset (in words) definitions */
1934         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1935         IWM_NVM_VERSION_8000 = 0,
1936         IWM_RADIO_CFG_8000 = 0,
1937         IWM_SKU_8000 = 2,
1938         IWM_N_HW_ADDRS_8000 = 3,
1939
1940         /* NVM REGULATORY -Section offset (in words) definitions */
1941         IWM_NVM_CHANNELS_8000 = 0,
1942         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1943         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1944         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1945
1946         /* NVM calibration section offset (in words) definitions */
1947         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1948         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1949 };
1950
1951 /* SKU Capabilities (actual values from NVM definition) */
1952 enum nvm_sku_bits {
1953         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1954         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1955         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1956         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1957 };
1958
1959 /* radio config bits (actual values from NVM definition) */
1960 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1961 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1962 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1963 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1964 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1965 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1966
1967 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1968 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1969 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1970 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1971 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1972 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1973
1974 #define DEFAULT_MAX_TX_POWER 16
1975
1976 /**
1977  * enum iwm_nvm_channel_flags - channel flags in NVM
1978  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1979  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1980  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1981  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1982  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1983  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1984  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1985  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1986  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1987  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1988  */
1989 enum iwm_nvm_channel_flags {
1990         IWM_NVM_CHANNEL_VALID = (1 << 0),
1991         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1992         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1993         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1994         IWM_NVM_CHANNEL_DFS = (1 << 7),
1995         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1996         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1997         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1998         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1999 };
2000
2001 /*
2002  * Translate EEPROM flags to net80211.
2003  */
2004 static uint32_t
2005 iwm_eeprom_channel_flags(uint16_t ch_flags)
2006 {
2007         uint32_t nflags;
2008
2009         nflags = 0;
2010         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2011                 nflags |= IEEE80211_CHAN_PASSIVE;
2012         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2013                 nflags |= IEEE80211_CHAN_NOADHOC;
2014         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2015                 nflags |= IEEE80211_CHAN_DFS;
2016                 /* Just in case. */
2017                 nflags |= IEEE80211_CHAN_NOADHOC;
2018         }
2019
2020         return (nflags);
2021 }
2022
2023 static void
2024 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2025     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2026     const uint8_t bands[])
2027 {
2028         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2029         uint32_t nflags;
2030         uint16_t ch_flags;
2031         uint8_t ieee;
2032         int error;
2033
2034         for (; ch_idx < ch_num; ch_idx++) {
2035                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2036                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2037                         ieee = iwm_nvm_channels[ch_idx];
2038                 else
2039                         ieee = iwm_nvm_channels_8000[ch_idx];
2040
2041                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2042                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2043                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2044                             ieee, ch_flags,
2045                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2046                             "5.2" : "2.4");
2047                         continue;
2048                 }
2049
2050                 nflags = iwm_eeprom_channel_flags(ch_flags);
2051                 error = ieee80211_add_channel(chans, maxchans, nchans,
2052                     ieee, 0, 0, nflags, bands);
2053                 if (error != 0)
2054                         break;
2055
2056                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2057                     "Ch. %d Flags %x [%sGHz] - Added\n",
2058                     ieee, ch_flags,
2059                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2060                     "5.2" : "2.4");
2061         }
2062 }
2063
2064 static void
2065 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2066     struct ieee80211_channel chans[])
2067 {
2068         struct iwm_softc *sc = ic->ic_softc;
2069         struct iwm_nvm_data *data = sc->nvm_data;
2070         uint8_t bands[IEEE80211_MODE_BYTES];
2071         size_t ch_num;
2072
2073         memset(bands, 0, sizeof(bands));
2074         /* 1-13: 11b/g channels. */
2075         setbit(bands, IEEE80211_MODE_11B);
2076         setbit(bands, IEEE80211_MODE_11G);
2077         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2078             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2079
2080         /* 14: 11b channel only. */
2081         clrbit(bands, IEEE80211_MODE_11G);
2082         iwm_add_channel_band(sc, chans, maxchans, nchans,
2083             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2084
2085         if (data->sku_cap_band_52GHz_enable) {
2086                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2087                         ch_num = nitems(iwm_nvm_channels);
2088                 else
2089                         ch_num = nitems(iwm_nvm_channels_8000);
2090                 memset(bands, 0, sizeof(bands));
2091                 setbit(bands, IEEE80211_MODE_11A);
2092                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2093                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2094         }
2095 }
2096
2097 static void
2098 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2099         const uint16_t *mac_override, const uint16_t *nvm_hw)
2100 {
2101         const uint8_t *hw_addr;
2102
2103         if (mac_override) {
2104                 static const uint8_t reserved_mac[] = {
2105                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2106                 };
2107
2108                 hw_addr = (const uint8_t *)(mac_override +
2109                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2110
2111                 /*
2112                  * Store the MAC address from MAO section.
2113                  * No byte swapping is required in MAO section
2114                  */
2115                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2116
2117                 /*
2118                  * Force the use of the OTP MAC address in case of reserved MAC
2119                  * address in the NVM, or if address is given but invalid.
2120                  */
2121                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2122                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2123                     iwm_is_valid_ether_addr(data->hw_addr) &&
2124                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2125                         return;
2126
2127                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2128                     "%s: mac address from nvm override section invalid\n",
2129                     __func__);
2130         }
2131
2132         if (nvm_hw) {
2133                 /* read the mac address from WFMP registers */
2134                 uint32_t mac_addr0 =
2135                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2136                 uint32_t mac_addr1 =
2137                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2138
2139                 hw_addr = (const uint8_t *)&mac_addr0;
2140                 data->hw_addr[0] = hw_addr[3];
2141                 data->hw_addr[1] = hw_addr[2];
2142                 data->hw_addr[2] = hw_addr[1];
2143                 data->hw_addr[3] = hw_addr[0];
2144
2145                 hw_addr = (const uint8_t *)&mac_addr1;
2146                 data->hw_addr[4] = hw_addr[1];
2147                 data->hw_addr[5] = hw_addr[0];
2148
2149                 return;
2150         }
2151
2152         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2153         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2154 }
2155
2156 static int
2157 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2158             const uint16_t *phy_sku)
2159 {
2160         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2161                 return le16_to_cpup(nvm_sw + IWM_SKU);
2162
2163         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2164 }
2165
2166 static int
2167 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2168 {
2169         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2170                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2171         else
2172                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2173                                                 IWM_NVM_VERSION_8000));
2174 }
2175
2176 static int
2177 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2178                   const uint16_t *phy_sku)
2179 {
2180         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2181                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2182
2183         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2184 }
2185
2186 static int
2187 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2188 {
2189         int n_hw_addr;
2190
2191         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2192                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2193
2194         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2195
2196         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2197 }
2198
2199 static void
2200 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2201                   uint32_t radio_cfg)
2202 {
2203         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2204                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2205                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2206                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2207                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2208                 return;
2209         }
2210
2211         /* set the radio configuration for family 8000 */
2212         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2213         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2214         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2215         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2216         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2217         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2218 }
2219
2220 static int
2221 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2222                    const uint16_t *nvm_hw, const uint16_t *mac_override)
2223 {
2224 #ifdef notyet /* for FAMILY 9000 */
2225         if (cfg->mac_addr_from_csr) {
2226                 iwm_set_hw_address_from_csr(sc, data);
2227         } else
2228 #endif
2229         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2230                 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2231
2232                 /* The byte order is little endian 16 bit, meaning 214365 */
2233                 data->hw_addr[0] = hw_addr[1];
2234                 data->hw_addr[1] = hw_addr[0];
2235                 data->hw_addr[2] = hw_addr[3];
2236                 data->hw_addr[3] = hw_addr[2];
2237                 data->hw_addr[4] = hw_addr[5];
2238                 data->hw_addr[5] = hw_addr[4];
2239         } else {
2240                 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2241         }
2242
2243         if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2244                 device_printf(sc->sc_dev, "no valid mac address was found\n");
2245                 return EINVAL;
2246         }
2247
2248         return 0;
2249 }
2250
2251 static struct iwm_nvm_data *
2252 iwm_parse_nvm_data(struct iwm_softc *sc,
2253                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2254                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2255                    const uint16_t *phy_sku, const uint16_t *regulatory)
2256 {
2257         struct iwm_nvm_data *data;
2258         uint32_t sku, radio_cfg;
2259
2260         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2261                 data = malloc(sizeof(*data) +
2262                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2263                     M_DEVBUF, M_NOWAIT | M_ZERO);
2264         } else {
2265                 data = malloc(sizeof(*data) +
2266                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2267                     M_DEVBUF, M_NOWAIT | M_ZERO);
2268         }
2269         if (!data)
2270                 return NULL;
2271
2272         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2273
2274         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2275         iwm_set_radio_cfg(sc, data, radio_cfg);
2276
2277         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2278         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2279         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2280         data->sku_cap_11n_enable = 0;
2281
2282         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2283
2284         /* If no valid mac address was found - bail out */
2285         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2286                 free(data, M_DEVBUF);
2287                 return NULL;
2288         }
2289
2290         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2291                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2292                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2293         } else {
2294                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2295                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2296         }
2297
2298         return data;
2299 }
2300
2301 static void
2302 iwm_free_nvm_data(struct iwm_nvm_data *data)
2303 {
2304         if (data != NULL)
2305                 free(data, M_DEVBUF);
2306 }
2307
2308 static struct iwm_nvm_data *
2309 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2310 {
2311         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2312
2313         /* Checking for required sections */
2314         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2315                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2316                     !sections[sc->cfg->nvm_hw_section_num].data) {
2317                         device_printf(sc->sc_dev,
2318                             "Can't parse empty OTP/NVM sections\n");
2319                         return NULL;
2320                 }
2321         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2322                 /* SW and REGULATORY sections are mandatory */
2323                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2324                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2325                         device_printf(sc->sc_dev,
2326                             "Can't parse empty OTP/NVM sections\n");
2327                         return NULL;
2328                 }
2329                 /* MAC_OVERRIDE or at least HW section must exist */
2330                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2331                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2332                         device_printf(sc->sc_dev,
2333                             "Can't parse mac_address, empty sections\n");
2334                         return NULL;
2335                 }
2336
2337                 /* PHY_SKU section is mandatory in B0 */
2338                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2339                         device_printf(sc->sc_dev,
2340                             "Can't parse phy_sku in B0, empty sections\n");
2341                         return NULL;
2342                 }
2343         } else {
2344                 panic("unknown device family %d\n", sc->cfg->device_family);
2345         }
2346
2347         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2348         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2349         calib = (const uint16_t *)
2350             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2351         regulatory = (const uint16_t *)
2352             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2353         mac_override = (const uint16_t *)
2354             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2355         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2356
2357         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2358             phy_sku, regulatory);
2359 }
2360
2361 static int
2362 iwm_nvm_init(struct iwm_softc *sc)
2363 {
2364         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2365         int i, ret, section;
2366         uint32_t size_read = 0;
2367         uint8_t *nvm_buffer, *temp;
2368         uint16_t len;
2369
2370         memset(nvm_sections, 0, sizeof(nvm_sections));
2371
2372         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2373                 return EINVAL;
2374
2375         /* load NVM values from nic */
2376         /* Read From FW NVM */
2377         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2378
2379         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2380         if (!nvm_buffer)
2381                 return ENOMEM;
2382         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2383                 /* we override the constness for initial read */
2384                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2385                                            &len, size_read);
2386                 if (ret)
2387                         continue;
2388                 size_read += len;
2389                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2390                 if (!temp) {
2391                         ret = ENOMEM;
2392                         break;
2393                 }
2394                 memcpy(temp, nvm_buffer, len);
2395
2396                 nvm_sections[section].data = temp;
2397                 nvm_sections[section].length = len;
2398         }
2399         if (!size_read)
2400                 device_printf(sc->sc_dev, "OTP is blank\n");
2401         free(nvm_buffer, M_DEVBUF);
2402
2403         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2404         if (!sc->nvm_data)
2405                 return EINVAL;
2406         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2407                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2408
2409         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2410                 if (nvm_sections[i].data != NULL)
2411                         free(nvm_sections[i].data, M_DEVBUF);
2412         }
2413
2414         return 0;
2415 }
2416
2417 /*
2418  * Firmware loading gunk.  This is kind of a weird hybrid between the
2419  * iwn driver and the Linux iwlwifi driver.
2420  */
2421
2422 static int
2423 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2424         const uint8_t *section, uint32_t byte_cnt)
2425 {
2426         int error = EINVAL;
2427         uint32_t chunk_sz, offset;
2428
2429         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2430
2431         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2432                 uint32_t addr, len;
2433                 const uint8_t *data;
2434
2435                 addr = dst_addr + offset;
2436                 len = MIN(chunk_sz, byte_cnt - offset);
2437                 data = section + offset;
2438
2439                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2440                 if (error)
2441                         break;
2442         }
2443
2444         return error;
2445 }
2446
2447 static int
2448 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2449         const uint8_t *chunk, uint32_t byte_cnt)
2450 {
2451         struct iwm_dma_info *dma = &sc->fw_dma;
2452         int error;
2453
2454         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2455         memcpy(dma->vaddr, chunk, byte_cnt);
2456         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2457
2458         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2459             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2460                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2461                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2462         }
2463
2464         sc->sc_fw_chunk_done = 0;
2465
2466         if (!iwm_nic_lock(sc))
2467                 return EBUSY;
2468
2469         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2470             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2471         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2472             dst_addr);
2473         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2474             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2475         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2476             (iwm_get_dma_hi_addr(dma->paddr)
2477               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2478         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2479             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2480             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2481             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2482         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2483             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2484             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2485             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2486
2487         iwm_nic_unlock(sc);
2488
2489         /* wait 1s for this segment to load */
2490         while (!sc->sc_fw_chunk_done)
2491                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2492                         break;
2493
2494         if (!sc->sc_fw_chunk_done) {
2495                 device_printf(sc->sc_dev,
2496                     "fw chunk addr 0x%x len %d failed to load\n",
2497                     dst_addr, byte_cnt);
2498         }
2499
2500         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2501             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2502                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2503                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2504                 iwm_nic_unlock(sc);
2505         }
2506
2507         return error;
2508 }
2509
2510 int
2511 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2512     int cpu, int *first_ucode_section)
2513 {
2514         int shift_param;
2515         int i, error = 0, sec_num = 0x1;
2516         uint32_t val, last_read_idx = 0;
2517         const void *data;
2518         uint32_t dlen;
2519         uint32_t offset;
2520
2521         if (cpu == 1) {
2522                 shift_param = 0;
2523                 *first_ucode_section = 0;
2524         } else {
2525                 shift_param = 16;
2526                 (*first_ucode_section)++;
2527         }
2528
2529         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2530                 last_read_idx = i;
2531                 data = fws->fw_sect[i].fws_data;
2532                 dlen = fws->fw_sect[i].fws_len;
2533                 offset = fws->fw_sect[i].fws_devoff;
2534
2535                 /*
2536                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2537                  * CPU1 to CPU2.
2538                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2539                  * CPU2 non paged to CPU2 paging sec.
2540                  */
2541                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2542                     offset == IWM_PAGING_SEPARATOR_SECTION)
2543                         break;
2544
2545                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2546                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2547                     i, offset, dlen, cpu);
2548
2549                 if (dlen > sc->sc_fwdmasegsz) {
2550                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2551                             "chunk %d too large (%d bytes)\n", i, dlen);
2552                         error = EFBIG;
2553                 } else {
2554                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2555                 }
2556                 if (error) {
2557                         device_printf(sc->sc_dev,
2558                             "could not load firmware chunk %d (error %d)\n",
2559                             i, error);
2560                         return error;
2561                 }
2562
2563                 /* Notify the ucode of the loaded section number and status */
2564                 if (iwm_nic_lock(sc)) {
2565                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2566                         val = val | (sec_num << shift_param);
2567                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2568                         sec_num = (sec_num << 1) | 0x1;
2569                         iwm_nic_unlock(sc);
2570
2571                         /*
2572                          * The firmware won't load correctly without this delay.
2573                          */
2574                         DELAY(8000);
2575                 }
2576         }
2577
2578         *first_ucode_section = last_read_idx;
2579
2580         if (iwm_nic_lock(sc)) {
2581                 if (cpu == 1)
2582                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2583                 else
2584                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2585                 iwm_nic_unlock(sc);
2586         }
2587
2588         return 0;
2589 }
2590
2591 int
2592 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2593 {
2594         struct iwm_fw_sects *fws;
2595         int error = 0;
2596         int first_ucode_section;
2597
2598         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2599             ucode_type);
2600
2601         fws = &sc->sc_fw.fw_sects[ucode_type];
2602
2603         /* configure the ucode to be ready to get the secured image */
2604         /* release CPU reset */
2605         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2606
2607         /* load to FW the binary Secured sections of CPU1 */
2608         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2609         if (error)
2610                 return error;
2611
2612         /* load to FW the binary sections of CPU2 */
2613         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2614 }
2615
2616 static int
2617 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2618 {
2619         struct iwm_fw_sects *fws;
2620         int error, i;
2621         const void *data;
2622         uint32_t dlen;
2623         uint32_t offset;
2624
2625         sc->sc_uc.uc_intr = 0;
2626
2627         fws = &sc->sc_fw.fw_sects[ucode_type];
2628         for (i = 0; i < fws->fw_count; i++) {
2629                 data = fws->fw_sect[i].fws_data;
2630                 dlen = fws->fw_sect[i].fws_len;
2631                 offset = fws->fw_sect[i].fws_devoff;
2632                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2633                     "LOAD FIRMWARE type %d offset %u len %d\n",
2634                     ucode_type, offset, dlen);
2635                 if (dlen > sc->sc_fwdmasegsz) {
2636                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2637                             "chunk %d too large (%d bytes)\n", i, dlen);
2638                         error = EFBIG;
2639                 } else {
2640                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2641                 }
2642                 if (error) {
2643                         device_printf(sc->sc_dev,
2644                             "could not load firmware chunk %u of %u "
2645                             "(error=%d)\n", i, fws->fw_count, error);
2646                         return error;
2647                 }
2648         }
2649
2650         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2651
2652         return 0;
2653 }
2654
2655 static int
2656 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2657 {
2658         int error, w;
2659
2660         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2661                 error = iwm_load_firmware_8000(sc, ucode_type);
2662         else
2663                 error = iwm_load_firmware_7000(sc, ucode_type);
2664         if (error)
2665                 return error;
2666
2667         /* wait for the firmware to load */
2668         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2669                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2670         }
2671         if (error || !sc->sc_uc.uc_ok) {
2672                 device_printf(sc->sc_dev, "could not load firmware\n");
2673                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2674                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2675                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2676                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2677                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2678                 }
2679         }
2680
2681         return error;
2682 }
2683
2684 /* iwlwifi: pcie/trans.c */
2685 static int
2686 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2687 {
2688         int error;
2689
2690         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2691
2692         if ((error = iwm_nic_init(sc)) != 0) {
2693                 device_printf(sc->sc_dev, "unable to init nic\n");
2694                 return error;
2695         }
2696
2697         /* make sure rfkill handshake bits are cleared */
2698         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2699         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2700             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2701
2702         /* clear (again), then enable host interrupts */
2703         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2704         iwm_enable_interrupts(sc);
2705
2706         /* really make sure rfkill handshake bits are cleared */
2707         /* maybe we should write a few times more?  just to make sure */
2708         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2709         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2710
2711         /* Load the given image to the HW */
2712         return iwm_load_firmware(sc, ucode_type);
2713 }
2714
2715 static int
2716 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2717 {
2718         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2719                 .valid = htole32(valid_tx_ant),
2720         };
2721
2722         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2723             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2724 }
2725
2726 /* iwlwifi: mvm/fw.c */
2727 static int
2728 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2729 {
2730         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2731         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2732
2733         /* Set parameters */
2734         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2735         phy_cfg_cmd.calib_control.event_trigger =
2736             sc->sc_default_calib[ucode_type].event_trigger;
2737         phy_cfg_cmd.calib_control.flow_trigger =
2738             sc->sc_default_calib[ucode_type].flow_trigger;
2739
2740         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2741             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2742         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2743             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2744 }
2745
2746 static int
2747 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2748         struct iwm_rx_packet *pkt, void *data)
2749 {
2750         struct iwm_phy_db *phy_db = data;
2751
2752         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2753                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2754                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2755                             __func__, pkt->hdr.code);
2756                 }
2757                 return TRUE;
2758         }
2759
2760         if (iwm_phy_db_set_section(phy_db, pkt)) {
2761                 device_printf(sc->sc_dev,
2762                     "%s: iwm_phy_db_set_section failed\n", __func__);
2763         }
2764
2765         return FALSE;
2766 }
2767
2768 static int
2769 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2770         enum iwm_ucode_type ucode_type)
2771 {
2772         enum iwm_ucode_type old_type = sc->sc_uc_current;
2773         int error;
2774
2775         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2776                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2777                         error);
2778                 return error;
2779         }
2780
2781         sc->sc_uc_current = ucode_type;
2782         error = iwm_start_fw(sc, ucode_type);
2783         if (error) {
2784                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2785                 sc->sc_uc_current = old_type;
2786                 return error;
2787         }
2788
2789         error = iwm_post_alive(sc);
2790         if (error) {
2791                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2792         }
2793         return error;
2794 }
2795
2796 /*
2797  * mvm misc bits
2798  */
2799
2800 /*
2801  * follows iwlwifi/fw.c
2802  */
2803 static int
2804 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2805 {
2806         struct iwm_notification_wait calib_wait;
2807         static const uint16_t init_complete[] = {
2808                 IWM_INIT_COMPLETE_NOTIF,
2809                 IWM_CALIB_RES_NOTIF_PHY_DB
2810         };
2811         int ret;
2812
2813         /* do not operate with rfkill switch turned on */
2814         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2815                 device_printf(sc->sc_dev,
2816                     "radio is disabled by hardware switch\n");
2817                 return EPERM;
2818         }
2819
2820         iwm_init_notification_wait(sc->sc_notif_wait,
2821                                    &calib_wait,
2822                                    init_complete,
2823                                    nitems(init_complete),
2824                                    iwm_wait_phy_db_entry,
2825                                    sc->sc_phy_db);
2826
2827         /* Will also start the device */
2828         ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2829         if (ret) {
2830                 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2831                     ret);
2832                 goto error;
2833         }
2834
2835         if (justnvm) {
2836                 /* Read nvm */
2837                 ret = iwm_nvm_init(sc);
2838                 if (ret) {
2839                         device_printf(sc->sc_dev, "failed to read nvm\n");
2840                         goto error;
2841                 }
2842                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2843                 goto error;
2844         }
2845
2846         ret = iwm_send_bt_init_conf(sc);
2847         if (ret) {
2848                 device_printf(sc->sc_dev,
2849                     "failed to send bt coex configuration: %d\n", ret);
2850                 goto error;
2851         }
2852
2853         /* Init Smart FIFO. */
2854         ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2855         if (ret)
2856                 goto error;
2857
2858         /* Send TX valid antennas before triggering calibrations */
2859         ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2860         if (ret) {
2861                 device_printf(sc->sc_dev,
2862                     "failed to send antennas before calibration: %d\n", ret);
2863                 goto error;
2864         }
2865
2866         /*
2867          * Send phy configurations command to init uCode
2868          * to start the 16.0 uCode init image internal calibrations.
2869          */
2870         ret = iwm_send_phy_cfg_cmd(sc);
2871         if (ret) {
2872                 device_printf(sc->sc_dev,
2873                     "%s: Failed to run INIT calibrations: %d\n",
2874                     __func__, ret);
2875                 goto error;
2876         }
2877
2878         /*
2879          * Nothing to do but wait for the init complete notification
2880          * from the firmware.
2881          */
2882         IWM_UNLOCK(sc);
2883         ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2884             IWM_MVM_UCODE_CALIB_TIMEOUT);
2885         IWM_LOCK(sc);
2886
2887
2888         goto out;
2889
2890 error:
2891         iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2892 out:
2893         return ret;
2894 }
2895
2896 /*
2897  * receive side
2898  */
2899
2900 /* (re)stock rx ring, called at init-time and at runtime */
2901 static int
2902 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2903 {
2904         struct iwm_rx_ring *ring = &sc->rxq;
2905         struct iwm_rx_data *data = &ring->data[idx];
2906         struct mbuf *m;
2907         bus_dmamap_t dmamap = NULL;
2908         bus_dma_segment_t seg;
2909         int nsegs, error;
2910
2911         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2912         if (m == NULL)
2913                 return ENOBUFS;
2914
2915         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2916         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2917             &seg, &nsegs, BUS_DMA_NOWAIT);
2918         if (error != 0) {
2919                 device_printf(sc->sc_dev,
2920                     "%s: can't map mbuf, error %d\n", __func__, error);
2921                 goto fail;
2922         }
2923
2924         if (data->m != NULL)
2925                 bus_dmamap_unload(ring->data_dmat, data->map);
2926
2927         /* Swap ring->spare_map with data->map */
2928         dmamap = data->map;
2929         data->map = ring->spare_map;
2930         ring->spare_map = dmamap;
2931
2932         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2933         data->m = m;
2934
2935         /* Update RX descriptor. */
2936         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2937         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2938         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2939             BUS_DMASYNC_PREWRITE);
2940
2941         return 0;
2942 fail:
2943         m_freem(m);
2944         return error;
2945 }
2946
2947 /* iwlwifi: mvm/rx.c */
2948 #define IWM_RSSI_OFFSET 50
2949 static int
2950 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2951 {
2952         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2953         uint32_t agc_a, agc_b;
2954         uint32_t val;
2955
2956         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2957         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2958         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2959
2960         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2961         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2962         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2963
2964         /*
2965          * dBm = rssi dB - agc dB - constant.
2966          * Higher AGC (higher radio gain) means lower signal.
2967          */
2968         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2969         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2970         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2971
2972         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2973             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2974             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2975
2976         return max_rssi_dbm;
2977 }
2978
2979 /* iwlwifi: mvm/rx.c */
2980 /*
2981  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2982  * values are reported by the fw as positive values - need to negate
2983  * to obtain their dBM.  Account for missing antennas by replacing 0
2984  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2985  */
2986 static int
2987 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2988 {
2989         int energy_a, energy_b, energy_c, max_energy;
2990         uint32_t val;
2991
2992         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2993         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2994             IWM_RX_INFO_ENERGY_ANT_A_POS;
2995         energy_a = energy_a ? -energy_a : -256;
2996         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2997             IWM_RX_INFO_ENERGY_ANT_B_POS;
2998         energy_b = energy_b ? -energy_b : -256;
2999         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3000             IWM_RX_INFO_ENERGY_ANT_C_POS;
3001         energy_c = energy_c ? -energy_c : -256;
3002         max_energy = MAX(energy_a, energy_b);
3003         max_energy = MAX(max_energy, energy_c);
3004
3005         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3006             "energy In A %d B %d C %d , and max %d\n",
3007             energy_a, energy_b, energy_c, max_energy);
3008
3009         return max_energy;
3010 }
3011
3012 static void
3013 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3014         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3015 {
3016         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3017
3018         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3019         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3020
3021         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3022 }
3023
3024 /*
3025  * Retrieve the average noise (in dBm) among receivers.
3026  */
3027 static int
3028 iwm_get_noise(struct iwm_softc *sc,
3029     const struct iwm_mvm_statistics_rx_non_phy *stats)
3030 {
3031         int i, total, nbant, noise;
3032
3033         total = nbant = noise = 0;
3034         for (i = 0; i < 3; i++) {
3035                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3036                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3037                     __func__,
3038                     i,
3039                     noise);
3040
3041                 if (noise) {
3042                         total += noise;
3043                         nbant++;
3044                 }
3045         }
3046
3047         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3048             __func__, nbant, total);
3049 #if 0
3050         /* There should be at least one antenna but check anyway. */
3051         return (nbant == 0) ? -127 : (total / nbant) - 107;
3052 #else
3053         /* For now, just hard-code it to -96 to be safe */
3054         return (-96);
3055 #endif
3056 }
3057
3058 /*
3059  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3060  *
3061  * Handles the actual data of the Rx packet from the fw
3062  */
3063 static void
3064 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3065         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3066 {
3067         struct ieee80211com *ic = &sc->sc_ic;
3068         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3069         struct ieee80211_frame *wh;
3070         struct ieee80211_node *ni;
3071         struct ieee80211_rx_stats rxs;
3072         struct mbuf *m;
3073         struct iwm_rx_phy_info *phy_info;
3074         struct iwm_rx_mpdu_res_start *rx_res;
3075         uint32_t len;
3076         uint32_t rx_pkt_status;
3077         int rssi;
3078
3079         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3080
3081         phy_info = &sc->sc_last_phy_info;
3082         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3083         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3084         len = le16toh(rx_res->byte_count);
3085         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3086
3087         m = data->m;
3088         m->m_data = pkt->data + sizeof(*rx_res);
3089         m->m_pkthdr.len = m->m_len = len;
3090
3091         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3092                 device_printf(sc->sc_dev,
3093                     "dsp size out of range [0,20]: %d\n",
3094                     phy_info->cfg_phy_cnt);
3095                 goto fail;
3096         }
3097
3098         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3099             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3100                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3101                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3102                 goto fail;
3103         }
3104
3105         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3106                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3107         } else {
3108                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3109         }
3110
3111         /* Note: RSSI is absolute (ie a -ve value) */
3112         if (rssi < IWM_MIN_DBM)
3113                 rssi = IWM_MIN_DBM;
3114         else if (rssi > IWM_MAX_DBM)
3115                 rssi = IWM_MAX_DBM;
3116
3117         /* Map it to relative value */
3118         rssi = rssi - sc->sc_noise;
3119
3120         /* replenish ring for the buffer we're going to feed to the sharks */
3121         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3122                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3123                     __func__);
3124                 goto fail;
3125         }
3126
3127         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3128             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3129
3130         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3131
3132         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3133             "%s: phy_info: channel=%d, flags=0x%08x\n",
3134             __func__,
3135             le16toh(phy_info->channel),
3136             le16toh(phy_info->phy_flags));
3137
3138         /*
3139          * Populate an RX state struct with the provided information.
3140          */
3141         bzero(&rxs, sizeof(rxs));
3142         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3143         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3144         rxs.c_ieee = le16toh(phy_info->channel);
3145         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3146                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3147         } else {
3148                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3149         }
3150
3151         /* rssi is in 1/2db units */
3152         rxs.c_rssi = rssi * 2;
3153         rxs.c_nf = sc->sc_noise;
3154         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3155                 if (ni)
3156                         ieee80211_free_node(ni);
3157                 goto fail;
3158         }
3159
3160         if (ieee80211_radiotap_active_vap(vap)) {
3161                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3162
3163                 tap->wr_flags = 0;
3164                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3165                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3166                 tap->wr_chan_freq = htole16(rxs.c_freq);
3167                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3168                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3169                 tap->wr_dbm_antsignal = (int8_t)rssi;
3170                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3171                 tap->wr_tsft = phy_info->system_timestamp;
3172                 switch (phy_info->rate) {
3173                 /* CCK rates. */
3174                 case  10: tap->wr_rate =   2; break;
3175                 case  20: tap->wr_rate =   4; break;
3176                 case  55: tap->wr_rate =  11; break;
3177                 case 110: tap->wr_rate =  22; break;
3178                 /* OFDM rates. */
3179                 case 0xd: tap->wr_rate =  12; break;
3180                 case 0xf: tap->wr_rate =  18; break;
3181                 case 0x5: tap->wr_rate =  24; break;
3182                 case 0x7: tap->wr_rate =  36; break;
3183                 case 0x9: tap->wr_rate =  48; break;
3184                 case 0xb: tap->wr_rate =  72; break;
3185                 case 0x1: tap->wr_rate =  96; break;
3186                 case 0x3: tap->wr_rate = 108; break;
3187                 /* Unknown rate: should not happen. */
3188                 default:  tap->wr_rate =   0;
3189                 }
3190         }
3191
3192         IWM_UNLOCK(sc);
3193         if (ni != NULL) {
3194                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3195                 ieee80211_input_mimo(ni, m);
3196                 ieee80211_free_node(ni);
3197         } else {
3198                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3199                 ieee80211_input_mimo_all(ic, m);
3200         }
3201         IWM_LOCK(sc);
3202
3203         return;
3204
3205 fail:
3206         counter_u64_add(ic->ic_ierrors, 1);
3207 }
3208
3209 static int
3210 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3211         struct iwm_node *in)
3212 {
3213         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3214         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3215         struct ieee80211_node *ni = &in->in_ni;
3216         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3217
3218         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3219
3220         /* Update rate control statistics. */
3221         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3222             __func__,
3223             (int) le16toh(tx_resp->status.status),
3224             (int) le16toh(tx_resp->status.sequence),
3225             tx_resp->frame_count,
3226             tx_resp->bt_kill_count,
3227             tx_resp->failure_rts,
3228             tx_resp->failure_frame,
3229             le32toh(tx_resp->initial_rate),
3230             (int) le16toh(tx_resp->wireless_media_time));
3231
3232         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3233                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3234         txs->short_retries = tx_resp->failure_rts;
3235         txs->long_retries = tx_resp->failure_frame;
3236         if (status != IWM_TX_STATUS_SUCCESS &&
3237             status != IWM_TX_STATUS_DIRECT_DONE) {
3238                 switch (status) {
3239                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3240                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3241                         break;
3242                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3243                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3244                         break;
3245                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3246                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3247                         break;
3248                 default:
3249                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3250                         break;
3251                 }
3252         } else {
3253                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3254         }
3255         ieee80211_ratectl_tx_complete(ni, txs);
3256
3257         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3258 }
3259
3260 static void
3261 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3262         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3263 {
3264         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3265         int idx = cmd_hdr->idx;
3266         int qid = cmd_hdr->qid;
3267         struct iwm_tx_ring *ring = &sc->txq[qid];
3268         struct iwm_tx_data *txd = &ring->data[idx];
3269         struct iwm_node *in = txd->in;
3270         struct mbuf *m = txd->m;
3271         int status;
3272
3273         KASSERT(txd->done == 0, ("txd not done"));
3274         KASSERT(txd->in != NULL, ("txd without node"));
3275         KASSERT(txd->m != NULL, ("txd without mbuf"));
3276
3277         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3278
3279         sc->sc_tx_timer = 0;
3280
3281         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3282
3283         /* Unmap and free mbuf. */
3284         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3285         bus_dmamap_unload(ring->data_dmat, txd->map);
3286
3287         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3288             "free txd %p, in %p\n", txd, txd->in);
3289         txd->done = 1;
3290         txd->m = NULL;
3291         txd->in = NULL;
3292
3293         ieee80211_tx_complete(&in->in_ni, m, status);
3294
3295         if (--ring->queued < IWM_TX_RING_LOMARK) {
3296                 sc->qfullmsk &= ~(1 << ring->qid);
3297                 if (sc->qfullmsk == 0) {
3298                         /*
3299                          * Well, we're in interrupt context, but then again
3300                          * I guess net80211 does all sorts of stunts in
3301                          * interrupt context, so maybe this is no biggie.
3302                          */
3303                         iwm_start(sc);
3304                 }
3305         }
3306 }
3307
3308 /*
3309  * transmit side
3310  */
3311
3312 /*
3313  * Process a "command done" firmware notification.  This is where we wakeup
3314  * processes waiting for a synchronous command completion.
3315  * from if_iwn
3316  */
3317 static void
3318 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3319 {
3320         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3321         struct iwm_tx_data *data;
3322
3323         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3324                 return; /* Not a command ack. */
3325         }
3326
3327         /* XXX wide commands? */
3328         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3329             "cmd notification type 0x%x qid %d idx %d\n",
3330             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3331
3332         data = &ring->data[pkt->hdr.idx];
3333
3334         /* If the command was mapped in an mbuf, free it. */
3335         if (data->m != NULL) {
3336                 bus_dmamap_sync(ring->data_dmat, data->map,
3337                     BUS_DMASYNC_POSTWRITE);
3338                 bus_dmamap_unload(ring->data_dmat, data->map);
3339                 m_freem(data->m);
3340                 data->m = NULL;
3341         }
3342         wakeup(&ring->desc[pkt->hdr.idx]);
3343
3344         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3345                 device_printf(sc->sc_dev,
3346                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3347                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3348                 /* XXX call iwm_force_nmi() */
3349         }
3350
3351         KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3352         ring->queued--;
3353         if (ring->queued == 0)
3354                 iwm_pcie_clear_cmd_in_flight(sc);
3355 }
3356
3357 #if 0
3358 /*
3359  * necessary only for block ack mode
3360  */
3361 void
3362 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3363         uint16_t len)
3364 {
3365         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3366         uint16_t w_val;
3367
3368         scd_bc_tbl = sc->sched_dma.vaddr;
3369
3370         len += 8; /* magic numbers came naturally from paris */
3371         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3372                 len = roundup(len, 4) / 4;
3373
3374         w_val = htole16(sta_id << 12 | len);
3375
3376         /* Update TX scheduler. */
3377         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3378         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3379             BUS_DMASYNC_PREWRITE);
3380
3381         /* I really wonder what this is ?!? */
3382         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3383                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3384                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3385                     BUS_DMASYNC_PREWRITE);
3386         }
3387 }
3388 #endif
3389
3390 /*
3391  * Take an 802.11 (non-n) rate, find the relevant rate
3392  * table entry.  return the index into in_ridx[].
3393  *
3394  * The caller then uses that index back into in_ridx
3395  * to figure out the rate index programmed /into/
3396  * the firmware for this given node.
3397  */
3398 static int
3399 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3400     uint8_t rate)
3401 {
3402         int i;
3403         uint8_t r;
3404
3405         for (i = 0; i < nitems(in->in_ridx); i++) {
3406                 r = iwm_rates[in->in_ridx[i]].rate;
3407                 if (rate == r)
3408                         return (i);
3409         }
3410
3411         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3412             "%s: couldn't find an entry for rate=%d\n",
3413             __func__,
3414             rate);
3415
3416         /* XXX Return the first */
3417         /* XXX TODO: have it return the /lowest/ */
3418         return (0);
3419 }
3420
3421 static int
3422 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3423 {
3424         int i;
3425
3426         for (i = 0; i < nitems(iwm_rates); i++) {
3427                 if (iwm_rates[i].rate == rate)
3428                         return (i);
3429         }
3430         /* XXX error? */
3431         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3432             "%s: couldn't find an entry for rate=%d\n",
3433             __func__,
3434             rate);
3435         return (0);
3436 }
3437
3438 /*
3439  * Fill in the rate related information for a transmit command.
3440  */
3441 static const struct iwm_rate *
3442 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3443         struct mbuf *m, struct iwm_tx_cmd *tx)
3444 {
3445         struct ieee80211_node *ni = &in->in_ni;
3446         struct ieee80211_frame *wh;
3447         const struct ieee80211_txparam *tp = ni->ni_txparms;
3448         const struct iwm_rate *rinfo;
3449         int type;
3450         int ridx, rate_flags;
3451
3452         wh = mtod(m, struct ieee80211_frame *);
3453         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3454
3455         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3456         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3457
3458         if (type == IEEE80211_FC0_TYPE_MGT) {
3459                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3460                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3461                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3462         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3463                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3464                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3465                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3466         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3467                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3468                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3469                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3470         } else if (m->m_flags & M_EAPOL) {
3471                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3472                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3473                     "%s: EAPOL\n", __func__);
3474         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3475                 int i;
3476
3477                 /* for data frames, use RS table */
3478                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3479                 /* XXX pass pktlen */
3480                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3481                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3482                 ridx = in->in_ridx[i];
3483
3484                 /* This is the index into the programmed table */
3485                 tx->initial_rate_index = i;
3486                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3487
3488                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3489                     "%s: start with i=%d, txrate %d\n",
3490                     __func__, i, iwm_rates[ridx].rate);
3491         } else {
3492                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3493                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3494                     __func__, tp->mgmtrate);
3495         }
3496
3497         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3498             "%s: frame type=%d txrate %d\n",
3499                 __func__, type, iwm_rates[ridx].rate);
3500
3501         rinfo = &iwm_rates[ridx];
3502
3503         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3504             __func__, ridx,
3505             rinfo->rate,
3506             !! (IWM_RIDX_IS_CCK(ridx))
3507             );
3508
3509         /* XXX TODO: hard-coded TX antenna? */
3510         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3511         if (IWM_RIDX_IS_CCK(ridx))
3512                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3513         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3514
3515         return rinfo;
3516 }
3517
3518 #define TB0_SIZE 16
3519 static int
3520 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3521 {
3522         struct ieee80211com *ic = &sc->sc_ic;
3523         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3524         struct iwm_node *in = IWM_NODE(ni);
3525         struct iwm_tx_ring *ring;
3526         struct iwm_tx_data *data;
3527         struct iwm_tfd *desc;
3528         struct iwm_device_cmd *cmd;
3529         struct iwm_tx_cmd *tx;
3530         struct ieee80211_frame *wh;
3531         struct ieee80211_key *k = NULL;
3532         struct mbuf *m1;
3533         const struct iwm_rate *rinfo;
3534         uint32_t flags;
3535         u_int hdrlen;
3536         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3537         int nsegs;
3538         uint8_t tid, type;
3539         int i, totlen, error, pad;
3540
3541         wh = mtod(m, struct ieee80211_frame *);
3542         hdrlen = ieee80211_anyhdrsize(wh);
3543         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3544         tid = 0;
3545         ring = &sc->txq[ac];
3546         desc = &ring->desc[ring->cur];
3547         memset(desc, 0, sizeof(*desc));
3548         data = &ring->data[ring->cur];
3549
3550         /* Fill out iwm_tx_cmd to send to the firmware */
3551         cmd = &ring->cmd[ring->cur];
3552         cmd->hdr.code = IWM_TX_CMD;
3553         cmd->hdr.flags = 0;
3554         cmd->hdr.qid = ring->qid;
3555         cmd->hdr.idx = ring->cur;
3556
3557         tx = (void *)cmd->data;
3558         memset(tx, 0, sizeof(*tx));
3559
3560         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3561
3562         /* Encrypt the frame if need be. */
3563         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3564                 /* Retrieve key for TX && do software encryption. */
3565                 k = ieee80211_crypto_encap(ni, m);
3566                 if (k == NULL) {
3567                         m_freem(m);
3568                         return (ENOBUFS);
3569                 }
3570                 /* 802.11 header may have moved. */
3571                 wh = mtod(m, struct ieee80211_frame *);
3572         }
3573
3574         if (ieee80211_radiotap_active_vap(vap)) {
3575                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3576
3577                 tap->wt_flags = 0;
3578                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3579                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3580                 tap->wt_rate = rinfo->rate;
3581                 if (k != NULL)
3582                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3583                 ieee80211_radiotap_tx(vap, m);
3584         }
3585
3586
3587         totlen = m->m_pkthdr.len;
3588
3589         flags = 0;
3590         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3591                 flags |= IWM_TX_CMD_FLG_ACK;
3592         }
3593
3594         if (type == IEEE80211_FC0_TYPE_DATA
3595             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3596             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3597                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3598         }
3599
3600         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3601             type != IEEE80211_FC0_TYPE_DATA)
3602                 tx->sta_id = sc->sc_aux_sta.sta_id;
3603         else
3604                 tx->sta_id = IWM_STATION_ID;
3605
3606         if (type == IEEE80211_FC0_TYPE_MGT) {
3607                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3608
3609                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3610                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3611                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3612                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3613                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3614                 } else {
3615                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3616                 }
3617         } else {
3618                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3619         }
3620
3621         if (hdrlen & 3) {
3622                 /* First segment length must be a multiple of 4. */
3623                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3624                 pad = 4 - (hdrlen & 3);
3625         } else
3626                 pad = 0;
3627
3628         tx->driver_txop = 0;
3629         tx->next_frame_len = 0;
3630
3631         tx->len = htole16(totlen);
3632         tx->tid_tspec = tid;
3633         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3634
3635         /* Set physical address of "scratch area". */
3636         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3637         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3638
3639         /* Copy 802.11 header in TX command. */
3640         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3641
3642         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3643
3644         tx->sec_ctl = 0;
3645         tx->tx_flags |= htole32(flags);
3646
3647         /* Trim 802.11 header. */
3648         m_adj(m, hdrlen);
3649         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3650             segs, &nsegs, BUS_DMA_NOWAIT);
3651         if (error != 0) {
3652                 if (error != EFBIG) {
3653                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3654                             error);
3655                         m_freem(m);
3656                         return error;
3657                 }
3658                 /* Too many DMA segments, linearize mbuf. */
3659                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3660                 if (m1 == NULL) {
3661                         device_printf(sc->sc_dev,
3662                             "%s: could not defrag mbuf\n", __func__);
3663                         m_freem(m);
3664                         return (ENOBUFS);
3665                 }
3666                 m = m1;
3667
3668                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3669                     segs, &nsegs, BUS_DMA_NOWAIT);
3670                 if (error != 0) {
3671                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3672                             error);
3673                         m_freem(m);
3674                         return error;
3675                 }
3676         }
3677         data->m = m;
3678         data->in = in;
3679         data->done = 0;
3680
3681         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3682             "sending txd %p, in %p\n", data, data->in);
3683         KASSERT(data->in != NULL, ("node is NULL"));
3684
3685         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3686             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3687             ring->qid, ring->cur, totlen, nsegs,
3688             le32toh(tx->tx_flags),
3689             le32toh(tx->rate_n_flags),
3690             tx->initial_rate_index
3691             );
3692
3693         /* Fill TX descriptor. */
3694         desc->num_tbs = 2 + nsegs;
3695
3696         desc->tbs[0].lo = htole32(data->cmd_paddr);
3697         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3698             (TB0_SIZE << 4);
3699         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3700         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3701             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3702               + hdrlen + pad - TB0_SIZE) << 4);
3703
3704         /* Other DMA segments are for data payload. */
3705         for (i = 0; i < nsegs; i++) {
3706                 seg = &segs[i];
3707                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3708                 desc->tbs[i+2].hi_n_len = \
3709                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3710                     | ((seg->ds_len) << 4);
3711         }
3712
3713         bus_dmamap_sync(ring->data_dmat, data->map,
3714             BUS_DMASYNC_PREWRITE);
3715         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3716             BUS_DMASYNC_PREWRITE);
3717         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3718             BUS_DMASYNC_PREWRITE);
3719
3720 #if 0
3721         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3722 #endif
3723
3724         /* Kick TX ring. */
3725         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3726         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3727
3728         /* Mark TX ring as full if we reach a certain threshold. */
3729         if (++ring->queued > IWM_TX_RING_HIMARK) {
3730                 sc->qfullmsk |= 1 << ring->qid;
3731         }
3732
3733         return 0;
3734 }
3735
3736 static int
3737 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3738     const struct ieee80211_bpf_params *params)
3739 {
3740         struct ieee80211com *ic = ni->ni_ic;
3741         struct iwm_softc *sc = ic->ic_softc;
3742         int error = 0;
3743
3744         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3745             "->%s begin\n", __func__);
3746
3747         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3748                 m_freem(m);
3749                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3750                     "<-%s not RUNNING\n", __func__);
3751                 return (ENETDOWN);
3752         }
3753
3754         IWM_LOCK(sc);
3755         /* XXX fix this */
3756         if (params == NULL) {
3757                 error = iwm_tx(sc, m, ni, 0);
3758         } else {
3759                 error = iwm_tx(sc, m, ni, 0);
3760         }
3761         sc->sc_tx_timer = 5;
3762         IWM_UNLOCK(sc);
3763
3764         return (error);
3765 }
3766
3767 /*
3768  * mvm/tx.c
3769  */
3770
3771 /*
3772  * Note that there are transports that buffer frames before they reach
3773  * the firmware. This means that after flush_tx_path is called, the
3774  * queue might not be empty. The race-free way to handle this is to:
3775  * 1) set the station as draining
3776  * 2) flush the Tx path
3777  * 3) wait for the transport queues to be empty
3778  */
3779 int
3780 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3781 {
3782         int ret;
3783         struct iwm_tx_path_flush_cmd flush_cmd = {
3784                 .queues_ctl = htole32(tfd_msk),
3785                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3786         };
3787
3788         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3789             sizeof(flush_cmd), &flush_cmd);
3790         if (ret)
3791                 device_printf(sc->sc_dev,
3792                     "Flushing tx queue failed: %d\n", ret);
3793         return ret;
3794 }
3795
3796 /*
3797  * BEGIN mvm/sta.c
3798  */
3799
3800 static int
3801 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3802         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3803 {
3804         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3805             cmd, status);
3806 }
3807
3808 /* send station add/update command to firmware */
3809 static int
3810 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3811 {
3812         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3813         int ret;
3814         uint32_t status;
3815
3816         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3817
3818         add_sta_cmd.sta_id = IWM_STATION_ID;
3819         add_sta_cmd.mac_id_n_color
3820             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3821                 IWM_DEFAULT_COLOR));
3822         if (!update) {
3823                 int ac;
3824                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3825                         add_sta_cmd.tfd_queue_msk |=
3826                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3827                 }
3828                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3829         }
3830         add_sta_cmd.add_modify = update ? 1 : 0;
3831         add_sta_cmd.station_flags_msk
3832             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3833         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3834         if (update)
3835                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3836
3837         status = IWM_ADD_STA_SUCCESS;
3838         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3839         if (ret)
3840                 return ret;
3841
3842         switch (status) {
3843         case IWM_ADD_STA_SUCCESS:
3844                 break;
3845         default:
3846                 ret = EIO;
3847                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3848                 break;
3849         }
3850
3851         return ret;
3852 }
3853
3854 static int
3855 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3856 {
3857         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3858 }
3859
3860 static int
3861 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3862 {
3863         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3864 }
3865
3866 static int
3867 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3868         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3869 {
3870         struct iwm_mvm_add_sta_cmd_v7 cmd;
3871         int ret;
3872         uint32_t status;
3873
3874         memset(&cmd, 0, sizeof(cmd));
3875         cmd.sta_id = sta->sta_id;
3876         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3877
3878         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3879         cmd.tid_disable_tx = htole16(0xffff);
3880
3881         if (addr)
3882                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3883
3884         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3885         if (ret)
3886                 return ret;
3887
3888         switch (status) {
3889         case IWM_ADD_STA_SUCCESS:
3890                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3891                     "%s: Internal station added.\n", __func__);
3892                 return 0;
3893         default:
3894                 device_printf(sc->sc_dev,
3895                     "%s: Add internal station failed, status=0x%x\n",
3896                     __func__, status);
3897                 ret = EIO;
3898                 break;
3899         }
3900         return ret;
3901 }
3902
3903 static int
3904 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3905 {
3906         int ret;
3907
3908         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3909         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3910
3911         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3912         if (ret)
3913                 return ret;
3914
3915         ret = iwm_mvm_add_int_sta_common(sc,
3916             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3917
3918         if (ret)
3919                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3920         return ret;
3921 }
3922
3923 /*
3924  * END mvm/sta.c
3925  */
3926
3927 /*
3928  * BEGIN mvm/quota.c
3929  */
3930
3931 static int
3932 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3933 {
3934         struct iwm_time_quota_cmd cmd;
3935         int i, idx, ret, num_active_macs, quota, quota_rem;
3936         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3937         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3938         uint16_t id;
3939
3940         memset(&cmd, 0, sizeof(cmd));
3941
3942         /* currently, PHY ID == binding ID */
3943         if (in) {
3944                 id = in->in_phyctxt->id;
3945                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3946                 colors[id] = in->in_phyctxt->color;
3947
3948                 if (1)
3949                         n_ifs[id] = 1;
3950         }
3951
3952         /*
3953          * The FW's scheduling session consists of
3954          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3955          * equally between all the bindings that require quota
3956          */
3957         num_active_macs = 0;
3958         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3959                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3960                 num_active_macs += n_ifs[i];
3961         }
3962
3963         quota = 0;
3964         quota_rem = 0;
3965         if (num_active_macs) {
3966                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3967                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3968         }
3969
3970         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3971                 if (colors[i] < 0)
3972                         continue;
3973
3974                 cmd.quotas[idx].id_and_color =
3975                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3976
3977                 if (n_ifs[i] <= 0) {
3978                         cmd.quotas[idx].quota = htole32(0);
3979                         cmd.quotas[idx].max_duration = htole32(0);
3980                 } else {
3981                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3982                         cmd.quotas[idx].max_duration = htole32(0);
3983                 }
3984                 idx++;
3985         }
3986
3987         /* Give the remainder of the session to the first binding */
3988         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3989
3990         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3991             sizeof(cmd), &cmd);
3992         if (ret)
3993                 device_printf(sc->sc_dev,
3994                     "%s: Failed to send quota: %d\n", __func__, ret);
3995         return ret;
3996 }
3997
3998 /*
3999  * END mvm/quota.c
4000  */
4001
4002 /*
4003  * ieee80211 routines
4004  */
4005
4006 /*
4007  * Change to AUTH state in 80211 state machine.  Roughly matches what
4008  * Linux does in bss_info_changed().
4009  */
4010 static int
4011 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4012 {
4013         struct ieee80211_node *ni;
4014         struct iwm_node *in;
4015         struct iwm_vap *iv = IWM_VAP(vap);
4016         uint32_t duration;
4017         int error;
4018
4019         /*
4020          * XXX i have a feeling that the vap node is being
4021          * freed from underneath us. Grr.
4022          */
4023         ni = ieee80211_ref_node(vap->iv_bss);
4024         in = IWM_NODE(ni);
4025         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4026             "%s: called; vap=%p, bss ni=%p\n",
4027             __func__,
4028             vap,
4029             ni);
4030
4031         in->in_assoc = 0;
4032
4033         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4034         if (error != 0)
4035                 return error;
4036
4037         error = iwm_allow_mcast(vap, sc);
4038         if (error) {
4039                 device_printf(sc->sc_dev,
4040                     "%s: failed to set multicast\n", __func__);
4041                 goto out;
4042         }
4043
4044         /*
4045          * This is where it deviates from what Linux does.
4046          *
4047          * Linux iwlwifi doesn't reset the nic each time, nor does it
4048          * call ctxt_add() here.  Instead, it adds it during vap creation,
4049          * and always does a mac_ctx_changed().
4050          *
4051          * The openbsd port doesn't attempt to do that - it reset things
4052          * at odd states and does the add here.
4053          *
4054          * So, until the state handling is fixed (ie, we never reset
4055          * the NIC except for a firmware failure, which should drag
4056          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4057          * contexts that are required), let's do a dirty hack here.
4058          */
4059         if (iv->is_uploaded) {
4060                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4061                         device_printf(sc->sc_dev,
4062                             "%s: failed to update MAC\n", __func__);
4063                         goto out;
4064                 }
4065                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4066                     in->in_ni.ni_chan, 1, 1)) != 0) {
4067                         device_printf(sc->sc_dev,
4068                             "%s: failed update phy ctxt\n", __func__);
4069                         goto out;
4070                 }
4071                 in->in_phyctxt = &sc->sc_phyctxt[0];
4072
4073                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4074                         device_printf(sc->sc_dev,
4075                             "%s: binding update cmd\n", __func__);
4076                         goto out;
4077                 }
4078                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4079                         device_printf(sc->sc_dev,
4080                             "%s: failed to update sta\n", __func__);
4081                         goto out;
4082                 }
4083         } else {
4084                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4085                         device_printf(sc->sc_dev,
4086                             "%s: failed to add MAC\n", __func__);
4087                         goto out;
4088                 }
4089                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4090                     in->in_ni.ni_chan, 1, 1)) != 0) {
4091                         device_printf(sc->sc_dev,
4092                             "%s: failed add phy ctxt!\n", __func__);
4093                         error = ETIMEDOUT;
4094                         goto out;
4095                 }
4096                 in->in_phyctxt = &sc->sc_phyctxt[0];
4097
4098                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4099                         device_printf(sc->sc_dev,
4100                             "%s: binding add cmd\n", __func__);
4101                         goto out;
4102                 }
4103                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4104                         device_printf(sc->sc_dev,
4105                             "%s: failed to add sta\n", __func__);
4106                         goto out;
4107                 }
4108         }
4109
4110         /*
4111          * Prevent the FW from wandering off channel during association
4112          * by "protecting" the session with a time event.
4113          */
4114         /* XXX duration is in units of TU, not MS */
4115         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4116         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4117         DELAY(100);
4118
4119         error = 0;
4120 out:
4121         ieee80211_free_node(ni);
4122         return (error);
4123 }
4124
4125 static int
4126 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4127 {
4128         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4129         int error;
4130
4131         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4132                 device_printf(sc->sc_dev,
4133                     "%s: failed to update STA\n", __func__);
4134                 return error;
4135         }
4136
4137         in->in_assoc = 1;
4138         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4139                 device_printf(sc->sc_dev,
4140                     "%s: failed to update MAC\n", __func__);
4141                 return error;
4142         }
4143
4144         return 0;
4145 }
4146
4147 static int
4148 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4149 {
4150         uint32_t tfd_msk;
4151
4152         /*
4153          * Ok, so *technically* the proper set of calls for going
4154          * from RUN back to SCAN is:
4155          *
4156          * iwm_mvm_power_mac_disable(sc, in);
4157          * iwm_mvm_mac_ctxt_changed(sc, in);
4158          * iwm_mvm_rm_sta(sc, in);
4159          * iwm_mvm_update_quotas(sc, NULL);
4160          * iwm_mvm_mac_ctxt_changed(sc, in);
4161          * iwm_mvm_binding_remove_vif(sc, in);
4162          * iwm_mvm_mac_ctxt_remove(sc, in);
4163          *
4164          * However, that freezes the device not matter which permutations
4165          * and modifications are attempted.  Obviously, this driver is missing
4166          * something since it works in the Linux driver, but figuring out what
4167          * is missing is a little more complicated.  Now, since we're going
4168          * back to nothing anyway, we'll just do a complete device reset.
4169          * Up your's, device!
4170          */
4171         /*
4172          * Just using 0xf for the queues mask is fine as long as we only
4173          * get here from RUN state.
4174          */
4175         tfd_msk = 0xf;
4176         mbufq_drain(&sc->sc_snd);
4177         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4178         /*
4179          * We seem to get away with just synchronously sending the
4180          * IWM_TXPATH_FLUSH command.
4181          */
4182 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4183         iwm_stop_device(sc);
4184         iwm_init_hw(sc);
4185         if (in)
4186                 in->in_assoc = 0;
4187         return 0;
4188
4189 #if 0
4190         int error;
4191
4192         iwm_mvm_power_mac_disable(sc, in);
4193
4194         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4195                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4196                 return error;
4197         }
4198
4199         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4200                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4201                 return error;
4202         }
4203         error = iwm_mvm_rm_sta(sc, in);
4204         in->in_assoc = 0;
4205         iwm_mvm_update_quotas(sc, NULL);
4206         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4207                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4208                 return error;
4209         }
4210         iwm_mvm_binding_remove_vif(sc, in);
4211
4212         iwm_mvm_mac_ctxt_remove(sc, in);
4213
4214         return error;
4215 #endif
4216 }
4217
4218 static struct ieee80211_node *
4219 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4220 {
4221         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4222             M_NOWAIT | M_ZERO);
4223 }
4224
4225 static void
4226 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4227 {
4228         struct ieee80211_node *ni = &in->in_ni;
4229         struct iwm_lq_cmd *lq = &in->in_lq;
4230         int nrates = ni->ni_rates.rs_nrates;
4231         int i, ridx, tab = 0;
4232 //      int txant = 0;
4233
4234         if (nrates > nitems(lq->rs_table)) {
4235                 device_printf(sc->sc_dev,
4236                     "%s: node supports %d rates, driver handles "
4237                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4238                 return;
4239         }
4240         if (nrates == 0) {
4241                 device_printf(sc->sc_dev,
4242                     "%s: node supports 0 rates, odd!\n", __func__);
4243                 return;
4244         }
4245
4246         /*
4247          * XXX .. and most of iwm_node is not initialised explicitly;
4248          * it's all just 0x0 passed to the firmware.
4249          */
4250
4251         /* first figure out which rates we should support */
4252         /* XXX TODO: this isn't 11n aware /at all/ */
4253         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4254         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4255             "%s: nrates=%d\n", __func__, nrates);
4256
4257         /*
4258          * Loop over nrates and populate in_ridx from the highest
4259          * rate to the lowest rate.  Remember, in_ridx[] has
4260          * IEEE80211_RATE_MAXSIZE entries!
4261          */
4262         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4263                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4264
4265                 /* Map 802.11 rate to HW rate index. */
4266                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4267                         if (iwm_rates[ridx].rate == rate)
4268                                 break;
4269                 if (ridx > IWM_RIDX_MAX) {
4270                         device_printf(sc->sc_dev,
4271                             "%s: WARNING: device rate for %d not found!\n",
4272                             __func__, rate);
4273                 } else {
4274                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4275                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4276                             __func__,
4277                             i,
4278                             rate,
4279                             ridx);
4280                         in->in_ridx[i] = ridx;
4281                 }
4282         }
4283
4284         /* then construct a lq_cmd based on those */
4285         memset(lq, 0, sizeof(*lq));
4286         lq->sta_id = IWM_STATION_ID;
4287
4288         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4289         if (ni->ni_flags & IEEE80211_NODE_HT)
4290                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4291
4292         /*
4293          * are these used? (we don't do SISO or MIMO)
4294          * need to set them to non-zero, though, or we get an error.
4295          */
4296         lq->single_stream_ant_msk = 1;
4297         lq->dual_stream_ant_msk = 1;
4298
4299         /*
4300          * Build the actual rate selection table.
4301          * The lowest bits are the rates.  Additionally,
4302          * CCK needs bit 9 to be set.  The rest of the bits
4303          * we add to the table select the tx antenna
4304          * Note that we add the rates in the highest rate first
4305          * (opposite of ni_rates).
4306          */
4307         /*
4308          * XXX TODO: this should be looping over the min of nrates
4309          * and LQ_MAX_RETRY_NUM.  Sigh.
4310          */
4311         for (i = 0; i < nrates; i++) {
4312                 int nextant;
4313
4314 #if 0
4315                 if (txant == 0)
4316                         txant = iwm_mvm_get_valid_tx_ant(sc);
4317                 nextant = 1<<(ffs(txant)-1);
4318                 txant &= ~nextant;
4319 #else
4320                 nextant = iwm_mvm_get_valid_tx_ant(sc);
4321 #endif
4322                 /*
4323                  * Map the rate id into a rate index into
4324                  * our hardware table containing the
4325                  * configuration to use for this rate.
4326                  */
4327                 ridx = in->in_ridx[i];
4328                 tab = iwm_rates[ridx].plcp;
4329                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4330                 if (IWM_RIDX_IS_CCK(ridx))
4331                         tab |= IWM_RATE_MCS_CCK_MSK;
4332                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4333                     "station rate i=%d, rate=%d, hw=%x\n",
4334                     i, iwm_rates[ridx].rate, tab);
4335                 lq->rs_table[i] = htole32(tab);
4336         }
4337         /* then fill the rest with the lowest possible rate */
4338         for (i = nrates; i < nitems(lq->rs_table); i++) {
4339                 KASSERT(tab != 0, ("invalid tab"));
4340                 lq->rs_table[i] = htole32(tab);
4341         }
4342 }
4343
4344 static int
4345 iwm_media_change(struct ifnet *ifp)
4346 {
4347         struct ieee80211vap *vap = ifp->if_softc;
4348         struct ieee80211com *ic = vap->iv_ic;
4349         struct iwm_softc *sc = ic->ic_softc;
4350         int error;
4351
4352         error = ieee80211_media_change(ifp);
4353         if (error != ENETRESET)
4354                 return error;
4355
4356         IWM_LOCK(sc);
4357         if (ic->ic_nrunning > 0) {
4358                 iwm_stop(sc);
4359                 iwm_init(sc);
4360         }
4361         IWM_UNLOCK(sc);
4362         return error;
4363 }
4364
4365
4366 static int
4367 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4368 {
4369         struct iwm_vap *ivp = IWM_VAP(vap);
4370         struct ieee80211com *ic = vap->iv_ic;
4371         struct iwm_softc *sc = ic->ic_softc;
4372         struct iwm_node *in;
4373         int error;
4374
4375         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4376             "switching state %s -> %s\n",
4377             ieee80211_state_name[vap->iv_state],
4378             ieee80211_state_name[nstate]);
4379         IEEE80211_UNLOCK(ic);
4380         IWM_LOCK(sc);
4381
4382         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4383                 iwm_led_blink_stop(sc);
4384
4385         /* disable beacon filtering if we're hopping out of RUN */
4386         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4387                 iwm_mvm_disable_beacon_filter(sc);
4388
4389                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4390                         in->in_assoc = 0;
4391
4392                 if (nstate == IEEE80211_S_INIT) {
4393                         IWM_UNLOCK(sc);
4394                         IEEE80211_LOCK(ic);
4395                         error = ivp->iv_newstate(vap, nstate, arg);
4396                         IEEE80211_UNLOCK(ic);
4397                         IWM_LOCK(sc);
4398                         iwm_release(sc, NULL);
4399                         IWM_UNLOCK(sc);
4400                         IEEE80211_LOCK(ic);
4401                         return error;
4402                 }
4403
4404                 /*
4405                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4406                  * above then the card will be completely reinitialized,
4407                  * so the driver must do everything necessary to bring the card
4408                  * from INIT to SCAN.
4409                  *
4410                  * Additionally, upon receiving deauth frame from AP,
4411                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4412                  * state. This will also fail with this driver, so bring the FSM
4413                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4414                  *
4415                  * XXX TODO: fix this for FreeBSD!
4416                  */
4417                 if (nstate == IEEE80211_S_SCAN ||
4418                     nstate == IEEE80211_S_AUTH ||
4419                     nstate == IEEE80211_S_ASSOC) {
4420                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4421                             "Force transition to INIT; MGT=%d\n", arg);
4422                         IWM_UNLOCK(sc);
4423                         IEEE80211_LOCK(ic);
4424                         /* Always pass arg as -1 since we can't Tx right now. */
4425                         /*
4426                          * XXX arg is just ignored anyway when transitioning
4427                          *     to IEEE80211_S_INIT.
4428                          */
4429                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4430                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4431                             "Going INIT->SCAN\n");
4432                         nstate = IEEE80211_S_SCAN;
4433                         IEEE80211_UNLOCK(ic);
4434                         IWM_LOCK(sc);
4435                 }
4436         }
4437
4438         switch (nstate) {
4439         case IEEE80211_S_INIT:
4440                 break;
4441
4442         case IEEE80211_S_AUTH:
4443                 if ((error = iwm_auth(vap, sc)) != 0) {
4444                         device_printf(sc->sc_dev,
4445                             "%s: could not move to auth state: %d\n",
4446                             __func__, error);
4447                         break;
4448                 }
4449                 break;
4450
4451         case IEEE80211_S_ASSOC:
4452                 if ((error = iwm_assoc(vap, sc)) != 0) {
4453                         device_printf(sc->sc_dev,
4454                             "%s: failed to associate: %d\n", __func__,
4455                             error);
4456                         break;
4457                 }
4458                 break;
4459
4460         case IEEE80211_S_RUN:
4461         {
4462                 struct iwm_host_cmd cmd = {
4463                         .id = IWM_LQ_CMD,
4464                         .len = { sizeof(in->in_lq), },
4465                         .flags = IWM_CMD_SYNC,
4466                 };
4467
4468                 /* Update the association state, now we have it all */
4469                 /* (eg associd comes in at this point */
4470                 error = iwm_assoc(vap, sc);
4471                 if (error != 0) {
4472                         device_printf(sc->sc_dev,
4473                             "%s: failed to update association state: %d\n",
4474                             __func__,
4475                             error);
4476                         break;
4477                 }
4478
4479                 in = IWM_NODE(vap->iv_bss);
4480                 iwm_mvm_power_mac_update_mode(sc, in);
4481                 iwm_mvm_enable_beacon_filter(sc, in);
4482                 iwm_mvm_update_quotas(sc, in);
4483                 iwm_setrates(sc, in);
4484
4485                 cmd.data[0] = &in->in_lq;
4486                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4487                         device_printf(sc->sc_dev,
4488                             "%s: IWM_LQ_CMD failed\n", __func__);
4489                 }
4490
4491                 iwm_mvm_led_enable(sc);
4492                 break;
4493         }
4494
4495         default:
4496                 break;
4497         }
4498         IWM_UNLOCK(sc);
4499         IEEE80211_LOCK(ic);
4500
4501         return (ivp->iv_newstate(vap, nstate, arg));
4502 }
4503
4504 void
4505 iwm_endscan_cb(void *arg, int pending)
4506 {
4507         struct iwm_softc *sc = arg;
4508         struct ieee80211com *ic = &sc->sc_ic;
4509
4510         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4511             "%s: scan ended\n",
4512             __func__);
4513
4514         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4515 }
4516
4517 /*
4518  * Aging and idle timeouts for the different possible scenarios
4519  * in default configuration
4520  */
4521 static const uint32_t
4522 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4523         {
4524                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4525                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4526         },
4527         {
4528                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4529                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4530         },
4531         {
4532                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4533                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4534         },
4535         {
4536                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4537                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4538         },
4539         {
4540                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4541                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4542         },
4543 };
4544
4545 /*
4546  * Aging and idle timeouts for the different possible scenarios
4547  * in single BSS MAC configuration.
4548  */
4549 static const uint32_t
4550 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4551         {
4552                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4553                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4554         },
4555         {
4556                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4557                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4558         },
4559         {
4560                 htole32(IWM_SF_MCAST_AGING_TIMER),
4561                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4562         },
4563         {
4564                 htole32(IWM_SF_BA_AGING_TIMER),
4565                 htole32(IWM_SF_BA_IDLE_TIMER)
4566         },
4567         {
4568                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4569                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4570         },
4571 };
4572
4573 static void
4574 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4575     struct ieee80211_node *ni)
4576 {
4577         int i, j, watermark;
4578
4579         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4580
4581         /*
4582          * If we are in association flow - check antenna configuration
4583          * capabilities of the AP station, and choose the watermark accordingly.
4584          */
4585         if (ni) {
4586                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4587 #ifdef notyet
4588                         if (ni->ni_rxmcs[2] != 0)
4589                                 watermark = IWM_SF_W_MARK_MIMO3;
4590                         else if (ni->ni_rxmcs[1] != 0)
4591                                 watermark = IWM_SF_W_MARK_MIMO2;
4592                         else
4593 #endif
4594                                 watermark = IWM_SF_W_MARK_SISO;
4595                 } else {
4596                         watermark = IWM_SF_W_MARK_LEGACY;
4597                 }
4598         /* default watermark value for unassociated mode. */
4599         } else {
4600                 watermark = IWM_SF_W_MARK_MIMO2;
4601         }
4602         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4603
4604         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4605                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4606                         sf_cmd->long_delay_timeouts[i][j] =
4607                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4608                 }
4609         }
4610
4611         if (ni) {
4612                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4613                        sizeof(iwm_sf_full_timeout));
4614         } else {
4615                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4616                        sizeof(iwm_sf_full_timeout_def));
4617         }
4618 }
4619
4620 static int
4621 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4622 {
4623         struct ieee80211com *ic = &sc->sc_ic;
4624         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4625         struct iwm_sf_cfg_cmd sf_cmd = {
4626                 .state = htole32(IWM_SF_FULL_ON),
4627         };
4628         int ret = 0;
4629
4630         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4631                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4632
4633         switch (new_state) {
4634         case IWM_SF_UNINIT:
4635         case IWM_SF_INIT_OFF:
4636                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4637                 break;
4638         case IWM_SF_FULL_ON:
4639                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4640                 break;
4641         default:
4642                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4643                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4644                           new_state);
4645                 return EINVAL;
4646         }
4647
4648         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4649                                    sizeof(sf_cmd), &sf_cmd);
4650         return ret;
4651 }
4652
4653 static int
4654 iwm_send_bt_init_conf(struct iwm_softc *sc)
4655 {
4656         struct iwm_bt_coex_cmd bt_cmd;
4657
4658         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4659         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4660
4661         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4662             &bt_cmd);
4663 }
4664
4665 static int
4666 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4667 {
4668         struct iwm_mcc_update_cmd mcc_cmd;
4669         struct iwm_host_cmd hcmd = {
4670                 .id = IWM_MCC_UPDATE_CMD,
4671                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4672                 .data = { &mcc_cmd },
4673         };
4674         int ret;
4675 #ifdef IWM_DEBUG
4676         struct iwm_rx_packet *pkt;
4677         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4678         struct iwm_mcc_update_resp *mcc_resp;
4679         int n_channels;
4680         uint16_t mcc;
4681 #endif
4682         int resp_v2 = isset(sc->sc_enabled_capa,
4683             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4684
4685         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4686         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4687         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4688             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4689                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4690         else
4691                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4692
4693         if (resp_v2)
4694                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4695         else
4696                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4697
4698         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4699             "send MCC update to FW with '%c%c' src = %d\n",
4700             alpha2[0], alpha2[1], mcc_cmd.source_id);
4701
4702         ret = iwm_send_cmd(sc, &hcmd);
4703         if (ret)
4704                 return ret;
4705
4706 #ifdef IWM_DEBUG
4707         pkt = hcmd.resp_pkt;
4708
4709         /* Extract MCC response */
4710         if (resp_v2) {
4711                 mcc_resp = (void *)pkt->data;
4712                 mcc = mcc_resp->mcc;
4713                 n_channels =  le32toh(mcc_resp->n_channels);
4714         } else {
4715                 mcc_resp_v1 = (void *)pkt->data;
4716                 mcc = mcc_resp_v1->mcc;
4717                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4718         }
4719
4720         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4721         if (mcc == 0)
4722                 mcc = 0x3030;  /* "00" - world */
4723
4724         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4725             "regulatory domain '%c%c' (%d channels available)\n",
4726             mcc >> 8, mcc & 0xff, n_channels);
4727 #endif
4728         iwm_free_resp(sc, &hcmd);
4729
4730         return 0;
4731 }
4732
4733 static void
4734 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4735 {
4736         struct iwm_host_cmd cmd = {
4737                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4738                 .len = { sizeof(uint32_t), },
4739                 .data = { &backoff, },
4740         };
4741
4742         if (iwm_send_cmd(sc, &cmd) != 0) {
4743                 device_printf(sc->sc_dev,
4744                     "failed to change thermal tx backoff\n");
4745         }
4746 }
4747
4748 static int
4749 iwm_init_hw(struct iwm_softc *sc)
4750 {
4751         struct ieee80211com *ic = &sc->sc_ic;
4752         int error, i, ac;
4753
4754         if ((error = iwm_start_hw(sc)) != 0) {
4755                 printf("iwm_start_hw: failed %d\n", error);
4756                 return error;
4757         }
4758
4759         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4760                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4761                 return error;
4762         }
4763
4764         /*
4765          * should stop and start HW since that INIT
4766          * image just loaded
4767          */
4768         iwm_stop_device(sc);
4769         if ((error = iwm_start_hw(sc)) != 0) {
4770                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4771                 return error;
4772         }
4773
4774         /* omstart, this time with the regular firmware */
4775         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4776         if (error) {
4777                 device_printf(sc->sc_dev, "could not load firmware\n");
4778                 goto error;
4779         }
4780
4781         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4782                 device_printf(sc->sc_dev, "bt init conf failed\n");
4783                 goto error;
4784         }
4785
4786         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4787         if (error != 0) {
4788                 device_printf(sc->sc_dev, "antenna config failed\n");
4789                 goto error;
4790         }
4791
4792         /* Send phy db control command and then phy db calibration */
4793         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4794                 goto error;
4795
4796         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4797                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4798                 goto error;
4799         }
4800
4801         /* Add auxiliary station for scanning */
4802         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4803                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4804                 goto error;
4805         }
4806
4807         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4808                 /*
4809                  * The channel used here isn't relevant as it's
4810                  * going to be overwritten in the other flows.
4811                  * For now use the first channel we have.
4812                  */
4813                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4814                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4815                         goto error;
4816         }
4817
4818         /* Initialize tx backoffs to the minimum. */
4819         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4820                 iwm_mvm_tt_tx_backoff(sc, 0);
4821
4822         error = iwm_mvm_power_update_device(sc);
4823         if (error)
4824                 goto error;
4825
4826         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4827                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4828                         goto error;
4829         }
4830
4831         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4832                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4833                         goto error;
4834         }
4835
4836         /* Enable Tx queues. */
4837         for (ac = 0; ac < WME_NUM_AC; ac++) {
4838                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4839                     iwm_mvm_ac_to_tx_fifo[ac]);
4840                 if (error)
4841                         goto error;
4842         }
4843
4844         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4845                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4846                 goto error;
4847         }
4848
4849         return 0;
4850
4851  error:
4852         iwm_stop_device(sc);
4853         return error;
4854 }
4855
4856 /* Allow multicast from our BSSID. */
4857 static int
4858 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4859 {
4860         struct ieee80211_node *ni = vap->iv_bss;
4861         struct iwm_mcast_filter_cmd *cmd;
4862         size_t size;
4863         int error;
4864
4865         size = roundup(sizeof(*cmd), 4);
4866         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4867         if (cmd == NULL)
4868                 return ENOMEM;
4869         cmd->filter_own = 1;
4870         cmd->port_id = 0;
4871         cmd->count = 0;
4872         cmd->pass_all = 1;
4873         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4874
4875         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4876             IWM_CMD_SYNC, size, cmd);
4877         free(cmd, M_DEVBUF);
4878
4879         return (error);
4880 }
4881
4882 /*
4883  * ifnet interfaces
4884  */
4885
4886 static void
4887 iwm_init(struct iwm_softc *sc)
4888 {
4889         int error;
4890
4891         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4892                 return;
4893         }
4894         sc->sc_generation++;
4895         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4896
4897         if ((error = iwm_init_hw(sc)) != 0) {
4898                 printf("iwm_init_hw failed %d\n", error);
4899                 iwm_stop(sc);
4900                 return;
4901         }
4902
4903         /*
4904          * Ok, firmware loaded and we are jogging
4905          */
4906         sc->sc_flags |= IWM_FLAG_HW_INITED;
4907         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4908 }
4909
4910 static int
4911 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4912 {
4913         struct iwm_softc *sc;
4914         int error;
4915
4916         sc = ic->ic_softc;
4917
4918         IWM_LOCK(sc);
4919         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4920                 IWM_UNLOCK(sc);
4921                 return (ENXIO);
4922         }
4923         error = mbufq_enqueue(&sc->sc_snd, m);
4924         if (error) {
4925                 IWM_UNLOCK(sc);
4926                 return (error);
4927         }
4928         iwm_start(sc);
4929         IWM_UNLOCK(sc);
4930         return (0);
4931 }
4932
4933 /*
4934  * Dequeue packets from sendq and call send.
4935  */
4936 static void
4937 iwm_start(struct iwm_softc *sc)
4938 {
4939         struct ieee80211_node *ni;
4940         struct mbuf *m;
4941         int ac = 0;
4942
4943         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4944         while (sc->qfullmsk == 0 &&
4945                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4946                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4947                 if (iwm_tx(sc, m, ni, ac) != 0) {
4948                         if_inc_counter(ni->ni_vap->iv_ifp,
4949                             IFCOUNTER_OERRORS, 1);
4950                         ieee80211_free_node(ni);
4951                         continue;
4952                 }
4953                 sc->sc_tx_timer = 15;
4954         }
4955         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4956 }
4957
4958 static void
4959 iwm_stop(struct iwm_softc *sc)
4960 {
4961
4962         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4963         sc->sc_flags |= IWM_FLAG_STOPPED;
4964         sc->sc_generation++;
4965         iwm_led_blink_stop(sc);
4966         sc->sc_tx_timer = 0;
4967         iwm_stop_device(sc);
4968         sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4969 }
4970
4971 static void
4972 iwm_watchdog(void *arg)
4973 {
4974         struct iwm_softc *sc = arg;
4975         struct ieee80211com *ic = &sc->sc_ic;
4976
4977         if (sc->sc_tx_timer > 0) {
4978                 if (--sc->sc_tx_timer == 0) {
4979                         device_printf(sc->sc_dev, "device timeout\n");
4980 #ifdef IWM_DEBUG
4981                         iwm_nic_error(sc);
4982 #endif
4983                         ieee80211_restart_all(ic);
4984                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4985                         return;
4986                 }
4987         }
4988         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4989 }
4990
4991 static void
4992 iwm_parent(struct ieee80211com *ic)
4993 {
4994         struct iwm_softc *sc = ic->ic_softc;
4995         int startall = 0;
4996
4997         IWM_LOCK(sc);
4998         if (ic->ic_nrunning > 0) {
4999                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5000                         iwm_init(sc);
5001                         startall = 1;
5002                 }
5003         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5004                 iwm_stop(sc);
5005         IWM_UNLOCK(sc);
5006         if (startall)
5007                 ieee80211_start_all(ic);
5008 }
5009
5010 /*
5011  * The interrupt side of things
5012  */
5013
5014 /*
5015  * error dumping routines are from iwlwifi/mvm/utils.c
5016  */
5017
5018 /*
5019  * Note: This structure is read from the device with IO accesses,
5020  * and the reading already does the endian conversion. As it is
5021  * read with uint32_t-sized accesses, any members with a different size
5022  * need to be ordered correctly though!
5023  */
5024 struct iwm_error_event_table {
5025         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5026         uint32_t error_id;              /* type of error */
5027         uint32_t trm_hw_status0;        /* TRM HW status */
5028         uint32_t trm_hw_status1;        /* TRM HW status */
5029         uint32_t blink2;                /* branch link */
5030         uint32_t ilink1;                /* interrupt link */
5031         uint32_t ilink2;                /* interrupt link */
5032         uint32_t data1;         /* error-specific data */
5033         uint32_t data2;         /* error-specific data */
5034         uint32_t data3;         /* error-specific data */
5035         uint32_t bcon_time;             /* beacon timer */
5036         uint32_t tsf_low;               /* network timestamp function timer */
5037         uint32_t tsf_hi;                /* network timestamp function timer */
5038         uint32_t gp1;           /* GP1 timer register */
5039         uint32_t gp2;           /* GP2 timer register */
5040         uint32_t fw_rev_type;   /* firmware revision type */
5041         uint32_t major;         /* uCode version major */
5042         uint32_t minor;         /* uCode version minor */
5043         uint32_t hw_ver;                /* HW Silicon version */
5044         uint32_t brd_ver;               /* HW board version */
5045         uint32_t log_pc;                /* log program counter */
5046         uint32_t frame_ptr;             /* frame pointer */
5047         uint32_t stack_ptr;             /* stack pointer */
5048         uint32_t hcmd;          /* last host command header */
5049         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
5050                                  * rxtx_flag */
5051         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
5052                                  * host_flag */
5053         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
5054                                  * enc_flag */
5055         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
5056                                  * time_flag */
5057         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
5058                                  * wico interrupt */
5059         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
5060         uint32_t wait_event;            /* wait event() caller address */
5061         uint32_t l2p_control;   /* L2pControlField */
5062         uint32_t l2p_duration;  /* L2pDurationField */
5063         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
5064         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
5065         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
5066                                  * (LMPM_PMG_SEL) */
5067         uint32_t u_timestamp;   /* indicate when the date and time of the
5068                                  * compilation */
5069         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
5070 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5071
5072 /*
5073  * UMAC error struct - relevant starting from family 8000 chip.
5074  * Note: This structure is read from the device with IO accesses,
5075  * and the reading already does the endian conversion. As it is
5076  * read with u32-sized accesses, any members with a different size
5077  * need to be ordered correctly though!
5078  */
5079 struct iwm_umac_error_event_table {
5080         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5081         uint32_t error_id;      /* type of error */
5082         uint32_t blink1;        /* branch link */
5083         uint32_t blink2;        /* branch link */
5084         uint32_t ilink1;        /* interrupt link */
5085         uint32_t ilink2;        /* interrupt link */
5086         uint32_t data1;         /* error-specific data */
5087         uint32_t data2;         /* error-specific data */
5088         uint32_t data3;         /* error-specific data */
5089         uint32_t umac_major;
5090         uint32_t umac_minor;
5091         uint32_t frame_pointer; /* core register 27*/
5092         uint32_t stack_pointer; /* core register 28 */
5093         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
5094         uint32_t nic_isr_pref;  /* ISR status register */
5095 } __packed;
5096
5097 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5098 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5099
5100 #ifdef IWM_DEBUG
5101 struct {
5102         const char *name;
5103         uint8_t num;
5104 } advanced_lookup[] = {
5105         { "NMI_INTERRUPT_WDG", 0x34 },
5106         { "SYSASSERT", 0x35 },
5107         { "UCODE_VERSION_MISMATCH", 0x37 },
5108         { "BAD_COMMAND", 0x38 },
5109         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5110         { "FATAL_ERROR", 0x3D },
5111         { "NMI_TRM_HW_ERR", 0x46 },
5112         { "NMI_INTERRUPT_TRM", 0x4C },
5113         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5114         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5115         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5116         { "NMI_INTERRUPT_HOST", 0x66 },
5117         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5118         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5119         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5120         { "ADVANCED_SYSASSERT", 0 },
5121 };
5122
5123 static const char *
5124 iwm_desc_lookup(uint32_t num)
5125 {
5126         int i;
5127
5128         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5129                 if (advanced_lookup[i].num == num)
5130                         return advanced_lookup[i].name;
5131
5132         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5133         return advanced_lookup[i].name;
5134 }
5135
5136 static void
5137 iwm_nic_umac_error(struct iwm_softc *sc)
5138 {
5139         struct iwm_umac_error_event_table table;
5140         uint32_t base;
5141
5142         base = sc->sc_uc.uc_umac_error_event_table;
5143
5144         if (base < 0x800000) {
5145                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5146                     base);
5147                 return;
5148         }
5149
5150         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5151                 device_printf(sc->sc_dev, "reading errlog failed\n");
5152                 return;
5153         }
5154
5155         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5156                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5157                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5158                     sc->sc_flags, table.valid);
5159         }
5160
5161         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5162                 iwm_desc_lookup(table.error_id));
5163         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5164         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5165         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5166             table.ilink1);
5167         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5168             table.ilink2);
5169         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5170         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5171         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5172         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5173         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5174         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5175             table.frame_pointer);
5176         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5177             table.stack_pointer);
5178         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5179         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5180             table.nic_isr_pref);
5181 }
5182
5183 /*
5184  * Support for dumping the error log seemed like a good idea ...
5185  * but it's mostly hex junk and the only sensible thing is the
5186  * hw/ucode revision (which we know anyway).  Since it's here,
5187  * I'll just leave it in, just in case e.g. the Intel guys want to
5188  * help us decipher some "ADVANCED_SYSASSERT" later.
5189  */
5190 static void
5191 iwm_nic_error(struct iwm_softc *sc)
5192 {
5193         struct iwm_error_event_table table;
5194         uint32_t base;
5195
5196         device_printf(sc->sc_dev, "dumping device error log\n");
5197         base = sc->sc_uc.uc_error_event_table;
5198         if (base < 0x800000) {
5199                 device_printf(sc->sc_dev,
5200                     "Invalid error log pointer 0x%08x\n", base);
5201                 return;
5202         }
5203
5204         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5205                 device_printf(sc->sc_dev, "reading errlog failed\n");
5206                 return;
5207         }
5208
5209         if (!table.valid) {
5210                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5211                 return;
5212         }
5213
5214         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5215                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5216                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5217                     sc->sc_flags, table.valid);
5218         }
5219
5220         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5221             iwm_desc_lookup(table.error_id));
5222         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5223             table.trm_hw_status0);
5224         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5225             table.trm_hw_status1);
5226         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5227         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5228         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5229         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5230         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5231         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5232         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5233         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5234         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5235         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5236         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5237         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5238             table.fw_rev_type);
5239         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5240         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5241         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5242         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5243         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5244         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5245         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5246         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5247         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5248         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5249         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5250         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5251         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5252         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5253         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5254         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5255         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5256         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5257         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5258
5259         if (sc->sc_uc.uc_umac_error_event_table)
5260                 iwm_nic_umac_error(sc);
5261 }
5262 #endif
5263
5264 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5265
5266 /*
5267  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5268  * Basic structure from if_iwn
5269  */
5270 static void
5271 iwm_notif_intr(struct iwm_softc *sc)
5272 {
5273         struct ieee80211com *ic = &sc->sc_ic;
5274         uint16_t hw;
5275
5276         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5277             BUS_DMASYNC_POSTREAD);
5278
5279         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5280
5281         /*
5282          * Process responses
5283          */
5284         while (sc->rxq.cur != hw) {
5285                 struct iwm_rx_ring *ring = &sc->rxq;
5286                 struct iwm_rx_data *data = &ring->data[ring->cur];
5287                 struct iwm_rx_packet *pkt;
5288                 struct iwm_cmd_response *cresp;
5289                 int qid, idx, code;
5290
5291                 bus_dmamap_sync(ring->data_dmat, data->map,
5292                     BUS_DMASYNC_POSTREAD);
5293                 pkt = mtod(data->m, struct iwm_rx_packet *);
5294
5295                 qid = pkt->hdr.qid & ~0x80;
5296                 idx = pkt->hdr.idx;
5297
5298                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5299                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5300                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5301                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5302
5303                 /*
5304                  * randomly get these from the firmware, no idea why.
5305                  * they at least seem harmless, so just ignore them for now
5306                  */
5307                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5308                     || pkt->len_n_flags == htole32(0x55550000))) {
5309                         ADVANCE_RXQ(sc);
5310                         continue;
5311                 }
5312
5313                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5314
5315                 switch (code) {
5316                 case IWM_REPLY_RX_PHY_CMD:
5317                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5318                         break;
5319
5320                 case IWM_REPLY_RX_MPDU_CMD:
5321                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5322                         break;
5323
5324                 case IWM_TX_CMD:
5325                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5326                         break;
5327
5328                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5329                         struct iwm_missed_beacons_notif *resp;
5330                         int missed;
5331
5332                         /* XXX look at mac_id to determine interface ID */
5333                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5334
5335                         resp = (void *)pkt->data;
5336                         missed = le32toh(resp->consec_missed_beacons);
5337
5338                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5339                             "%s: MISSED_BEACON: mac_id=%d, "
5340                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5341                             "num_rx=%d\n",
5342                             __func__,
5343                             le32toh(resp->mac_id),
5344                             le32toh(resp->consec_missed_beacons_since_last_rx),
5345                             le32toh(resp->consec_missed_beacons),
5346                             le32toh(resp->num_expected_beacons),
5347                             le32toh(resp->num_recvd_beacons));
5348
5349                         /* Be paranoid */
5350                         if (vap == NULL)
5351                                 break;
5352
5353                         /* XXX no net80211 locking? */
5354                         if (vap->iv_state == IEEE80211_S_RUN &&
5355                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5356                                 if (missed > vap->iv_bmissthreshold) {
5357                                         /* XXX bad locking; turn into task */
5358                                         IWM_UNLOCK(sc);
5359                                         ieee80211_beacon_miss(ic);
5360                                         IWM_LOCK(sc);
5361                                 }
5362                         }
5363
5364                         break; }
5365
5366                 case IWM_MFUART_LOAD_NOTIFICATION:
5367                         break;
5368
5369                 case IWM_MVM_ALIVE: {
5370                         struct iwm_mvm_alive_resp_v1 *resp1;
5371                         struct iwm_mvm_alive_resp_v2 *resp2;
5372                         struct iwm_mvm_alive_resp_v3 *resp3;
5373
5374                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5375                                 resp1 = (void *)pkt->data;
5376                                 sc->sc_uc.uc_error_event_table
5377                                     = le32toh(resp1->error_event_table_ptr);
5378                                 sc->sc_uc.uc_log_event_table
5379                                     = le32toh(resp1->log_event_table_ptr);
5380                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5381                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5382                                         sc->sc_uc.uc_ok = 1;
5383                                 else
5384                                         sc->sc_uc.uc_ok = 0;
5385                         }
5386
5387                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5388                                 resp2 = (void *)pkt->data;
5389                                 sc->sc_uc.uc_error_event_table
5390                                     = le32toh(resp2->error_event_table_ptr);
5391                                 sc->sc_uc.uc_log_event_table
5392                                     = le32toh(resp2->log_event_table_ptr);
5393                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5394                                 sc->sc_uc.uc_umac_error_event_table
5395                                     = le32toh(resp2->error_info_addr);
5396                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5397                                         sc->sc_uc.uc_ok = 1;
5398                                 else
5399                                         sc->sc_uc.uc_ok = 0;
5400                         }
5401
5402                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5403                                 resp3 = (void *)pkt->data;
5404                                 sc->sc_uc.uc_error_event_table
5405                                     = le32toh(resp3->error_event_table_ptr);
5406                                 sc->sc_uc.uc_log_event_table
5407                                     = le32toh(resp3->log_event_table_ptr);
5408                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5409                                 sc->sc_uc.uc_umac_error_event_table
5410                                     = le32toh(resp3->error_info_addr);
5411                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5412                                         sc->sc_uc.uc_ok = 1;
5413                                 else
5414                                         sc->sc_uc.uc_ok = 0;
5415                         }
5416
5417                         sc->sc_uc.uc_intr = 1;
5418                         wakeup(&sc->sc_uc);
5419                         break; }
5420
5421                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5422                         break;
5423
5424                 case IWM_STATISTICS_NOTIFICATION: {
5425                         struct iwm_notif_statistics *stats;
5426                         stats = (void *)pkt->data;
5427                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5428                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5429                         break; }
5430
5431                 case IWM_NVM_ACCESS_CMD:
5432                 case IWM_MCC_UPDATE_CMD:
5433                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5434                                 memcpy(sc->sc_cmd_resp,
5435                                     pkt, sizeof(sc->sc_cmd_resp));
5436                         }
5437                         break;
5438
5439                 case IWM_MCC_CHUB_UPDATE_CMD: {
5440                         struct iwm_mcc_chub_notif *notif;
5441                         notif = (void *)pkt->data;
5442
5443                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5444                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5445                         sc->sc_fw_mcc[2] = '\0';
5446                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5447                             "fw source %d sent CC '%s'\n",
5448                             notif->source_id, sc->sc_fw_mcc);
5449                         break; }
5450
5451                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5452                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5453                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5454                         struct iwm_dts_measurement_notif_v1 *notif;
5455
5456                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5457                                 device_printf(sc->sc_dev,
5458                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5459                                 break;
5460                         }
5461                         notif = (void *)pkt->data;
5462                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5463                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5464                             notif->temp);
5465                         break;
5466                 }
5467
5468                 case IWM_PHY_CONFIGURATION_CMD:
5469                 case IWM_TX_ANT_CONFIGURATION_CMD:
5470                 case IWM_ADD_STA:
5471                 case IWM_MAC_CONTEXT_CMD:
5472                 case IWM_REPLY_SF_CFG_CMD:
5473                 case IWM_POWER_TABLE_CMD:
5474                 case IWM_PHY_CONTEXT_CMD:
5475                 case IWM_BINDING_CONTEXT_CMD:
5476                 case IWM_TIME_EVENT_CMD:
5477                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5478                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5479                 case IWM_SCAN_ABORT_UMAC:
5480                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5481                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5482                 case IWM_REPLY_BEACON_FILTERING_CMD:
5483                 case IWM_MAC_PM_POWER_TABLE:
5484                 case IWM_TIME_QUOTA_CMD:
5485                 case IWM_REMOVE_STA:
5486                 case IWM_TXPATH_FLUSH:
5487                 case IWM_LQ_CMD:
5488                 case IWM_BT_CONFIG:
5489                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5490                         cresp = (void *)pkt->data;
5491                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5492                                 memcpy(sc->sc_cmd_resp,
5493                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5494                         }
5495                         break;
5496
5497                 /* ignore */
5498                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5499                         break;
5500
5501                 case IWM_INIT_COMPLETE_NOTIF:
5502                         break;
5503
5504                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5505                         struct iwm_periodic_scan_complete *notif;
5506                         notif = (void *)pkt->data;
5507                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5508                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5509                                 ieee80211_runtask(ic, &sc->sc_es_task);
5510                         }
5511                         break;
5512                 }
5513
5514                 case IWM_SCAN_ITERATION_COMPLETE: {
5515                         struct iwm_lmac_scan_complete_notif *notif;
5516                         notif = (void *)pkt->data;
5517                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5518                         break;
5519                 }
5520  
5521                 case IWM_SCAN_COMPLETE_UMAC: {
5522                         struct iwm_umac_scan_complete *notif;
5523                         notif = (void *)pkt->data;
5524
5525                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5526                             "UMAC scan complete, status=0x%x\n",
5527                             notif->status);
5528                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5529                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5530                                 ieee80211_runtask(ic, &sc->sc_es_task);
5531                         }
5532                         break;
5533                 }
5534
5535                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5536                         struct iwm_umac_scan_iter_complete_notif *notif;
5537                         notif = (void *)pkt->data;
5538
5539                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5540                             "complete, status=0x%x, %d channels scanned\n",
5541                             notif->status, notif->scanned_channels);
5542                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5543                         break;
5544                 }
5545
5546                 case IWM_REPLY_ERROR: {
5547                         struct iwm_error_resp *resp;
5548                         resp = (void *)pkt->data;
5549
5550                         device_printf(sc->sc_dev,
5551                             "firmware error 0x%x, cmd 0x%x\n",
5552                             le32toh(resp->error_type),
5553                             resp->cmd_id);
5554                         break;
5555                 }
5556
5557                 case IWM_TIME_EVENT_NOTIFICATION: {
5558                         struct iwm_time_event_notif *notif;
5559                         notif = (void *)pkt->data;
5560
5561                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5562                             "TE notif status = 0x%x action = 0x%x\n",
5563                             notif->status, notif->action);
5564                         break;
5565                 }
5566
5567                 case IWM_MCAST_FILTER_CMD:
5568                         break;
5569
5570                 case IWM_SCD_QUEUE_CFG: {
5571                         struct iwm_scd_txq_cfg_rsp *rsp;
5572                         rsp = (void *)pkt->data;
5573
5574                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5575                             "queue cfg token=0x%x sta_id=%d "
5576                             "tid=%d scd_queue=%d\n",
5577                             rsp->token, rsp->sta_id, rsp->tid,
5578                             rsp->scd_queue);
5579                         break;
5580                 }
5581
5582                 default:
5583                         device_printf(sc->sc_dev,
5584                             "frame %d/%d %x UNHANDLED (this should "
5585                             "not happen)\n", qid, idx,
5586                             pkt->len_n_flags);
5587                         break;
5588                 }
5589
5590                 /*
5591                  * Why test bit 0x80?  The Linux driver:
5592                  *
5593                  * There is one exception:  uCode sets bit 15 when it
5594                  * originates the response/notification, i.e. when the
5595                  * response/notification is not a direct response to a
5596                  * command sent by the driver.  For example, uCode issues
5597                  * IWM_REPLY_RX when it sends a received frame to the driver;
5598                  * it is not a direct response to any driver command.
5599                  *
5600                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5601                  * uses a slightly different format for pkt->hdr, and "qid"
5602                  * is actually the upper byte of a two-byte field.
5603                  */
5604                 if (!(pkt->hdr.qid & (1 << 7))) {
5605                         iwm_cmd_done(sc, pkt);
5606                 }
5607
5608                 ADVANCE_RXQ(sc);
5609         }
5610
5611         /*
5612          * Tell the firmware what we have processed.
5613          * Seems like the hardware gets upset unless we align
5614          * the write by 8??
5615          */
5616         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5617         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5618 }
5619
5620 static void
5621 iwm_intr(void *arg)
5622 {
5623         struct iwm_softc *sc = arg;
5624         int handled = 0;
5625         int r1, r2, rv = 0;
5626         int isperiodic = 0;
5627
5628         IWM_LOCK(sc);
5629         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5630
5631         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5632                 uint32_t *ict = sc->ict_dma.vaddr;
5633                 int tmp;
5634
5635                 tmp = htole32(ict[sc->ict_cur]);
5636                 if (!tmp)
5637                         goto out_ena;
5638
5639                 /*
5640                  * ok, there was something.  keep plowing until we have all.
5641                  */
5642                 r1 = r2 = 0;
5643                 while (tmp) {
5644                         r1 |= tmp;
5645                         ict[sc->ict_cur] = 0;
5646                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5647                         tmp = htole32(ict[sc->ict_cur]);
5648                 }
5649
5650                 /* this is where the fun begins.  don't ask */
5651                 if (r1 == 0xffffffff)
5652                         r1 = 0;
5653
5654                 /* i am not expected to understand this */
5655                 if (r1 & 0xc0000)
5656                         r1 |= 0x8000;
5657                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5658         } else {
5659                 r1 = IWM_READ(sc, IWM_CSR_INT);
5660                 /* "hardware gone" (where, fishing?) */
5661                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5662                         goto out;
5663                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5664         }
5665         if (r1 == 0 && r2 == 0) {
5666                 goto out_ena;
5667         }
5668
5669         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5670
5671         /* ignored */
5672         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5673
5674         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5675                 int i;
5676                 struct ieee80211com *ic = &sc->sc_ic;
5677                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5678
5679 #ifdef IWM_DEBUG
5680                 iwm_nic_error(sc);
5681 #endif
5682                 /* Dump driver status (TX and RX rings) while we're here. */
5683                 device_printf(sc->sc_dev, "driver status:\n");
5684                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5685                         struct iwm_tx_ring *ring = &sc->txq[i];
5686                         device_printf(sc->sc_dev,
5687                             "  tx ring %2d: qid=%-2d cur=%-3d "
5688                             "queued=%-3d\n",
5689                             i, ring->qid, ring->cur, ring->queued);
5690                 }
5691                 device_printf(sc->sc_dev,
5692                     "  rx ring: cur=%d\n", sc->rxq.cur);
5693                 device_printf(sc->sc_dev,
5694                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5695
5696                 /* Don't stop the device; just do a VAP restart */
5697                 IWM_UNLOCK(sc);
5698
5699                 if (vap == NULL) {
5700                         printf("%s: null vap\n", __func__);
5701                         return;
5702                 }
5703
5704                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5705                     "restarting\n", __func__, vap->iv_state);
5706
5707                 /* XXX TODO: turn this into a callout/taskqueue */
5708                 ieee80211_restart_all(ic);
5709                 return;
5710         }
5711
5712         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5713                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5714                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5715                 iwm_stop(sc);
5716                 rv = 1;
5717                 goto out;
5718         }
5719
5720         /* firmware chunk loaded */
5721         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5722                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5723                 handled |= IWM_CSR_INT_BIT_FH_TX;
5724                 sc->sc_fw_chunk_done = 1;
5725                 wakeup(&sc->sc_fw);
5726         }
5727
5728         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5729                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5730                 if (iwm_check_rfkill(sc)) {
5731                         device_printf(sc->sc_dev,
5732                             "%s: rfkill switch, disabling interface\n",
5733                             __func__);
5734                         iwm_stop(sc);
5735                 }
5736         }
5737
5738         /*
5739          * The Linux driver uses periodic interrupts to avoid races.
5740          * We cargo-cult like it's going out of fashion.
5741          */
5742         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5743                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5744                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5745                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5746                         IWM_WRITE_1(sc,
5747                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5748                 isperiodic = 1;
5749         }
5750
5751         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5752                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5753                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5754
5755                 iwm_notif_intr(sc);
5756
5757                 /* enable periodic interrupt, see above */
5758                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5759                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5760                             IWM_CSR_INT_PERIODIC_ENA);
5761         }
5762
5763         if (__predict_false(r1 & ~handled))
5764                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5765                     "%s: unhandled interrupts: %x\n", __func__, r1);
5766         rv = 1;
5767
5768  out_ena:
5769         iwm_restore_interrupts(sc);
5770  out:
5771         IWM_UNLOCK(sc);
5772         return;
5773 }
5774
5775 /*
5776  * Autoconf glue-sniffing
5777  */
5778 #define PCI_VENDOR_INTEL                0x8086
5779 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5780 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5781 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5782 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5783 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5784 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5785 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5786 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5787 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5788 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5789
5790 static const struct iwm_devices {
5791         uint16_t        device;
5792         const char      *name;
5793 } iwm_devices[] = {
5794         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5795         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5796         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5797         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5798         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5799         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5800         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5801         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5802         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5803         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5804 };
5805
5806 static int
5807 iwm_probe(device_t dev)
5808 {
5809         int i;
5810
5811         for (i = 0; i < nitems(iwm_devices); i++) {
5812                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5813                     pci_get_device(dev) == iwm_devices[i].device) {
5814                         device_set_desc(dev, iwm_devices[i].name);
5815                         return (BUS_PROBE_DEFAULT);
5816                 }
5817         }
5818
5819         return (ENXIO);
5820 }
5821
5822 static int
5823 iwm_dev_check(device_t dev)
5824 {
5825         struct iwm_softc *sc;
5826
5827         sc = device_get_softc(dev);
5828
5829         switch (pci_get_device(dev)) {
5830         case PCI_PRODUCT_INTEL_WL_3160_1:
5831         case PCI_PRODUCT_INTEL_WL_3160_2:
5832                 sc->cfg = &iwm3160_cfg;
5833                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5834                 return (0);
5835         case PCI_PRODUCT_INTEL_WL_3165_1:
5836         case PCI_PRODUCT_INTEL_WL_3165_2:
5837                 sc->cfg = &iwm3165_cfg;
5838                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5839                 return (0);
5840         case PCI_PRODUCT_INTEL_WL_7260_1:
5841         case PCI_PRODUCT_INTEL_WL_7260_2:
5842                 sc->cfg = &iwm7260_cfg;
5843                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5844                 return (0);
5845         case PCI_PRODUCT_INTEL_WL_7265_1:
5846         case PCI_PRODUCT_INTEL_WL_7265_2:
5847                 sc->cfg = &iwm7265_cfg;
5848                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5849                 return (0);
5850         case PCI_PRODUCT_INTEL_WL_8260_1:
5851         case PCI_PRODUCT_INTEL_WL_8260_2:
5852                 sc->cfg = &iwm8260_cfg;
5853                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5854                 return (0);
5855         default:
5856                 device_printf(dev, "unknown adapter type\n");
5857                 return ENXIO;
5858         }
5859 }
5860
5861 static int
5862 iwm_pci_attach(device_t dev)
5863 {
5864         struct iwm_softc *sc;
5865         int count, error, rid;
5866         uint16_t reg;
5867
5868         sc = device_get_softc(dev);
5869
5870         /* Clear device-specific "PCI retry timeout" register (41h). */
5871         reg = pci_read_config(dev, 0x40, sizeof(reg));
5872         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5873
5874         /* Enable bus-mastering and hardware bug workaround. */
5875         pci_enable_busmaster(dev);
5876         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5877         /* if !MSI */
5878         if (reg & PCIM_STATUS_INTxSTATE) {
5879                 reg &= ~PCIM_STATUS_INTxSTATE;
5880         }
5881         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5882
5883         rid = PCIR_BAR(0);
5884         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5885             RF_ACTIVE);
5886         if (sc->sc_mem == NULL) {
5887                 device_printf(sc->sc_dev, "can't map mem space\n");
5888                 return (ENXIO);
5889         }
5890         sc->sc_st = rman_get_bustag(sc->sc_mem);
5891         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5892
5893         /* Install interrupt handler. */
5894         count = 1;
5895         rid = 0;
5896         if (pci_alloc_msi(dev, &count) == 0)
5897                 rid = 1;
5898         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5899             (rid != 0 ? 0 : RF_SHAREABLE));
5900         if (sc->sc_irq == NULL) {
5901                 device_printf(dev, "can't map interrupt\n");
5902                         return (ENXIO);
5903         }
5904         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5905             NULL, iwm_intr, sc, &sc->sc_ih);
5906         if (sc->sc_ih == NULL) {
5907                 device_printf(dev, "can't establish interrupt");
5908                         return (ENXIO);
5909         }
5910         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5911
5912         return (0);
5913 }
5914
5915 static void
5916 iwm_pci_detach(device_t dev)
5917 {
5918         struct iwm_softc *sc = device_get_softc(dev);
5919
5920         if (sc->sc_irq != NULL) {
5921                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5922                 bus_release_resource(dev, SYS_RES_IRQ,
5923                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5924                 pci_release_msi(dev);
5925         }
5926         if (sc->sc_mem != NULL)
5927                 bus_release_resource(dev, SYS_RES_MEMORY,
5928                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5929 }
5930
5931
5932
5933 static int
5934 iwm_attach(device_t dev)
5935 {
5936         struct iwm_softc *sc = device_get_softc(dev);
5937         struct ieee80211com *ic = &sc->sc_ic;
5938         int error;
5939         int txq_i, i;
5940
5941         sc->sc_dev = dev;
5942         sc->sc_attached = 1;
5943         IWM_LOCK_INIT(sc);
5944         mbufq_init(&sc->sc_snd, ifqmaxlen);
5945         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5946         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5947         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5948
5949         sc->sc_notif_wait = iwm_notification_wait_init(sc);
5950         if (sc->sc_notif_wait == NULL) {
5951                 device_printf(dev, "failed to init notification wait struct\n");
5952                 goto fail;
5953         }
5954
5955         /* Init phy db */
5956         sc->sc_phy_db = iwm_phy_db_init(sc);
5957         if (!sc->sc_phy_db) {
5958                 device_printf(dev, "Cannot init phy_db\n");
5959                 goto fail;
5960         }
5961
5962         /* PCI attach */
5963         error = iwm_pci_attach(dev);
5964         if (error != 0)
5965                 goto fail;
5966
5967         sc->sc_wantresp = -1;
5968
5969         /* Check device type */
5970         error = iwm_dev_check(dev);
5971         if (error != 0)
5972                 goto fail;
5973
5974         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5975         /*
5976          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5977          * changed, and now the revision step also includes bit 0-1 (no more
5978          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5979          * in the old format.
5980          */
5981         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5982                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5983                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5984
5985         if (iwm_prepare_card_hw(sc) != 0) {
5986                 device_printf(dev, "could not initialize hardware\n");
5987                 goto fail;
5988         }
5989
5990         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5991                 int ret;
5992                 uint32_t hw_step;
5993
5994                 /*
5995                  * In order to recognize C step the driver should read the
5996                  * chip version id located at the AUX bus MISC address.
5997                  */
5998                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5999                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6000                 DELAY(2);
6001
6002                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6003                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6004                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6005                                    25000);
6006                 if (!ret) {
6007                         device_printf(sc->sc_dev,
6008                             "Failed to wake up the nic\n");
6009                         goto fail;
6010                 }
6011
6012                 if (iwm_nic_lock(sc)) {
6013                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6014                         hw_step |= IWM_ENABLE_WFPM;
6015                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6016                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6017                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6018                         if (hw_step == 0x3)
6019                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6020                                                 (IWM_SILICON_C_STEP << 2);
6021                         iwm_nic_unlock(sc);
6022                 } else {
6023                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
6024                         goto fail;
6025                 }
6026         }
6027
6028         /* special-case 7265D, it has the same PCI IDs. */
6029         if (sc->cfg == &iwm7265_cfg &&
6030             (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6031                 sc->cfg = &iwm7265d_cfg;
6032         }
6033
6034         /* Allocate DMA memory for firmware transfers. */
6035         if ((error = iwm_alloc_fwmem(sc)) != 0) {
6036                 device_printf(dev, "could not allocate memory for firmware\n");
6037                 goto fail;
6038         }
6039
6040         /* Allocate "Keep Warm" page. */
6041         if ((error = iwm_alloc_kw(sc)) != 0) {
6042                 device_printf(dev, "could not allocate keep warm page\n");
6043                 goto fail;
6044         }
6045
6046         /* We use ICT interrupts */
6047         if ((error = iwm_alloc_ict(sc)) != 0) {
6048                 device_printf(dev, "could not allocate ICT table\n");
6049                 goto fail;
6050         }
6051
6052         /* Allocate TX scheduler "rings". */
6053         if ((error = iwm_alloc_sched(sc)) != 0) {
6054                 device_printf(dev, "could not allocate TX scheduler rings\n");
6055                 goto fail;
6056         }
6057
6058         /* Allocate TX rings */
6059         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6060                 if ((error = iwm_alloc_tx_ring(sc,
6061                     &sc->txq[txq_i], txq_i)) != 0) {
6062                         device_printf(dev,
6063                             "could not allocate TX ring %d\n",
6064                             txq_i);
6065                         goto fail;
6066                 }
6067         }
6068
6069         /* Allocate RX ring. */
6070         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6071                 device_printf(dev, "could not allocate RX ring\n");
6072                 goto fail;
6073         }
6074
6075         /* Clear pending interrupts. */
6076         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6077
6078         ic->ic_softc = sc;
6079         ic->ic_name = device_get_nameunit(sc->sc_dev);
6080         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
6081         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
6082
6083         /* Set device capabilities. */
6084         ic->ic_caps =
6085             IEEE80211_C_STA |
6086             IEEE80211_C_WPA |           /* WPA/RSN */
6087             IEEE80211_C_WME |
6088             IEEE80211_C_SHSLOT |        /* short slot time supported */
6089             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
6090 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
6091             ;
6092         /* Advertise full-offload scanning */
6093         ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6094         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6095                 sc->sc_phyctxt[i].id = i;
6096                 sc->sc_phyctxt[i].color = 0;
6097                 sc->sc_phyctxt[i].ref = 0;
6098                 sc->sc_phyctxt[i].channel = NULL;
6099         }
6100
6101         /* Default noise floor */
6102         sc->sc_noise = -96;
6103
6104         /* Max RSSI */
6105         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6106
6107         sc->sc_preinit_hook.ich_func = iwm_preinit;
6108         sc->sc_preinit_hook.ich_arg = sc;
6109         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6110                 device_printf(dev, "config_intrhook_establish failed\n");
6111                 goto fail;
6112         }
6113
6114 #ifdef IWM_DEBUG
6115         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6116             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6117             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6118 #endif
6119
6120         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6121             "<-%s\n", __func__);
6122
6123         return 0;
6124
6125         /* Free allocated memory if something failed during attachment. */
6126 fail:
6127         iwm_detach_local(sc, 0);
6128
6129         return ENXIO;
6130 }
6131
6132 static int
6133 iwm_is_valid_ether_addr(uint8_t *addr)
6134 {
6135         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6136
6137         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6138                 return (FALSE);
6139
6140         return (TRUE);
6141 }
6142
6143 static int
6144 iwm_update_edca(struct ieee80211com *ic)
6145 {
6146         struct iwm_softc *sc = ic->ic_softc;
6147
6148         device_printf(sc->sc_dev, "%s: called\n", __func__);
6149         return (0);
6150 }
6151
6152 static void
6153 iwm_preinit(void *arg)
6154 {
6155         struct iwm_softc *sc = arg;
6156         device_t dev = sc->sc_dev;
6157         struct ieee80211com *ic = &sc->sc_ic;
6158         int error;
6159
6160         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6161             "->%s\n", __func__);
6162
6163         IWM_LOCK(sc);
6164         if ((error = iwm_start_hw(sc)) != 0) {
6165                 device_printf(dev, "could not initialize hardware\n");
6166                 IWM_UNLOCK(sc);
6167                 goto fail;
6168         }
6169
6170         error = iwm_run_init_mvm_ucode(sc, 1);
6171         iwm_stop_device(sc);
6172         if (error) {
6173                 IWM_UNLOCK(sc);
6174                 goto fail;
6175         }
6176         device_printf(dev,
6177             "hw rev 0x%x, fw ver %s, address %s\n",
6178             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6179             sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6180
6181         /* not all hardware can do 5GHz band */
6182         if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6183                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6184                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6185         IWM_UNLOCK(sc);
6186
6187         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6188             ic->ic_channels);
6189
6190         /*
6191          * At this point we've committed - if we fail to do setup,
6192          * we now also have to tear down the net80211 state.
6193          */
6194         ieee80211_ifattach(ic);
6195         ic->ic_vap_create = iwm_vap_create;
6196         ic->ic_vap_delete = iwm_vap_delete;
6197         ic->ic_raw_xmit = iwm_raw_xmit;
6198         ic->ic_node_alloc = iwm_node_alloc;
6199         ic->ic_scan_start = iwm_scan_start;
6200         ic->ic_scan_end = iwm_scan_end;
6201         ic->ic_update_mcast = iwm_update_mcast;
6202         ic->ic_getradiocaps = iwm_init_channel_map;
6203         ic->ic_set_channel = iwm_set_channel;
6204         ic->ic_scan_curchan = iwm_scan_curchan;
6205         ic->ic_scan_mindwell = iwm_scan_mindwell;
6206         ic->ic_wme.wme_update = iwm_update_edca;
6207         ic->ic_parent = iwm_parent;
6208         ic->ic_transmit = iwm_transmit;
6209         iwm_radiotap_attach(sc);
6210         if (bootverbose)
6211                 ieee80211_announce(ic);
6212
6213         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6214             "<-%s\n", __func__);
6215         config_intrhook_disestablish(&sc->sc_preinit_hook);
6216
6217         return;
6218 fail:
6219         config_intrhook_disestablish(&sc->sc_preinit_hook);
6220         iwm_detach_local(sc, 0);
6221 }
6222
6223 /*
6224  * Attach the interface to 802.11 radiotap.
6225  */
6226 static void
6227 iwm_radiotap_attach(struct iwm_softc *sc)
6228 {
6229         struct ieee80211com *ic = &sc->sc_ic;
6230
6231         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6232             "->%s begin\n", __func__);
6233         ieee80211_radiotap_attach(ic,
6234             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6235                 IWM_TX_RADIOTAP_PRESENT,
6236             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6237                 IWM_RX_RADIOTAP_PRESENT);
6238         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6239             "->%s end\n", __func__);
6240 }
6241
6242 static struct ieee80211vap *
6243 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6244     enum ieee80211_opmode opmode, int flags,
6245     const uint8_t bssid[IEEE80211_ADDR_LEN],
6246     const uint8_t mac[IEEE80211_ADDR_LEN])
6247 {
6248         struct iwm_vap *ivp;
6249         struct ieee80211vap *vap;
6250
6251         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6252                 return NULL;
6253         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6254         vap = &ivp->iv_vap;
6255         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6256         vap->iv_bmissthreshold = 10;            /* override default */
6257         /* Override with driver methods. */
6258         ivp->iv_newstate = vap->iv_newstate;
6259         vap->iv_newstate = iwm_newstate;
6260
6261         ieee80211_ratectl_init(vap);
6262         /* Complete setup. */
6263         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6264             mac);
6265         ic->ic_opmode = opmode;
6266
6267         return vap;
6268 }
6269
6270 static void
6271 iwm_vap_delete(struct ieee80211vap *vap)
6272 {
6273         struct iwm_vap *ivp = IWM_VAP(vap);
6274
6275         ieee80211_ratectl_deinit(vap);
6276         ieee80211_vap_detach(vap);
6277         free(ivp, M_80211_VAP);
6278 }
6279
6280 static void
6281 iwm_scan_start(struct ieee80211com *ic)
6282 {
6283         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6284         struct iwm_softc *sc = ic->ic_softc;
6285         int error;
6286
6287         IWM_LOCK(sc);
6288         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6289                 /* This should not be possible */
6290                 device_printf(sc->sc_dev,
6291                     "%s: Previous scan not completed yet\n", __func__);
6292         }
6293         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6294                 error = iwm_mvm_umac_scan(sc);
6295         else
6296                 error = iwm_mvm_lmac_scan(sc);
6297         if (error != 0) {
6298                 device_printf(sc->sc_dev, "could not initiate scan\n");
6299                 IWM_UNLOCK(sc);
6300                 ieee80211_cancel_scan(vap);
6301         } else {
6302                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6303                 iwm_led_blink_start(sc);
6304                 IWM_UNLOCK(sc);
6305         }
6306 }
6307
6308 static void
6309 iwm_scan_end(struct ieee80211com *ic)
6310 {
6311         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6312         struct iwm_softc *sc = ic->ic_softc;
6313
6314         IWM_LOCK(sc);
6315         iwm_led_blink_stop(sc);
6316         if (vap->iv_state == IEEE80211_S_RUN)
6317                 iwm_mvm_led_enable(sc);
6318         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6319                 /*
6320                  * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6321                  * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6322                  * taskqueue.
6323                  */
6324                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6325                 iwm_mvm_scan_stop_wait(sc);
6326         }
6327         IWM_UNLOCK(sc);
6328
6329         /*
6330          * Make sure we don't race, if sc_es_task is still enqueued here.
6331          * This is to make sure that it won't call ieee80211_scan_done
6332          * when we have already started the next scan.
6333          */
6334         taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6335 }
6336
6337 static void
6338 iwm_update_mcast(struct ieee80211com *ic)
6339 {
6340 }
6341
6342 static void
6343 iwm_set_channel(struct ieee80211com *ic)
6344 {
6345 }
6346
6347 static void
6348 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6349 {
6350 }
6351
6352 static void
6353 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6354 {
6355         return;
6356 }
6357
6358 void
6359 iwm_init_task(void *arg1)
6360 {
6361         struct iwm_softc *sc = arg1;
6362
6363         IWM_LOCK(sc);
6364         while (sc->sc_flags & IWM_FLAG_BUSY)
6365                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6366         sc->sc_flags |= IWM_FLAG_BUSY;
6367         iwm_stop(sc);
6368         if (sc->sc_ic.ic_nrunning > 0)
6369                 iwm_init(sc);
6370         sc->sc_flags &= ~IWM_FLAG_BUSY;
6371         wakeup(&sc->sc_flags);
6372         IWM_UNLOCK(sc);
6373 }
6374
6375 static int
6376 iwm_resume(device_t dev)
6377 {
6378         struct iwm_softc *sc = device_get_softc(dev);
6379         int do_reinit = 0;
6380         uint16_t reg;
6381
6382         /* Clear device-specific "PCI retry timeout" register (41h). */
6383         reg = pci_read_config(dev, 0x40, sizeof(reg));
6384         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6385         iwm_init_task(device_get_softc(dev));
6386
6387         IWM_LOCK(sc);
6388         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6389                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6390                 do_reinit = 1;
6391         }
6392         IWM_UNLOCK(sc);
6393
6394         if (do_reinit)
6395                 ieee80211_resume_all(&sc->sc_ic);
6396
6397         return 0;
6398 }
6399
6400 static int
6401 iwm_suspend(device_t dev)
6402 {
6403         int do_stop = 0;
6404         struct iwm_softc *sc = device_get_softc(dev);
6405
6406         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6407
6408         ieee80211_suspend_all(&sc->sc_ic);
6409
6410         if (do_stop) {
6411                 IWM_LOCK(sc);
6412                 iwm_stop(sc);
6413                 sc->sc_flags |= IWM_FLAG_SCANNING;
6414                 IWM_UNLOCK(sc);
6415         }
6416
6417         return (0);
6418 }
6419
6420 static int
6421 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6422 {
6423         struct iwm_fw_info *fw = &sc->sc_fw;
6424         device_t dev = sc->sc_dev;
6425         int i;
6426
6427         if (!sc->sc_attached)
6428                 return 0;
6429         sc->sc_attached = 0;
6430
6431         if (do_net80211)
6432                 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6433
6434         callout_drain(&sc->sc_led_blink_to);
6435         callout_drain(&sc->sc_watchdog_to);
6436         iwm_stop_device(sc);
6437         if (do_net80211) {
6438                 ieee80211_ifdetach(&sc->sc_ic);
6439         }
6440
6441         iwm_phy_db_free(sc->sc_phy_db);
6442         sc->sc_phy_db = NULL;
6443
6444         iwm_free_nvm_data(sc->nvm_data);
6445
6446         /* Free descriptor rings */
6447         iwm_free_rx_ring(sc, &sc->rxq);
6448         for (i = 0; i < nitems(sc->txq); i++)
6449                 iwm_free_tx_ring(sc, &sc->txq[i]);
6450
6451         /* Free firmware */
6452         if (fw->fw_fp != NULL)
6453                 iwm_fw_info_free(fw);
6454
6455         /* Free scheduler */
6456         iwm_dma_contig_free(&sc->sched_dma);
6457         iwm_dma_contig_free(&sc->ict_dma);
6458         iwm_dma_contig_free(&sc->kw_dma);
6459         iwm_dma_contig_free(&sc->fw_dma);
6460
6461         /* Finished with the hardware - detach things */
6462         iwm_pci_detach(dev);
6463
6464         if (sc->sc_notif_wait != NULL) {
6465                 iwm_notification_wait_free(sc->sc_notif_wait);
6466                 sc->sc_notif_wait = NULL;
6467         }
6468
6469         mbufq_drain(&sc->sc_snd);
6470         IWM_LOCK_DESTROY(sc);
6471
6472         return (0);
6473 }
6474
6475 static int
6476 iwm_detach(device_t dev)
6477 {
6478         struct iwm_softc *sc = device_get_softc(dev);
6479
6480         return (iwm_detach_local(sc, 1));
6481 }
6482
6483 static device_method_t iwm_pci_methods[] = {
6484         /* Device interface */
6485         DEVMETHOD(device_probe,         iwm_probe),
6486         DEVMETHOD(device_attach,        iwm_attach),
6487         DEVMETHOD(device_detach,        iwm_detach),
6488         DEVMETHOD(device_suspend,       iwm_suspend),
6489         DEVMETHOD(device_resume,        iwm_resume),
6490
6491         DEVMETHOD_END
6492 };
6493
6494 static driver_t iwm_pci_driver = {
6495         "iwm",
6496         iwm_pci_methods,
6497         sizeof (struct iwm_softc)
6498 };
6499
6500 static devclass_t iwm_devclass;
6501
6502 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6503 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6504 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6505 MODULE_DEPEND(iwm, wlan, 1, 1, 1);