1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * The driver version we are currently based on is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
168
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000      0
170 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000      10
171
172 /* lower blocks contain EEPROM image and calibration data */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000      (16 * 512 * sizeof(uint16_t)) /* 16 KB */
174 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000      (32 * 512 * sizeof(uint16_t)) /* 32 KB */
175
176 #define IWM7260_FW      "iwm7260fw"
177 #define IWM3160_FW      "iwm3160fw"
178 #define IWM7265_FW      "iwm7265fw"
179 #define IWM7265D_FW     "iwm7265Dfw"
180 #define IWM8000_FW      "iwm8000Cfw"
181
182 #define IWM_DEVICE_7000_COMMON                                          \
183         .device_family = IWM_DEVICE_FAMILY_7000,                        \
184         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,              \
185         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000,       \
186         .apmg_wake_up_wa = 1
187
188 const struct iwm_cfg iwm7260_cfg = {
189         .fw_name = IWM7260_FW,
190         IWM_DEVICE_7000_COMMON,
191         .host_interrupt_operation_mode = 1,
192 };
193
194 const struct iwm_cfg iwm3160_cfg = {
195         .fw_name = IWM3160_FW,
196         IWM_DEVICE_7000_COMMON,
197         .host_interrupt_operation_mode = 1,
198 };
199
200 const struct iwm_cfg iwm3165_cfg = {
201         /* XXX IWM7265D_FW doesn't seem to work properly yet */
202         .fw_name = IWM7265_FW,
203         IWM_DEVICE_7000_COMMON,
204         .host_interrupt_operation_mode = 0,
205 };
206
207 const struct iwm_cfg iwm7265_cfg = {
208         .fw_name = IWM7265_FW,
209         IWM_DEVICE_7000_COMMON,
210         .host_interrupt_operation_mode = 0,
211 };
212
213 const struct iwm_cfg iwm7265d_cfg = {
214         /* XXX IWM7265D_FW doesn't seem to work properly yet */
215         .fw_name = IWM7265_FW,
216         IWM_DEVICE_7000_COMMON,
217         .host_interrupt_operation_mode = 0,
218 };
219
220 #define IWM_DEVICE_8000_COMMON                                          \
221         .device_family = IWM_DEVICE_FAMILY_8000,                        \
222         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000,              \
223         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
224
225 const struct iwm_cfg iwm8260_cfg = {
226         .fw_name = IWM8000_FW,
227         IWM_DEVICE_8000_COMMON,
228         .host_interrupt_operation_mode = 0,
229 };
230
231 const uint8_t iwm_nvm_channels[] = {
232         /* 2.4 GHz */
233         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
234         /* 5 GHz */
235         36, 40, 44, 48, 52, 56, 60, 64,
236         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
237         149, 153, 157, 161, 165
238 };
239 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
240     "IWM_NUM_CHANNELS is too small");
241
242 const uint8_t iwm_nvm_channels_8000[] = {
243         /* 2.4 GHz */
244         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
245         /* 5 GHz */
246         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
247         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
248         149, 153, 157, 161, 165, 169, 173, 177, 181
249 };
250 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
251     "IWM_NUM_CHANNELS_8000 is too small");
252
253 #define IWM_NUM_2GHZ_CHANNELS   14
254 #define IWM_N_HW_ADDR_MASK      0xF
255
256 /*
257  * XXX For now, there's simply a fixed set of rate table entries
258  * that are populated.
259  */
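/*
 * Note: "rate" is in units of 500 kb/s (2 == 1 Mb/s, ..., 108 == 54 Mb/s),
 * matching the net80211 rate encoding; "plcp" is the PLCP rate code the
 * firmware expects for that rate.
 */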
260 const struct iwm_rate {
261         uint8_t rate;
262         uint8_t plcp;
263 } iwm_rates[] = {
264         {   2,  IWM_RATE_1M_PLCP  },
265         {   4,  IWM_RATE_2M_PLCP  },
266         {  11,  IWM_RATE_5M_PLCP  },
267         {  22,  IWM_RATE_11M_PLCP },
268         {  12,  IWM_RATE_6M_PLCP  },
269         {  18,  IWM_RATE_9M_PLCP  },
270         {  24,  IWM_RATE_12M_PLCP },
271         {  36,  IWM_RATE_18M_PLCP },
272         {  48,  IWM_RATE_24M_PLCP },
273         {  72,  IWM_RATE_36M_PLCP },
274         {  96,  IWM_RATE_48M_PLCP },
275         { 108,  IWM_RATE_54M_PLCP },
276 };
277 #define IWM_RIDX_CCK    0
278 #define IWM_RIDX_OFDM   4
279 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
280 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
281 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
282
283 struct iwm_nvm_section {
284         uint16_t length;
285         uint8_t *data;
286 };
287
288 #define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)
289
290 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
291 static int      iwm_firmware_store_section(struct iwm_softc *,
292                                            enum iwm_ucode_type,
293                                            const uint8_t *, size_t);
294 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
295 static void     iwm_fw_info_free(struct iwm_fw_info *);
296 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
297 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
298 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
299                                      bus_size_t, bus_size_t);
300 static void     iwm_dma_contig_free(struct iwm_dma_info *);
301 static int      iwm_alloc_fwmem(struct iwm_softc *);
302 static int      iwm_alloc_sched(struct iwm_softc *);
303 static int      iwm_alloc_kw(struct iwm_softc *);
304 static int      iwm_alloc_ict(struct iwm_softc *);
305 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
306 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
307 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
308 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
309                                   int);
310 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
311 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
312 static void     iwm_enable_interrupts(struct iwm_softc *);
313 static void     iwm_restore_interrupts(struct iwm_softc *);
314 static void     iwm_disable_interrupts(struct iwm_softc *);
315 static void     iwm_ict_reset(struct iwm_softc *);
316 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
317 static void     iwm_stop_device(struct iwm_softc *);
318 static void     iwm_mvm_nic_config(struct iwm_softc *);
319 static int      iwm_nic_rx_init(struct iwm_softc *);
320 static int      iwm_nic_tx_init(struct iwm_softc *);
321 static int      iwm_nic_init(struct iwm_softc *);
322 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
323 static int      iwm_post_alive(struct iwm_softc *);
324 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
325                                    uint16_t, uint8_t *, uint16_t *);
326 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
327                                      uint16_t *, uint32_t);
328 static uint32_t iwm_eeprom_channel_flags(uint16_t);
329 static void     iwm_add_channel_band(struct iwm_softc *,
330                     struct ieee80211_channel[], int, int *, int, size_t,
331                     const uint8_t[]);
332 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
333                     struct ieee80211_channel[]);
334 static struct iwm_nvm_data *
335         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
336                            const uint16_t *, const uint16_t *,
337                            const uint16_t *, const uint16_t *,
338                            const uint16_t *);
339 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
340 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
341                                                struct iwm_nvm_data *,
342                                                const uint16_t *,
343                                                const uint16_t *);
344 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
345                             const uint16_t *);
346 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
347 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
348                                   const uint16_t *);
349 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
350                                    const uint16_t *);
351 static void     iwm_set_radio_cfg(const struct iwm_softc *,
352                                   struct iwm_nvm_data *, uint32_t);
353 static struct iwm_nvm_data *
354         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
355 static int      iwm_nvm_init(struct iwm_softc *);
356 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
357                                        const uint8_t *, uint32_t);
358 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
359                                         const uint8_t *, uint32_t);
360 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
361 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
362                                            struct iwm_fw_sects *, int , int *);
363 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
364 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
365 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
366 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
367 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
368 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
369                                               enum iwm_ucode_type);
370 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
371 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
372 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
373 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
374                                             struct iwm_rx_phy_info *);
375 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
376                                       struct iwm_rx_packet *,
377                                       struct iwm_rx_data *);
378 static int      iwm_get_noise(struct iwm_softc *sc,
379                     const struct iwm_mvm_statistics_rx_non_phy *);
380 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
381                                    struct iwm_rx_data *);
382 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
383                                          struct iwm_rx_packet *,
384                                          struct iwm_node *);
385 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
386                                   struct iwm_rx_data *);
387 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
388 #if 0
389 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
390                                  uint16_t);
391 #endif
392 static const struct iwm_rate *
393         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
394                         struct mbuf *, struct iwm_tx_cmd *);
395 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
396                        struct ieee80211_node *, int);
397 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
398                              const struct ieee80211_bpf_params *);
399 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
400                                       uint32_t tfd_msk, uint32_t flags);
401 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
402                                                 struct iwm_mvm_add_sta_cmd_v7 *,
403                                                 int *);
404 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
405                                        int);
406 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
407 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
408 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
409                                            struct iwm_int_sta *,
410                                            const uint8_t *, uint16_t, uint16_t);
411 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
412 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
413 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
414 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
415 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
416 static struct ieee80211_node *
417                 iwm_node_alloc(struct ieee80211vap *,
418                                const uint8_t[IEEE80211_ADDR_LEN]);
419 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
420 static int      iwm_media_change(struct ifnet *);
421 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
422 static void     iwm_endscan_cb(void *, int);
423 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
424                                         struct iwm_sf_cfg_cmd *,
425                                         struct ieee80211_node *);
426 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
427 static int      iwm_send_bt_init_conf(struct iwm_softc *);
428 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
429 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
430 static int      iwm_init_hw(struct iwm_softc *);
431 static void     iwm_init(struct iwm_softc *);
432 static void     iwm_start(struct iwm_softc *);
433 static void     iwm_stop(struct iwm_softc *);
434 static void     iwm_watchdog(void *);
435 static void     iwm_parent(struct ieee80211com *);
436 #ifdef IWM_DEBUG
437 static const char *
438                 iwm_desc_lookup(uint32_t);
439 static void     iwm_nic_error(struct iwm_softc *);
440 static void     iwm_nic_umac_error(struct iwm_softc *);
441 #endif
442 static void     iwm_notif_intr(struct iwm_softc *);
443 static void     iwm_intr(void *);
444 static int      iwm_attach(device_t);
445 static int      iwm_is_valid_ether_addr(uint8_t *);
446 static void     iwm_preinit(void *);
447 static int      iwm_detach_local(struct iwm_softc *sc, int);
448 static void     iwm_init_task(void *);
449 static void     iwm_radiotap_attach(struct iwm_softc *);
450 static struct ieee80211vap *
451                 iwm_vap_create(struct ieee80211com *,
452                                const char [IFNAMSIZ], int,
453                                enum ieee80211_opmode, int,
454                                const uint8_t [IEEE80211_ADDR_LEN],
455                                const uint8_t [IEEE80211_ADDR_LEN]);
456 static void     iwm_vap_delete(struct ieee80211vap *);
457 static void     iwm_scan_start(struct ieee80211com *);
458 static void     iwm_scan_end(struct ieee80211com *);
459 static void     iwm_update_mcast(struct ieee80211com *);
460 static void     iwm_set_channel(struct ieee80211com *);
461 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
462 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
463 static int      iwm_detach(device_t);
464
465 /*
466  * Firmware parser.
467  */
468
469 static int
470 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
471 {
472         const struct iwm_fw_cscheme_list *l = (const void *)data;
473
474         if (dlen < sizeof(*l) ||
475             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
476                 return EINVAL;
477
478         /* we don't actually store anything for now, always use s/w crypto */
479
480         return 0;
481 }
482
483 static int
484 iwm_firmware_store_section(struct iwm_softc *sc,
485     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
486 {
487         struct iwm_fw_sects *fws;
488         struct iwm_fw_onesect *fwone;
489
490         if (type >= IWM_UCODE_TYPE_MAX)
491                 return EINVAL;
492         if (dlen < sizeof(uint32_t))
493                 return EINVAL;
494
495         fws = &sc->sc_fw.fw_sects[type];
496         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
497                 return EINVAL;
498
499         fwone = &fws->fw_sect[fws->fw_count];
500
501         /* The first 32 bits are the device load offset. */
502         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
503
504         /* rest is data */
505         fwone->fws_data = data + sizeof(uint32_t);
506         fwone->fws_len = dlen - sizeof(uint32_t);
507
508         fws->fw_count++;
509
510         return 0;
511 }
512
513 #define IWM_DEFAULT_SCAN_CHANNELS 40
514
515 /* iwlwifi: iwl-drv.c */
516 struct iwm_tlv_calib_data {
517         uint32_t ucode_type;
518         struct iwm_tlv_calib_ctrl calib;
519 } __packed;
520
521 static int
522 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
523 {
524         const struct iwm_tlv_calib_data *def_calib = data;
525         uint32_t ucode_type = le32toh(def_calib->ucode_type);
526
527         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
528                 device_printf(sc->sc_dev,
529                     "Wrong ucode_type %u for default "
530                     "calibration.\n", ucode_type);
531                 return EINVAL;
532         }
533
534         sc->sc_default_calib[ucode_type].flow_trigger =
535             def_calib->calib.flow_trigger;
536         sc->sc_default_calib[ucode_type].event_trigger =
537             def_calib->calib.event_trigger;
538
539         return 0;
540 }
541
542 static void
543 iwm_fw_info_free(struct iwm_fw_info *fw)
544 {
545         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
546         fw->fw_fp = NULL;
547         /* don't touch fw->fw_status */
548         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
549 }
550
551 static int
552 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
553 {
554         struct iwm_fw_info *fw = &sc->sc_fw;
555         const struct iwm_tlv_ucode_header *uhdr;
556         struct iwm_ucode_tlv tlv;
557         enum iwm_ucode_tlv_type tlv_type;
558         const struct firmware *fwp;
559         const uint8_t *data;
560         uint32_t usniffer_img;
561         uint32_t paging_mem_size;
562         int error = 0;
563         size_t len;
564
565         if (fw->fw_status == IWM_FW_STATUS_DONE &&
566             ucode_type != IWM_UCODE_INIT)
567                 return 0;
568
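        /* If another thread is already loading the firmware, wait for it. */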
569         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
570                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
571         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
572
573         if (fw->fw_fp != NULL)
574                 iwm_fw_info_free(fw);
575
576         /*
577          * Load firmware into driver memory.
578          * fw_fp will be set.
579          */
580         IWM_UNLOCK(sc);
581         fwp = firmware_get(sc->cfg->fw_name);
582         IWM_LOCK(sc);
583         if (fwp == NULL) {
584                 device_printf(sc->sc_dev,
585                     "could not read firmware %s\n", sc->cfg->fw_name);
586                 error = ENOENT;
587                 goto out;
588         }
589         fw->fw_fp = fwp;
590
591         /* (Re-)Initialize default values. */
592         sc->sc_capaflags = 0;
593         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
594         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
595         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
596
597         /*
598          * Parse firmware contents
599          */
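        /*
         * The image starts with a ucode header, followed by a stream of
         * TLV records: a 32-bit type, a 32-bit length, and "length" bytes
         * of payload, with each record padded to a 4-byte boundary.
         */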
600
601         uhdr = (const void *)fw->fw_fp->data;
602         if (*(const uint32_t *)fw->fw_fp->data != 0
603             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
604                 device_printf(sc->sc_dev, "invalid firmware %s\n",
605                     sc->cfg->fw_name);
606                 error = EINVAL;
607                 goto out;
608         }
609
610         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
611             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
612             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
613             IWM_UCODE_API(le32toh(uhdr->ver)));
614         data = uhdr->data;
615         len = fw->fw_fp->datasize - sizeof(*uhdr);
616
617         while (len >= sizeof(tlv)) {
618                 size_t tlv_len;
619                 const void *tlv_data;
620
621                 memcpy(&tlv, data, sizeof(tlv));
622                 tlv_len = le32toh(tlv.length);
623                 tlv_type = le32toh(tlv.type);
624
625                 len -= sizeof(tlv);
626                 data += sizeof(tlv);
627                 tlv_data = data;
628
629                 if (len < tlv_len) {
630                         device_printf(sc->sc_dev,
631                             "firmware too short: %zu bytes\n",
632                             len);
633                         error = EINVAL;
634                         goto parse_out;
635                 }
636
637                 switch ((int)tlv_type) {
638                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
639                         if (tlv_len < sizeof(uint32_t)) {
640                                 device_printf(sc->sc_dev,
641                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
642                                     __func__,
643                                     (int) tlv_len);
644                                 error = EINVAL;
645                                 goto parse_out;
646                         }
647                         sc->sc_capa_max_probe_len
648                             = le32toh(*(const uint32_t *)tlv_data);
649                         /* limit it to something sensible */
650                         if (sc->sc_capa_max_probe_len >
651                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
652                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
653                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
654                                     "ridiculous\n", __func__);
655                                 error = EINVAL;
656                                 goto parse_out;
657                         }
658                         break;
659                 case IWM_UCODE_TLV_PAN:
660                         if (tlv_len) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
663                                     __func__,
664                                     (int) tlv_len);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
669                         break;
670                 case IWM_UCODE_TLV_FLAGS:
671                         if (tlv_len < sizeof(uint32_t)) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
674                                     __func__,
675                                     (int) tlv_len);
676                                 error = EINVAL;
677                                 goto parse_out;
678                         }
679                         /*
680                          * Apparently there can be many flags, but the Linux driver
681                          * parses only the first one, and so do we.
682                          *
683                          * XXX: why does this override IWM_UCODE_TLV_PAN?
684                          * Intentional or a bug?  Observations from
685                          * current firmware file:
686                          *  1) TLV_PAN is parsed first
687                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
688                          * ==> this resets TLV_PAN to itself... hnnnk
689                          */
690                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
691                         break;
692                 case IWM_UCODE_TLV_CSCHEME:
693                         if ((error = iwm_store_cscheme(sc,
694                             tlv_data, tlv_len)) != 0) {
695                                 device_printf(sc->sc_dev,
696                                     "%s: iwm_store_cscheme(): returned %d\n",
697                                     __func__,
698                                     error);
699                                 goto parse_out;
700                         }
701                         break;
702                 case IWM_UCODE_TLV_NUM_OF_CPU: {
703                         uint32_t num_cpu;
704                         if (tlv_len != sizeof(uint32_t)) {
705                                 device_printf(sc->sc_dev,
706                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
707                                     __func__,
708                                     (int) tlv_len);
709                                 error = EINVAL;
710                                 goto parse_out;
711                         }
712                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
713                         if (num_cpu < 1 || num_cpu > 2) {
714                                 device_printf(sc->sc_dev,
715                                     "%s: Driver supports only 1 or 2 CPUs\n",
716                                     __func__);
717                                 error = EINVAL;
718                                 goto parse_out;
719                         }
720                         break;
721                 }
722                 case IWM_UCODE_TLV_SEC_RT:
723                         if ((error = iwm_firmware_store_section(sc,
724                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
725                                 device_printf(sc->sc_dev,
726                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
727                                     __func__,
728                                     error);
729                                 goto parse_out;
730                         }
731                         break;
732                 case IWM_UCODE_TLV_SEC_INIT:
733                         if ((error = iwm_firmware_store_section(sc,
734                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
735                                 device_printf(sc->sc_dev,
736                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
737                                     __func__,
738                                     error);
739                                 goto parse_out;
740                         }
741                         break;
742                 case IWM_UCODE_TLV_SEC_WOWLAN:
743                         if ((error = iwm_firmware_store_section(sc,
744                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
745                                 device_printf(sc->sc_dev,
746                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
747                                     __func__,
748                                     error);
749                                 goto parse_out;
750                         }
751                         break;
752                 case IWM_UCODE_TLV_DEF_CALIB:
753                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
754                                 device_printf(sc->sc_dev,
755                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
756                                     __func__,
757                                     (int) tlv_len,
758                                     (int) sizeof(struct iwm_tlv_calib_data));
759                                 error = EINVAL;
760                                 goto parse_out;
761                         }
762                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
763                                 device_printf(sc->sc_dev,
764                                     "%s: iwm_set_default_calib() failed: %d\n",
765                                     __func__,
766                                     error);
767                                 goto parse_out;
768                         }
769                         break;
770                 case IWM_UCODE_TLV_PHY_SKU:
771                         if (tlv_len != sizeof(uint32_t)) {
772                                 error = EINVAL;
773                                 device_printf(sc->sc_dev,
774                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
775                                     __func__,
776                                     (int) tlv_len);
777                                 goto parse_out;
778                         }
779                         sc->sc_fw.phy_config =
780                             le32toh(*(const uint32_t *)tlv_data);
781                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
782                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
783                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
784                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
785                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
786                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
787                         break;
788
789                 case IWM_UCODE_TLV_API_CHANGES_SET: {
790                         const struct iwm_ucode_api *api;
791                         if (tlv_len != sizeof(*api)) {
792                                 error = EINVAL;
793                                 goto parse_out;
794                         }
795                         api = (const struct iwm_ucode_api *)tlv_data;
796                         /* Flags may exceed 32 bits in future firmware. */
797                         if (le32toh(api->api_index) > 0) {
798                                 device_printf(sc->sc_dev,
799                                     "unsupported API index %d\n",
800                                     le32toh(api->api_index));
801                                 goto parse_out;
802                         }
803                         sc->sc_ucode_api = le32toh(api->api_flags);
804                         break;
805                 }
806
807                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
808                         const struct iwm_ucode_capa *capa;
809                         int idx, i;
810                         if (tlv_len != sizeof(*capa)) {
811                                 error = EINVAL;
812                                 goto parse_out;
813                         }
814                         capa = (const struct iwm_ucode_capa *)tlv_data;
815                         idx = le32toh(capa->api_index);
816                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
817                                 device_printf(sc->sc_dev,
818                                     "unsupported API index %d\n", idx);
819                                 goto parse_out;
820                         }
821                         for (i = 0; i < 32; i++) {
822                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
823                                         continue;
824                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
825                         }
826                         break;
827                 }
828
829                 case 48: /* undocumented TLV */
830                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
831                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
832                         /* ignore, not used by current driver */
833                         break;
834
835                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
836                         if ((error = iwm_firmware_store_section(sc,
837                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
838                             tlv_len)) != 0)
839                                 goto parse_out;
840                         break;
841
842                 case IWM_UCODE_TLV_PAGING:
843                         if (tlv_len != sizeof(uint32_t)) {
844                                 error = EINVAL;
845                                 goto parse_out;
846                         }
847                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
848
849                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
850                             "%s: Paging: paging enabled (size = %u bytes)\n",
851                             __func__, paging_mem_size);
852                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
853                                 device_printf(sc->sc_dev,
854                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
855                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
856                                 error = EINVAL;
857                                 goto out;
858                         }
859                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
860                                 device_printf(sc->sc_dev,
861                                     "%s: Paging: image size isn't a multiple of %u\n",
862                                     __func__, IWM_FW_PAGING_SIZE);
863                                 error = EINVAL;
864                                 goto out;
865                         }
866
867                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
868                             paging_mem_size;
869                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
870                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
871                             paging_mem_size;
872                         break;
873
874                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
875                         if (tlv_len != sizeof(uint32_t)) {
876                                 error = EINVAL;
877                                 goto parse_out;
878                         }
879                         sc->sc_capa_n_scan_channels =
880                           le32toh(*(const uint32_t *)tlv_data);
881                         break;
882
883                 case IWM_UCODE_TLV_FW_VERSION:
884                         if (tlv_len != sizeof(uint32_t) * 3) {
885                                 error = EINVAL;
886                                 goto parse_out;
887                         }
888                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
889                             "%d.%d.%d",
890                             le32toh(((const uint32_t *)tlv_data)[0]),
891                             le32toh(((const uint32_t *)tlv_data)[1]),
892                             le32toh(((const uint32_t *)tlv_data)[2]));
893                         break;
894
895                 case IWM_UCODE_TLV_FW_MEM_SEG:
896                         break;
897
898                 default:
899                         device_printf(sc->sc_dev,
900                             "%s: unknown firmware section %d, abort\n",
901                             __func__, tlv_type);
902                         error = EINVAL;
903                         goto parse_out;
904                 }
905
906                 len -= roundup(tlv_len, 4);
907                 data += roundup(tlv_len, 4);
908         }
909
910         KASSERT(error == 0, ("unhandled error"));
911
912  parse_out:
913         if (error) {
914                 device_printf(sc->sc_dev, "firmware parse error %d, "
915                     "section type %d\n", error, tlv_type);
916         }
917
918         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
919                 device_printf(sc->sc_dev,
920                     "device uses unsupported power ops\n");
921                 error = ENOTSUP;
922         }
923
924  out:
925         if (error) {
926                 fw->fw_status = IWM_FW_STATUS_NONE;
927                 if (fw->fw_fp != NULL)
928                         iwm_fw_info_free(fw);
929         } else
930                 fw->fw_status = IWM_FW_STATUS_DONE;
931         wakeup(&sc->sc_fw);
932
933         return error;
934 }
935
936 /*
937  * DMA resource routines
938  */
939
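/* bus_dmamap_load() callback: record the bus address of the single segment. */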
940 static void
941 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
942 {
943         if (error != 0)
944                 return;
945         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
946         *(bus_addr_t *)arg = segs[0].ds_addr;
947 }
948
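/*
 * Allocate a physically contiguous, coherent DMA buffer of "size" bytes with
 * the given alignment; the kernel virtual address and bus address end up in
 * dma->vaddr and dma->paddr (the latter via the iwm_dma_map_addr() callback).
 */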
949 static int
950 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
951     bus_size_t size, bus_size_t alignment)
952 {
953         int error;
954
955         dma->tag = NULL;
956         dma->map = NULL;
957         dma->size = size;
958         dma->vaddr = NULL;
959
960         error = bus_dma_tag_create(tag, alignment,
961             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
962             1, size, 0, NULL, NULL, &dma->tag);
963         if (error != 0)
964                 goto fail;
965
966         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
967             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
968         if (error != 0)
969                 goto fail;
970
971         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
972             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
973         if (error != 0) {
974                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
975                 dma->vaddr = NULL;
976                 goto fail;
977         }
978
979         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
980
981         return 0;
982
983 fail:
984         iwm_dma_contig_free(dma);
985
986         return error;
987 }
988
989 static void
990 iwm_dma_contig_free(struct iwm_dma_info *dma)
991 {
992         if (dma->vaddr != NULL) {
993                 bus_dmamap_sync(dma->tag, dma->map,
994                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
995                 bus_dmamap_unload(dma->tag, dma->map);
996                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
997                 dma->vaddr = NULL;
998         }
999         if (dma->tag != NULL) {
1000                 bus_dma_tag_destroy(dma->tag);
1001                 dma->tag = NULL;
1002         }
1003 }
1004
1005 /* fwmem is used to load firmware onto the card */
1006 static int
1007 iwm_alloc_fwmem(struct iwm_softc *sc)
1008 {
1009         /* Must be aligned on a 16-byte boundary. */
1010         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
1011             sc->sc_fwdmasegsz, 16);
1012 }
1013
1014 /* tx scheduler rings.  not used? */
1015 static int
1016 iwm_alloc_sched(struct iwm_softc *sc)
1017 {
1018         /* TX scheduler rings must be aligned on a 1KB boundary. */
1019         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1020             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1021 }
1022
1023 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
1024 static int
1025 iwm_alloc_kw(struct iwm_softc *sc)
1026 {
1027         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
1028 }
1029
1030 /* interrupt cause table */
1031 static int
1032 iwm_alloc_ict(struct iwm_softc *sc)
1033 {
1034         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1035             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
1036 }
1037
1038 static int
1039 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1040 {
1041         bus_size_t size;
1042         int i, error;
1043
1044         ring->cur = 0;
1045
1046         /* Allocate RX descriptors (256-byte aligned). */
1047         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1048         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1049         if (error != 0) {
1050                 device_printf(sc->sc_dev,
1051                     "could not allocate RX ring DMA memory\n");
1052                 goto fail;
1053         }
1054         ring->desc = ring->desc_dma.vaddr;
1055
1056         /* Allocate RX status area (16-byte aligned). */
1057         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1058             sizeof(*ring->stat), 16);
1059         if (error != 0) {
1060                 device_printf(sc->sc_dev,
1061                     "could not allocate RX status DMA memory\n");
1062                 goto fail;
1063         }
1064         ring->stat = ring->stat_dma.vaddr;
1065
1066         /* Create RX buffer DMA tag. */
1067         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1068             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1069             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1070         if (error != 0) {
1071                 device_printf(sc->sc_dev,
1072                     "%s: could not create RX buf DMA tag, error %d\n",
1073                     __func__, error);
1074                 goto fail;
1075         }
1076
1077         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1078         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1079         if (error != 0) {
1080                 device_printf(sc->sc_dev,
1081                     "%s: could not create RX buf DMA map, error %d\n",
1082                     __func__, error);
1083                 goto fail;
1084         }
1085         /*
1086          * Allocate and map RX buffers.
1087          */
1088         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1089                 struct iwm_rx_data *data = &ring->data[i];
1090                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1091                 if (error != 0) {
1092                         device_printf(sc->sc_dev,
1093                             "%s: could not create RX buf DMA map, error %d\n",
1094                             __func__, error);
1095                         goto fail;
1096                 }
1097                 data->m = NULL;
1098
1099                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1100                         goto fail;
1101                 }
1102         }
1103         return 0;
1104
1105 fail:   iwm_free_rx_ring(sc, ring);
1106         return error;
1107 }
1108
1109 static void
1110 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1111 {
1112         /* Reset the ring state */
1113         ring->cur = 0;
1114
1115         /*
1116          * The hw rx ring index in shared memory must also be cleared,
1117          * otherwise the discrepancy can cause reprocessing chaos.
1118          */
1119         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1120 }
1121
1122 static void
1123 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1124 {
1125         int i;
1126
1127         iwm_dma_contig_free(&ring->desc_dma);
1128         iwm_dma_contig_free(&ring->stat_dma);
1129
1130         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1131                 struct iwm_rx_data *data = &ring->data[i];
1132
1133                 if (data->m != NULL) {
1134                         bus_dmamap_sync(ring->data_dmat, data->map,
1135                             BUS_DMASYNC_POSTREAD);
1136                         bus_dmamap_unload(ring->data_dmat, data->map);
1137                         m_freem(data->m);
1138                         data->m = NULL;
1139                 }
1140                 if (data->map != NULL) {
1141                         bus_dmamap_destroy(ring->data_dmat, data->map);
1142                         data->map = NULL;
1143                 }
1144         }
1145         if (ring->spare_map != NULL) {
1146                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1147                 ring->spare_map = NULL;
1148         }
1149         if (ring->data_dmat != NULL) {
1150                 bus_dma_tag_destroy(ring->data_dmat);
1151                 ring->data_dmat = NULL;
1152         }
1153 }
1154
1155 static int
1156 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1157 {
1158         bus_addr_t paddr;
1159         bus_size_t size;
1160         size_t maxsize;
1161         int nsegments;
1162         int i, error;
1163
1164         ring->qid = qid;
1165         ring->queued = 0;
1166         ring->cur = 0;
1167
1168         /* Allocate TX descriptors (256-byte aligned). */
1169         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1170         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1171         if (error != 0) {
1172                 device_printf(sc->sc_dev,
1173                     "could not allocate TX ring DMA memory\n");
1174                 goto fail;
1175         }
1176         ring->desc = ring->desc_dma.vaddr;
1177
1178         /*
1179          * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
1180          * to allocate command space for the other rings.
1181          */
1182         if (qid > IWM_MVM_CMD_QUEUE)
1183                 return 0;
1184
1185         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1186         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1187         if (error != 0) {
1188                 device_printf(sc->sc_dev,
1189                     "could not allocate TX cmd DMA memory\n");
1190                 goto fail;
1191         }
1192         ring->cmd = ring->cmd_dma.vaddr;
1193
1194         /* FW commands may require more mapped space than packets. */
1195         if (qid == IWM_MVM_CMD_QUEUE) {
1196                 maxsize = IWM_RBUF_SIZE;
1197                 nsegments = 1;
1198         } else {
1199                 maxsize = MCLBYTES;
1200                 nsegments = IWM_MAX_SCATTER - 2;
1201         }
1202
1203         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1204             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1205             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1206         if (error != 0) {
1207                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1208                 goto fail;
1209         }
1210
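        /*
         * Precompute, for each slot, the bus address of its command buffer
         * and of the scratch field inside the embedded TX command, so the
         * TX path does not have to recompute them for every frame.
         */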
1211         paddr = ring->cmd_dma.paddr;
1212         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1213                 struct iwm_tx_data *data = &ring->data[i];
1214
1215                 data->cmd_paddr = paddr;
1216                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1217                     + offsetof(struct iwm_tx_cmd, scratch);
1218                 paddr += sizeof(struct iwm_device_cmd);
1219
1220                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1221                 if (error != 0) {
1222                         device_printf(sc->sc_dev,
1223                             "could not create TX buf DMA map\n");
1224                         goto fail;
1225                 }
1226         }
1227         KASSERT(paddr == ring->cmd_dma.paddr + size,
1228             ("invalid physical address"));
1229         return 0;
1230
1231 fail:   iwm_free_tx_ring(sc, ring);
1232         return error;
1233 }
1234
1235 static void
1236 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1237 {
1238         int i;
1239
1240         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241                 struct iwm_tx_data *data = &ring->data[i];
1242
1243                 if (data->m != NULL) {
1244                         bus_dmamap_sync(ring->data_dmat, data->map,
1245                             BUS_DMASYNC_POSTWRITE);
1246                         bus_dmamap_unload(ring->data_dmat, data->map);
1247                         m_freem(data->m);
1248                         data->m = NULL;
1249                 }
1250         }
1251         /* Clear TX descriptors. */
1252         memset(ring->desc, 0, ring->desc_dma.size);
1253         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1254             BUS_DMASYNC_PREWRITE);
1255         sc->qfullmsk &= ~(1 << ring->qid);
1256         ring->queued = 0;
1257         ring->cur = 0;
1258
1259         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1260                 iwm_pcie_clear_cmd_in_flight(sc);
1261 }
1262
1263 static void
1264 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1265 {
1266         int i;
1267
1268         iwm_dma_contig_free(&ring->desc_dma);
1269         iwm_dma_contig_free(&ring->cmd_dma);
1270
1271         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1272                 struct iwm_tx_data *data = &ring->data[i];
1273
1274                 if (data->m != NULL) {
1275                         bus_dmamap_sync(ring->data_dmat, data->map,
1276                             BUS_DMASYNC_POSTWRITE);
1277                         bus_dmamap_unload(ring->data_dmat, data->map);
1278                         m_freem(data->m);
1279                         data->m = NULL;
1280                 }
1281                 if (data->map != NULL) {
1282                         bus_dmamap_destroy(ring->data_dmat, data->map);
1283                         data->map = NULL;
1284                 }
1285         }
1286         if (ring->data_dmat != NULL) {
1287                 bus_dma_tag_destroy(ring->data_dmat);
1288                 ring->data_dmat = NULL;
1289         }
1290 }
1291
1292 /*
1293  * High-level hardware frobbing routines
1294  */
1295
1296 static void
1297 iwm_enable_interrupts(struct iwm_softc *sc)
1298 {
1299         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1300         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1301 }
1302
1303 static void
1304 iwm_restore_interrupts(struct iwm_softc *sc)
1305 {
1306         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1307 }
1308
1309 static void
1310 iwm_disable_interrupts(struct iwm_softc *sc)
1311 {
1312         /* disable interrupts */
1313         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1314
1315         /* acknowledge all interrupts */
1316         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1317         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1318 }
1319
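/*
 * Reset the interrupt cause table (ICT): clear the table, point the
 * hardware at its physical address, and switch the driver back into
 * ICT interrupt mode with interrupts re-enabled.
 */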
1320 static void
1321 iwm_ict_reset(struct iwm_softc *sc)
1322 {
1323         iwm_disable_interrupts(sc);
1324
1325         /* Reset ICT table. */
1326         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1327         sc->ict_cur = 0;
1328
1329         /* Set physical address of ICT table (4KB aligned). */
1330         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1331             IWM_CSR_DRAM_INT_TBL_ENABLE
1332             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1333             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1334             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1335
1336         /* Switch to ICT interrupt mode in driver. */
1337         sc->sc_flags |= IWM_FLAG_USE_ICT;
1338
1339         /* Re-enable interrupts. */
1340         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1341         iwm_enable_interrupts(sc);
1342 }
1343
1344 /* iwlwifi pcie/trans.c */
1345
1346 /*
1347  * Since this hard-resets the device, mark the first vap (if any)
1348  * as having no MAC context.  It's annoying, but since the driver
1349  * may be stopped and restarted while active (thanks, OpenBSD
1350  * port!), we have to track this correctly.
1352  */
1353 static void
1354 iwm_stop_device(struct iwm_softc *sc)
1355 {
1356         struct ieee80211com *ic = &sc->sc_ic;
1357         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1358         int chnl, qid;
1359         uint32_t mask = 0;
1360
1361         /* tell the device to stop sending interrupts */
1362         iwm_disable_interrupts(sc);
1363
1364         /*
1365          * FreeBSD-local: mark the first vap as not-uploaded,
1366          * so the next transition through auth/assoc
1367          * will correctly populate the MAC context.
1368          */
1369         if (vap) {
1370                 struct iwm_vap *iv = IWM_VAP(vap);
1371                 iv->is_uploaded = 0;
1372         }
1373
1374         /* Device going down, stop using the ICT table */
1375         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1376
1377         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1378
1379         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1380
1381         if (iwm_nic_lock(sc)) {
1382                 /* Stop each Tx DMA channel */
1383                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1384                         IWM_WRITE(sc,
1385                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1386                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1387                 }
1388
1389                 /* Wait for DMA channels to be idle */
1390                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1391                     5000)) {
1392                         device_printf(sc->sc_dev,
1393                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1394                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1395                 }
1396                 iwm_nic_unlock(sc);
1397         }
1398         iwm_pcie_rx_stop(sc);
1399
1400         /* Stop RX ring. */
1401         iwm_reset_rx_ring(sc, &sc->rxq);
1402
1403         /* Reset all TX rings. */
1404         for (qid = 0; qid < nitems(sc->txq); qid++)
1405                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1406
1407         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1408                 /* Power-down device's busmaster DMA clocks */
1409                 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1410                     IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1411                 DELAY(5);
1412         }
1413
1414         /* Make sure (redundant) we've released our request to stay awake */
1415         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1416             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1417
1418         /* Stop the device, and put it in low power state */
1419         iwm_apm_stop(sc);
1420
1421         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1422          * Clear the interrupt again here.
1423          */
1424         iwm_disable_interrupts(sc);
1425         /* stop and reset the on-board processor */
1426         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1427
1428         /*
1429          * Even if we stop the HW, we still want the RF kill
1430          * interrupt
1431          */
1432         iwm_enable_rfkill_int(sc);
1433         iwm_check_rfkill(sc);
1434 }
1435
1436 /* iwlwifi: mvm/ops.c */
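/*
 * Program the SKU (MAC step/dash) and the radio configuration derived
 * from the PHY config into IWM_CSR_HW_IF_CONFIG_REG.
 */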
1437 static void
1438 iwm_mvm_nic_config(struct iwm_softc *sc)
1439 {
1440         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1441         uint32_t reg_val = 0;
1442         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1443
1444         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1445             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1446         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1447             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1448         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1449             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1450
1451         /* SKU control */
1452         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1453             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1454         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1455             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1456
1457         /* radio configuration */
1458         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1459         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1460         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1461
1462         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1463
1464         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1465             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1466             radio_cfg_step, radio_cfg_dash);
1467
1468         /*
1469          * W/A: the NIC is stuck in a reset state after Early PCIe power off
1470          * (PCIe power is lost before PERST# is asserted), causing the ME FW
1471          * to lose ownership and become unable to obtain it back.
1472          */
1473         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1474                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1475                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1476                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1477         }
1478 }
1479
1480 static int
1481 iwm_nic_rx_init(struct iwm_softc *sc)
1482 {
1483         /*
1484          * Initialize RX ring.  This is from the iwn driver.
1485          */
1486         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1487
1488         /* Stop Rx DMA */
1489         iwm_pcie_rx_stop(sc);
1490
1491         if (!iwm_nic_lock(sc))
1492                 return EBUSY;
1493
1494         /* reset and flush pointers */
1495         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1496         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1497         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1498         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1499
1500         /* Set physical address of RX ring (256-byte aligned). */
1501         IWM_WRITE(sc,
1502             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1503
1504         /* Set physical address of RX status (16-byte aligned). */
1505         IWM_WRITE(sc,
1506             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1507
1508         /* Enable RX. */
1509         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1510             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1511             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1512             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1513             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1514             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1515             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1516             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1517
1518         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1519
1520         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1521         if (sc->cfg->host_interrupt_operation_mode)
1522                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1523
1524         /*
1525          * Thus sayeth el jefe (iwlwifi) via a comment:
1526          *
1527          * This value should initially be 0 (before preparing any
1528          * RBs), should be 8 after preparing the first 8 RBs (for example)
1529          */
1530         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1531
1532         iwm_nic_unlock(sc);
1533
1534         return 0;
1535 }
1536
1537 static int
1538 iwm_nic_tx_init(struct iwm_softc *sc)
1539 {
1540         int qid;
1541
1542         if (!iwm_nic_lock(sc))
1543                 return EBUSY;
1544
1545         /* Deactivate TX scheduler. */
1546         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1547
1548         /* Set physical address of "keep warm" page (16-byte aligned). */
1549         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1550
1551         /* Initialize TX rings. */
1552         for (qid = 0; qid < nitems(sc->txq); qid++) {
1553                 struct iwm_tx_ring *txq = &sc->txq[qid];
1554
1555                 /* Set physical address of TX ring (256-byte aligned). */
1556                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1557                     txq->desc_dma.paddr >> 8);
1558                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1559                     "%s: loading ring %d descriptors (%p) at %lx\n",
1560                     __func__,
1561                     qid, txq->desc,
1562                     (unsigned long) (txq->desc_dma.paddr >> 8));
1563         }
1564
1565         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1566
1567         iwm_nic_unlock(sc);
1568
1569         return 0;
1570 }
1571
1572 static int
1573 iwm_nic_init(struct iwm_softc *sc)
1574 {
1575         int error;
1576
1577         iwm_apm_init(sc);
1578         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1579                 iwm_set_pwr(sc);
1580
1581         iwm_mvm_nic_config(sc);
1582
1583         if ((error = iwm_nic_rx_init(sc)) != 0)
1584                 return error;
1585
1586         /*
1587          * Ditto for TX, from iwn
1588          */
1589         if ((error = iwm_nic_tx_init(sc)) != 0)
1590                 return error;
1591
1592         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1593             "%s: shadow registers enabled\n", __func__);
1594         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1595
1596         return 0;
1597 }
1598
1599 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1600         IWM_MVM_TX_FIFO_VO,
1601         IWM_MVM_TX_FIFO_VI,
1602         IWM_MVM_TX_FIFO_BE,
1603         IWM_MVM_TX_FIFO_BK,
1604 };
1605
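/*
 * Enable a TX queue.  The command queue is configured directly through
 * the scheduler's periphery registers and SRAM context; all other
 * queues are configured by the firmware via an IWM_SCD_QUEUE_CFG host
 * command.
 */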
1606 static int
1607 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1608 {
1609         if (!iwm_nic_lock(sc)) {
1610                 device_printf(sc->sc_dev,
1611                     "%s: cannot enable txq %d\n",
1612                     __func__,
1613                     qid);
1614                 return EBUSY;
1615         }
1616
1617         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1618
1619         if (qid == IWM_MVM_CMD_QUEUE) {
1620                 /* Deactivate the queue before configuring it. */
1621                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1622                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1623                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1624
1625                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1626
1627                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1628
1629                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1630                 /* Set scheduler window size and frame limit. */
1631                 iwm_write_mem32(sc,
1632                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1633                     sizeof(uint32_t),
1634                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1635                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1636                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1637                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1638
1639                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1640                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1641                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1642                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1643                     IWM_SCD_QUEUE_STTS_REG_MSK);
1644         } else {
1645                 struct iwm_scd_txq_cfg_cmd cmd;
1646                 int error;
1647
1648                 iwm_nic_unlock(sc);
1649
1650                 memset(&cmd, 0, sizeof(cmd));
1651                 cmd.scd_queue = qid;
1652                 cmd.enable = 1;
1653                 cmd.sta_id = sta_id;
1654                 cmd.tx_fifo = fifo;
1655                 cmd.aggregate = 0;
1656                 cmd.window = IWM_FRAME_LIMIT;
1657
1658                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1659                     sizeof(cmd), &cmd);
1660                 if (error) {
1661                         device_printf(sc->sc_dev,
1662                             "cannot enable txq %d\n", qid);
1663                         return error;
1664                 }
1665
1666                 if (!iwm_nic_lock(sc))
1667                         return EBUSY;
1668         }
1669
1670         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1671             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1672
1673         iwm_nic_unlock(sc);
1674
1675         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1676             __func__, qid, fifo);
1677
1678         return 0;
1679 }
1680
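/*
 * Finish bring-up after the firmware has reported "alive": reset the
 * ICT table, clear the TX scheduler context in SRAM, program the
 * scheduler ring base, enable the command queue and the FH TX DMA
 * channels.
 */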
1681 static int
1682 iwm_post_alive(struct iwm_softc *sc)
1683 {
1684         int nwords;
1685         int error, chnl;
1686         uint32_t base;
1687
1688         if (!iwm_nic_lock(sc))
1689                 return EBUSY;
1690
1691         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1692         if (sc->sched_base != base) {
1693                 device_printf(sc->sc_dev,
1694                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1695                     __func__, sc->sched_base, base);
1696         }
1697
1698         iwm_ict_reset(sc);
1699
1700         /* Clear TX scheduler state in SRAM. */
1701         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1702             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1703             / sizeof(uint32_t);
1704         error = iwm_write_mem(sc,
1705             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1706             NULL, nwords);
1707         if (error)
1708                 goto out;
1709
1710         /* Set physical address of TX scheduler rings (1KB aligned). */
1711         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1712
1713         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1714
1715         iwm_nic_unlock(sc);
1716
1717         /* enable command channel */
1718         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1719         if (error)
1720                 return error;
1721
1722         if (!iwm_nic_lock(sc))
1723                 return EBUSY;
1724
1725         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1726
1727         /* Enable DMA channels. */
1728         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1729                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1730                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1731                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1732         }
1733
1734         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1735             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1736
1737         /* Enable L1-Active */
1738         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1739                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1740                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1741         }
1742
1743  out:
1744         iwm_nic_unlock(sc);
1745         return error;
1746 }
1747
1748 /*
1749  * NVM read access and content parsing.  We do not support
1750  * external NVM or writing NVM.
1751  * iwlwifi/mvm/nvm.c
1752  */
1753
1754 /* Default NVM size to read */
1755 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1756
1757 #define IWM_NVM_WRITE_OPCODE 1
1758 #define IWM_NVM_READ_OPCODE 0
1759
1760 /* load nvm chunk response */
1761 enum {
1762         IWM_READ_NVM_CHUNK_SUCCEED = 0,
1763         IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1764 };
1765
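/*
 * Read one chunk of an NVM section by sending an IWM_NVM_ACCESS_CMD
 * and copying the returned data into the caller's buffer at the
 * requested offset.
 */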
1766 static int
1767 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1768         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1769 {
1770         struct iwm_nvm_access_cmd nvm_access_cmd = {
1771                 .offset = htole16(offset),
1772                 .length = htole16(length),
1773                 .type = htole16(section),
1774                 .op_code = IWM_NVM_READ_OPCODE,
1775         };
1776         struct iwm_nvm_access_resp *nvm_resp;
1777         struct iwm_rx_packet *pkt;
1778         struct iwm_host_cmd cmd = {
1779                 .id = IWM_NVM_ACCESS_CMD,
1780                 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1781                 .data = { &nvm_access_cmd, },
1782         };
1783         int ret, bytes_read, offset_read;
1784         uint8_t *resp_data;
1785
1786         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1787
1788         ret = iwm_send_cmd(sc, &cmd);
1789         if (ret) {
1790                 device_printf(sc->sc_dev,
1791                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1792                 return ret;
1793         }
1794
1795         pkt = cmd.resp_pkt;
1796
1797         /* Extract NVM response */
1798         nvm_resp = (void *)pkt->data;
1799         ret = le16toh(nvm_resp->status);
1800         bytes_read = le16toh(nvm_resp->length);
1801         offset_read = le16toh(nvm_resp->offset);
1802         resp_data = nvm_resp->data;
1803         if (ret) {
1804                 if ((offset != 0) &&
1805                     (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1806                         /*
1807                          * Meaning of NOT_VALID_ADDRESS: the driver tried to
1808                          * read a chunk from an address that is a multiple of
1809                          * 2K and got an error because that address is empty.
1810                          * Meaning of (offset != 0): the driver already read
1811                          * valid data from another chunk, so this case is not
1812                          * an error.
1813                          */
1814                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1815                                     "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1816                                     offset);
1817                         *len = 0;
1818                         ret = 0;
1819                 } else {
1820                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1821                                     "NVM access command failed with status %d\n", ret);
1822                         ret = EIO;
1823                 }
1824                 goto exit;
1825         }
1826
1827         if (offset_read != offset) {
1828                 device_printf(sc->sc_dev,
1829                     "NVM ACCESS response with invalid offset %d\n",
1830                     offset_read);
1831                 ret = EINVAL;
1832                 goto exit;
1833         }
1834
1835         if (bytes_read > length) {
1836                 device_printf(sc->sc_dev,
1837                     "NVM ACCESS response with too much data "
1838                     "(%d bytes requested, %d bytes received)\n",
1839                     length, bytes_read);
1840                 ret = EINVAL;
1841                 goto exit;
1842         }
1843
1844         /* Write data to NVM */
1845         memcpy(data + offset, resp_data, bytes_read);
1846         *len = bytes_read;
1847
1848  exit:
1849         iwm_free_resp(sc, &cmd);
1850         return ret;
1851 }
1852
1853 /*
1854  * Reads an NVM section completely.
1855  * NICs prior to the 7000 family don't have a real NVM, but just read
1856  * section 0, which is the EEPROM.  Because the EEPROM read is not
1857  * bounded by the uCode, we need to check manually in this case that
1858  * we don't overflow and try to read more than the EEPROM size.
1859  * For 7000 family NICs, we supply the maximal size we can read, and
1860  * the uCode fills the response with as much data as fits without
1861  * overflowing, so no check is needed.
1862  */
1863 static int
1864 iwm_nvm_read_section(struct iwm_softc *sc,
1865         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1866 {
1867         uint16_t seglen, length, offset = 0;
1868         int ret;
1869
1870         /* Set nvm section read length */
1871         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1872
1873         seglen = length;
1874
1875         /* Read the NVM until exhausted (reading less than requested) */
1876         while (seglen == length) {
1877                 /* Check no memory assumptions fail and cause an overflow */
1878                 if ((size_read + offset + length) >
1879                     sc->cfg->eeprom_size) {
1880                         device_printf(sc->sc_dev,
1881                             "EEPROM size is too small for NVM\n");
1882                         return ENOBUFS;
1883                 }
1884
1885                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1886                 if (ret) {
1887                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1888                                     "Cannot read NVM from section %d offset %d, length %d\n",
1889                                     section, offset, length);
1890                         return ret;
1891                 }
1892                 offset += seglen;
1893         }
1894
1895         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1896                     "NVM section %d read completed\n", section);
1897         *len = offset;
1898         return 0;
1899 }
1900
1901 /*
1902  * BEGIN IWM_NVM_PARSE
1903  */
1904
1905 /* iwlwifi/iwl-nvm-parse.c */
1906
1907 /* NVM offsets (in words) definitions */
1908 enum iwm_nvm_offsets {
1909         /* NVM HW-Section offset (in words) definitions */
1910         IWM_HW_ADDR = 0x15,
1911
1912         /* NVM SW-Section offset (in words) definitions */
1913         IWM_NVM_SW_SECTION = 0x1C0,
1914         IWM_NVM_VERSION = 0,
1915         IWM_RADIO_CFG = 1,
1916         IWM_SKU = 2,
1917         IWM_N_HW_ADDRS = 3,
1918         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1919
1920         /* NVM calibration section offset (in words) definitions */
1921         IWM_NVM_CALIB_SECTION = 0x2B8,
1922         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1923 };
1924
1925 enum iwm_8000_nvm_offsets {
1926         /* NVM HW-Section offset (in words) definitions */
1927         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1928         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1929         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1930         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1931         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1932
1933         /* NVM SW-Section offset (in words) definitions */
1934         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1935         IWM_NVM_VERSION_8000 = 0,
1936         IWM_RADIO_CFG_8000 = 0,
1937         IWM_SKU_8000 = 2,
1938         IWM_N_HW_ADDRS_8000 = 3,
1939
1940         /* NVM REGULATORY -Section offset (in words) definitions */
1941         IWM_NVM_CHANNELS_8000 = 0,
1942         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1943         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1944         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1945
1946         /* NVM calibration section offset (in words) definitions */
1947         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1948         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1949 };
1950
1951 /* SKU Capabilities (actual values from NVM definition) */
1952 enum nvm_sku_bits {
1953         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1954         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1955         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1956         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1957 };
1958
1959 /* radio config bits (actual values from NVM definition) */
1960 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1961 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1962 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1963 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1964 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1965 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1966
1967 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1968 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1969 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1970 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1971 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1972 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1973
1974 #define DEFAULT_MAX_TX_POWER 16
1975
1976 /**
1977  * enum iwm_nvm_channel_flags - channel flags in NVM
1978  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1979  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1980  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1981  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1982  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1983  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1984  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1985  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1986  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1987  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1988  */
1989 enum iwm_nvm_channel_flags {
1990         IWM_NVM_CHANNEL_VALID = (1 << 0),
1991         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1992         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1993         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1994         IWM_NVM_CHANNEL_DFS = (1 << 7),
1995         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1996         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1997         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1998         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1999 };
2000
2001 /*
2002  * Translate EEPROM flags to net80211.
2003  */
2004 static uint32_t
2005 iwm_eeprom_channel_flags(uint16_t ch_flags)
2006 {
2007         uint32_t nflags;
2008
2009         nflags = 0;
2010         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2011                 nflags |= IEEE80211_CHAN_PASSIVE;
2012         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2013                 nflags |= IEEE80211_CHAN_NOADHOC;
2014         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2015                 nflags |= IEEE80211_CHAN_DFS;
2016                 /* Just in case. */
2017                 nflags |= IEEE80211_CHAN_NOADHOC;
2018         }
2019
2020         return (nflags);
2021 }
2022
2023 static void
2024 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2025     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2026     const uint8_t bands[])
2027 {
2028         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2029         uint32_t nflags;
2030         uint16_t ch_flags;
2031         uint8_t ieee;
2032         int error;
2033
2034         for (; ch_idx < ch_num; ch_idx++) {
2035                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2036                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2037                         ieee = iwm_nvm_channels[ch_idx];
2038                 else
2039                         ieee = iwm_nvm_channels_8000[ch_idx];
2040
2041                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2042                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2043                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2044                             ieee, ch_flags,
2045                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2046                             "5.2" : "2.4");
2047                         continue;
2048                 }
2049
2050                 nflags = iwm_eeprom_channel_flags(ch_flags);
2051                 error = ieee80211_add_channel(chans, maxchans, nchans,
2052                     ieee, 0, 0, nflags, bands);
2053                 if (error != 0)
2054                         break;
2055
2056                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2057                     "Ch. %d Flags %x [%sGHz] - Added\n",
2058                     ieee, ch_flags,
2059                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2060                     "5.2" : "2.4");
2061         }
2062 }
2063
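/*
 * Build the net80211 channel list from the NVM channel flags:
 * 2 GHz 11b/g channels first, channel 14 as 11b only, and the 5 GHz
 * 11a channels if the SKU enables the 5 GHz band.
 */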
2064 static void
2065 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2066     struct ieee80211_channel chans[])
2067 {
2068         struct iwm_softc *sc = ic->ic_softc;
2069         struct iwm_nvm_data *data = sc->nvm_data;
2070         uint8_t bands[IEEE80211_MODE_BYTES];
2071         size_t ch_num;
2072
2073         memset(bands, 0, sizeof(bands));
2074         /* 1-13: 11b/g channels. */
2075         setbit(bands, IEEE80211_MODE_11B);
2076         setbit(bands, IEEE80211_MODE_11G);
2077         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2078             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2079
2080         /* 14: 11b channel only. */
2081         clrbit(bands, IEEE80211_MODE_11G);
2082         iwm_add_channel_band(sc, chans, maxchans, nchans,
2083             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2084
2085         if (data->sku_cap_band_52GHz_enable) {
2086                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2087                         ch_num = nitems(iwm_nvm_channels);
2088                 else
2089                         ch_num = nitems(iwm_nvm_channels_8000);
2090                 memset(bands, 0, sizeof(bands));
2091                 setbit(bands, IEEE80211_MODE_11A);
2092                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2093                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2094         }
2095 }
2096
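/*
 * Derive the MAC address on family 8000 devices: prefer the address
 * from the MAC-override NVM section if it is valid, otherwise fall
 * back to the address stored in the WFMP periphery registers.
 */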
2097 static void
2098 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2099         const uint16_t *mac_override, const uint16_t *nvm_hw)
2100 {
2101         const uint8_t *hw_addr;
2102
2103         if (mac_override) {
2104                 static const uint8_t reserved_mac[] = {
2105                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2106                 };
2107
2108                 hw_addr = (const uint8_t *)(mac_override +
2109                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2110
2111                 /*
2112                  * Store the MAC address from MAO section.
2113                  * No byte swapping is required in MAO section
2114                  */
2115                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2116
2117                 /*
2118                  * Force the use of the OTP MAC address in case of reserved MAC
2119                  * address in the NVM, or if address is given but invalid.
2120                  */
2121                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2122                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2123                     iwm_is_valid_ether_addr(data->hw_addr) &&
2124                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2125                         return;
2126
2127                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2128                     "%s: mac address from nvm override section invalid\n",
2129                     __func__);
2130         }
2131
2132         if (nvm_hw) {
2133                 /* read the mac address from WFMP registers */
2134                 uint32_t mac_addr0 =
2135                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2136                 uint32_t mac_addr1 =
2137                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2138
2139                 hw_addr = (const uint8_t *)&mac_addr0;
2140                 data->hw_addr[0] = hw_addr[3];
2141                 data->hw_addr[1] = hw_addr[2];
2142                 data->hw_addr[2] = hw_addr[1];
2143                 data->hw_addr[3] = hw_addr[0];
2144
2145                 hw_addr = (const uint8_t *)&mac_addr1;
2146                 data->hw_addr[4] = hw_addr[1];
2147                 data->hw_addr[5] = hw_addr[0];
2148
2149                 return;
2150         }
2151
2152         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2153         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2154 }
2155
2156 static int
2157 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2158             const uint16_t *phy_sku)
2159 {
2160         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2161                 return le16_to_cpup(nvm_sw + IWM_SKU);
2162
2163         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2164 }
2165
2166 static int
2167 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2168 {
2169         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2170                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2171         else
2172                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2173                                                 IWM_NVM_VERSION_8000));
2174 }
2175
2176 static int
2177 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2178                   const uint16_t *phy_sku)
2179 {
2180         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2181                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2182
2183         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2184 }
2185
2186 static int
2187 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2188 {
2189         int n_hw_addr;
2190
2191         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2192                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2193
2194         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2195
2196         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2197 }
2198
2199 static void
2200 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2201                   uint32_t radio_cfg)
2202 {
2203         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2204                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2205                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2206                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2207                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2208                 return;
2209         }
2210
2211         /* set the radio configuration for family 8000 */
2212         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2213         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2214         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2215         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2216         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2217         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2218 }
2219
2220 static int
2221 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2222                    const uint16_t *nvm_hw, const uint16_t *mac_override)
2223 {
2224 #ifdef notyet /* for FAMILY 9000 */
2225         if (cfg->mac_addr_from_csr) {
2226                 iwm_set_hw_address_from_csr(sc, data);
2227         } else
2228 #endif
2229         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2230                 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2231
2232                 /* The byte order is little endian 16 bit, meaning 214365 */
2233                 data->hw_addr[0] = hw_addr[1];
2234                 data->hw_addr[1] = hw_addr[0];
2235                 data->hw_addr[2] = hw_addr[3];
2236                 data->hw_addr[3] = hw_addr[2];
2237                 data->hw_addr[4] = hw_addr[5];
2238                 data->hw_addr[5] = hw_addr[4];
2239         } else {
2240                 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2241         }
2242
2243         if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2244                 device_printf(sc->sc_dev, "no valid mac address was found\n");
2245                 return EINVAL;
2246         }
2247
2248         return 0;
2249 }
2250
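/*
 * Parse the NVM sections into an iwm_nvm_data structure: NVM version,
 * radio configuration, SKU capabilities, MAC address and the
 * per-channel flags.
 */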
2251 static struct iwm_nvm_data *
2252 iwm_parse_nvm_data(struct iwm_softc *sc,
2253                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2254                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2255                    const uint16_t *phy_sku, const uint16_t *regulatory)
2256 {
2257         struct iwm_nvm_data *data;
2258         uint32_t sku, radio_cfg;
2259
2260         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2261                 data = malloc(sizeof(*data) +
2262                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2263                     M_DEVBUF, M_NOWAIT | M_ZERO);
2264         } else {
2265                 data = malloc(sizeof(*data) +
2266                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2267                     M_DEVBUF, M_NOWAIT | M_ZERO);
2268         }
2269         if (!data)
2270                 return NULL;
2271
2272         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2273
2274         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2275         iwm_set_radio_cfg(sc, data, radio_cfg);
2276
2277         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2278         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2279         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2280         data->sku_cap_11n_enable = 0;
2281
2282         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2283
2284         /* If no valid mac address was found - bail out */
2285         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2286                 free(data, M_DEVBUF);
2287                 return NULL;
2288         }
2289
2290         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2291                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2292                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2293         } else {
2294                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2295                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2296         }
2297
2298         return data;
2299 }
2300
2301 static void
2302 iwm_free_nvm_data(struct iwm_nvm_data *data)
2303 {
2304         if (data != NULL)
2305                 free(data, M_DEVBUF);
2306 }
2307
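/*
 * Verify that the NVM sections required for this device family are
 * present, then hand them to iwm_parse_nvm_data().
 */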
2308 static struct iwm_nvm_data *
2309 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2310 {
2311         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2312
2313         /* Checking for required sections */
2314         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2315                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2316                     !sections[sc->cfg->nvm_hw_section_num].data) {
2317                         device_printf(sc->sc_dev,
2318                             "Can't parse empty OTP/NVM sections\n");
2319                         return NULL;
2320                 }
2321         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2322                 /* SW and REGULATORY sections are mandatory */
2323                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2324                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2325                         device_printf(sc->sc_dev,
2326                             "Can't parse empty OTP/NVM sections\n");
2327                         return NULL;
2328                 }
2329                 /* MAC_OVERRIDE or at least HW section must exist */
2330                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2331                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2332                         device_printf(sc->sc_dev,
2333                             "Can't parse mac_address, empty sections\n");
2334                         return NULL;
2335                 }
2336
2337                 /* PHY_SKU section is mandatory in B0 */
2338                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2339                         device_printf(sc->sc_dev,
2340                             "Can't parse phy_sku in B0, empty sections\n");
2341                         return NULL;
2342                 }
2343         } else {
2344                 panic("unknown device family %d\n", sc->cfg->device_family);
2345         }
2346
2347         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2348         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2349         calib = (const uint16_t *)
2350             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2351         regulatory = (const uint16_t *)
2352             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2353         mac_override = (const uint16_t *)
2354             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2355         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2356
2357         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2358             phy_sku, regulatory);
2359 }
2360
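/*
 * Read all NVM sections from the device into temporary buffers and
 * parse them into sc->nvm_data.
 */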
2361 static int
2362 iwm_nvm_init(struct iwm_softc *sc)
2363 {
2364         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2365         int i, ret, section;
2366         uint32_t size_read = 0;
2367         uint8_t *nvm_buffer, *temp;
2368         uint16_t len;
2369
2370         memset(nvm_sections, 0, sizeof(nvm_sections));
2371
2372         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2373                 return EINVAL;
2374
2375         /* load NVM values from nic */
2376         /* Read From FW NVM */
2377         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2378
2379         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2380         if (!nvm_buffer)
2381                 return ENOMEM;
2382         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2383                 /* we override the constness for initial read */
2384                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2385                                            &len, size_read);
2386                 if (ret)
2387                         continue;
2388                 size_read += len;
2389                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2390                 if (!temp) {
2391                         ret = ENOMEM;
2392                         break;
2393                 }
2394                 memcpy(temp, nvm_buffer, len);
2395
2396                 nvm_sections[section].data = temp;
2397                 nvm_sections[section].length = len;
2398         }
2399         if (!size_read)
2400                 device_printf(sc->sc_dev, "OTP is blank\n");
2401         free(nvm_buffer, M_DEVBUF);
2402
2403         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2404         if (!sc->nvm_data)
2405                 return EINVAL;
2406         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2407                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2408
2409         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2410                 if (nvm_sections[i].data != NULL)
2411                         free(nvm_sections[i].data, M_DEVBUF);
2412         }
2413
2414         return 0;
2415 }
2416
2417 /*
2418  * Firmware loading gunk.  This is kind of a weird hybrid between the
2419  * iwn driver and the Linux iwlwifi driver.
2420  */
2421
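/*
 * Load one firmware section by splitting it into chunks no larger
 * than IWM_FH_MEM_TB_MAX_LENGTH and DMA'ing each chunk to the device.
 */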
2422 static int
2423 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2424         const uint8_t *section, uint32_t byte_cnt)
2425 {
2426         int error = EINVAL;
2427         uint32_t chunk_sz, offset;
2428
2429         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2430
2431         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2432                 uint32_t addr, len;
2433                 const uint8_t *data;
2434
2435                 addr = dst_addr + offset;
2436                 len = MIN(chunk_sz, byte_cnt - offset);
2437                 data = section + offset;
2438
2439                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2440                 if (error)
2441                         break;
2442         }
2443
2444         return error;
2445 }
2446
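/*
 * DMA a single firmware chunk to the device: copy it into the
 * pre-allocated DMA buffer, program the FH service channel, and wait
 * for the "chunk done" interrupt.
 */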
2447 static int
2448 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2449         const uint8_t *chunk, uint32_t byte_cnt)
2450 {
2451         struct iwm_dma_info *dma = &sc->fw_dma;
2452         int error;
2453
2454         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2455         memcpy(dma->vaddr, chunk, byte_cnt);
2456         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2457
2458         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2459             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2460                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2461                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2462         }
2463
2464         sc->sc_fw_chunk_done = 0;
2465
2466         if (!iwm_nic_lock(sc))
2467                 return EBUSY;
2468
2469         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2470             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2471         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2472             dst_addr);
2473         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2474             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2475         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2476             (iwm_get_dma_hi_addr(dma->paddr)
2477               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2478         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2479             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2480             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2481             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2482         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2483             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2484             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2485             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2486
2487         iwm_nic_unlock(sc);
2488
2489         /* wait 1s for this segment to load */
2490         while (!sc->sc_fw_chunk_done)
2491                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2492                         break;
2493
2494         if (!sc->sc_fw_chunk_done) {
2495                 device_printf(sc->sc_dev,
2496                     "fw chunk addr 0x%x len %d failed to load\n",
2497                     dst_addr, byte_cnt);
2498         }
2499
2500         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2501             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2502                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2503                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2504                 iwm_nic_unlock(sc);
2505         }
2506
2507         return error;
2508 }
2509
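/*
 * Load the firmware sections belonging to one CPU on family 8000
 * devices, notifying the uCode of each loaded section via
 * IWM_FH_UCODE_LOAD_STATUS.
 */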
2510 int
2511 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2512     int cpu, int *first_ucode_section)
2513 {
2514         int shift_param;
2515         int i, error = 0, sec_num = 0x1;
2516         uint32_t val, last_read_idx = 0;
2517         const void *data;
2518         uint32_t dlen;
2519         uint32_t offset;
2520
2521         if (cpu == 1) {
2522                 shift_param = 0;
2523                 *first_ucode_section = 0;
2524         } else {
2525                 shift_param = 16;
2526                 (*first_ucode_section)++;
2527         }
2528
2529         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2530                 last_read_idx = i;
2531                 data = fws->fw_sect[i].fws_data;
2532                 dlen = fws->fw_sect[i].fws_len;
2533                 offset = fws->fw_sect[i].fws_devoff;
2534
2535                 /*
2536                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2537                  * CPU1 to CPU2.
2538                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2539                  * CPU2 non paged to CPU2 paging sec.
2540                  */
2541                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2542                     offset == IWM_PAGING_SEPARATOR_SECTION)
2543                         break;
2544
2545                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2546                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2547                     i, offset, dlen, cpu);
2548
2549                 if (dlen > sc->sc_fwdmasegsz) {
2550                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2551                             "chunk %d too large (%d bytes)\n", i, dlen);
2552                         error = EFBIG;
2553                 } else {
2554                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2555                 }
2556                 if (error) {
2557                         device_printf(sc->sc_dev,
2558                             "could not load firmware chunk %d (error %d)\n",
2559                             i, error);
2560                         return error;
2561                 }
2562
2563                 /* Notify the ucode of the loaded section number and status */
2564                 if (iwm_nic_lock(sc)) {
2565                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2566                         val = val | (sec_num << shift_param);
2567                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2568                         sec_num = (sec_num << 1) | 0x1;
2569                         iwm_nic_unlock(sc);
2570
2571                         /*
2572                          * The firmware won't load correctly without this delay.
2573                          */
2574                         DELAY(8000);
2575                 }
2576         }
2577
2578         *first_ucode_section = last_read_idx;
2579
2580         if (iwm_nic_lock(sc)) {
2581                 if (cpu == 1)
2582                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2583                 else
2584                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2585                 iwm_nic_unlock(sc);
2586         }
2587
2588         return 0;
2589 }
2590
2591 int
2592 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2593 {
2594         struct iwm_fw_sects *fws;
2595         int error = 0;
2596         int first_ucode_section;
2597
2598         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2599             ucode_type);
2600
2601         fws = &sc->sc_fw.fw_sects[ucode_type];
2602
2603         /* configure the ucode to be ready to get the secured image */
2604         /* release CPU reset */
2605         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2606
2607         /* load to FW the binary Secured sections of CPU1 */
2608         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2609         if (error)
2610                 return error;
2611
2612         /* load to FW the binary sections of CPU2 */
2613         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2614 }
2615
2616 static int
2617 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2618 {
2619         struct iwm_fw_sects *fws;
2620         int error, i;
2621         const void *data;
2622         uint32_t dlen;
2623         uint32_t offset;
2624
2625         sc->sc_uc.uc_intr = 0;
2626
2627         fws = &sc->sc_fw.fw_sects[ucode_type];
2628         for (i = 0; i < fws->fw_count; i++) {
2629                 data = fws->fw_sect[i].fws_data;
2630                 dlen = fws->fw_sect[i].fws_len;
2631                 offset = fws->fw_sect[i].fws_devoff;
2632                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2633                     "LOAD FIRMWARE type %d offset %u len %d\n",
2634                     ucode_type, offset, dlen);
2635                 if (dlen > sc->sc_fwdmasegsz) {
2636                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2637                             "chunk %d too large (%d bytes)\n", i, dlen);
2638                         error = EFBIG;
2639                 } else {
2640                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2641                 }
2642                 if (error) {
2643                         device_printf(sc->sc_dev,
2644                             "could not load firmware chunk %u of %u "
2645                             "(error=%d)\n", i, fws->fw_count, error);
2646                         return error;
2647                 }
2648         }
2649
2650         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2651
2652         return 0;
2653 }
2654
2655 static int
2656 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2657 {
2658         int error, w;
2659
2660         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2661                 error = iwm_load_firmware_8000(sc, ucode_type);
2662         else
2663                 error = iwm_load_firmware_7000(sc, ucode_type);
2664         if (error)
2665                 return error;
2666
2667         /* wait for the firmware to load */
2668         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2669                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2670         }
2671         if (error || !sc->sc_uc.uc_ok) {
2672                 device_printf(sc->sc_dev, "could not load firmware\n");
2673                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2674                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2675                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2676                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2677                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2678                 }
2679         }
2680
2681         /*
2682          * Give the firmware some time to initialize.
2683          * Accessing it too early causes errors.
2684          */
2685         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2686
2687         return error;
2688 }
2689
2690 /* iwlwifi: pcie/trans.c */
2691 static int
2692 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2693 {
2694         int error;
2695
2696         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2697
2698         if ((error = iwm_nic_init(sc)) != 0) {
2699                 device_printf(sc->sc_dev, "unable to init nic\n");
2700                 return error;
2701         }
2702
2703         /* make sure rfkill handshake bits are cleared */
2704         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2705         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2706             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2707
2708         /* clear (again), then enable host interrupts */
2709         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2710         iwm_enable_interrupts(sc);
2711
2712         /* really make sure rfkill handshake bits are cleared */
2713         /* maybe we should write a few times more?  just to make sure */
2714         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2715         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2716
2717         /* Load the given image to the HW */
2718         return iwm_load_firmware(sc, ucode_type);
2719 }
2720
2721 static int
2722 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2723 {
2724         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2725                 .valid = htole32(valid_tx_ant),
2726         };
2727
2728         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2729             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2730 }
2731
2732 /* iwlwifi: mvm/fw.c */
2733 static int
2734 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2735 {
2736         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2737         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2738
2739         /* Set parameters */
2740         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2741         phy_cfg_cmd.calib_control.event_trigger =
2742             sc->sc_default_calib[ucode_type].event_trigger;
2743         phy_cfg_cmd.calib_control.flow_trigger =
2744             sc->sc_default_calib[ucode_type].flow_trigger;
2745
2746         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2747             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2748         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2749             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2750 }
2751
2752 static int
2753 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2754         struct iwm_rx_packet *pkt, void *data)
2755 {
2756         struct iwm_phy_db *phy_db = data;
2757
2758         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2759                 if (pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2760                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2761                             __func__, pkt->hdr.code);
2762                 }
2763                 return TRUE;
2764         }
2765
2766         if (iwm_phy_db_set_section(phy_db, pkt)) {
2767                 device_printf(sc->sc_dev,
2768                     "%s: iwm_phy_db_set_section failed\n", __func__);
2769         }
2770
2771         return FALSE;
2772 }
2773
2774 static int
2775 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2776         enum iwm_ucode_type ucode_type)
2777 {
2778         enum iwm_ucode_type old_type = sc->sc_uc_current;
2779         int error;
2780
2781         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2782                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2783                         error);
2784                 return error;
2785         }
2786
2787         sc->sc_uc_current = ucode_type;
2788         error = iwm_start_fw(sc, ucode_type);
2789         if (error) {
2790                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2791                 sc->sc_uc_current = old_type;
2792                 return error;
2793         }
2794
2795         error = iwm_post_alive(sc);
2796         if (error) {
2797                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2798         }
2799         return error;
2800 }
2801
2802 /*
2803  * mvm misc bits
2804  */
2805
2806 /*
2807  * follows iwlwifi/fw.c
2808  */
2809 static int
2810 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2811 {
2812         struct iwm_notification_wait calib_wait;
2813         static const uint16_t init_complete[] = {
2814                 IWM_INIT_COMPLETE_NOTIF,
2815                 IWM_CALIB_RES_NOTIF_PHY_DB
2816         };
2817         int ret;
2818
2819         /* do not operate with rfkill switch turned on */
2820         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2821                 device_printf(sc->sc_dev,
2822                     "radio is disabled by hardware switch\n");
2823                 return EPERM;
2824         }
2825
2826         iwm_init_notification_wait(sc->sc_notif_wait,
2827                                    &calib_wait,
2828                                    init_complete,
2829                                    nitems(init_complete),
2830                                    iwm_wait_phy_db_entry,
2831                                    sc->sc_phy_db);
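        /*
         * calib_wait will now collect IWM_CALIB_RES_NOTIF_PHY_DB entries
         * (via iwm_wait_phy_db_entry) until IWM_INIT_COMPLETE_NOTIF arrives.
         */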
2832
2833         /* Will also start the device */
2834         ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2835         if (ret) {
2836                 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2837                     ret);
2838                 goto error;
2839         }
2840
2841         if (justnvm) {
2842                 /* Read nvm */
2843                 ret = iwm_nvm_init(sc);
2844                 if (ret) {
2845                         device_printf(sc->sc_dev, "failed to read nvm\n");
2846                         goto error;
2847                 }
2848                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2849                 goto error;
2850         }
2851
2852         ret = iwm_send_bt_init_conf(sc);
2853         if (ret) {
2854                 device_printf(sc->sc_dev,
2855                     "failed to send bt coex configuration: %d\n", ret);
2856                 goto error;
2857         }
2858
2859         /* Init Smart FIFO. */
2860         ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2861         if (ret)
2862                 goto error;
2863
2864         /* Send TX valid antennas before triggering calibrations */
2865         ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2866         if (ret) {
2867                 device_printf(sc->sc_dev,
2868                     "failed to send antennas before calibration: %d\n", ret);
2869                 goto error;
2870         }
2871
2872         /*
2873          * Send the PHY configuration command to the init uCode to start
2874          * the 16.0 uCode init image's internal calibrations.
2875          */
2876         ret = iwm_send_phy_cfg_cmd(sc);
2877         if (ret) {
2878                 device_printf(sc->sc_dev,
2879                     "%s: Failed to run INIT calibrations: %d\n",
2880                     __func__, ret);
2881                 goto error;
2882         }
2883
2884         /*
2885          * Nothing to do but wait for the init complete notification
2886          * from the firmware.
2887          */
2888         IWM_UNLOCK(sc);
2889         ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2890             IWM_MVM_UCODE_CALIB_TIMEOUT);
2891         IWM_LOCK(sc);
2892
2893
2894         goto out;
2895
2896 error:
2897         iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2898 out:
2899         return ret;
2900 }
2901
2902 /*
2903  * receive side
2904  */
2905
2906 /* (re)stock rx ring, called at init-time and at runtime */
2907 static int
2908 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2909 {
2910         struct iwm_rx_ring *ring = &sc->rxq;
2911         struct iwm_rx_data *data = &ring->data[idx];
2912         struct mbuf *m;
2913         bus_dmamap_t dmamap = NULL;
2914         bus_dma_segment_t seg;
2915         int nsegs, error;
2916
2917         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2918         if (m == NULL)
2919                 return ENOBUFS;
2920
2921         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2922         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2923             &seg, &nsegs, BUS_DMA_NOWAIT);
2924         if (error != 0) {
2925                 device_printf(sc->sc_dev,
2926                     "%s: can't map mbuf, error %d\n", __func__, error);
2927                 goto fail;
2928         }
2929
2930         if (data->m != NULL)
2931                 bus_dmamap_unload(ring->data_dmat, data->map);
2932
2933         /* Swap ring->spare_map with data->map */
2934         dmamap = data->map;
2935         data->map = ring->spare_map;
2936         ring->spare_map = dmamap;
2937
2938         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2939         data->m = m;
2940
2941         /* Update RX descriptor: it holds the (256-byte aligned) DMA address >> 8. */
2942         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2943         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2944         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2945             BUS_DMASYNC_PREWRITE);
2946
2947         return 0;
2948 fail:
2949         m_freem(m);
2950         return error;
2951 }
2952
2953 /* iwlwifi: mvm/rx.c */
2954 #define IWM_RSSI_OFFSET 50
2955 static int
2956 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2957 {
2958         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2959         uint32_t agc_a, agc_b;
2960         uint32_t val;
2961
2962         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2963         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2964         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2965
2966         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2967         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2968         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2969
2970         /*
2971          * dBm = rssi dB - agc dB - constant.
2972          * Higher AGC (higher radio gain) means lower signal.
2973          */
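        /*
         * Illustrative example (hypothetical values): a reported rssi of 40
         * with an agc of 20 gives 40 - 50 - 20 = -30 dBm.
         */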
2974         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2975         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2976         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2977
2978         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2979             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2980             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2981
2982         return max_rssi_dbm;
2983 }
2984
2985 /* iwlwifi: mvm/rx.c */
2986 /*
2987  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2988  * Values are reported by the firmware as positive; negate them to
2989  * obtain dBm.  Account for missing antennas by replacing 0 values with
2990  * -256 dBm: practically zero power and an infeasible 8-bit value.
2991  */
2992 static int
2993 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2994 {
2995         int energy_a, energy_b, energy_c, max_energy;
2996         uint32_t val;
2997
2998         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2999         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3000             IWM_RX_INFO_ENERGY_ANT_A_POS;
3001         energy_a = energy_a ? -energy_a : -256;
3002         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3003             IWM_RX_INFO_ENERGY_ANT_B_POS;
3004         energy_b = energy_b ? -energy_b : -256;
3005         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3006             IWM_RX_INFO_ENERGY_ANT_C_POS;
3007         energy_c = energy_c ? -energy_c : -256;
3008         max_energy = MAX(energy_a, energy_b);
3009         max_energy = MAX(max_energy, energy_c);
3010
3011         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3012             "energy In A %d B %d C %d , and max %d\n",
3013             energy_a, energy_b, energy_c, max_energy);
3014
3015         return max_energy;
3016 }
3017
3018 static void
3019 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3020         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3021 {
3022         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3023
3024         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3025         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3026
3027         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3028 }
3029
3030 /*
3031  * Retrieve the average noise (in dBm) among receivers.
3032  */
3033 static int
3034 iwm_get_noise(struct iwm_softc *sc,
3035     const struct iwm_mvm_statistics_rx_non_phy *stats)
3036 {
3037         int i, total, nbant, noise;
3038
3039         total = nbant = noise = 0;
3040         for (i = 0; i < 3; i++) {
3041                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3042                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3043                     __func__,
3044                     i,
3045                     noise);
3046
3047                 if (noise) {
3048                         total += noise;
3049                         nbant++;
3050                 }
3051         }
3052
3053         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3054             __func__, nbant, total);
3055 #if 0
3056         /* There should be at least one antenna but check anyway. */
3057         return (nbant == 0) ? -127 : (total / nbant) - 107;
3058 #else
3059         /* For now, just hard-code it to -96 to be safe */
3060         return (-96);
3061 #endif
3062 }
3063
3064 /*
3065  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3066  *
3067  * Handles the actual data of the Rx packet from the fw
3068  */
3069 static void
3070 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3071         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3072 {
3073         struct ieee80211com *ic = &sc->sc_ic;
3074         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3075         struct ieee80211_frame *wh;
3076         struct ieee80211_node *ni;
3077         struct ieee80211_rx_stats rxs;
3078         struct mbuf *m;
3079         struct iwm_rx_phy_info *phy_info;
3080         struct iwm_rx_mpdu_res_start *rx_res;
3081         uint32_t len;
3082         uint32_t rx_pkt_status;
3083         int rssi;
3084
3085         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3086
3087         phy_info = &sc->sc_last_phy_info;
3088         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3089         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3090         len = le16toh(rx_res->byte_count);
3091         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3092
3093         m = data->m;
3094         m->m_data = pkt->data + sizeof(*rx_res);
3095         m->m_pkthdr.len = m->m_len = len;
3096
3097         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3098                 device_printf(sc->sc_dev,
3099                     "dsp size out of range [0,20]: %d\n",
3100                     phy_info->cfg_phy_cnt);
3101                 goto fail;
3102         }
3103
3104         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3105             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3106                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3107                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3108                 goto fail;
3109         }
3110
3111         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3112                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3113         } else {
3114                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3115         }
3116
3117         /* Note: RSSI is absolute (i.e. a negative dBm value). */
3118         if (rssi < IWM_MIN_DBM)
3119                 rssi = IWM_MIN_DBM;
3120         else if (rssi > IWM_MAX_DBM)
3121                 rssi = IWM_MAX_DBM;
3122
3123         /* Map it to a value relative to the noise floor. */
3124         rssi = rssi - sc->sc_noise;
3125
3126         /* replenish ring for the buffer we're going to feed to the sharks */
3127         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3128                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3129                     __func__);
3130                 goto fail;
3131         }
3132
3133         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3134             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3135
3136         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3137
3138         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3139             "%s: phy_info: channel=%d, flags=0x%08x\n",
3140             __func__,
3141             le16toh(phy_info->channel),
3142             le16toh(phy_info->phy_flags));
3143
3144         /*
3145          * Populate an RX state struct with the provided information.
3146          */
3147         bzero(&rxs, sizeof(rxs));
3148         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3149         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3150         rxs.c_ieee = le16toh(phy_info->channel);
3151         if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3152                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3153         } else {
3154                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3155         }
3156
3157         /* rssi is in 1/2 dB units */
3158         rxs.rssi = rssi * 2;
3159         rxs.nf = sc->sc_noise;
3160
3161         if (ieee80211_radiotap_active_vap(vap)) {
3162                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3163
3164                 tap->wr_flags = 0;
3165                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3166                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3167                 tap->wr_chan_freq = htole16(rxs.c_freq);
3168                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3169                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3170                 tap->wr_dbm_antsignal = (int8_t)rssi;
3171                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3172                 tap->wr_tsft = phy_info->system_timestamp;
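                /*
                 * Map the firmware PHY rate value to a radiotap rate in
                 * 500 kb/s units: the CCK values are the bit rate in
                 * 100 kb/s units, the OFDM values are PLCP rate codes.
                 */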
3173                 switch (phy_info->rate) {
3174                 /* CCK rates. */
3175                 case  10: tap->wr_rate =   2; break;
3176                 case  20: tap->wr_rate =   4; break;
3177                 case  55: tap->wr_rate =  11; break;
3178                 case 110: tap->wr_rate =  22; break;
3179                 /* OFDM rates. */
3180                 case 0xd: tap->wr_rate =  12; break;
3181                 case 0xf: tap->wr_rate =  18; break;
3182                 case 0x5: tap->wr_rate =  24; break;
3183                 case 0x7: tap->wr_rate =  36; break;
3184                 case 0x9: tap->wr_rate =  48; break;
3185                 case 0xb: tap->wr_rate =  72; break;
3186                 case 0x1: tap->wr_rate =  96; break;
3187                 case 0x3: tap->wr_rate = 108; break;
3188                 /* Unknown rate: should not happen. */
3189                 default:  tap->wr_rate =   0;
3190                 }
3191         }
3192
3193         IWM_UNLOCK(sc);
3194         if (ni != NULL) {
3195                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3196                 ieee80211_input_mimo(ni, m, &rxs);
3197                 ieee80211_free_node(ni);
3198         } else {
3199                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3200                 ieee80211_input_mimo_all(ic, m, &rxs);
3201         }
3202         IWM_LOCK(sc);
3203
3204         return;
3205
3206 fail:   counter_u64_add(ic->ic_ierrors, 1);
3207 }
3208
3209 static int
3210 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3211         struct iwm_node *in)
3212 {
3213         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3214         struct ieee80211_node *ni = &in->in_ni;
3215         struct ieee80211vap *vap = ni->ni_vap;
3216         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3217         int failack = tx_resp->failure_frame;
3218
3219         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3220
3221         /* Update rate control statistics. */
3222         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3223             __func__,
3224             (int) le16toh(tx_resp->status.status),
3225             (int) le16toh(tx_resp->status.sequence),
3226             tx_resp->frame_count,
3227             tx_resp->bt_kill_count,
3228             tx_resp->failure_rts,
3229             tx_resp->failure_frame,
3230             le32toh(tx_resp->initial_rate),
3231             (int) le16toh(tx_resp->wireless_media_time));
3232
3233         if (status != IWM_TX_STATUS_SUCCESS &&
3234             status != IWM_TX_STATUS_DIRECT_DONE) {
3235                 ieee80211_ratectl_tx_complete(vap, ni,
3236                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3237                 return (1);
3238         } else {
3239                 ieee80211_ratectl_tx_complete(vap, ni,
3240                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3241                 return (0);
3242         }
3243 }
3244
3245 static void
3246 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3247         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3248 {
3249         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3250         int idx = cmd_hdr->idx;
3251         int qid = cmd_hdr->qid;
3252         struct iwm_tx_ring *ring = &sc->txq[qid];
3253         struct iwm_tx_data *txd = &ring->data[idx];
3254         struct iwm_node *in = txd->in;
3255         struct mbuf *m = txd->m;
3256         int status;
3257
3258         KASSERT(txd->done == 0, ("txd not done"));
3259         KASSERT(txd->in != NULL, ("txd without node"));
3260         KASSERT(txd->m != NULL, ("txd without mbuf"));
3261
3262         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3263
3264         sc->sc_tx_timer = 0;
3265
3266         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3267
3268         /* Unmap and free mbuf. */
3269         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3270         bus_dmamap_unload(ring->data_dmat, txd->map);
3271
3272         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3273             "free txd %p, in %p\n", txd, txd->in);
3274         txd->done = 1;
3275         txd->m = NULL;
3276         txd->in = NULL;
3277
3278         ieee80211_tx_complete(&in->in_ni, m, status);
3279
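        /*
         * If the ring has drained below the low-water mark, clear this
         * queue's bit in qfullmsk and restart transmission once no queue
         * is full.
         */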
3280         if (--ring->queued < IWM_TX_RING_LOMARK) {
3281                 sc->qfullmsk &= ~(1 << ring->qid);
3282                 if (sc->qfullmsk == 0) {
3283                         iwm_start(sc);
3284                 }
3285         }
3286 }
3287
3288 /*
3289  * transmit side
3290  */
3291
3292 /*
3293  * Process a "command done" firmware notification.  This is where we
3294  * wake up processes waiting for a synchronous command completion.
3295  * Adapted from if_iwn.
3296  */
3297 static void
3298 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3299 {
3300         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3301         struct iwm_tx_data *data;
3302
3303         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3304                 return; /* Not a command ack. */
3305         }
3306
3307         /* XXX wide commands? */
3308         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3309             "cmd notification type 0x%x qid %d idx %d\n",
3310             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3311
3312         data = &ring->data[pkt->hdr.idx];
3313
3314         /* If the command was mapped in an mbuf, free it. */
3315         if (data->m != NULL) {
3316                 bus_dmamap_sync(ring->data_dmat, data->map,
3317                     BUS_DMASYNC_POSTWRITE);
3318                 bus_dmamap_unload(ring->data_dmat, data->map);
3319                 m_freem(data->m);
3320                 data->m = NULL;
3321         }
3322         wakeup(&ring->desc[pkt->hdr.idx]);
3323
3324         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3325                 device_printf(sc->sc_dev,
3326                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3327                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3328                 /* XXX call iwm_force_nmi() */
3329         }
3330
3331         KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3332         ring->queued--;
3333         if (ring->queued == 0)
3334                 iwm_pcie_clear_cmd_in_flight(sc);
3335 }
3336
3337 #if 0
3338 /*
3339  * necessary only for block ack mode
3340  */
3341 void
3342 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3343         uint16_t len)
3344 {
3345         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3346         uint16_t w_val;
3347
3348         scd_bc_tbl = sc->sched_dma.vaddr;
3349
3350         len += 8; /* magic numbers came naturally from paris */
3351         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3352                 len = roundup(len, 4) / 4;
3353
3354         w_val = htole16(sta_id << 12 | len);
3355
3356         /* Update TX scheduler. */
3357         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3358         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3359             BUS_DMASYNC_PREWRITE);
3360
3361         /* I really wonder what this is ?!? */
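        /*
         * (In iwlwifi the first IWM_TFD_QUEUE_SIZE_BC_DUP entries of this
         * table are mirrored past IWM_TFD_QUEUE_SIZE_MAX, presumably so the
         * scheduler can read the byte-count table with wrap-around.)
         */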
3362         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3363                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3364                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3365                     BUS_DMASYNC_PREWRITE);
3366         }
3367 }
3368 #endif
3369
3370 /*
3371  * Take an 802.11 (non-n) rate and find the relevant rate
3372  * table entry.  Return the index into in_ridx[].
3373  *
3374  * The caller then uses that index back into in_ridx
3375  * to figure out the rate index programmed /into/
3376  * the firmware for this given node.
3377  */
3378 static int
3379 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3380     uint8_t rate)
3381 {
3382         int i;
3383         uint8_t r;
3384
3385         for (i = 0; i < nitems(in->in_ridx); i++) {
3386                 r = iwm_rates[in->in_ridx[i]].rate;
3387                 if (rate == r)
3388                         return (i);
3389         }
3390
3391         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3392             "%s: couldn't find an entry for rate=%d\n",
3393             __func__,
3394             rate);
3395
3396         /* XXX Return the first */
3397         /* XXX TODO: have it return the /lowest/ */
3398         return (0);
3399 }
3400
3401 static int
3402 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3403 {
3404         int i;
3405
3406         for (i = 0; i < nitems(iwm_rates); i++) {
3407                 if (iwm_rates[i].rate == rate)
3408                         return (i);
3409         }
3410         /* XXX error? */
3411         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3412             "%s: couldn't find an entry for rate=%d\n",
3413             __func__,
3414             rate);
3415         return (0);
3416 }
3417
3418 /*
3419  * Fill in the rate related information for a transmit command.
3420  */
3421 static const struct iwm_rate *
3422 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3423         struct mbuf *m, struct iwm_tx_cmd *tx)
3424 {
3425         struct ieee80211_node *ni = &in->in_ni;
3426         struct ieee80211_frame *wh;
3427         const struct ieee80211_txparam *tp = ni->ni_txparms;
3428         const struct iwm_rate *rinfo;
3429         int type;
3430         int ridx, rate_flags;
3431
3432         wh = mtod(m, struct ieee80211_frame *);
3433         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3434
3435         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3436         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3437
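        /*
         * Rate selection: management and multicast frames use the fixed
         * management/multicast rate, a configured fixed unicast rate takes
         * precedence for the rest, EAPOL frames fall back to the management
         * rate, and other data frames use the rate table programmed into the
         * firmware for this node.
         */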
3438         if (type == IEEE80211_FC0_TYPE_MGT) {
3439                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3440                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3441                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3442         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3443                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3444                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3445                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3446         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3447                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3448                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3449                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3450         } else if (m->m_flags & M_EAPOL) {
3451                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3452                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3453                     "%s: EAPOL\n", __func__);
3454         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3455                 int i;
3456
3457                 /* for data frames, use RS table */
3458                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3459                 /* XXX pass pktlen */
3460                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3461                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3462                 ridx = in->in_ridx[i];
3463
3464                 /* This is the index into the programmed table */
3465                 tx->initial_rate_index = i;
3466                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3467
3468                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3469                     "%s: start with i=%d, txrate %d\n",
3470                     __func__, i, iwm_rates[ridx].rate);
3471         } else {
3472                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3473                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3474                     __func__, tp->mgmtrate);
3475         }
3476
3477         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3478             "%s: frame type=%d txrate %d\n",
3479                 __func__, type, iwm_rates[ridx].rate);
3480
3481         rinfo = &iwm_rates[ridx];
3482
3483         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3484             __func__, ridx,
3485             rinfo->rate,
3486             !! (IWM_RIDX_IS_CCK(ridx))
3487             );
3488
3489         /* XXX TODO: hard-coded TX antenna? */
3490         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3491         if (IWM_RIDX_IS_CCK(ridx))
3492                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3493         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3494
3495         return rinfo;
3496 }
3497
3498 #define TB0_SIZE 16
3499 static int
3500 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3501 {
3502         struct ieee80211com *ic = &sc->sc_ic;
3503         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3504         struct iwm_node *in = IWM_NODE(ni);
3505         struct iwm_tx_ring *ring;
3506         struct iwm_tx_data *data;
3507         struct iwm_tfd *desc;
3508         struct iwm_device_cmd *cmd;
3509         struct iwm_tx_cmd *tx;
3510         struct ieee80211_frame *wh;
3511         struct ieee80211_key *k = NULL;
3512         struct mbuf *m1;
3513         const struct iwm_rate *rinfo;
3514         uint32_t flags;
3515         u_int hdrlen;
3516         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3517         int nsegs;
3518         uint8_t tid, type;
3519         int i, totlen, error, pad;
3520
3521         wh = mtod(m, struct ieee80211_frame *);
3522         hdrlen = ieee80211_anyhdrsize(wh);
3523         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3524         tid = 0;
3525         ring = &sc->txq[ac];
3526         desc = &ring->desc[ring->cur];
3527         memset(desc, 0, sizeof(*desc));
3528         data = &ring->data[ring->cur];
3529
3530         /* Fill out iwm_tx_cmd to send to the firmware */
3531         cmd = &ring->cmd[ring->cur];
3532         cmd->hdr.code = IWM_TX_CMD;
3533         cmd->hdr.flags = 0;
3534         cmd->hdr.qid = ring->qid;
3535         cmd->hdr.idx = ring->cur;
3536
3537         tx = (void *)cmd->data;
3538         memset(tx, 0, sizeof(*tx));
3539
3540         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3541
3542         /* Encrypt the frame if need be. */
3543         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3544                 /* Retrieve key for TX && do software encryption. */
3545                 k = ieee80211_crypto_encap(ni, m);
3546                 if (k == NULL) {
3547                         m_freem(m);
3548                         return (ENOBUFS);
3549                 }
3550                 /* 802.11 header may have moved. */
3551                 wh = mtod(m, struct ieee80211_frame *);
3552         }
3553
3554         if (ieee80211_radiotap_active_vap(vap)) {
3555                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3556
3557                 tap->wt_flags = 0;
3558                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3559                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3560                 tap->wt_rate = rinfo->rate;
3561                 if (k != NULL)
3562                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3563                 ieee80211_radiotap_tx(vap, m);
3564         }
3565
3566
3567         totlen = m->m_pkthdr.len;
3568
3569         flags = 0;
3570         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3571                 flags |= IWM_TX_CMD_FLG_ACK;
3572         }
3573
3574         if (type == IEEE80211_FC0_TYPE_DATA
3575             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3576             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3577                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3578         }
3579
3580         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3581             type != IEEE80211_FC0_TYPE_DATA)
3582                 tx->sta_id = sc->sc_aux_sta.sta_id;
3583         else
3584                 tx->sta_id = IWM_STATION_ID;
3585
3586         if (type == IEEE80211_FC0_TYPE_MGT) {
3587                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3588
3589                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3590                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3591                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3592                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3593                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3594                 } else {
3595                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3596                 }
3597         } else {
3598                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3599         }
3600
3601         if (hdrlen & 3) {
3602                 /* First segment length must be a multiple of 4. */
3603                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3604                 pad = 4 - (hdrlen & 3);
3605         } else
3606                 pad = 0;
3607
3608         tx->driver_txop = 0;
3609         tx->next_frame_len = 0;
3610
3611         tx->len = htole16(totlen);
3612         tx->tid_tspec = tid;
3613         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3614
3615         /* Set physical address of "scratch area". */
3616         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3617         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3618
3619         /* Copy 802.11 header in TX command. */
3620         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3621
3622         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3623
3624         tx->sec_ctl = 0;
3625         tx->tx_flags |= htole32(flags);
3626
3627         /* Trim 802.11 header. */
3628         m_adj(m, hdrlen);
3629         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3630             segs, &nsegs, BUS_DMA_NOWAIT);
3631         if (error != 0) {
3632                 if (error != EFBIG) {
3633                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3634                             error);
3635                         m_freem(m);
3636                         return error;
3637                 }
3638                 /* Too many DMA segments, linearize mbuf. */
3639                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3640                 if (m1 == NULL) {
3641                         device_printf(sc->sc_dev,
3642                             "%s: could not defrag mbuf\n", __func__);
3643                         m_freem(m);
3644                         return (ENOBUFS);
3645                 }
3646                 m = m1;
3647
3648                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3649                     segs, &nsegs, BUS_DMA_NOWAIT);
3650                 if (error != 0) {
3651                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3652                             error);
3653                         m_freem(m);
3654                         return error;
3655                 }
3656         }
3657         data->m = m;
3658         data->in = in;
3659         data->done = 0;
3660
3661         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3662             "sending txd %p, in %p\n", data, data->in);
3663         KASSERT(data->in != NULL, ("node is NULL"));
3664
3665         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3666             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3667             ring->qid, ring->cur, totlen, nsegs,
3668             le32toh(tx->tx_flags),
3669             le32toh(tx->rate_n_flags),
3670             tx->initial_rate_index
3671             );
3672
3673         /* Fill TX descriptor. */
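        /*
         * TB0 covers the first 16 bytes of the TX command; TB1 covers the
         * rest of the command plus the (padded) 802.11 header; the remaining
         * TBs map the mbuf data segments loaded above.
         */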
3674         desc->num_tbs = 2 + nsegs;
3675
3676         desc->tbs[0].lo = htole32(data->cmd_paddr);
3677         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3678             (TB0_SIZE << 4);
3679         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3680         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3681             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3682               + hdrlen + pad - TB0_SIZE) << 4);
3683
3684         /* Other DMA segments are for data payload. */
3685         for (i = 0; i < nsegs; i++) {
3686                 seg = &segs[i];
3687                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3688                 desc->tbs[i+2].hi_n_len = \
3689                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3690                     | ((seg->ds_len) << 4);
3691         }
3692
3693         bus_dmamap_sync(ring->data_dmat, data->map,
3694             BUS_DMASYNC_PREWRITE);
3695         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3696             BUS_DMASYNC_PREWRITE);
3697         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3698             BUS_DMASYNC_PREWRITE);
3699
3700 #if 0
3701         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3702 #endif
3703
3704         /* Kick TX ring. */
3705         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3706         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3707
3708         /* Mark TX ring as full if we reach a certain threshold. */
3709         if (++ring->queued > IWM_TX_RING_HIMARK) {
3710                 sc->qfullmsk |= 1 << ring->qid;
3711         }
3712
3713         return 0;
3714 }
3715
3716 static int
3717 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3718     const struct ieee80211_bpf_params *params)
3719 {
3720         struct ieee80211com *ic = ni->ni_ic;
3721         struct iwm_softc *sc = ic->ic_softc;
3722         int error = 0;
3723
3724         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3725             "->%s begin\n", __func__);
3726
3727         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3728                 m_freem(m);
3729                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3730                     "<-%s not RUNNING\n", __func__);
3731                 return (ENETDOWN);
3732         }
3733
3734         IWM_LOCK(sc);
3735         /* XXX fix this */
3736         if (params == NULL) {
3737                 error = iwm_tx(sc, m, ni, 0);
3738         } else {
3739                 error = iwm_tx(sc, m, ni, 0);
3740         }
3741         sc->sc_tx_timer = 5;
3742         IWM_UNLOCK(sc);
3743
3744         return (error);
3745 }
3746
3747 /*
3748  * mvm/tx.c
3749  */
3750
3751 /*
3752  * Note that there are transports that buffer frames before they reach
3753  * the firmware. This means that after flush_tx_path is called, the
3754  * queue might not be empty. The race-free way to handle this is to:
3755  * 1) set the station as draining
3756  * 2) flush the Tx path
3757  * 3) wait for the transport queues to be empty
3758  */
3759 int
3760 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3761 {
3762         int ret;
3763         struct iwm_tx_path_flush_cmd flush_cmd = {
3764                 .queues_ctl = htole32(tfd_msk),
3765                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3766         };
3767
3768         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3769             sizeof(flush_cmd), &flush_cmd);
3770         if (ret)
3771                 device_printf(sc->sc_dev,
3772                     "Flushing tx queue failed: %d\n", ret);
3773         return ret;
3774 }
3775
3776 /*
3777  * BEGIN mvm/sta.c
3778  */
3779
3780 static int
3781 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3782         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3783 {
3784         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3785             cmd, status);
3786 }
3787
3788 /* send station add/update command to firmware */
3789 static int
3790 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3791 {
3792         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3793         int ret;
3794         uint32_t status;
3795
3796         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3797
3798         add_sta_cmd.sta_id = IWM_STATION_ID;
3799         add_sta_cmd.mac_id_n_color
3800             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3801                 IWM_DEFAULT_COLOR));
3802         if (!update) {
3803                 int ac;
3804                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3805                         add_sta_cmd.tfd_queue_msk |=
3806                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3807                 }
3808                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3809         }
3810         add_sta_cmd.add_modify = update ? 1 : 0;
3811         add_sta_cmd.station_flags_msk
3812             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3813         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3814         if (update)
3815                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3816
3817         status = IWM_ADD_STA_SUCCESS;
3818         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3819         if (ret)
3820                 return ret;
3821
3822         switch (status) {
3823         case IWM_ADD_STA_SUCCESS:
3824                 break;
3825         default:
3826                 ret = EIO;
3827                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3828                 break;
3829         }
3830
3831         return ret;
3832 }
3833
3834 static int
3835 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3836 {
3837         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3838 }
3839
3840 static int
3841 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3842 {
3843         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3844 }
3845
3846 static int
3847 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3848         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3849 {
3850         struct iwm_mvm_add_sta_cmd_v7 cmd;
3851         int ret;
3852         uint32_t status;
3853
3854         memset(&cmd, 0, sizeof(cmd));
3855         cmd.sta_id = sta->sta_id;
3856         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3857
3858         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3859         cmd.tid_disable_tx = htole16(0xffff);
3860
3861         if (addr)
3862                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3863
3864         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3865         if (ret)
3866                 return ret;
3867
3868         switch (status) {
3869         case IWM_ADD_STA_SUCCESS:
3870                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3871                     "%s: Internal station added.\n", __func__);
3872                 return 0;
3873         default:
3874                 device_printf(sc->sc_dev,
3875                     "%s: Add internal station failed, status=0x%x\n",
3876                     __func__, status);
3877                 ret = EIO;
3878                 break;
3879         }
3880         return ret;
3881 }
3882
3883 static int
3884 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3885 {
3886         int ret;
3887
3888         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3889         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3890
3891         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3892         if (ret)
3893                 return ret;
3894
3895         ret = iwm_mvm_add_int_sta_common(sc,
3896             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3897
3898         if (ret)
3899                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3900         return ret;
3901 }
3902
3903 /*
3904  * END mvm/sta.c
3905  */
3906
3907 /*
3908  * BEGIN mvm/quota.c
3909  */
3910
3911 static int
3912 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3913 {
3914         struct iwm_time_quota_cmd cmd;
3915         int i, idx, ret, num_active_macs, quota, quota_rem;
3916         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3917         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3918         uint16_t id;
3919
3920         memset(&cmd, 0, sizeof(cmd));
3921
3922         /* currently, PHY ID == binding ID */
3923         if (in) {
3924                 id = in->in_phyctxt->id;
3925                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3926                 colors[id] = in->in_phyctxt->color;
3927
3928                 if (1)
3929                         n_ifs[id] = 1;
3930         }
3931
3932         /*
3933          * The FW's scheduling session consists of
3934          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3935          * equally between all the bindings that require quota
3936          */
3937         num_active_macs = 0;
3938         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3939                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3940                 num_active_macs += n_ifs[i];
3941         }
3942
3943         quota = 0;
3944         quota_rem = 0;
3945         if (num_active_macs) {
3946                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3947                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3948         }
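        /*
         * Example (hypothetical): with two active MACs each binding would
         * get IWM_MVM_MAX_QUOTA / 2 fragments; any remainder goes to the
         * first binding (see below).
         */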
3949
3950         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3951                 if (colors[i] < 0)
3952                         continue;
3953
3954                 cmd.quotas[idx].id_and_color =
3955                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3956
3957                 if (n_ifs[i] <= 0) {
3958                         cmd.quotas[idx].quota = htole32(0);
3959                         cmd.quotas[idx].max_duration = htole32(0);
3960                 } else {
3961                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3962                         cmd.quotas[idx].max_duration = htole32(0);
3963                 }
3964                 idx++;
3965         }
3966
3967         /* Give the remainder of the session to the first binding */
3968         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3969
3970         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3971             sizeof(cmd), &cmd);
3972         if (ret)
3973                 device_printf(sc->sc_dev,
3974                     "%s: Failed to send quota: %d\n", __func__, ret);
3975         return ret;
3976 }
3977
3978 /*
3979  * END mvm/quota.c
3980  */
3981
3982 /*
3983  * ieee80211 routines
3984  */
3985
3986 /*
3987  * Change to AUTH state in 80211 state machine.  Roughly matches what
3988  * Linux does in bss_info_changed().
3989  */
3990 static int
3991 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3992 {
3993         struct ieee80211_node *ni;
3994         struct iwm_node *in;
3995         struct iwm_vap *iv = IWM_VAP(vap);
3996         uint32_t duration;
3997         int error;
3998
3999         /*
4000          * XXX I have a feeling that the vap node is being
4001          * freed from underneath us. Grr.
4002          */
4003         ni = ieee80211_ref_node(vap->iv_bss);
4004         in = IWM_NODE(ni);
4005         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4006             "%s: called; vap=%p, bss ni=%p\n",
4007             __func__,
4008             vap,
4009             ni);
4010
4011         in->in_assoc = 0;
4012
4013         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4014         if (error != 0)
4015                 return error;
4016
4017         error = iwm_allow_mcast(vap, sc);
4018         if (error) {
4019                 device_printf(sc->sc_dev,
4020                     "%s: failed to set multicast\n", __func__);
4021                 goto out;
4022         }
4023
4024         /*
4025          * This is where it deviates from what Linux does.
4026          *
4027          * Linux iwlwifi doesn't reset the nic each time, nor does it
4028          * call ctxt_add() here.  Instead, it adds it during vap creation,
4029          * and always does a mac_ctx_changed().
4030          *
4031          * The OpenBSD port doesn't attempt to do that - it resets things
4032          * at odd states and does the add here.
4033          *
4034          * So, until the state handling is fixed (i.e., we never reset
4035          * the NIC except for a firmware failure, which should drag
4036          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4037          * contexts that are required), let's do a dirty hack here.
4038          */
4039         if (iv->is_uploaded) {
4040                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4041                         device_printf(sc->sc_dev,
4042                             "%s: failed to update MAC\n", __func__);
4043                         goto out;
4044                 }
4045                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4046                     in->in_ni.ni_chan, 1, 1)) != 0) {
4047                         device_printf(sc->sc_dev,
4048                             "%s: failed update phy ctxt\n", __func__);
4049                         goto out;
4050                 }
4051                 in->in_phyctxt = &sc->sc_phyctxt[0];
4052
4053                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4054                         device_printf(sc->sc_dev,
4055                             "%s: binding update cmd\n", __func__);
4056                         goto out;
4057                 }
4058                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4059                         device_printf(sc->sc_dev,
4060                             "%s: failed to update sta\n", __func__);
4061                         goto out;
4062                 }
4063         } else {
4064                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4065                         device_printf(sc->sc_dev,
4066                             "%s: failed to add MAC\n", __func__);
4067                         goto out;
4068                 }
4069                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4070                     in->in_ni.ni_chan, 1, 1)) != 0) {
4071                         device_printf(sc->sc_dev,
4072                             "%s: failed add phy ctxt!\n", __func__);
4073                         error = ETIMEDOUT;
4074                         goto out;
4075                 }
4076                 in->in_phyctxt = &sc->sc_phyctxt[0];
4077
4078                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4079                         device_printf(sc->sc_dev,
4080                             "%s: binding add cmd\n", __func__);
4081                         goto out;
4082                 }
4083                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4084                         device_printf(sc->sc_dev,
4085                             "%s: failed to add sta\n", __func__);
4086                         goto out;
4087                 }
4088         }
4089
4090         /*
4091          * Prevent the FW from wandering off channel during association
4092          * by "protecting" the session with a time event.
4093          */
4094         /* XXX duration is in units of TU, not MS */
4095         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4096         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4097         DELAY(100);
4098
4099         error = 0;
4100 out:
4101         ieee80211_free_node(ni);
4102         return (error);
4103 }
4104
4105 static int
4106 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4107 {
4108         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4109         int error;
4110
4111         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4112                 device_printf(sc->sc_dev,
4113                     "%s: failed to update STA\n", __func__);
4114                 return error;
4115         }
4116
4117         in->in_assoc = 1;
4118         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4119                 device_printf(sc->sc_dev,
4120                     "%s: failed to update MAC\n", __func__);
4121                 return error;
4122         }
4123
4124         return 0;
4125 }
4126
4127 static int
4128 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4129 {
4130         uint32_t tfd_msk;
4131
4132         /*
4133          * Ok, so *technically* the proper set of calls for going
4134          * from RUN back to SCAN is:
4135          *
4136          * iwm_mvm_power_mac_disable(sc, in);
4137          * iwm_mvm_mac_ctxt_changed(sc, in);
4138          * iwm_mvm_rm_sta(sc, in);
4139          * iwm_mvm_update_quotas(sc, NULL);
4140          * iwm_mvm_mac_ctxt_changed(sc, in);
4141          * iwm_mvm_binding_remove_vif(sc, in);
4142          * iwm_mvm_mac_ctxt_remove(sc, in);
4143          *
4144          * However, that freezes the device no matter which permutations
4145          * and modifications are attempted.  Obviously, this driver is missing
4146          * something since it works in the Linux driver, but figuring out what
4147          * is missing is a little more complicated.  Now, since we're going
4148          * back to nothing anyway, we'll just do a complete device reset.
4149          * Up yours, device!
4150          */
4151         /*
4152          * Just using 0xf for the queues mask is fine as long as we only
4153          * get here from RUN state.
4154          */
4155         tfd_msk = 0xf;
4156         mbufq_drain(&sc->sc_snd);
4157         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4158         /*
4159          * We seem to get away with just synchronously sending the
4160          * IWM_TXPATH_FLUSH command.
4161          */
4162 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4163         iwm_stop_device(sc);
4164         iwm_init_hw(sc);
4165         if (in)
4166                 in->in_assoc = 0;
4167         return 0;
4168
4169 #if 0
4170         int error;
4171
4172         iwm_mvm_power_mac_disable(sc, in);
4173
4174         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4175                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4176                 return error;
4177         }
4178
4179         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4180                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4181                 return error;
4182         }
4183         error = iwm_mvm_rm_sta(sc, in);
4184         in->in_assoc = 0;
4185         iwm_mvm_update_quotas(sc, NULL);
4186         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4187                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4188                 return error;
4189         }
4190         iwm_mvm_binding_remove_vif(sc, in);
4191
4192         iwm_mvm_mac_ctxt_remove(sc, in);
4193
4194         return error;
4195 #endif
4196 }
4197
4198 static struct ieee80211_node *
4199 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4200 {
4201         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4202             M_NOWAIT | M_ZERO);
4203 }
4204
4205 static void
4206 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4207 {
4208         struct ieee80211_node *ni = &in->in_ni;
4209         struct iwm_lq_cmd *lq = &in->in_lq;
4210         int nrates = ni->ni_rates.rs_nrates;
4211         int i, ridx, tab = 0;
4212 //      int txant = 0;
4213
4214         if (nrates > nitems(lq->rs_table)) {
4215                 device_printf(sc->sc_dev,
4216                     "%s: node supports %d rates, driver handles "
4217                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4218                 return;
4219         }
4220         if (nrates == 0) {
4221                 device_printf(sc->sc_dev,
4222                     "%s: node supports 0 rates, odd!\n", __func__);
4223                 return;
4224         }
4225
4226         /*
4227          * XXX .. and most of iwm_node is not initialised explicitly;
4228          * it's all just 0x0 passed to the firmware.
4229          */
4230
4231         /* first figure out which rates we should support */
4232         /* XXX TODO: this isn't 11n aware /at all/ */
4233         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4234         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4235             "%s: nrates=%d\n", __func__, nrates);
4236
4237         /*
4238          * Loop over nrates and populate in_ridx from the highest
4239          * rate to the lowest rate.  Remember, in_ridx[] has
4240          * IEEE80211_RATE_MAXSIZE entries!
4241          */
4242         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4243                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4244
4245                 /* Map 802.11 rate to HW rate index. */
4246                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4247                         if (iwm_rates[ridx].rate == rate)
4248                                 break;
4249                 if (ridx > IWM_RIDX_MAX) {
4250                         device_printf(sc->sc_dev,
4251                             "%s: WARNING: device rate for %d not found!\n",
4252                             __func__, rate);
4253                 } else {
4254                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4255                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4256                             __func__,
4257                             i,
4258                             rate,
4259                             ridx);
4260                         in->in_ridx[i] = ridx;
4261                 }
4262         }
4263
4264         /* then construct a lq_cmd based on those */
4265         memset(lq, 0, sizeof(*lq));
4266         lq->sta_id = IWM_STATION_ID;
4267
4268         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4269         if (ni->ni_flags & IEEE80211_NODE_HT)
4270                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4271
4272         /*
4273          * Are these used?  (We don't do SISO or MIMO.)
4274          * They need to be set to non-zero, though, or we get an error.
4275          */
4276         lq->single_stream_ant_msk = 1;
4277         lq->dual_stream_ant_msk = 1;
4278
4279         /*
4280          * Build the actual rate selection table.
4281          * The lowest bits are the rates.  Additionally,
4282          * CCK needs bit 9 to be set.  The rest of the bits
4283          * we add to the table select the tx antenna.  Note that we add
4284          * the rates highest rate first (the opposite order of ni_rates);
4285          * a sketch of the per-entry bit layout follows this function.
4286          */
4287         /*
4288          * XXX TODO: this should be looping over the min of nrates
4289          * and LQ_MAX_RETRY_NUM.  Sigh.
4290          */
4291         for (i = 0; i < nrates; i++) {
4292                 int nextant;
4293
4294 #if 0
4295                 if (txant == 0)
4296                         txant = iwm_mvm_get_valid_tx_ant(sc);
4297                 nextant = 1<<(ffs(txant)-1);
4298                 txant &= ~nextant;
4299 #else
4300                 nextant = iwm_mvm_get_valid_tx_ant(sc);
4301 #endif
4302                 /*
4303                  * Map the rate id into a rate index into
4304                  * our hardware table containing the
4305                  * configuration to use for this rate.
4306                  */
4307                 ridx = in->in_ridx[i];
4308                 tab = iwm_rates[ridx].plcp;
4309                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4310                 if (IWM_RIDX_IS_CCK(ridx))
4311                         tab |= IWM_RATE_MCS_CCK_MSK;
4312                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4313                     "station rate i=%d, rate=%d, hw=%x\n",
4314                     i, iwm_rates[ridx].rate, tab);
4315                 lq->rs_table[i] = htole32(tab);
4316         }
4317         /* then fill the rest with the lowest possible rate */
4318         for (i = nrates; i < nitems(lq->rs_table); i++) {
4319                 KASSERT(tab != 0, ("invalid tab"));
4320                 lq->rs_table[i] = htole32(tab);
4321         }
4322 }
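
     /*
      * Editor's sketch, not part of the driver: the per-entry bit layout
      * described inside iwm_setrates() above, pulled out into a hypothetical
      * helper.  It mirrors the loop body; only the function name is made up
      * for illustration.
      */
     #if 0
     static uint32_t
     iwm_lq_rate_word(int ridx, int ant_msk)
     {
             uint32_t tab;

             tab = iwm_rates[ridx].plcp;             /* low bits: PLCP rate code */
             tab |= ant_msk << IWM_RATE_MCS_ANT_POS; /* TX antenna selection */
             if (IWM_RIDX_IS_CCK(ridx))
                     tab |= IWM_RATE_MCS_CCK_MSK;    /* CCK needs bit 9 set */
             return (htole32(tab));
     }
     #endif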
4323
4324 static int
4325 iwm_media_change(struct ifnet *ifp)
4326 {
4327         struct ieee80211vap *vap = ifp->if_softc;
4328         struct ieee80211com *ic = vap->iv_ic;
4329         struct iwm_softc *sc = ic->ic_softc;
4330         int error;
4331
4332         error = ieee80211_media_change(ifp);
4333         if (error != ENETRESET)
4334                 return error;
4335
4336         IWM_LOCK(sc);
4337         if (ic->ic_nrunning > 0) {
4338                 iwm_stop(sc);
4339                 iwm_init(sc);
4340         }
4341         IWM_UNLOCK(sc);
4342         return error;
4343 }
4344
4345
4346 static int
4347 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4348 {
4349         struct iwm_vap *ivp = IWM_VAP(vap);
4350         struct ieee80211com *ic = vap->iv_ic;
4351         struct iwm_softc *sc = ic->ic_softc;
4352         struct iwm_node *in;
4353         int error;
4354
4355         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4356             "switching state %s -> %s\n",
4357             ieee80211_state_name[vap->iv_state],
4358             ieee80211_state_name[nstate]);
4359         IEEE80211_UNLOCK(ic);
4360         IWM_LOCK(sc);
4361
4362         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4363                 iwm_led_blink_stop(sc);
4364
4365         /* disable beacon filtering if we're hopping out of RUN */
4366         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4367                 iwm_mvm_disable_beacon_filter(sc);
4368
4369                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4370                         in->in_assoc = 0;
4371
4372                 if (nstate == IEEE80211_S_INIT) {
4373                         IWM_UNLOCK(sc);
4374                         IEEE80211_LOCK(ic);
4375                         error = ivp->iv_newstate(vap, nstate, arg);
4376                         IEEE80211_UNLOCK(ic);
4377                         IWM_LOCK(sc);
4378                         iwm_release(sc, NULL);
4379                         IWM_UNLOCK(sc);
4380                         IEEE80211_LOCK(ic);
4381                         return error;
4382                 }
4383
4384                 /*
4385                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4386                  * above then the card will be completely reinitialized,
4387                  * so the driver must do everything necessary to bring the card
4388                  * from INIT to SCAN.
4389                  *
4390          * Additionally, upon receiving a deauth frame from the AP,
4391          * the OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4392                  * state. This will also fail with this driver, so bring the FSM
4393                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4394                  *
4395                  * XXX TODO: fix this for FreeBSD!
4396                  */
4397                 if (nstate == IEEE80211_S_SCAN ||
4398                     nstate == IEEE80211_S_AUTH ||
4399                     nstate == IEEE80211_S_ASSOC) {
4400                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4401                             "Force transition to INIT; MGT=%d\n", arg);
4402                         IWM_UNLOCK(sc);
4403                         IEEE80211_LOCK(ic);
4404                         /* Always pass arg as -1 since we can't Tx right now. */
4405                         /*
4406                          * XXX arg is just ignored anyway when transitioning
4407                          *     to IEEE80211_S_INIT.
4408                          */
4409                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4410                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4411                             "Going INIT->SCAN\n");
4412                         nstate = IEEE80211_S_SCAN;
4413                         IEEE80211_UNLOCK(ic);
4414                         IWM_LOCK(sc);
4415                 }
4416         }
4417
4418         switch (nstate) {
4419         case IEEE80211_S_INIT:
4420                 break;
4421
4422         case IEEE80211_S_AUTH:
4423                 if ((error = iwm_auth(vap, sc)) != 0) {
4424                         device_printf(sc->sc_dev,
4425                             "%s: could not move to auth state: %d\n",
4426                             __func__, error);
4427                         break;
4428                 }
4429                 break;
4430
4431         case IEEE80211_S_ASSOC:
4432                 if ((error = iwm_assoc(vap, sc)) != 0) {
4433                         device_printf(sc->sc_dev,
4434                             "%s: failed to associate: %d\n", __func__,
4435                             error);
4436                         break;
4437                 }
4438                 break;
4439
4440         case IEEE80211_S_RUN:
4441         {
4442                 struct iwm_host_cmd cmd = {
4443                         .id = IWM_LQ_CMD,
4444                         .len = { sizeof(in->in_lq), },
4445                         .flags = IWM_CMD_SYNC,
4446                 };
4447
4448                 /* Update the association state, now that we have it all */
4449                 /* (e.g. the associd comes in at this point). */
4450                 error = iwm_assoc(vap, sc);
4451                 if (error != 0) {
4452                         device_printf(sc->sc_dev,
4453                             "%s: failed to update association state: %d\n",
4454                             __func__,
4455                             error);
4456                         break;
4457                 }
4458
4459                 in = IWM_NODE(vap->iv_bss);
4460                 iwm_mvm_power_mac_update_mode(sc, in);
4461                 iwm_mvm_enable_beacon_filter(sc, in);
4462                 iwm_mvm_update_quotas(sc, in);
4463                 iwm_setrates(sc, in);
4464
4465                 cmd.data[0] = &in->in_lq;
4466                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4467                         device_printf(sc->sc_dev,
4468                             "%s: IWM_LQ_CMD failed\n", __func__);
4469                 }
4470
4471                 iwm_mvm_led_enable(sc);
4472                 break;
4473         }
4474
4475         default:
4476                 break;
4477         }
4478         IWM_UNLOCK(sc);
4479         IEEE80211_LOCK(ic);
4480
4481         return (ivp->iv_newstate(vap, nstate, arg));
4482 }
4483
4484 void
4485 iwm_endscan_cb(void *arg, int pending)
4486 {
4487         struct iwm_softc *sc = arg;
4488         struct ieee80211com *ic = &sc->sc_ic;
4489
4490         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4491             "%s: scan ended\n",
4492             __func__);
4493
4494         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4495 }
4496
4497 /*
4498  * Aging and idle timeouts for the different possible scenarios
4499  * in the default configuration.
4500  */
4501 static const uint32_t
4502 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4503         {
4504                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4505                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4506         },
4507         {
4508                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4509                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4510         },
4511         {
4512                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4513                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4514         },
4515         {
4516                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4517                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4518         },
4519         {
4520                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4521                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4522         },
4523 };
4524
4525 /*
4526  * Aging and idle timeouts for the different possible scenarios
4527  * in single BSS MAC configuration.
4528  */
4529 static const uint32_t
4530 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4531         {
4532                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4533                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4534         },
4535         {
4536                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4537                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4538         },
4539         {
4540                 htole32(IWM_SF_MCAST_AGING_TIMER),
4541                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4542         },
4543         {
4544                 htole32(IWM_SF_BA_AGING_TIMER),
4545                 htole32(IWM_SF_BA_IDLE_TIMER)
4546         },
4547         {
4548                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4549                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4550         },
4551 };
4552
4553 static void
4554 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4555     struct ieee80211_node *ni)
4556 {
4557         int i, j, watermark;
4558
4559         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4560
4561         /*
4562          * If we are in association flow - check antenna configuration
4563          * capabilities of the AP station, and choose the watermark accordingly.
4564          */
4565         if (ni) {
4566                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4567 #ifdef notyet
4568                         if (ni->ni_rxmcs[2] != 0)
4569                                 watermark = IWM_SF_W_MARK_MIMO3;
4570                         else if (ni->ni_rxmcs[1] != 0)
4571                                 watermark = IWM_SF_W_MARK_MIMO2;
4572                         else
4573 #endif
4574                                 watermark = IWM_SF_W_MARK_SISO;
4575                 } else {
4576                         watermark = IWM_SF_W_MARK_LEGACY;
4577                 }
4578         /* default watermark value for unassociated mode. */
4579         } else {
4580                 watermark = IWM_SF_W_MARK_MIMO2;
4581         }
4582         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4583
4584         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4585                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4586                         sf_cmd->long_delay_timeouts[i][j] =
4587                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4588                 }
4589         }
4590
4591         if (ni) {
4592                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4593                        sizeof(iwm_sf_full_timeout));
4594         } else {
4595                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4596                        sizeof(iwm_sf_full_timeout_def));
4597         }
4598 }
4599
4600 static int
4601 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4602 {
4603         struct ieee80211com *ic = &sc->sc_ic;
4604         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4605         struct iwm_sf_cfg_cmd sf_cmd = {
4606                 .state = htole32(IWM_SF_FULL_ON),
4607         };
4608         int ret = 0;
4609
4610         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4611                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4612
4613         switch (new_state) {
4614         case IWM_SF_UNINIT:
4615         case IWM_SF_INIT_OFF:
4616                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4617                 break;
4618         case IWM_SF_FULL_ON:
4619                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4620                 break;
4621         default:
4622                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4623                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4624                           new_state);
4625                 return EINVAL;
4626         }
4627
4628         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4629                                    sizeof(sf_cmd), &sf_cmd);
4630         return ret;
4631 }
4632
4633 static int
4634 iwm_send_bt_init_conf(struct iwm_softc *sc)
4635 {
4636         struct iwm_bt_coex_cmd bt_cmd;
4637
4638         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4639         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4640
4641         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4642             &bt_cmd);
4643 }
4644
4645 static int
4646 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4647 {
4648         struct iwm_mcc_update_cmd mcc_cmd;
4649         struct iwm_host_cmd hcmd = {
4650                 .id = IWM_MCC_UPDATE_CMD,
4651                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4652                 .data = { &mcc_cmd },
4653         };
4654         int ret;
4655 #ifdef IWM_DEBUG
4656         struct iwm_rx_packet *pkt;
4657         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4658         struct iwm_mcc_update_resp *mcc_resp;
4659         int n_channels;
4660         uint16_t mcc;
4661 #endif
4662         int resp_v2 = isset(sc->sc_enabled_capa,
4663             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4664
4665         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4666         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4667         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4668             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4669                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4670         else
4671                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4672
4673         if (resp_v2)
4674                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4675         else
4676                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4677
4678         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4679             "send MCC update to FW with '%c%c' src = %d\n",
4680             alpha2[0], alpha2[1], mcc_cmd.source_id);
4681
4682         ret = iwm_send_cmd(sc, &hcmd);
4683         if (ret)
4684                 return ret;
4685
4686 #ifdef IWM_DEBUG
4687         pkt = hcmd.resp_pkt;
4688
4689         /* Extract MCC response */
4690         if (resp_v2) {
4691                 mcc_resp = (void *)pkt->data;
4692                 mcc = mcc_resp->mcc;
4693                 n_channels =  le32toh(mcc_resp->n_channels);
4694         } else {
4695                 mcc_resp_v1 = (void *)pkt->data;
4696                 mcc = mcc_resp_v1->mcc;
4697                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4698         }
4699
4700         /* Workaround for a FW/NVM issue - FW returns 0x00 for the world domain */
4701         if (mcc == 0)
4702                 mcc = 0x3030;  /* "00" - world */
4703
4704         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4705             "regulatory domain '%c%c' (%d channels available)\n",
4706             mcc >> 8, mcc & 0xff, n_channels);
4707 #endif
4708         iwm_free_resp(sc, &hcmd);
4709
4710         return 0;
4711 }
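
     /*
      * Editor's sketch, not part of the driver: how a two-letter alpha2
      * country code maps to the 16-bit MCC value used above, and back.
      * The helper and variable names are made up; "ZZ" packs to 0x5a5a and
      * the firmware's 0x3030 workaround value unpacks to "00" (the world
      * domain).  Byte swapping (htole16()) is left out of the sketch.
      */
     #if 0
     static void
     iwm_mcc_pack_example(void)
     {
             const char alpha2[2] = { 'Z', 'Z' };
             uint16_t mcc;
             char cc[3];

             mcc = (alpha2[0] << 8) | alpha2[1];     /* 'Z','Z' -> 0x5a5a */
             cc[0] = mcc >> 8;                       /* high byte: first letter */
             cc[1] = mcc & 0xff;                     /* low byte: second letter */
             cc[2] = '\0';                           /* 0x3030 unpacks to "00" */
     }
     #endif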
4712
4713 static void
4714 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4715 {
4716         struct iwm_host_cmd cmd = {
4717                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4718                 .len = { sizeof(uint32_t), },
4719                 .data = { &backoff, },
4720         };
4721
4722         if (iwm_send_cmd(sc, &cmd) != 0) {
4723                 device_printf(sc->sc_dev,
4724                     "failed to change thermal tx backoff\n");
4725         }
4726 }
4727
4728 static int
4729 iwm_init_hw(struct iwm_softc *sc)
4730 {
4731         struct ieee80211com *ic = &sc->sc_ic;
4732         int error, i, ac;
4733
4734         if ((error = iwm_start_hw(sc)) != 0) {
4735                 printf("iwm_start_hw: failed %d\n", error);
4736                 return error;
4737         }
4738
4739         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4740                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4741                 return error;
4742         }
4743
4744         /*
4745          * We should stop and restart the HW since the INIT
4746          * image has just been loaded.
4747          */
4748         iwm_stop_device(sc);
4749         if ((error = iwm_start_hw(sc)) != 0) {
4750                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4751                 return error;
4752         }
4753
4754         /* Restart, this time with the regular firmware */
4755         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4756         if (error) {
4757                 device_printf(sc->sc_dev, "could not load firmware\n");
4758                 goto error;
4759         }
4760
4761         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4762                 device_printf(sc->sc_dev, "bt init conf failed\n");
4763                 goto error;
4764         }
4765
4766         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4767         if (error != 0) {
4768                 device_printf(sc->sc_dev, "antenna config failed\n");
4769                 goto error;
4770         }
4771
4772         /* Send phy db control command and then phy db calibration */
4773         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4774                 goto error;
4775
4776         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4777                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4778                 goto error;
4779         }
4780
4781         /* Add auxiliary station for scanning */
4782         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4783                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4784                 goto error;
4785         }
4786
4787         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4788                 /*
4789                  * The channel used here isn't relevant as it's
4790                  * going to be overwritten in the other flows.
4791                  * For now use the first channel we have.
4792                  */
4793                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4794                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4795                         goto error;
4796         }
4797
4798         /* Initialize tx backoffs to the minimum. */
4799         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4800                 iwm_mvm_tt_tx_backoff(sc, 0);
4801
4802         error = iwm_mvm_power_update_device(sc);
4803         if (error)
4804                 goto error;
4805
4806         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4807                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4808                         goto error;
4809         }
4810
4811         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4812                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4813                         goto error;
4814         }
4815
4816         /* Enable Tx queues. */
4817         for (ac = 0; ac < WME_NUM_AC; ac++) {
4818                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4819                     iwm_mvm_ac_to_tx_fifo[ac]);
4820                 if (error)
4821                         goto error;
4822         }
4823
4824         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4825                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4826                 goto error;
4827         }
4828
4829         return 0;
4830
4831  error:
4832         iwm_stop_device(sc);
4833         return error;
4834 }
4835
4836 /* Allow multicast from our BSSID. */
4837 static int
4838 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4839 {
4840         struct ieee80211_node *ni = vap->iv_bss;
4841         struct iwm_mcast_filter_cmd *cmd;
4842         size_t size;
4843         int error;
4844
4845         size = roundup(sizeof(*cmd), 4);
4846         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4847         if (cmd == NULL)
4848                 return ENOMEM;
4849         cmd->filter_own = 1;
4850         cmd->port_id = 0;
4851         cmd->count = 0;
4852         cmd->pass_all = 1;
4853         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4854
4855         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4856             IWM_CMD_SYNC, size, cmd);
4857         free(cmd, M_DEVBUF);
4858
4859         return (error);
4860 }
4861
4862 /*
4863  * ifnet interfaces
4864  */
4865
4866 static void
4867 iwm_init(struct iwm_softc *sc)
4868 {
4869         int error;
4870
4871         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4872                 return;
4873         }
4874         sc->sc_generation++;
4875         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4876
4877         if ((error = iwm_init_hw(sc)) != 0) {
4878                 printf("iwm_init_hw failed %d\n", error);
4879                 iwm_stop(sc);
4880                 return;
4881         }
4882
4883         /*
4884          * Ok, firmware loaded and we are jogging
4885          */
4886         sc->sc_flags |= IWM_FLAG_HW_INITED;
4887         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4888 }
4889
4890 static int
4891 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4892 {
4893         struct iwm_softc *sc;
4894         int error;
4895
4896         sc = ic->ic_softc;
4897
4898         IWM_LOCK(sc);
4899         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4900                 IWM_UNLOCK(sc);
4901                 return (ENXIO);
4902         }
4903         error = mbufq_enqueue(&sc->sc_snd, m);
4904         if (error) {
4905                 IWM_UNLOCK(sc);
4906                 return (error);
4907         }
4908         iwm_start(sc);
4909         IWM_UNLOCK(sc);
4910         return (0);
4911 }
4912
4913 /*
4914  * Dequeue packets from sendq and call send.
4915  */
4916 static void
4917 iwm_start(struct iwm_softc *sc)
4918 {
4919         struct ieee80211_node *ni;
4920         struct mbuf *m;
4921         int ac = 0;
4922
4923         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4924         while (sc->qfullmsk == 0 &&
4925                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4926                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4927                 if (iwm_tx(sc, m, ni, ac) != 0) {
4928                         if_inc_counter(ni->ni_vap->iv_ifp,
4929                             IFCOUNTER_OERRORS, 1);
4930                         ieee80211_free_node(ni);
4931                         continue;
4932                 }
4933                 sc->sc_tx_timer = 15;
4934         }
4935         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4936 }
4937
4938 static void
4939 iwm_stop(struct iwm_softc *sc)
4940 {
4941
4942         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4943         sc->sc_flags |= IWM_FLAG_STOPPED;
4944         sc->sc_generation++;
4945         iwm_led_blink_stop(sc);
4946         sc->sc_tx_timer = 0;
4947         iwm_stop_device(sc);
4948         sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4949 }
4950
4951 static void
4952 iwm_watchdog(void *arg)
4953 {
4954         struct iwm_softc *sc = arg;
4955         struct ieee80211com *ic = &sc->sc_ic;
4956
4957         if (sc->sc_tx_timer > 0) {
4958                 if (--sc->sc_tx_timer == 0) {
4959                         device_printf(sc->sc_dev, "device timeout\n");
4960 #ifdef IWM_DEBUG
4961                         iwm_nic_error(sc);
4962 #endif
4963                         ieee80211_restart_all(ic);
4964                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4965                         return;
4966                 }
4967         }
4968         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4969 }
4970
4971 static void
4972 iwm_parent(struct ieee80211com *ic)
4973 {
4974         struct iwm_softc *sc = ic->ic_softc;
4975         int startall = 0;
4976
4977         IWM_LOCK(sc);
4978         if (ic->ic_nrunning > 0) {
4979                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4980                         iwm_init(sc);
4981                         startall = 1;
4982                 }
4983         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4984                 iwm_stop(sc);
4985         IWM_UNLOCK(sc);
4986         if (startall)
4987                 ieee80211_start_all(ic);
4988 }
4989
4990 /*
4991  * The interrupt side of things
4992  */
4993
4994 /*
4995  * error dumping routines are from iwlwifi/mvm/utils.c
4996  */
4997
4998 /*
4999  * Note: This structure is read from the device with IO accesses,
5000  * and the reading already does the endian conversion. As it is
5001  * read with uint32_t-sized accesses, any members with a different size
5002  * need to be ordered correctly though!
5003  */
5004 struct iwm_error_event_table {
5005         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5006         uint32_t error_id;              /* type of error */
5007         uint32_t trm_hw_status0;        /* TRM HW status */
5008         uint32_t trm_hw_status1;        /* TRM HW status */
5009         uint32_t blink2;                /* branch link */
5010         uint32_t ilink1;                /* interrupt link */
5011         uint32_t ilink2;                /* interrupt link */
5012         uint32_t data1;         /* error-specific data */
5013         uint32_t data2;         /* error-specific data */
5014         uint32_t data3;         /* error-specific data */
5015         uint32_t bcon_time;             /* beacon timer */
5016         uint32_t tsf_low;               /* network timestamp function timer */
5017         uint32_t tsf_hi;                /* network timestamp function timer */
5018         uint32_t gp1;           /* GP1 timer register */
5019         uint32_t gp2;           /* GP2 timer register */
5020         uint32_t fw_rev_type;   /* firmware revision type */
5021         uint32_t major;         /* uCode version major */
5022         uint32_t minor;         /* uCode version minor */
5023         uint32_t hw_ver;                /* HW Silicon version */
5024         uint32_t brd_ver;               /* HW board version */
5025         uint32_t log_pc;                /* log program counter */
5026         uint32_t frame_ptr;             /* frame pointer */
5027         uint32_t stack_ptr;             /* stack pointer */
5028         uint32_t hcmd;          /* last host command header */
5029         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
5030                                  * rxtx_flag */
5031         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
5032                                  * host_flag */
5033         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
5034                                  * enc_flag */
5035         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
5036                                  * time_flag */
5037         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
5038                                  * wico interrupt */
5039         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
5040         uint32_t wait_event;            /* wait event() caller address */
5041         uint32_t l2p_control;   /* L2pControlField */
5042         uint32_t l2p_duration;  /* L2pDurationField */
5043         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
5044         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
5045         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
5046                                  * (LMPM_PMG_SEL) */
5047         uint32_t u_timestamp;   /* the date and time of the firmware
5048                                  * compilation */
5049         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
5050 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5051
5052 /*
5053  * UMAC error struct - relevant starting from family 8000 chip.
5054  * Note: This structure is read from the device with IO accesses,
5055  * and the reading already does the endian conversion. As it is
5056  * read with u32-sized accesses, any members with a different size
5057  * need to be ordered correctly though!
5058  */
5059 struct iwm_umac_error_event_table {
5060         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5061         uint32_t error_id;      /* type of error */
5062         uint32_t blink1;        /* branch link */
5063         uint32_t blink2;        /* branch link */
5064         uint32_t ilink1;        /* interrupt link */
5065         uint32_t ilink2;        /* interrupt link */
5066         uint32_t data1;         /* error-specific data */
5067         uint32_t data2;         /* error-specific data */
5068         uint32_t data3;         /* error-specific data */
5069         uint32_t umac_major;
5070         uint32_t umac_minor;
5071         uint32_t frame_pointer; /* core register 27*/
5072         uint32_t stack_pointer; /* core register 28 */
5073         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
5074         uint32_t nic_isr_pref;  /* ISR status register */
5075 } __packed;
5076
5077 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5078 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5079
5080 #ifdef IWM_DEBUG
5081 struct {
5082         const char *name;
5083         uint8_t num;
5084 } advanced_lookup[] = {
5085         { "NMI_INTERRUPT_WDG", 0x34 },
5086         { "SYSASSERT", 0x35 },
5087         { "UCODE_VERSION_MISMATCH", 0x37 },
5088         { "BAD_COMMAND", 0x38 },
5089         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5090         { "FATAL_ERROR", 0x3D },
5091         { "NMI_TRM_HW_ERR", 0x46 },
5092         { "NMI_INTERRUPT_TRM", 0x4C },
5093         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5094         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5095         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5096         { "NMI_INTERRUPT_HOST", 0x66 },
5097         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5098         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5099         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5100         { "ADVANCED_SYSASSERT", 0 },
5101 };
5102
5103 static const char *
5104 iwm_desc_lookup(uint32_t num)
5105 {
5106         int i;
5107
5108         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5109                 if (advanced_lookup[i].num == num)
5110                         return advanced_lookup[i].name;
5111
5112         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5113         return advanced_lookup[i].name;
5114 }
5115
5116 static void
5117 iwm_nic_umac_error(struct iwm_softc *sc)
5118 {
5119         struct iwm_umac_error_event_table table;
5120         uint32_t base;
5121
5122         base = sc->sc_uc.uc_umac_error_event_table;
5123
5124         if (base < 0x800000) {
5125                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5126                     base);
5127                 return;
5128         }
5129
5130         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5131                 device_printf(sc->sc_dev, "reading errlog failed\n");
5132                 return;
5133         }
5134
5135         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5136                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5137                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5138                     sc->sc_flags, table.valid);
5139         }
5140
5141         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5142                 iwm_desc_lookup(table.error_id));
5143         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5144         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5145         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5146             table.ilink1);
5147         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5148             table.ilink2);
5149         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5150         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5151         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5152         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5153         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5154         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5155             table.frame_pointer);
5156         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5157             table.stack_pointer);
5158         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5159         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5160             table.nic_isr_pref);
5161 }
5162
5163 /*
5164  * Support for dumping the error log seemed like a good idea ...
5165  * but it's mostly hex junk and the only sensible thing is the
5166  * hw/ucode revision (which we know anyway).  Since it's here,
5167  * I'll just leave it in, just in case e.g. the Intel guys want to
5168  * help us decipher some "ADVANCED_SYSASSERT" later.
5169  */
5170 static void
5171 iwm_nic_error(struct iwm_softc *sc)
5172 {
5173         struct iwm_error_event_table table;
5174         uint32_t base;
5175
5176         device_printf(sc->sc_dev, "dumping device error log\n");
5177         base = sc->sc_uc.uc_error_event_table;
5178         if (base < 0x800000) {
5179                 device_printf(sc->sc_dev,
5180                     "Invalid error log pointer 0x%08x\n", base);
5181                 return;
5182         }
5183
5184         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5185                 device_printf(sc->sc_dev, "reading errlog failed\n");
5186                 return;
5187         }
5188
5189         if (!table.valid) {
5190                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5191                 return;
5192         }
5193
5194         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5195                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5196                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5197                     sc->sc_flags, table.valid);
5198         }
5199
5200         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5201             iwm_desc_lookup(table.error_id));
5202         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5203             table.trm_hw_status0);
5204         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5205             table.trm_hw_status1);
5206         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5207         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5208         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5209         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5210         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5211         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5212         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5213         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5214         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5215         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5216         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5217         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5218             table.fw_rev_type);
5219         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5220         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5221         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5222         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5223         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5224         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5225         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5226         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5227         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5228         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5229         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5230         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5231         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5232         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5233         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5234         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5235         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5236         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5237         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5238
5239         if (sc->sc_uc.uc_umac_error_event_table)
5240                 iwm_nic_umac_error(sc);
5241 }
5242 #endif
5243
5244 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5245
5246 /*
5247  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5248  * Basic structure from if_iwn
5249  */
5250 static void
5251 iwm_notif_intr(struct iwm_softc *sc)
5252 {
5253         struct ieee80211com *ic = &sc->sc_ic;
5254         uint16_t hw;
5255
5256         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5257             BUS_DMASYNC_POSTREAD);
5258
5259         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5260
5261         /*
5262          * Process responses
5263          */
5264         while (sc->rxq.cur != hw) {
5265                 struct iwm_rx_ring *ring = &sc->rxq;
5266                 struct iwm_rx_data *data = &ring->data[ring->cur];
5267                 struct iwm_rx_packet *pkt;
5268                 struct iwm_cmd_response *cresp;
5269                 int qid, idx, code;
5270
5271                 bus_dmamap_sync(ring->data_dmat, data->map,
5272                     BUS_DMASYNC_POSTREAD);
5273                 pkt = mtod(data->m, struct iwm_rx_packet *);
5274
5275                 qid = pkt->hdr.qid & ~0x80;
5276                 idx = pkt->hdr.idx;
5277
5278                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5279                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5280                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5281                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5282
5283                 /*
5284                  * We randomly get these from the firmware; no idea why.
5285                  * They at least seem harmless, so just ignore them for now.
5286                  */
5287                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5288                     || pkt->len_n_flags == htole32(0x55550000))) {
5289                         ADVANCE_RXQ(sc);
5290                         continue;
5291                 }
5292
5293                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5294
5295                 switch (code) {
5296                 case IWM_REPLY_RX_PHY_CMD:
5297                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5298                         break;
5299
5300                 case IWM_REPLY_RX_MPDU_CMD:
5301                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5302                         break;
5303
5304                 case IWM_TX_CMD:
5305                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5306                         break;
5307
5308                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5309                         struct iwm_missed_beacons_notif *resp;
5310                         int missed;
5311
5312                         /* XXX look at mac_id to determine interface ID */
5313                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5314
5315                         resp = (void *)pkt->data;
5316                         missed = le32toh(resp->consec_missed_beacons);
5317
5318                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5319                             "%s: MISSED_BEACON: mac_id=%d, "
5320                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5321                             "num_rx=%d\n",
5322                             __func__,
5323                             le32toh(resp->mac_id),
5324                             le32toh(resp->consec_missed_beacons_since_last_rx),
5325                             le32toh(resp->consec_missed_beacons),
5326                             le32toh(resp->num_expected_beacons),
5327                             le32toh(resp->num_recvd_beacons));
5328
5329                         /* Be paranoid */
5330                         if (vap == NULL)
5331                                 break;
5332
5333                         /* XXX no net80211 locking? */
5334                         if (vap->iv_state == IEEE80211_S_RUN &&
5335                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5336                                 if (missed > vap->iv_bmissthreshold) {
5337                                         /* XXX bad locking; turn into task */
5338                                         IWM_UNLOCK(sc);
5339                                         ieee80211_beacon_miss(ic);
5340                                         IWM_LOCK(sc);
5341                                 }
5342                         }
5343
5344                         break; }
5345
5346                 case IWM_MFUART_LOAD_NOTIFICATION:
5347                         break;
5348
5349                 case IWM_MVM_ALIVE: {
5350                         struct iwm_mvm_alive_resp_v1 *resp1;
5351                         struct iwm_mvm_alive_resp_v2 *resp2;
5352                         struct iwm_mvm_alive_resp_v3 *resp3;
5353
5354                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5355                                 resp1 = (void *)pkt->data;
5356                                 sc->sc_uc.uc_error_event_table
5357                                     = le32toh(resp1->error_event_table_ptr);
5358                                 sc->sc_uc.uc_log_event_table
5359                                     = le32toh(resp1->log_event_table_ptr);
5360                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5361                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5362                                         sc->sc_uc.uc_ok = 1;
5363                                 else
5364                                         sc->sc_uc.uc_ok = 0;
5365                         }
5366
5367                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5368                                 resp2 = (void *)pkt->data;
5369                                 sc->sc_uc.uc_error_event_table
5370                                     = le32toh(resp2->error_event_table_ptr);
5371                                 sc->sc_uc.uc_log_event_table
5372                                     = le32toh(resp2->log_event_table_ptr);
5373                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5374                                 sc->sc_uc.uc_umac_error_event_table
5375                                     = le32toh(resp2->error_info_addr);
5376                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5377                                         sc->sc_uc.uc_ok = 1;
5378                                 else
5379                                         sc->sc_uc.uc_ok = 0;
5380                         }
5381
5382                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5383                                 resp3 = (void *)pkt->data;
5384                                 sc->sc_uc.uc_error_event_table
5385                                     = le32toh(resp3->error_event_table_ptr);
5386                                 sc->sc_uc.uc_log_event_table
5387                                     = le32toh(resp3->log_event_table_ptr);
5388                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5389                                 sc->sc_uc.uc_umac_error_event_table
5390                                     = le32toh(resp3->error_info_addr);
5391                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5392                                         sc->sc_uc.uc_ok = 1;
5393                                 else
5394                                         sc->sc_uc.uc_ok = 0;
5395                         }
5396
5397                         sc->sc_uc.uc_intr = 1;
5398                         wakeup(&sc->sc_uc);
5399                         break; }
5400
5401                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5402                         break;
5403
5404                 case IWM_STATISTICS_NOTIFICATION: {
5405                         struct iwm_notif_statistics *stats;
5406                         stats = (void *)pkt->data;
5407                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5408                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5409                         break; }
5410
5411                 case IWM_NVM_ACCESS_CMD:
5412                 case IWM_MCC_UPDATE_CMD:
5413                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5414                                 memcpy(sc->sc_cmd_resp,
5415                                     pkt, sizeof(sc->sc_cmd_resp));
5416                         }
5417                         break;
5418
5419                 case IWM_MCC_CHUB_UPDATE_CMD: {
5420                         struct iwm_mcc_chub_notif *notif;
5421                         notif = (void *)pkt->data;
5422
5423                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5424                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5425                         sc->sc_fw_mcc[2] = '\0';
5426                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5427                             "fw source %d sent CC '%s'\n",
5428                             notif->source_id, sc->sc_fw_mcc);
5429                         break; }
5430
5431                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5432                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5433                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5434                         struct iwm_dts_measurement_notif_v1 *notif;
5435
5436                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5437                                 device_printf(sc->sc_dev,
5438                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5439                                 break;
5440                         }
5441                         notif = (void *)pkt->data;
5442                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5443                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5444                             notif->temp);
5445                         break;
5446                 }
5447
5448                 case IWM_PHY_CONFIGURATION_CMD:
5449                 case IWM_TX_ANT_CONFIGURATION_CMD:
5450                 case IWM_ADD_STA:
5451                 case IWM_MAC_CONTEXT_CMD:
5452                 case IWM_REPLY_SF_CFG_CMD:
5453                 case IWM_POWER_TABLE_CMD:
5454                 case IWM_PHY_CONTEXT_CMD:
5455                 case IWM_BINDING_CONTEXT_CMD:
5456                 case IWM_TIME_EVENT_CMD:
5457                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5458                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5459                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5460                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5461                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5462                 case IWM_REPLY_BEACON_FILTERING_CMD:
5463                 case IWM_MAC_PM_POWER_TABLE:
5464                 case IWM_TIME_QUOTA_CMD:
5465                 case IWM_REMOVE_STA:
5466                 case IWM_TXPATH_FLUSH:
5467                 case IWM_LQ_CMD:
5468                 case IWM_BT_CONFIG:
5469                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5470                         cresp = (void *)pkt->data;
5471                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5472                                 memcpy(sc->sc_cmd_resp,
5473                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5474                         }
5475                         break;
5476
5477                 /* ignore */
5478                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5479                         break;
5480
5481                 case IWM_INIT_COMPLETE_NOTIF:
5482                         break;
5483
5484                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5485                         struct iwm_periodic_scan_complete *notif;
5486                         notif = (void *)pkt->data;
5487                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5488                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5489                                 ieee80211_runtask(ic, &sc->sc_es_task);
5490                         }
5491                         break;
5492                 }
5493
5494                 case IWM_SCAN_ITERATION_COMPLETE: {
5495                         struct iwm_lmac_scan_complete_notif *notif;
5496                         notif = (void *)pkt->data;
5497                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5498                         break;
5499                 }
5500  
5501                 case IWM_SCAN_COMPLETE_UMAC: {
5502                         struct iwm_umac_scan_complete *notif;
5503                         notif = (void *)pkt->data;
5504
5505                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5506                             "UMAC scan complete, status=0x%x\n",
5507                             notif->status);
5508                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5509                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5510                                 ieee80211_runtask(ic, &sc->sc_es_task);
5511                         }
5512                         break;
5513                 }
5514
5515                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5516                         struct iwm_umac_scan_iter_complete_notif *notif;
5517                         notif = (void *)pkt->data;
5518
5519                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5520                             "complete, status=0x%x, %d channels scanned\n",
5521                             notif->status, notif->scanned_channels);
5522                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5523                         break;
5524                 }
5525
5526                 case IWM_REPLY_ERROR: {
5527                         struct iwm_error_resp *resp;
5528                         resp = (void *)pkt->data;
5529
5530                         device_printf(sc->sc_dev,
5531                             "firmware error 0x%x, cmd 0x%x\n",
5532                             le32toh(resp->error_type),
5533                             resp->cmd_id);
5534                         break;
5535                 }
5536
5537                 case IWM_TIME_EVENT_NOTIFICATION: {
5538                         struct iwm_time_event_notif *notif;
5539                         notif = (void *)pkt->data;
5540
5541                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5542                             "TE notif status = 0x%x action = 0x%x\n",
5543                             notif->status, notif->action);
5544                         break;
5545                 }
5546
5547                 case IWM_MCAST_FILTER_CMD:
5548                         break;
5549
5550                 case IWM_SCD_QUEUE_CFG: {
5551                         struct iwm_scd_txq_cfg_rsp *rsp;
5552                         rsp = (void *)pkt->data;
5553
5554                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5555                             "queue cfg token=0x%x sta_id=%d "
5556                             "tid=%d scd_queue=%d\n",
5557                             rsp->token, rsp->sta_id, rsp->tid,
5558                             rsp->scd_queue);
5559                         break;
5560                 }
5561
5562                 default:
5563                         device_printf(sc->sc_dev,
5564                             "frame %d/%d %x UNHANDLED (this should "
5565                             "not happen)\n", qid, idx,
5566                             le32toh(pkt->len_n_flags));
5567                         break;
5568                 }
5569
5570                 /*
5571                  * Why test bit 0x80?  The Linux driver:
5572                  *
5573                  * There is one exception:  uCode sets bit 15 when it
5574                  * originates the response/notification, i.e. when the
5575                  * response/notification is not a direct response to a
5576                  * command sent by the driver.  For example, uCode issues
5577                  * IWM_REPLY_RX when it sends a received frame to the driver;
5578                  * it is not a direct response to any driver command.
5579                  *
5580                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5581                  * uses a slightly different format for pkt->hdr, and "qid"
5582                  * is actually the upper byte of a two-byte field.
5583                  */
5584                 if (!(pkt->hdr.qid & (1 << 7))) {
5585                         iwm_cmd_done(sc, pkt);
5586                 }
5587
5588                 ADVANCE_RXQ(sc);
5589         }
5590
5591         /*
5592          * Tell the firmware what we have processed.
5593          * Seems like the hardware gets upset unless we align
5594          * the write by 8??
5595          */
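             /*
              * (Descriptive note: the Linux reference driver does the same,
              * rounding the RX write pointer down to a multiple of 8 before
              * writing it to the FH_RSCSR_CHNL0_WPTR register.)
              */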
5596         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5597         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5598 }
5599
5600 static void
5601 iwm_intr(void *arg)
5602 {
5603         struct iwm_softc *sc = arg;
5604         int handled = 0;
5605         int r1, r2, rv = 0;
5606         int isperiodic = 0;
5607
5608         IWM_LOCK(sc);
5609         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5610
5611         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5612                 uint32_t *ict = sc->ict_dma.vaddr;
5613                 int tmp;
5614
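                     /*
                      * The ICT table entries are DMA'ed by the device in
                      * little-endian order; the 32-bit byte swap is its own
                      * inverse, so htole32() here behaves like le32toh().
                      */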
5615                 tmp = htole32(ict[sc->ict_cur]);
5616                 if (!tmp)
5617                         goto out_ena;
5618
5619                 /*
5620                  * ok, there was something.  keep plowing until we have all.
5621                  */
5622                 r1 = r2 = 0;
5623                 while (tmp) {
5624                         r1 |= tmp;
5625                         ict[sc->ict_cur] = 0;
5626                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5627                         tmp = htole32(ict[sc->ict_cur]);
5628                 }
5629
5630                 /* this is where the fun begins.  don't ask */
5631                 if (r1 == 0xffffffff)
5632                         r1 = 0;
5633
5634                 /* i am not expected to understand this */
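                     /*
                      * Per the Linux reference driver: a hardware bug can
                      * clear the RX cause bit (bit 15 before shifting) when
                      * interrupt coalescing is used, but bits 18/19 stay set,
                      * so they are used to reconstruct it.  The statement
                      * after the check then spreads the compact ICT value
                      * back into CSR_INT bit positions (low byte -> bits 0-7,
                      * next byte -> bits 24-31).
                      */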
5635                 if (r1 & 0xc0000)
5636                         r1 |= 0x8000;
5637                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5638         } else {
5639                 r1 = IWM_READ(sc, IWM_CSR_INT);
5640                 /* "hardware gone" (where, fishing?) */
5641                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5642                         goto out;
5643                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5644         }
5645         if (r1 == 0 && r2 == 0) {
5646                 goto out_ena;
5647         }
5648
5649         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5650
5651         /* ignored */
5652         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5653
5654         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5655                 int i;
5656                 struct ieee80211com *ic = &sc->sc_ic;
5657                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5658
5659 #ifdef IWM_DEBUG
5660                 iwm_nic_error(sc);
5661 #endif
5662                 /* Dump driver status (TX and RX rings) while we're here. */
5663                 device_printf(sc->sc_dev, "driver status:\n");
5664                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5665                         struct iwm_tx_ring *ring = &sc->txq[i];
5666                         device_printf(sc->sc_dev,
5667                             "  tx ring %2d: qid=%-2d cur=%-3d "
5668                             "queued=%-3d\n",
5669                             i, ring->qid, ring->cur, ring->queued);
5670                 }
5671                 device_printf(sc->sc_dev,
5672                     "  rx ring: cur=%d\n", sc->rxq.cur);
5673                 device_printf(sc->sc_dev,
5674                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5675
5676                 /* Don't stop the device; just do a VAP restart */
5677                 IWM_UNLOCK(sc);
5678
5679                 if (vap == NULL) {
5680                         printf("%s: null vap\n", __func__);
5681                         return;
5682                 }
5683
5684                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5685                     "restarting\n", __func__, vap->iv_state);
5686
5687                 /* XXX TODO: turn this into a callout/taskqueue */
5688                 ieee80211_restart_all(ic);
5689                 return;
5690         }
5691
5692         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5693                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5694                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5695                 iwm_stop(sc);
5696                 rv = 1;
5697                 goto out;
5698         }
5699
5700         /* firmware chunk loaded */
5701         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5702                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5703                 handled |= IWM_CSR_INT_BIT_FH_TX;
5704                 sc->sc_fw_chunk_done = 1;
5705                 wakeup(&sc->sc_fw);
5706         }
5707
5708         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5709                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5710                 if (iwm_check_rfkill(sc)) {
5711                         device_printf(sc->sc_dev,
5712                             "%s: rfkill switch, disabling interface\n",
5713                             __func__);
5714                         iwm_stop(sc);
5715                 }
5716         }
5717
5718         /*
5719          * The Linux driver uses periodic interrupts to avoid races.
5720          * We cargo-cult like it's going out of fashion.
5721          */
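             /*
              * The idea, per the Linux driver: ack the periodic interrupt and
              * disable it if this was only a periodic tick; it is re-armed
              * further below only after a real RX interrupt, so a dangling RX
              * left in the ICT table still gets serviced.
              */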
5722         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5723                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5724                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5725                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5726                         IWM_WRITE_1(sc,
5727                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5728                 isperiodic = 1;
5729         }
5730
5731         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5732                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5733                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5734
5735                 iwm_notif_intr(sc);
5736
5737                 /* enable periodic interrupt, see above */
5738                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5739                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5740                             IWM_CSR_INT_PERIODIC_ENA);
5741         }
5742
5743         if (__predict_false(r1 & ~handled))
5744                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5745                     "%s: unhandled interrupts: %x\n", __func__, r1);
5746         rv = 1;
5747
5748  out_ena:
5749         iwm_restore_interrupts(sc);
5750  out:
5751         IWM_UNLOCK(sc);
5752         return;
5753 }
5754
5755 /*
5756  * Autoconf glue-sniffing
5757  */
5758 #define PCI_VENDOR_INTEL                0x8086
5759 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5760 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5761 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5762 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5763 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5764 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5765 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5766 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5767 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5768 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5769
5770 static const struct iwm_devices {
5771         uint16_t        device;
5772         const char      *name;
5773 } iwm_devices[] = {
5774         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5775         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5776         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5777         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5778         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5779         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5780         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5781         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5782         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5783         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5784 };
5785
5786 static int
5787 iwm_probe(device_t dev)
5788 {
5789         int i;
5790
5791         for (i = 0; i < nitems(iwm_devices); i++) {
5792                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5793                     pci_get_device(dev) == iwm_devices[i].device) {
5794                         device_set_desc(dev, iwm_devices[i].name);
5795                         return (BUS_PROBE_DEFAULT);
5796                 }
5797         }
5798
5799         return (ENXIO);
5800 }
5801
5802 static int
5803 iwm_dev_check(device_t dev)
5804 {
5805         struct iwm_softc *sc;
5806
5807         sc = device_get_softc(dev);
5808
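             /*
              * sc_fwdmasegsz selects the DMA chunk size used when loading
              * firmware sections; the 8000 series uses the larger size.
              */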
5809         switch (pci_get_device(dev)) {
5810         case PCI_PRODUCT_INTEL_WL_3160_1:
5811         case PCI_PRODUCT_INTEL_WL_3160_2:
5812                 sc->cfg = &iwm3160_cfg;
5813                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5814                 return (0);
5815         case PCI_PRODUCT_INTEL_WL_3165_1:
5816         case PCI_PRODUCT_INTEL_WL_3165_2:
5817                 sc->cfg = &iwm3165_cfg;
5818                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5819                 return (0);
5820         case PCI_PRODUCT_INTEL_WL_7260_1:
5821         case PCI_PRODUCT_INTEL_WL_7260_2:
5822                 sc->cfg = &iwm7260_cfg;
5823                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5824                 return (0);
5825         case PCI_PRODUCT_INTEL_WL_7265_1:
5826         case PCI_PRODUCT_INTEL_WL_7265_2:
5827                 sc->cfg = &iwm7265_cfg;
5828                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5829                 return (0);
5830         case PCI_PRODUCT_INTEL_WL_8260_1:
5831         case PCI_PRODUCT_INTEL_WL_8260_2:
5832                 sc->cfg = &iwm8260_cfg;
5833                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5834                 return (0);
5835         default:
5836                 device_printf(dev, "unknown adapter type\n");
5837                 return (ENXIO);
5838         }
5839 }
5840
5841 /* PCI registers */
5842 #define PCI_CFG_RETRY_TIMEOUT   0x041
5843
5844 static int
5845 iwm_pci_attach(device_t dev)
5846 {
5847         struct iwm_softc *sc;
5848         int count, error, rid;
5849         uint16_t reg;
5850
5851         sc = device_get_softc(dev);
5852
5853         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5854          * PCI Tx retries from interfering with C3 CPU state */
5855         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5856
5857         /* Enable bus-mastering and hardware bug workaround. */
5858         pci_enable_busmaster(dev);
5859         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5860         /* if !MSI */
5861         if (reg & PCIM_STATUS_INTxSTATE) {
5862                 reg &= ~PCIM_STATUS_INTxSTATE;
5863         }
5864         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5865
5866         rid = PCIR_BAR(0);
5867         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5868             RF_ACTIVE);
5869         if (sc->sc_mem == NULL) {
5870                 device_printf(sc->sc_dev, "can't map mem space\n");
5871                 return (ENXIO);
5872         }
5873         sc->sc_st = rman_get_bustag(sc->sc_mem);
5874         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5875
5876         /* Install interrupt handler. */
5877         count = 1;
5878         rid = 0;
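             /*
              * Prefer a single MSI vector (rid 1); fall back to the shareable
              * legacy INTx resource (rid 0) if MSI allocation fails.
              */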
5879         if (pci_alloc_msi(dev, &count) == 0)
5880                 rid = 1;
5881         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5882             (rid != 0 ? 0 : RF_SHAREABLE));
5883         if (sc->sc_irq == NULL) {
5884                 device_printf(dev, "can't map interrupt\n");
5885                 return (ENXIO);
5886         }
5887         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5888             NULL, iwm_intr, sc, &sc->sc_ih);
5889         if (error != 0) {
5890                 device_printf(dev, "can't establish interrupt\n");
5891                 return (ENXIO);
5892         }
5893         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5894
5895         return (0);
5896 }
5897
5898 static void
5899 iwm_pci_detach(device_t dev)
5900 {
5901         struct iwm_softc *sc = device_get_softc(dev);
5902
5903         if (sc->sc_irq != NULL) {
5904                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5905                 bus_release_resource(dev, SYS_RES_IRQ,
5906                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5907                 pci_release_msi(dev);
5908         }
5909         if (sc->sc_mem != NULL)
5910                 bus_release_resource(dev, SYS_RES_MEMORY,
5911                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5912 }
5913
5914
5915
5916 static int
5917 iwm_attach(device_t dev)
5918 {
5919         struct iwm_softc *sc = device_get_softc(dev);
5920         struct ieee80211com *ic = &sc->sc_ic;
5921         int error;
5922         int txq_i, i;
5923
5924         sc->sc_dev = dev;
5925         sc->sc_attached = 1;
5926         IWM_LOCK_INIT(sc);
5927         mbufq_init(&sc->sc_snd, ifqmaxlen);
5928         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5929         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5930         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5931
5932         sc->sc_notif_wait = iwm_notification_wait_init(sc);
5933         if (sc->sc_notif_wait == NULL) {
5934                 device_printf(dev, "failed to init notification wait struct\n");
5935                 goto fail;
5936         }
5937
5938         /* Init phy db */
5939         sc->sc_phy_db = iwm_phy_db_init(sc);
5940         if (!sc->sc_phy_db) {
5941                 device_printf(dev, "Cannot init phy_db\n");
5942                 goto fail;
5943         }
5944
5945         /* PCI attach */
5946         error = iwm_pci_attach(dev);
5947         if (error != 0)
5948                 goto fail;
5949
5950         sc->sc_wantresp = -1;
5951
5952         /* Check device type */
5953         error = iwm_dev_check(dev);
5954         if (error != 0)
5955                 goto fail;
5956
5957         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5958         /*
5959          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5960          * changed, and the revision step now also includes bits 0-1 (there is
5961          * no more "dash" value). To keep hw_rev backwards compatible, we store
5962          * it in the old format.
5963          */
5964         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5965                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5966                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5967
5968         if (iwm_prepare_card_hw(sc) != 0) {
5969                 device_printf(dev, "could not initialize hardware\n");
5970                 goto fail;
5971         }
5972
5973         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5974                 int ret;
5975                 uint32_t hw_step;
5976
5977                 /*
5978                  * In order to recognize the C step, the driver should read
5979                  * the chip version id located at the AUX bus MISC address.
5980                  */
5981                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5982                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5983                 DELAY(2);
5984
5985                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5986                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5987                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5988                                    25000);
5989                 if (!ret) {
5990                         device_printf(sc->sc_dev,
5991                             "Failed to wake up the nic\n");
5992                         goto fail;
5993                 }
5994
5995                 if (iwm_nic_lock(sc)) {
5996                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5997                         hw_step |= IWM_ENABLE_WFPM;
5998                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5999                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6000                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6001                         if (hw_step == 0x3)
6002                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6003                                                 (IWM_SILICON_C_STEP << 2);
6004                         iwm_nic_unlock(sc);
6005                 } else {
6006                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
6007                         goto fail;
6008                 }
6009         }
6010
6011         /* Special-case the 7265D; it has the same PCI IDs as the 7265. */
6012         if (sc->cfg == &iwm7265_cfg &&
6013             (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6014                 sc->cfg = &iwm7265d_cfg;
6015         }
6016
6017         /* Allocate DMA memory for firmware transfers. */
6018         if ((error = iwm_alloc_fwmem(sc)) != 0) {
6019                 device_printf(dev, "could not allocate memory for firmware\n");
6020                 goto fail;
6021         }
6022
6023         /* Allocate "Keep Warm" page. */
6024         if ((error = iwm_alloc_kw(sc)) != 0) {
6025                 device_printf(dev, "could not allocate keep warm page\n");
6026                 goto fail;
6027         }
6028
6029         /* We use ICT interrupts */
6030         if ((error = iwm_alloc_ict(sc)) != 0) {
6031                 device_printf(dev, "could not allocate ICT table\n");
6032                 goto fail;
6033         }
6034
6035         /* Allocate TX scheduler "rings". */
6036         if ((error = iwm_alloc_sched(sc)) != 0) {
6037                 device_printf(dev, "could not allocate TX scheduler rings\n");
6038                 goto fail;
6039         }
6040
6041         /* Allocate TX rings */
6042         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6043                 if ((error = iwm_alloc_tx_ring(sc,
6044                     &sc->txq[txq_i], txq_i)) != 0) {
6045                         device_printf(dev,
6046                             "could not allocate TX ring %d\n",
6047                             txq_i);
6048                         goto fail;
6049                 }
6050         }
6051
6052         /* Allocate RX ring. */
6053         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6054                 device_printf(dev, "could not allocate RX ring\n");
6055                 goto fail;
6056         }
6057
6058         /* Clear pending interrupts. */
6059         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6060
6061         ic->ic_softc = sc;
6062         ic->ic_name = device_get_nameunit(sc->sc_dev);
6063         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
6064         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
6065
6066         /* Set device capabilities. */
6067         ic->ic_caps =
6068             IEEE80211_C_STA |
6069             IEEE80211_C_WPA |           /* WPA/RSN */
6070             IEEE80211_C_WME |
6071             IEEE80211_C_SHSLOT |        /* short slot time supported */
6072             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
6073 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
6074             ;
6075         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6076                 sc->sc_phyctxt[i].id = i;
6077                 sc->sc_phyctxt[i].color = 0;
6078                 sc->sc_phyctxt[i].ref = 0;
6079                 sc->sc_phyctxt[i].channel = NULL;
6080         }
6081
6082         /* Default noise floor */
6083         sc->sc_noise = -96;
6084
6085         /* Max RSSI */
6086         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6087
6088         sc->sc_preinit_hook.ich_func = iwm_preinit;
6089         sc->sc_preinit_hook.ich_arg = sc;
6090         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6091                 device_printf(dev, "config_intrhook_establish failed\n");
6092                 goto fail;
6093         }
6094
6095 #ifdef IWM_DEBUG
6096         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6097             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6098             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6099 #endif
6100
6101         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6102             "<-%s\n", __func__);
6103
6104         return 0;
6105
6106         /* Free allocated memory if something failed during attachment. */
6107 fail:
6108         iwm_detach_local(sc, 0);
6109
6110         return ENXIO;
6111 }
6112
6113 static int
6114 iwm_is_valid_ether_addr(uint8_t *addr)
6115 {
6116         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6117
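             /* Reject group (multicast/broadcast) addresses and the all-zero address. */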
6118         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6119                 return (FALSE);
6120
6121         return (TRUE);
6122 }
6123
6124 static int
6125 iwm_update_edca(struct ieee80211com *ic)
6126 {
6127         struct iwm_softc *sc = ic->ic_softc;
6128
6129         device_printf(sc->sc_dev, "%s: called\n", __func__);
6130         return (0);
6131 }
6132
6133 static void
6134 iwm_preinit(void *arg)
6135 {
6136         struct iwm_softc *sc = arg;
6137         device_t dev = sc->sc_dev;
6138         struct ieee80211com *ic = &sc->sc_ic;
6139         int error;
6140
6141         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6142             "->%s\n", __func__);
6143
6144         IWM_LOCK(sc);
6145         if ((error = iwm_start_hw(sc)) != 0) {
6146                 device_printf(dev, "could not initialize hardware\n");
6147                 IWM_UNLOCK(sc);
6148                 goto fail;
6149         }
6150
6151         error = iwm_run_init_mvm_ucode(sc, 1);
6152         iwm_stop_device(sc);
6153         if (error) {
6154                 IWM_UNLOCK(sc);
6155                 goto fail;
6156         }
6157         device_printf(dev,
6158             "hw rev 0x%x, fw ver %s, address %s\n",
6159             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6160             sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6161
6162         /* not all hardware can do 5GHz band */
6163         if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6164                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6165                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6166         IWM_UNLOCK(sc);
6167
6168         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6169             ic->ic_channels);
6170
6171         /*
6172          * At this point we've committed - if we fail to do setup,
6173          * we now also have to tear down the net80211 state.
6174          */
6175         ieee80211_ifattach(ic);
6176         ic->ic_vap_create = iwm_vap_create;
6177         ic->ic_vap_delete = iwm_vap_delete;
6178         ic->ic_raw_xmit = iwm_raw_xmit;
6179         ic->ic_node_alloc = iwm_node_alloc;
6180         ic->ic_scan_start = iwm_scan_start;
6181         ic->ic_scan_end = iwm_scan_end;
6182         ic->ic_update_mcast = iwm_update_mcast;
6183         ic->ic_getradiocaps = iwm_init_channel_map;
6184         ic->ic_set_channel = iwm_set_channel;
6185         ic->ic_scan_curchan = iwm_scan_curchan;
6186         ic->ic_scan_mindwell = iwm_scan_mindwell;
6187         ic->ic_wme.wme_update = iwm_update_edca;
6188         ic->ic_parent = iwm_parent;
6189         ic->ic_transmit = iwm_transmit;
6190         iwm_radiotap_attach(sc);
6191         if (bootverbose)
6192                 ieee80211_announce(ic);
6193
6194         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6195             "<-%s\n", __func__);
6196         config_intrhook_disestablish(&sc->sc_preinit_hook);
6197
6198         return;
6199 fail:
6200         config_intrhook_disestablish(&sc->sc_preinit_hook);
6201         iwm_detach_local(sc, 0);
6202 }
6203
6204 /*
6205  * Attach the interface to 802.11 radiotap.
6206  */
6207 static void
6208 iwm_radiotap_attach(struct iwm_softc *sc)
6209 {
6210         struct ieee80211com *ic = &sc->sc_ic;
6211
6212         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6213             "->%s begin\n", __func__);
6214         ieee80211_radiotap_attach(ic,
6215             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6216                 IWM_TX_RADIOTAP_PRESENT,
6217             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6218                 IWM_RX_RADIOTAP_PRESENT);
6219         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6220             "->%s end\n", __func__);
6221 }
6222
6223 static struct ieee80211vap *
6224 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6225     enum ieee80211_opmode opmode, int flags,
6226     const uint8_t bssid[IEEE80211_ADDR_LEN],
6227     const uint8_t mac[IEEE80211_ADDR_LEN])
6228 {
6229         struct iwm_vap *ivp;
6230         struct ieee80211vap *vap;
6231
6232         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6233                 return NULL;
6234         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6235         vap = &ivp->iv_vap;
6236         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6237         vap->iv_bmissthreshold = 10;            /* override default */
6238         /* Override with driver methods. */
6239         ivp->iv_newstate = vap->iv_newstate;
6240         vap->iv_newstate = iwm_newstate;
6241
6242         ieee80211_ratectl_init(vap);
6243         /* Complete setup. */
6244         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6245             mac);
6246         ic->ic_opmode = opmode;
6247
6248         return vap;
6249 }
6250
6251 static void
6252 iwm_vap_delete(struct ieee80211vap *vap)
6253 {
6254         struct iwm_vap *ivp = IWM_VAP(vap);
6255
6256         ieee80211_ratectl_deinit(vap);
6257         ieee80211_vap_detach(vap);
6258         free(ivp, M_80211_VAP);
6259 }
6260
6261 static void
6262 iwm_scan_start(struct ieee80211com *ic)
6263 {
6264         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6265         struct iwm_softc *sc = ic->ic_softc;
6266         int error;
6267
6268         IWM_LOCK(sc);
6269         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6270                 /* This should not be possible */
6271                 device_printf(sc->sc_dev,
6272                     "%s: Previous scan not completed yet\n", __func__);
6273         }
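             /*
              * Firmware that advertises the UMAC scan API via its TLV
              * capabilities is driven with the UMAC scan command; otherwise
              * fall back to the older LMAC scan command.
              */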
6274         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6275                 error = iwm_mvm_umac_scan(sc);
6276         else
6277                 error = iwm_mvm_lmac_scan(sc);
6278         if (error != 0) {
6279                 device_printf(sc->sc_dev, "could not initiate scan\n");
6280                 IWM_UNLOCK(sc);
6281                 ieee80211_cancel_scan(vap);
6282         } else {
6283                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6284                 iwm_led_blink_start(sc);
6285                 IWM_UNLOCK(sc);
6286         }
6287 }
6288
6289 static void
6290 iwm_scan_end(struct ieee80211com *ic)
6291 {
6292         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6293         struct iwm_softc *sc = ic->ic_softc;
6294
6295         IWM_LOCK(sc);
6296         iwm_led_blink_stop(sc);
6297         if (vap->iv_state == IEEE80211_S_RUN)
6298                 iwm_mvm_led_enable(sc);
6299         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6300                 /*
6301                  * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6302                  * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6303                  * taskqueue.
6304                  */
6305                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6306                 iwm_mvm_scan_stop_wait(sc);
6307         }
6308         IWM_UNLOCK(sc);
6309
6310         /*
6311          * Make sure we don't race if sc_es_task is still enqueued here:
6312          * cancel it so that it won't call ieee80211_scan_done
6313          * when we have already started the next scan.
6314          */
6315         taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6316 }
6317
6318 static void
6319 iwm_update_mcast(struct ieee80211com *ic)
6320 {
6321 }
6322
6323 static void
6324 iwm_set_channel(struct ieee80211com *ic)
6325 {
6326 }
6327
6328 static void
6329 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6330 {
6331 }
6332
6333 static void
6334 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6335 {
6336         return;
6337 }
6338
6339 void
6340 iwm_init_task(void *arg1)
6341 {
6342         struct iwm_softc *sc = arg1;
6343
6344         IWM_LOCK(sc);
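             /*
              * Serialize with other users of IWM_FLAG_BUSY: sleep until the
              * flag is clear, then hold it for the stop/init cycle below.
              */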
6345         while (sc->sc_flags & IWM_FLAG_BUSY)
6346                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6347         sc->sc_flags |= IWM_FLAG_BUSY;
6348         iwm_stop(sc);
6349         if (sc->sc_ic.ic_nrunning > 0)
6350                 iwm_init(sc);
6351         sc->sc_flags &= ~IWM_FLAG_BUSY;
6352         wakeup(&sc->sc_flags);
6353         IWM_UNLOCK(sc);
6354 }
6355
6356 static int
6357 iwm_resume(device_t dev)
6358 {
6359         struct iwm_softc *sc = device_get_softc(dev);
6360         int do_reinit = 0;
6361
6362         /*
6363          * We disable the RETRY_TIMEOUT register (0x41) to keep
6364          * PCI Tx retries from interfering with C3 CPU state.
6365          */
6366         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6367         iwm_init_task(device_get_softc(dev));
6368
6369         IWM_LOCK(sc);
6370         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6371                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6372                 do_reinit = 1;
6373         }
6374         IWM_UNLOCK(sc);
6375
6376         if (do_reinit)
6377                 ieee80211_resume_all(&sc->sc_ic);
6378
6379         return 0;
6380 }
6381
6382 static int
6383 iwm_suspend(device_t dev)
6384 {
6385         int do_stop = 0;
6386         struct iwm_softc *sc = device_get_softc(dev);
6387
6388         do_stop = (sc->sc_ic.ic_nrunning > 0);
6389
6390         ieee80211_suspend_all(&sc->sc_ic);
6391
6392         if (do_stop) {
6393                 IWM_LOCK(sc);
6394                 iwm_stop(sc);
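                     /*
                      * Reuse the SCANNING flag to tell iwm_resume() that the
                      * interface was running, so that it calls
                      * ieee80211_resume_all() on wakeup.
                      */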
6395                 sc->sc_flags |= IWM_FLAG_SCANNING;
6396                 IWM_UNLOCK(sc);
6397         }
6398
6399         return (0);
6400 }
6401
6402 static int
6403 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6404 {
6405         struct iwm_fw_info *fw = &sc->sc_fw;
6406         device_t dev = sc->sc_dev;
6407         int i;
6408
6409         if (!sc->sc_attached)
6410                 return 0;
6411         sc->sc_attached = 0;
6412
6413         if (do_net80211)
6414                 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6415
6416         callout_drain(&sc->sc_led_blink_to);
6417         callout_drain(&sc->sc_watchdog_to);
6418         iwm_stop_device(sc);
6419         if (do_net80211) {
6420                 ieee80211_ifdetach(&sc->sc_ic);
6421         }
6422
6423         iwm_phy_db_free(sc->sc_phy_db);
6424         sc->sc_phy_db = NULL;
6425
6426         iwm_free_nvm_data(sc->nvm_data);
6427
6428         /* Free descriptor rings */
6429         iwm_free_rx_ring(sc, &sc->rxq);
6430         for (i = 0; i < nitems(sc->txq); i++)
6431                 iwm_free_tx_ring(sc, &sc->txq[i]);
6432
6433         /* Free firmware */
6434         if (fw->fw_fp != NULL)
6435                 iwm_fw_info_free(fw);
6436
6437         /* Free scheduler */
6438         iwm_dma_contig_free(&sc->sched_dma);
6439         iwm_dma_contig_free(&sc->ict_dma);
6440         iwm_dma_contig_free(&sc->kw_dma);
6441         iwm_dma_contig_free(&sc->fw_dma);
6442
6443         /* Finished with the hardware - detach things */
6444         iwm_pci_detach(dev);
6445
6446         if (sc->sc_notif_wait != NULL) {
6447                 iwm_notification_wait_free(sc->sc_notif_wait);
6448                 sc->sc_notif_wait = NULL;
6449         }
6450
6451         mbufq_drain(&sc->sc_snd);
6452         IWM_LOCK_DESTROY(sc);
6453
6454         return (0);
6455 }
6456
6457 static int
6458 iwm_detach(device_t dev)
6459 {
6460         struct iwm_softc *sc = device_get_softc(dev);
6461
6462         return (iwm_detach_local(sc, 1));
6463 }
6464
6465 static device_method_t iwm_pci_methods[] = {
6466         /* Device interface */
6467         DEVMETHOD(device_probe,         iwm_probe),
6468         DEVMETHOD(device_attach,        iwm_attach),
6469         DEVMETHOD(device_detach,        iwm_detach),
6470         DEVMETHOD(device_suspend,       iwm_suspend),
6471         DEVMETHOD(device_resume,        iwm_resume),
6472
6473         DEVMETHOD_END
6474 };
6475
6476 static driver_t iwm_pci_driver = {
6477         "iwm",
6478         iwm_pci_methods,
6479         sizeof (struct iwm_softc)
6480 };
6481
6482 static devclass_t iwm_devclass;
6483
6484 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6485 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6486 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6487 MODULE_DEPEND(iwm, wlan, 1, 1, 1);