1/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: releng/12.0/sys/dev/iwm/if_iwm.c 339038 2018-10-01 10:44:33Z bz $");
107
108#include "opt_wlan.h"
109#include "opt_iwm.h"
110
111#include <sys/param.h>
112#include <sys/bus.h>
113#include <sys/conf.h>
114#include <sys/endian.h>
115#include <sys/firmware.h>
116#include <sys/kernel.h>
117#include <sys/malloc.h>
118#include <sys/mbuf.h>
119#include <sys/mutex.h>
120#include <sys/module.h>
121#include <sys/proc.h>
122#include <sys/rman.h>
123#include <sys/socket.h>
124#include <sys/sockio.h>
125#include <sys/sysctl.h>
126#include <sys/linker.h>
127
128#include <machine/bus.h>
129#include <machine/endian.h>
130#include <machine/resource.h>
131
132#include <dev/pci/pcivar.h>
133#include <dev/pci/pcireg.h>
134
135#include <net/bpf.h>
136
137#include <net/if.h>
138#include <net/if_var.h>
139#include <net/if_arp.h>
140#include <net/if_dl.h>
141#include <net/if_media.h>
142#include <net/if_types.h>
143
144#include <netinet/in.h>
145#include <netinet/in_systm.h>
146#include <netinet/if_ether.h>
147#include <netinet/ip.h>
148
149#include <net80211/ieee80211_var.h>
150#include <net80211/ieee80211_regdomain.h>
151#include <net80211/ieee80211_ratectl.h>
152#include <net80211/ieee80211_radiotap.h>
153
154#include <dev/iwm/if_iwmreg.h>
155#include <dev/iwm/if_iwmvar.h>
156#include <dev/iwm/if_iwm_config.h>
157#include <dev/iwm/if_iwm_debug.h>
158#include <dev/iwm/if_iwm_notif_wait.h>
159#include <dev/iwm/if_iwm_util.h>
160#include <dev/iwm/if_iwm_binding.h>
161#include <dev/iwm/if_iwm_phy_db.h>
162#include <dev/iwm/if_iwm_mac_ctxt.h>
163#include <dev/iwm/if_iwm_phy_ctxt.h>
164#include <dev/iwm/if_iwm_time_event.h>
165#include <dev/iwm/if_iwm_power.h>
166#include <dev/iwm/if_iwm_scan.h>
167#include <dev/iwm/if_iwm_sf.h>
168#include <dev/iwm/if_iwm_sta.h>
169
170#include <dev/iwm/if_iwm_pcie_trans.h>
171#include <dev/iwm/if_iwm_led.h>
172#include <dev/iwm/if_iwm_fw.h>
173
174/* From DragonflyBSD */
175#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176
177const uint8_t iwm_nvm_channels[] = {
178	/* 2.4 GHz */
179	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
180	/* 5 GHz */
181	36, 40, 44, 48, 52, 56, 60, 64,
182	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
183	149, 153, 157, 161, 165
184};
185_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
186    "IWM_NUM_CHANNELS is too small");
187
188const uint8_t iwm_nvm_channels_8000[] = {
189	/* 2.4 GHz */
190	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
191	/* 5 GHz */
192	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
193	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
194	149, 153, 157, 161, 165, 169, 173, 177, 181
195};
196_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
197    "IWM_NUM_CHANNELS_8000 is too small");
198
199#define IWM_NUM_2GHZ_CHANNELS	14
200#define IWM_N_HW_ADDR_MASK	0xF
201
202/*
203 * XXX For now, there's simply a fixed set of rate table entries
204 * that are populated.
205 */
206const struct iwm_rate {
207	uint8_t rate;
208	uint8_t plcp;
209} iwm_rates[] = {
210	{   2,	IWM_RATE_1M_PLCP  },
211	{   4,	IWM_RATE_2M_PLCP  },
212	{  11,	IWM_RATE_5M_PLCP  },
213	{  22,	IWM_RATE_11M_PLCP },
214	{  12,	IWM_RATE_6M_PLCP  },
215	{  18,	IWM_RATE_9M_PLCP  },
216	{  24,	IWM_RATE_12M_PLCP },
217	{  36,	IWM_RATE_18M_PLCP },
218	{  48,	IWM_RATE_24M_PLCP },
219	{  72,	IWM_RATE_36M_PLCP },
220	{  96,	IWM_RATE_48M_PLCP },
221	{ 108,	IWM_RATE_54M_PLCP },
222};
223#define IWM_RIDX_CCK	0
224#define IWM_RIDX_OFDM	4
225#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
226#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
227#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228
229struct iwm_nvm_section {
230	uint16_t length;
231	uint8_t *data;
232};
233
234#define IWM_MVM_UCODE_ALIVE_TIMEOUT	(5*hz)
235#define IWM_MVM_UCODE_CALIB_TIMEOUT	(10*hz)
236
237struct iwm_mvm_alive_data {
238	int valid;
239	uint32_t scd_base_addr;
240};
241
242static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243static int	iwm_firmware_store_section(struct iwm_softc *,
244                                           enum iwm_ucode_type,
245                                           const uint8_t *, size_t);
246static int	iwm_set_default_calib(struct iwm_softc *, const void *);
247static void	iwm_fw_info_free(struct iwm_fw_info *);
248static int	iwm_read_firmware(struct iwm_softc *);
249static int	iwm_alloc_fwmem(struct iwm_softc *);
250static int	iwm_alloc_sched(struct iwm_softc *);
251static int	iwm_alloc_kw(struct iwm_softc *);
252static int	iwm_alloc_ict(struct iwm_softc *);
253static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                  int);
258static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260static void	iwm_enable_interrupts(struct iwm_softc *);
261static void	iwm_restore_interrupts(struct iwm_softc *);
262static void	iwm_disable_interrupts(struct iwm_softc *);
263static void	iwm_ict_reset(struct iwm_softc *);
264static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265static void	iwm_stop_device(struct iwm_softc *);
266static void	iwm_mvm_nic_config(struct iwm_softc *);
267static int	iwm_nic_rx_init(struct iwm_softc *);
268static int	iwm_nic_tx_init(struct iwm_softc *);
269static int	iwm_nic_init(struct iwm_softc *);
270static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                   uint16_t, uint8_t *, uint16_t *);
273static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274				     uint16_t *, uint32_t);
275static uint32_t	iwm_eeprom_channel_flags(uint16_t);
276static void	iwm_add_channel_band(struct iwm_softc *,
277		    struct ieee80211_channel[], int, int *, int, size_t,
278		    const uint8_t[]);
279static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
280		    struct ieee80211_channel[]);
281static struct iwm_nvm_data *
282	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283			   const uint16_t *, const uint16_t *,
284			   const uint16_t *, const uint16_t *,
285			   const uint16_t *);
286static void	iwm_free_nvm_data(struct iwm_nvm_data *);
287static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
288					       struct iwm_nvm_data *,
289					       const uint16_t *,
290					       const uint16_t *);
291static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292			    const uint16_t *);
293static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295				  const uint16_t *);
296static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
297				   const uint16_t *);
298static void	iwm_set_radio_cfg(const struct iwm_softc *,
299				  struct iwm_nvm_data *, uint32_t);
300static struct iwm_nvm_data *
301	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302static int	iwm_nvm_init(struct iwm_softc *);
303static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304				      const struct iwm_fw_desc *);
305static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306					     bus_addr_t, uint32_t);
307static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308						const struct iwm_fw_img *,
309						int, int *);
310static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
311					   const struct iwm_fw_img *,
312					   int, int *);
313static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314					       const struct iwm_fw_img *);
315static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
316					  const struct iwm_fw_img *);
317static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
318static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
320static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
321                                              enum iwm_ucode_type);
322static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
323static int	iwm_mvm_config_ltr(struct iwm_softc *sc);
324static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
325static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
326					    struct iwm_rx_phy_info *);
327static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
328                                      struct iwm_rx_packet *);
329static int	iwm_get_noise(struct iwm_softc *,
330		    const struct iwm_mvm_statistics_rx_non_phy *);
331static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
332		    struct iwm_rx_packet *);
333static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
334				    uint32_t, boolean_t);
335static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
336                                         struct iwm_rx_packet *,
337				         struct iwm_node *);
338static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
339static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
340#if 0
341static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
342                                 uint16_t);
343#endif
344static const struct iwm_rate *
345	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
346			struct mbuf *, struct iwm_tx_cmd *);
347static int	iwm_tx(struct iwm_softc *, struct mbuf *,
348                       struct ieee80211_node *, int);
349static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
350			     const struct ieee80211_bpf_params *);
351static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
352static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
353static struct ieee80211_node *
354		iwm_node_alloc(struct ieee80211vap *,
355		               const uint8_t[IEEE80211_ADDR_LEN]);
356static uint8_t	iwm_rate_from_ucode_rate(uint32_t);
357static int	iwm_rate2ridx(struct iwm_softc *, uint8_t);
358static void	iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
359static int	iwm_media_change(struct ifnet *);
360static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
361static void	iwm_endscan_cb(void *, int);
362static int	iwm_send_bt_init_conf(struct iwm_softc *);
363static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
364static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
365static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
366static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
367static int	iwm_init_hw(struct iwm_softc *);
368static void	iwm_init(struct iwm_softc *);
369static void	iwm_start(struct iwm_softc *);
370static void	iwm_stop(struct iwm_softc *);
371static void	iwm_watchdog(void *);
372static void	iwm_parent(struct ieee80211com *);
373#ifdef IWM_DEBUG
374static const char *
375		iwm_desc_lookup(uint32_t);
376static void	iwm_nic_error(struct iwm_softc *);
377static void	iwm_nic_umac_error(struct iwm_softc *);
378#endif
379static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
380static void	iwm_notif_intr(struct iwm_softc *);
381static void	iwm_intr(void *);
382static int	iwm_attach(device_t);
383static int	iwm_is_valid_ether_addr(uint8_t *);
384static void	iwm_preinit(void *);
385static int	iwm_detach_local(struct iwm_softc *sc, int);
386static void	iwm_init_task(void *);
387static void	iwm_radiotap_attach(struct iwm_softc *);
388static struct ieee80211vap *
389		iwm_vap_create(struct ieee80211com *,
390		               const char [IFNAMSIZ], int,
391		               enum ieee80211_opmode, int,
392		               const uint8_t [IEEE80211_ADDR_LEN],
393		               const uint8_t [IEEE80211_ADDR_LEN]);
394static void	iwm_vap_delete(struct ieee80211vap *);
395static void	iwm_xmit_queue_drain(struct iwm_softc *);
396static void	iwm_scan_start(struct ieee80211com *);
397static void	iwm_scan_end(struct ieee80211com *);
398static void	iwm_update_mcast(struct ieee80211com *);
399static void	iwm_set_channel(struct ieee80211com *);
400static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
401static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
402static int	iwm_detach(device_t);
403
404static int	iwm_lar_disable = 0;
405TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
406
407/*
408 * Firmware parser.
409 */
410
411static int
412iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
413{
414	const struct iwm_fw_cscheme_list *l = (const void *)data;
415
416	if (dlen < sizeof(*l) ||
417	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
418		return EINVAL;
419
420	/* we don't actually store anything for now, always use s/w crypto */
421
422	return 0;
423}
424
425static int
426iwm_firmware_store_section(struct iwm_softc *sc,
427    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
428{
429	struct iwm_fw_img *fws;
430	struct iwm_fw_desc *fwone;
431
432	if (type >= IWM_UCODE_TYPE_MAX)
433		return EINVAL;
434	if (dlen < sizeof(uint32_t))
435		return EINVAL;
436
437	fws = &sc->sc_fw.img[type];
438	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
439		return EINVAL;
440
441	fwone = &fws->sec[fws->fw_count];
442
443	/* first 32bit are device load offset */
444	memcpy(&fwone->offset, data, sizeof(uint32_t));
445
446	/* rest is data */
447	fwone->data = data + sizeof(uint32_t);
448	fwone->len = dlen - sizeof(uint32_t);
449
450	fws->fw_count++;
451
452	return 0;
453}
454
455#define IWM_DEFAULT_SCAN_CHANNELS 40
456
457/* iwlwifi: iwl-drv.c */
458struct iwm_tlv_calib_data {
459	uint32_t ucode_type;
460	struct iwm_tlv_calib_ctrl calib;
461} __packed;
462
463static int
464iwm_set_default_calib(struct iwm_softc *sc, const void *data)
465{
466	const struct iwm_tlv_calib_data *def_calib = data;
467	uint32_t ucode_type = le32toh(def_calib->ucode_type);
468
469	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
470		device_printf(sc->sc_dev,
471		    "Wrong ucode_type %u for default "
472		    "calibration.\n", ucode_type);
473		return EINVAL;
474	}
475
476	sc->sc_default_calib[ucode_type].flow_trigger =
477	    def_calib->calib.flow_trigger;
478	sc->sc_default_calib[ucode_type].event_trigger =
479	    def_calib->calib.event_trigger;
480
481	return 0;
482}
483
484static int
485iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
486			struct iwm_ucode_capabilities *capa)
487{
488	const struct iwm_ucode_api *ucode_api = (const void *)data;
489	uint32_t api_index = le32toh(ucode_api->api_index);
490	uint32_t api_flags = le32toh(ucode_api->api_flags);
491	int i;
492
493	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
494		device_printf(sc->sc_dev,
495		    "api flags index %d larger than supported by driver\n",
496		    api_index);
497		/* don't return an error so we can load FW that has more bits */
498		return 0;
499	}
500
501	for (i = 0; i < 32; i++) {
502		if (api_flags & (1U << i))
503			setbit(capa->enabled_api, i + 32 * api_index);
504	}
505
506	return 0;
507}
508
509static int
510iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
511			   struct iwm_ucode_capabilities *capa)
512{
513	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
514	uint32_t api_index = le32toh(ucode_capa->api_index);
515	uint32_t api_flags = le32toh(ucode_capa->api_capa);
516	int i;
517
518	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
519		device_printf(sc->sc_dev,
520		    "capa flags index %d larger than supported by driver\n",
521		    api_index);
522		/* don't return an error so we can load FW that has more bits */
523		return 0;
524	}
525
526	for (i = 0; i < 32; i++) {
527		if (api_flags & (1U << i))
528			setbit(capa->enabled_capa, i + 32 * api_index);
529	}
530
531	return 0;
532}
533
534static void
535iwm_fw_info_free(struct iwm_fw_info *fw)
536{
537	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
538	fw->fw_fp = NULL;
539	memset(fw->img, 0, sizeof(fw->img));
540}
541
542static int
543iwm_read_firmware(struct iwm_softc *sc)
544{
545	struct iwm_fw_info *fw = &sc->sc_fw;
546	const struct iwm_tlv_ucode_header *uhdr;
547	const struct iwm_ucode_tlv *tlv;
548	struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
549	enum iwm_ucode_tlv_type tlv_type;
550	const struct firmware *fwp;
551	const uint8_t *data;
552	uint32_t tlv_len;
553	uint32_t usniffer_img;
554	const uint8_t *tlv_data;
555	uint32_t paging_mem_size;
556	int num_of_cpus;
557	int error = 0;
558	size_t len;
559
560	/*
561	 * Load firmware into driver memory.
562	 * fw_fp will be set.
563	 */
564	fwp = firmware_get(sc->cfg->fw_name);
565	if (fwp == NULL) {
566		device_printf(sc->sc_dev,
567		    "could not read firmware %s (error %d)\n",
568		    sc->cfg->fw_name, error);
569		goto out;
570	}
571	fw->fw_fp = fwp;
572
573	/* (Re-)Initialize default values. */
574	capa->flags = 0;
575	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
576	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
577	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
578	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
579	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
580
581	/*
582	 * Parse firmware contents
583	 */
584
585	uhdr = (const void *)fw->fw_fp->data;
586	if (*(const uint32_t *)fw->fw_fp->data != 0
587	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
588		device_printf(sc->sc_dev, "invalid firmware %s\n",
589		    sc->cfg->fw_name);
590		error = EINVAL;
591		goto out;
592	}
593
594	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
595	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
596	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
597	    IWM_UCODE_API(le32toh(uhdr->ver)));
598	data = uhdr->data;
599	len = fw->fw_fp->datasize - sizeof(*uhdr);
600
601	while (len >= sizeof(*tlv)) {
602		len -= sizeof(*tlv);
603		tlv = (const void *)data;
604
605		tlv_len = le32toh(tlv->length);
606		tlv_type = le32toh(tlv->type);
607		tlv_data = tlv->data;
608
609		if (len < tlv_len) {
610			device_printf(sc->sc_dev,
611			    "firmware too short: %zu bytes\n",
612			    len);
613			error = EINVAL;
614			goto parse_out;
615		}
616		len -= roundup2(tlv_len, 4);
617		data += sizeof(*tlv) + roundup2(tlv_len, 4);
618
619		switch ((int)tlv_type) {
620		case IWM_UCODE_TLV_PROBE_MAX_LEN:
621			if (tlv_len != sizeof(uint32_t)) {
622				device_printf(sc->sc_dev,
623				    "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
624				    __func__, tlv_len);
625				error = EINVAL;
626				goto parse_out;
627			}
628			capa->max_probe_length =
629			    le32_to_cpup((const uint32_t *)tlv_data);
630			/* limit it to something sensible */
631			if (capa->max_probe_length >
632			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
633				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
634				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
635				    "ridiculous\n", __func__);
636				error = EINVAL;
637				goto parse_out;
638			}
639			break;
640		case IWM_UCODE_TLV_PAN:
641			if (tlv_len) {
642				device_printf(sc->sc_dev,
643				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
644				    __func__, tlv_len);
645				error = EINVAL;
646				goto parse_out;
647			}
648			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
649			break;
650		case IWM_UCODE_TLV_FLAGS:
651			if (tlv_len < sizeof(uint32_t)) {
652				device_printf(sc->sc_dev,
653				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
654				    __func__, tlv_len);
655				error = EINVAL;
656				goto parse_out;
657			}
658			if (tlv_len % sizeof(uint32_t)) {
659				device_printf(sc->sc_dev,
660				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
661				    __func__, tlv_len);
662				error = EINVAL;
663				goto parse_out;
664			}
665			/*
666			 * Apparently there can be many flags, but Linux driver
667			 * parses only the first one, and so do we.
668			 *
669			 * XXX: why does this override IWM_UCODE_TLV_PAN?
670			 * Intentional or a bug?  Observations from
671			 * current firmware file:
672			 *  1) TLV_PAN is parsed first
673			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
674			 * ==> this resets TLV_PAN to itself... hnnnk
675			 */
676			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
677			break;
678		case IWM_UCODE_TLV_CSCHEME:
679			if ((error = iwm_store_cscheme(sc,
680			    tlv_data, tlv_len)) != 0) {
681				device_printf(sc->sc_dev,
682				    "%s: iwm_store_cscheme(): returned %d\n",
683				    __func__, error);
684				goto parse_out;
685			}
686			break;
687		case IWM_UCODE_TLV_NUM_OF_CPU:
688			if (tlv_len != sizeof(uint32_t)) {
689				device_printf(sc->sc_dev,
690				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
691				    __func__, tlv_len);
692				error = EINVAL;
693				goto parse_out;
694			}
695			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
696			if (num_of_cpus == 2) {
697				fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
698					TRUE;
699				fw->img[IWM_UCODE_INIT].is_dual_cpus =
700					TRUE;
701				fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
702					TRUE;
703			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
704				device_printf(sc->sc_dev,
705				    "%s: Driver supports only 1 or 2 CPUs\n",
706				    __func__);
707				error = EINVAL;
708				goto parse_out;
709			}
710			break;
711		case IWM_UCODE_TLV_SEC_RT:
712			if ((error = iwm_firmware_store_section(sc,
713			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
714				device_printf(sc->sc_dev,
715				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
716				    __func__, error);
717				goto parse_out;
718			}
719			break;
720		case IWM_UCODE_TLV_SEC_INIT:
721			if ((error = iwm_firmware_store_section(sc,
722			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
723				device_printf(sc->sc_dev,
724				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
725				    __func__, error);
726				goto parse_out;
727			}
728			break;
729		case IWM_UCODE_TLV_SEC_WOWLAN:
730			if ((error = iwm_firmware_store_section(sc,
731			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
732				device_printf(sc->sc_dev,
733				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
734				    __func__, error);
735				goto parse_out;
736			}
737			break;
738		case IWM_UCODE_TLV_DEF_CALIB:
739			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
740				device_printf(sc->sc_dev,
741				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
742				    __func__, tlv_len,
743				    sizeof(struct iwm_tlv_calib_data));
744				error = EINVAL;
745				goto parse_out;
746			}
747			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
748				device_printf(sc->sc_dev,
749				    "%s: iwm_set_default_calib() failed: %d\n",
750				    __func__, error);
751				goto parse_out;
752			}
753			break;
754		case IWM_UCODE_TLV_PHY_SKU:
755			if (tlv_len != sizeof(uint32_t)) {
756				error = EINVAL;
757				device_printf(sc->sc_dev,
758				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
759				    __func__, tlv_len);
760				goto parse_out;
761			}
762			sc->sc_fw.phy_config =
763			    le32_to_cpup((const uint32_t *)tlv_data);
764			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
765						  IWM_FW_PHY_CFG_TX_CHAIN) >>
766						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
767			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
768						  IWM_FW_PHY_CFG_RX_CHAIN) >>
769						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
770			break;
771
772		case IWM_UCODE_TLV_API_CHANGES_SET: {
773			if (tlv_len != sizeof(struct iwm_ucode_api)) {
774				error = EINVAL;
775				goto parse_out;
776			}
777			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
778				error = EINVAL;
779				goto parse_out;
780			}
781			break;
782		}
783
784		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
785			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
786				error = EINVAL;
787				goto parse_out;
788			}
789			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
790				error = EINVAL;
791				goto parse_out;
792			}
793			break;
794		}
795
796		case 48: /* undocumented TLV */
797		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
798		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
799			/* ignore, not used by current driver */
800			break;
801
802		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
803			if ((error = iwm_firmware_store_section(sc,
804			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
805			    tlv_len)) != 0)
806				goto parse_out;
807			break;
808
809		case IWM_UCODE_TLV_PAGING:
810			if (tlv_len != sizeof(uint32_t)) {
811				error = EINVAL;
812				goto parse_out;
813			}
814			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
815
816			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
817			    "%s: Paging: paging enabled (size = %u bytes)\n",
818			    __func__, paging_mem_size);
819			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
820				device_printf(sc->sc_dev,
821					"%s: Paging: driver supports up to %u bytes for paging image\n",
822					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
823				error = EINVAL;
824				goto out;
825			}
826			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
827				device_printf(sc->sc_dev,
828				    "%s: Paging: image isn't multiple %u\n",
829				    __func__, IWM_FW_PAGING_SIZE);
830				error = EINVAL;
831				goto out;
832			}
833
834			sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
835			    paging_mem_size;
836			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
837			sc->sc_fw.img[usniffer_img].paging_mem_size =
838			    paging_mem_size;
839			break;
840
841		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
842			if (tlv_len != sizeof(uint32_t)) {
843				error = EINVAL;
844				goto parse_out;
845			}
846			capa->n_scan_channels =
847			    le32_to_cpup((const uint32_t *)tlv_data);
848			break;
849
850		case IWM_UCODE_TLV_FW_VERSION:
851			if (tlv_len != sizeof(uint32_t) * 3) {
852				error = EINVAL;
853				goto parse_out;
854			}
855			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
856			    "%d.%d.%d",
857			    le32toh(((const uint32_t *)tlv_data)[0]),
858			    le32toh(((const uint32_t *)tlv_data)[1]),
859			    le32toh(((const uint32_t *)tlv_data)[2]));
860			break;
861
862		case IWM_UCODE_TLV_FW_MEM_SEG:
863			break;
864
865		default:
866			device_printf(sc->sc_dev,
867			    "%s: unknown firmware section %d, abort\n",
868			    __func__, tlv_type);
869			error = EINVAL;
870			goto parse_out;
871		}
872	}
873
874	KASSERT(error == 0, ("unhandled error"));
875
876 parse_out:
877	if (error) {
878		device_printf(sc->sc_dev, "firmware parse error %d, "
879		    "section type %d\n", error, tlv_type);
880	}
881
882 out:
883	if (error) {
884		if (fw->fw_fp != NULL)
885			iwm_fw_info_free(fw);
886	}
887
888	return error;
889}
890
891/*
892 * DMA resource routines
893 */
894
895/* fwmem is used to load firmware onto the card */
896static int
897iwm_alloc_fwmem(struct iwm_softc *sc)
898{
899	/* Must be aligned on a 16-byte boundary. */
900	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
902}
903
904/* tx scheduler rings.  not used? */
905static int
906iwm_alloc_sched(struct iwm_softc *sc)
907{
908	/* TX scheduler rings must be aligned on a 1KB boundary. */
909	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
910	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
911}
912
913/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
914static int
915iwm_alloc_kw(struct iwm_softc *sc)
916{
917	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
918}
919
920/* interrupt cause table */
921static int
922iwm_alloc_ict(struct iwm_softc *sc)
923{
924	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
925	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
926}
927
928static int
929iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
930{
931	bus_size_t size;
932	int i, error;
933
934	ring->cur = 0;
935
936	/* Allocate RX descriptors (256-byte aligned). */
937	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
938	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
939	if (error != 0) {
940		device_printf(sc->sc_dev,
941		    "could not allocate RX ring DMA memory\n");
942		goto fail;
943	}
944	ring->desc = ring->desc_dma.vaddr;
945
946	/* Allocate RX status area (16-byte aligned). */
947	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
948	    sizeof(*ring->stat), 16);
949	if (error != 0) {
950		device_printf(sc->sc_dev,
951		    "could not allocate RX status DMA memory\n");
952		goto fail;
953	}
954	ring->stat = ring->stat_dma.vaddr;
955
956        /* Create RX buffer DMA tag. */
957        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
958            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
959            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
960        if (error != 0) {
961                device_printf(sc->sc_dev,
962                    "%s: could not create RX buf DMA tag, error %d\n",
963                    __func__, error);
964                goto fail;
965        }
966
967	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
968	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
969	if (error != 0) {
970		device_printf(sc->sc_dev,
971		    "%s: could not create RX buf DMA map, error %d\n",
972		    __func__, error);
973		goto fail;
974	}
975	/*
976	 * Allocate and map RX buffers.
977	 */
978	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
979		struct iwm_rx_data *data = &ring->data[i];
980		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
981		if (error != 0) {
982			device_printf(sc->sc_dev,
983			    "%s: could not create RX buf DMA map, error %d\n",
984			    __func__, error);
985			goto fail;
986		}
987		data->m = NULL;
988
989		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
990			goto fail;
991		}
992	}
993	return 0;
994
995fail:	iwm_free_rx_ring(sc, ring);
996	return error;
997}
998
999static void
1000iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1001{
1002	/* Reset the ring state */
1003	ring->cur = 0;
1004
1005	/*
1006	 * The hw rx ring index in shared memory must also be cleared,
1007	 * otherwise the discrepancy can cause reprocessing chaos.
1008	 */
1009	if (sc->rxq.stat)
1010		memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1011}
1012
1013static void
1014iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1015{
1016	int i;
1017
1018	iwm_dma_contig_free(&ring->desc_dma);
1019	iwm_dma_contig_free(&ring->stat_dma);
1020
1021	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1022		struct iwm_rx_data *data = &ring->data[i];
1023
1024		if (data->m != NULL) {
1025			bus_dmamap_sync(ring->data_dmat, data->map,
1026			    BUS_DMASYNC_POSTREAD);
1027			bus_dmamap_unload(ring->data_dmat, data->map);
1028			m_freem(data->m);
1029			data->m = NULL;
1030		}
1031		if (data->map != NULL) {
1032			bus_dmamap_destroy(ring->data_dmat, data->map);
1033			data->map = NULL;
1034		}
1035	}
1036	if (ring->spare_map != NULL) {
1037		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1038		ring->spare_map = NULL;
1039	}
1040	if (ring->data_dmat != NULL) {
1041		bus_dma_tag_destroy(ring->data_dmat);
1042		ring->data_dmat = NULL;
1043	}
1044}
1045
1046static int
1047iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1048{
1049	bus_addr_t paddr;
1050	bus_size_t size;
1051	size_t maxsize;
1052	int nsegments;
1053	int i, error;
1054
1055	ring->qid = qid;
1056	ring->queued = 0;
1057	ring->cur = 0;
1058
1059	/* Allocate TX descriptors (256-byte aligned). */
1060	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1061	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1062	if (error != 0) {
1063		device_printf(sc->sc_dev,
1064		    "could not allocate TX ring DMA memory\n");
1065		goto fail;
1066	}
1067	ring->desc = ring->desc_dma.vaddr;
1068
1069	/*
1070	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1071	 * to allocate commands space for other rings.
1072	 */
1073	if (qid > IWM_MVM_CMD_QUEUE)
1074		return 0;
1075
1076	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1077	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1078	if (error != 0) {
1079		device_printf(sc->sc_dev,
1080		    "could not allocate TX cmd DMA memory\n");
1081		goto fail;
1082	}
1083	ring->cmd = ring->cmd_dma.vaddr;
1084
1085	/* FW commands may require more mapped space than packets. */
1086	if (qid == IWM_MVM_CMD_QUEUE) {
1087		maxsize = IWM_RBUF_SIZE;
1088		nsegments = 1;
1089	} else {
1090		maxsize = MCLBYTES;
1091		nsegments = IWM_MAX_SCATTER - 2;
1092	}
1093
1094	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1095	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1096            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1097	if (error != 0) {
1098		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1099		goto fail;
1100	}
1101
1102	paddr = ring->cmd_dma.paddr;
1103	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1104		struct iwm_tx_data *data = &ring->data[i];
1105
1106		data->cmd_paddr = paddr;
1107		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1108		    + offsetof(struct iwm_tx_cmd, scratch);
1109		paddr += sizeof(struct iwm_device_cmd);
1110
1111		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1112		if (error != 0) {
1113			device_printf(sc->sc_dev,
1114			    "could not create TX buf DMA map\n");
1115			goto fail;
1116		}
1117	}
1118	KASSERT(paddr == ring->cmd_dma.paddr + size,
1119	    ("invalid physical address"));
1120	return 0;
1121
1122fail:	iwm_free_tx_ring(sc, ring);
1123	return error;
1124}
1125
1126static void
1127iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1128{
1129	int i;
1130
1131	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1132		struct iwm_tx_data *data = &ring->data[i];
1133
1134		if (data->m != NULL) {
1135			bus_dmamap_sync(ring->data_dmat, data->map,
1136			    BUS_DMASYNC_POSTWRITE);
1137			bus_dmamap_unload(ring->data_dmat, data->map);
1138			m_freem(data->m);
1139			data->m = NULL;
1140		}
1141	}
1142	/* Clear TX descriptors. */
1143	if (ring->desc)
1144		memset(ring->desc, 0, ring->desc_dma.size);
1145	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1146	    BUS_DMASYNC_PREWRITE);
1147	sc->qfullmsk &= ~(1 << ring->qid);
1148	ring->queued = 0;
1149	ring->cur = 0;
1150
1151	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1152		iwm_pcie_clear_cmd_in_flight(sc);
1153}
1154
1155static void
1156iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1157{
1158	int i;
1159
1160	iwm_dma_contig_free(&ring->desc_dma);
1161	iwm_dma_contig_free(&ring->cmd_dma);
1162
1163	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1164		struct iwm_tx_data *data = &ring->data[i];
1165
1166		if (data->m != NULL) {
1167			bus_dmamap_sync(ring->data_dmat, data->map,
1168			    BUS_DMASYNC_POSTWRITE);
1169			bus_dmamap_unload(ring->data_dmat, data->map);
1170			m_freem(data->m);
1171			data->m = NULL;
1172		}
1173		if (data->map != NULL) {
1174			bus_dmamap_destroy(ring->data_dmat, data->map);
1175			data->map = NULL;
1176		}
1177	}
1178	if (ring->data_dmat != NULL) {
1179		bus_dma_tag_destroy(ring->data_dmat);
1180		ring->data_dmat = NULL;
1181	}
1182}
1183
1184/*
1185 * High-level hardware frobbing routines
1186 */
1187
1188static void
1189iwm_enable_interrupts(struct iwm_softc *sc)
1190{
1191	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1192	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1193}
1194
1195static void
1196iwm_restore_interrupts(struct iwm_softc *sc)
1197{
1198	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1199}
1200
1201static void
1202iwm_disable_interrupts(struct iwm_softc *sc)
1203{
1204	/* disable interrupts */
1205	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1206
1207	/* acknowledge all interrupts */
1208	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1209	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1210}
1211
1212static void
1213iwm_ict_reset(struct iwm_softc *sc)
1214{
1215	iwm_disable_interrupts(sc);
1216
1217	/* Reset ICT table. */
1218	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1219	sc->ict_cur = 0;
1220
1221	/* Set physical address of ICT table (4KB aligned). */
1222	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1223	    IWM_CSR_DRAM_INT_TBL_ENABLE
1224	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1225	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1226	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1227
1228	/* Switch to ICT interrupt mode in driver. */
1229	sc->sc_flags |= IWM_FLAG_USE_ICT;
1230
1231	/* Re-enable interrupts. */
1232	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1233	iwm_enable_interrupts(sc);
1234}
1235
1236/* iwlwifi pcie/trans.c */
1237
1238/*
1239 * Since this .. hard-resets things, it's time to actually
1240 * mark the first vap (if any) as having no mac context.
1241 * It's annoying, but since the driver is potentially being
1242 * stop/start'ed whilst active (thanks openbsd port!) we
1243 * have to correctly track this.
1244 */
1245static void
1246iwm_stop_device(struct iwm_softc *sc)
1247{
1248	struct ieee80211com *ic = &sc->sc_ic;
1249	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1250	int chnl, qid;
1251	uint32_t mask = 0;
1252
1253	/* tell the device to stop sending interrupts */
1254	iwm_disable_interrupts(sc);
1255
1256	/*
1257	 * FreeBSD-local: mark the first vap as not-uploaded,
1258	 * so the next transition through auth/assoc
1259	 * will correctly populate the MAC context.
1260	 */
1261	if (vap) {
1262		struct iwm_vap *iv = IWM_VAP(vap);
1263		iv->phy_ctxt = NULL;
1264		iv->is_uploaded = 0;
1265	}
1266	sc->sc_firmware_state = 0;
1267	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1268
1269	/* device going down, Stop using ICT table */
1270	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1271
1272	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1273
1274	if (iwm_nic_lock(sc)) {
1275		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1276
1277		/* Stop each Tx DMA channel */
1278		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1279			IWM_WRITE(sc,
1280			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1281			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1282		}
1283
1284		/* Wait for DMA channels to be idle */
1285		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1286		    5000)) {
1287			device_printf(sc->sc_dev,
1288			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1289			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1290		}
1291		iwm_nic_unlock(sc);
1292	}
1293	iwm_pcie_rx_stop(sc);
1294
1295	/* Stop RX ring. */
1296	iwm_reset_rx_ring(sc, &sc->rxq);
1297
1298	/* Reset all TX rings. */
1299	for (qid = 0; qid < nitems(sc->txq); qid++)
1300		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1301
1302	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1303		/* Power-down device's busmaster DMA clocks */
1304		if (iwm_nic_lock(sc)) {
1305			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1306			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1307			iwm_nic_unlock(sc);
1308		}
1309		DELAY(5);
1310	}
1311
1312	/* Make sure (redundant) we've released our request to stay awake */
1313	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1314	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1315
1316	/* Stop the device, and put it in low power state */
1317	iwm_apm_stop(sc);
1318
1319	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1320	 * Clean again the interrupt here
1321	 */
1322	iwm_disable_interrupts(sc);
1323	/* stop and reset the on-board processor */
1324	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1325
1326	/*
1327	 * Even if we stop the HW, we still want the RF kill
1328	 * interrupt
1329	 */
1330	iwm_enable_rfkill_int(sc);
1331	iwm_check_rfkill(sc);
1332}
1333
1334/* iwlwifi: mvm/ops.c */
1335static void
1336iwm_mvm_nic_config(struct iwm_softc *sc)
1337{
1338	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1339	uint32_t reg_val = 0;
1340	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1341
1342	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1343	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1344	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1345	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1346	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1347	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1348
1349	/* SKU control */
1350	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1351	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1352	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1353	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1354
1355	/* radio configuration */
1356	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1357	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1358	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1359
1360	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1361
1362	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1363	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1364	    radio_cfg_step, radio_cfg_dash);
1365
1366	/*
1367	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1368	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1369	 * to lose ownership and not being able to obtain it back.
1370	 */
1371	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1372		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1373		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1374		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1375	}
1376}
1377
1378static int
1379iwm_nic_rx_init(struct iwm_softc *sc)
1380{
1381	/*
1382	 * Initialize RX ring.  This is from the iwn driver.
1383	 */
1384	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1385
1386	/* Stop Rx DMA */
1387	iwm_pcie_rx_stop(sc);
1388
1389	if (!iwm_nic_lock(sc))
1390		return EBUSY;
1391
1392	/* reset and flush pointers */
1393	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1394	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1395	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1396	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1397
1398	/* Set physical address of RX ring (256-byte aligned). */
1399	IWM_WRITE(sc,
1400	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1401
1402	/* Set physical address of RX status (16-byte aligned). */
1403	IWM_WRITE(sc,
1404	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1405
1406	/* Enable Rx DMA
1407	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1408	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1409	 *      the credit mechanism in 5000 HW RX FIFO
1410	 * Direct rx interrupts to hosts
1411	 * Rx buffer size 4 or 8k or 12k
1412	 * RB timeout 0x10
1413	 * 256 RBDs
1414	 */
1415	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1416	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1417	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1418//#ifdef __HAIKU__
1419		/* multi-frame RX seems to cause UAFs and double-frees */
1420		IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1421//#endif
1422	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1423	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1424	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1425	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1426
1427	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1428
1429	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1430	if (sc->cfg->host_interrupt_operation_mode)
1431		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1432
1433	/*
1434	 * Thus sayeth el jefe (iwlwifi) via a comment:
1435	 *
1436	 * This value should initially be 0 (before preparing any
1437	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1438	 */
1439	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1440
1441	iwm_nic_unlock(sc);
1442
1443	return 0;
1444}
1445
1446static int
1447iwm_nic_tx_init(struct iwm_softc *sc)
1448{
1449	int qid;
1450
1451	if (!iwm_nic_lock(sc))
1452		return EBUSY;
1453
1454	/* Deactivate TX scheduler. */
1455	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1456
1457	/* Set physical address of "keep warm" page (16-byte aligned). */
1458	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1459
1460	/* Initialize TX rings. */
1461	for (qid = 0; qid < nitems(sc->txq); qid++) {
1462		struct iwm_tx_ring *txq = &sc->txq[qid];
1463
1464		/* Set physical address of TX ring (256-byte aligned). */
1465		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1466		    txq->desc_dma.paddr >> 8);
1467		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1468		    "%s: loading ring %d descriptors (%p) at %lx\n",
1469		    __func__,
1470		    qid, txq->desc,
1471		    (unsigned long) (txq->desc_dma.paddr >> 8));
1472	}
1473
1474	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1475
1476	iwm_nic_unlock(sc);
1477
1478	return 0;
1479}
1480
1481static int
1482iwm_nic_init(struct iwm_softc *sc)
1483{
1484	int error;
1485
1486	iwm_apm_init(sc);
1487	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1488		iwm_set_pwr(sc);
1489
1490	iwm_mvm_nic_config(sc);
1491
1492	if ((error = iwm_nic_rx_init(sc)) != 0)
1493		return error;
1494
1495	/*
1496	 * Ditto for TX, from iwn
1497	 */
1498	if ((error = iwm_nic_tx_init(sc)) != 0)
1499		return error;
1500
1501	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1502	    "%s: shadow registers enabled\n", __func__);
1503	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1504
1505	return 0;
1506}
1507
1508int
1509iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1510{
1511	if (!iwm_nic_lock(sc)) {
1512		device_printf(sc->sc_dev,
1513		    "%s: cannot enable txq %d\n",
1514		    __func__,
1515		    qid);
1516		return EBUSY;
1517	}
1518
1519	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1520
1521	if (qid == IWM_MVM_CMD_QUEUE) {
1522		/* unactivate before configuration */
1523		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1524		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1525		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1526
1527		iwm_nic_unlock(sc);
1528
1529		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1530
1531		if (!iwm_nic_lock(sc)) {
1532			device_printf(sc->sc_dev,
1533			    "%s: cannot enable txq %d\n", __func__, qid);
1534			return EBUSY;
1535		}
1536		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1537		iwm_nic_unlock(sc);
1538
1539		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1540		/* Set scheduler window size and frame limit. */
1541		iwm_write_mem32(sc,
1542		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1543		    sizeof(uint32_t),
1544		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1545		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1546		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1547		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1548
1549		if (!iwm_nic_lock(sc)) {
1550			device_printf(sc->sc_dev,
1551			    "%s: cannot enable txq %d\n", __func__, qid);
1552			return EBUSY;
1553		}
1554		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1555		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1556		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1557		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1558		    IWM_SCD_QUEUE_STTS_REG_MSK);
1559	} else {
1560		struct iwm_scd_txq_cfg_cmd cmd;
1561		int error;
1562
1563		iwm_nic_unlock(sc);
1564
1565		memset(&cmd, 0, sizeof(cmd));
1566		cmd.scd_queue = qid;
1567		cmd.enable = 1;
1568		cmd.sta_id = sta_id;
1569		cmd.tx_fifo = fifo;
1570		cmd.aggregate = 0;
1571		cmd.window = IWM_FRAME_LIMIT;
1572
1573		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1574		    sizeof(cmd), &cmd);
1575		if (error) {
1576			device_printf(sc->sc_dev,
1577			    "cannot enable txq %d\n", qid);
1578			return error;
1579		}
1580
1581		if (!iwm_nic_lock(sc))
1582			return EBUSY;
1583	}
1584
1585	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1586	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1587
1588	iwm_nic_unlock(sc);
1589
1590	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1591	    __func__, qid, fifo);
1592
1593	return 0;
1594}
1595
1596static int
1597iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1598{
1599	int error, chnl;
1600
1601	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1602	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1603
1604	if (!iwm_nic_lock(sc))
1605		return EBUSY;
1606
1607#ifndef __HAIKU__
1608	iwm_ict_reset(sc);
1609#endif
1610
1611	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1612	if (scd_base_addr != 0 &&
1613	    scd_base_addr != sc->scd_base_addr) {
1614		device_printf(sc->sc_dev,
1615		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1616		    __func__, sc->scd_base_addr, scd_base_addr);
1617	}
1618
1619	iwm_nic_unlock(sc);
1620
1621	/* reset context data, TX status and translation data */
1622	error = iwm_write_mem(sc,
1623	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1624	    NULL, clear_dwords);
1625	if (error)
1626		return EBUSY;
1627
1628	if (!iwm_nic_lock(sc))
1629		return EBUSY;
1630
1631	/* Set physical address of TX scheduler rings (1KB aligned). */
1632	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1633
1634	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1635
1636	iwm_nic_unlock(sc);
1637
1638	/* enable command channel */
1639	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1640	if (error)
1641		return error;
1642
1643	if (!iwm_nic_lock(sc))
1644		return EBUSY;
1645
1646	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1647
1648	/* Enable DMA channels. */
1649	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1650		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1651		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1652		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1653	}
1654
1655	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1656	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1657
1658	iwm_nic_unlock(sc);
1659
1660	/* Enable L1-Active */
1661	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1662		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1663		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1664	}
1665
1666	return error;
1667}
1668
1669/*
1670 * NVM read access and content parsing.  We do not support
1671 * external NVM or writing NVM.
1672 * iwlwifi/mvm/nvm.c
1673 */
1674
1675/* Default NVM size to read */
1676#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1677
1678#define IWM_NVM_WRITE_OPCODE 1
1679#define IWM_NVM_READ_OPCODE 0
1680
1681/* load nvm chunk response */
1682enum {
1683	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1684	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1685};
1686
1687static int
1688iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1689	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1690{
1691	struct iwm_nvm_access_cmd nvm_access_cmd = {
1692		.offset = htole16(offset),
1693		.length = htole16(length),
1694		.type = htole16(section),
1695		.op_code = IWM_NVM_READ_OPCODE,
1696	};
1697	struct iwm_nvm_access_resp *nvm_resp;
1698	struct iwm_rx_packet *pkt;
1699	struct iwm_host_cmd cmd = {
1700		.id = IWM_NVM_ACCESS_CMD,
1701		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1702		.data = { &nvm_access_cmd, },
1703	};
1704	int ret, bytes_read, offset_read;
1705	uint8_t *resp_data;
1706
1707	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1708
1709	ret = iwm_send_cmd(sc, &cmd);
1710	if (ret) {
1711		device_printf(sc->sc_dev,
1712		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1713		return ret;
1714	}
1715
1716	pkt = cmd.resp_pkt;
1717
1718	/* Extract NVM response */
1719	nvm_resp = (void *)pkt->data;
1720	ret = le16toh(nvm_resp->status);
1721	bytes_read = le16toh(nvm_resp->length);
1722	offset_read = le16toh(nvm_resp->offset);
1723	resp_data = nvm_resp->data;
1724	if (ret) {
1725		if ((offset != 0) &&
1726		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1727			/*
1728			 * meaning of NOT_VALID_ADDRESS:
1729			 * driver try to read chunk from address that is
1730			 * multiple of 2K and got an error since addr is empty.
1731			 * meaning of (offset != 0): driver already
1732			 * read valid data from another chunk so this case
1733			 * is not an error.
1734			 */
1735			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1736				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1737				    offset);
1738			*len = 0;
1739			ret = 0;
1740		} else {
1741			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1742				    "NVM access command failed with status %d\n", ret);
1743			ret = EIO;
1744		}
1745		goto exit;
1746	}
1747
1748	if (offset_read != offset) {
1749		device_printf(sc->sc_dev,
1750		    "NVM ACCESS response with invalid offset %d\n",
1751		    offset_read);
1752		ret = EINVAL;
1753		goto exit;
1754	}
1755
1756	if (bytes_read > length) {
1757		device_printf(sc->sc_dev,
1758		    "NVM ACCESS response with too much data "
1759		    "(%d bytes requested, %d bytes received)\n",
1760		    length, bytes_read);
1761		ret = EINVAL;
1762		goto exit;
1763	}
1764
1765	/* Write data to NVM */
1766	memcpy(data + offset, resp_data, bytes_read);
1767	*len = bytes_read;
1768
1769 exit:
1770	iwm_free_resp(sc, &cmd);
1771	return ret;
1772}
1773
1774/*
1775 * Reads an NVM section completely.
1776 * NICs prior to 7000 family don't have a real NVM, but just read
1777 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1778 * by uCode, we need to manually check in this case that we don't
1779 * overflow and try to read more than the EEPROM size.
1780 * For 7000 family NICs, we supply the maximal size we can read, and
1781 * the uCode fills the response with as much data as we can,
1782 * without overflowing, so no check is needed.
1783 */
1784static int
1785iwm_nvm_read_section(struct iwm_softc *sc,
1786	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1787{
1788	uint16_t seglen, length, offset = 0;
1789	int ret;
1790
1791	/* Set nvm section read length */
1792	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1793
1794	seglen = length;
1795
1796	/* Read the NVM until exhausted (reading less than requested) */
1797	while (seglen == length) {
1798		/* Check no memory assumptions fail and cause an overflow */
1799		if ((size_read + offset + length) >
1800		    sc->cfg->eeprom_size) {
1801			device_printf(sc->sc_dev,
1802			    "EEPROM size is too small for NVM\n");
1803			return ENOBUFS;
1804		}
1805
1806		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1807		if (ret) {
1808			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1809				    "Cannot read NVM from section %d offset %d, length %d\n",
1810				    section, offset, length);
1811			return ret;
1812		}
1813		offset += seglen;
1814	}
1815
1816	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1817		    "NVM section %d read completed\n", section);
1818	*len = offset;
1819	return 0;
1820}
1821
1822/*
1823 * BEGIN IWM_NVM_PARSE
1824 */
1825
1826/* iwlwifi/iwl-nvm-parse.c */
1827
1828/* NVM offsets (in words) definitions */
1829enum iwm_nvm_offsets {
1830	/* NVM HW-Section offset (in words) definitions */
1831	IWM_HW_ADDR = 0x15,
1832
1833/* NVM SW-Section offset (in words) definitions */
1834	IWM_NVM_SW_SECTION = 0x1C0,
1835	IWM_NVM_VERSION = 0,
1836	IWM_RADIO_CFG = 1,
1837	IWM_SKU = 2,
1838	IWM_N_HW_ADDRS = 3,
1839	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1840
1841/* NVM calibration section offset (in words) definitions */
1842	IWM_NVM_CALIB_SECTION = 0x2B8,
1843	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1844};
1845
1846enum iwm_8000_nvm_offsets {
1847	/* NVM HW-Section offset (in words) definitions */
1848	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1849	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1850	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1851	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1852	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1853
1854	/* NVM SW-Section offset (in words) definitions */
1855	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1856	IWM_NVM_VERSION_8000 = 0,
1857	IWM_RADIO_CFG_8000 = 0,
1858	IWM_SKU_8000 = 2,
1859	IWM_N_HW_ADDRS_8000 = 3,
1860
1861	/* NVM REGULATORY -Section offset (in words) definitions */
1862	IWM_NVM_CHANNELS_8000 = 0,
1863	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1864	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1865	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1866
1867	/* NVM calibration section offset (in words) definitions */
1868	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1869	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1870};
1871
1872/* SKU Capabilities (actual values from NVM definition) */
1873enum nvm_sku_bits {
1874	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1875	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1876	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1877	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1878};
1879
1880/* radio config bits (actual values from NVM definition) */
1881#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1882#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1883#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1884#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1885#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1886#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1887
1888#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1889#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1890#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1891#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1892#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1893#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1894
1895/**
1896 * enum iwm_nvm_channel_flags - channel flags in NVM
1897 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1898 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1899 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1900 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1901 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1902 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1903 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1904 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1905 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1906 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1907 */
1908enum iwm_nvm_channel_flags {
1909	IWM_NVM_CHANNEL_VALID = (1 << 0),
1910	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1911	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1912	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1913	IWM_NVM_CHANNEL_DFS = (1 << 7),
1914	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1915	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1916	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1917	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1918};
1919
1920/*
1921 * Translate EEPROM flags to net80211.
1922 */
1923static uint32_t
1924iwm_eeprom_channel_flags(uint16_t ch_flags)
1925{
1926	uint32_t nflags;
1927
1928	nflags = 0;
1929	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1930		nflags |= IEEE80211_CHAN_PASSIVE;
1931	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1932		nflags |= IEEE80211_CHAN_NOADHOC;
1933	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1934		nflags |= IEEE80211_CHAN_DFS;
1935		/* Just in case. */
1936		nflags |= IEEE80211_CHAN_NOADHOC;
1937	}
1938
1939	return (nflags);
1940}
1941
1942static void
1943iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1944    int maxchans, int *nchans, int ch_idx, size_t ch_num,
1945    const uint8_t bands[])
1946{
1947	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1948	uint32_t nflags;
1949	uint16_t ch_flags;
1950	uint8_t ieee;
1951	int error;
1952
1953	for (; ch_idx < ch_num; ch_idx++) {
1954		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1955		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1956			ieee = iwm_nvm_channels[ch_idx];
1957		else
1958			ieee = iwm_nvm_channels_8000[ch_idx];
1959
1960		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1961			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1962			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1963			    ieee, ch_flags,
1964			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1965			    "5.2" : "2.4");
1966			continue;
1967		}
1968
1969		nflags = iwm_eeprom_channel_flags(ch_flags);
1970		error = ieee80211_add_channel(chans, maxchans, nchans,
1971		    ieee, 0, 0, nflags, bands);
1972		if (error != 0)
1973			break;
1974
1975		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1976		    "Ch. %d Flags %x [%sGHz] - Added\n",
1977		    ieee, ch_flags,
1978		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1979		    "5.2" : "2.4");
1980	}
1981}
1982
1983static void
1984iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1985    struct ieee80211_channel chans[])
1986{
1987	struct iwm_softc *sc = ic->ic_softc;
1988	struct iwm_nvm_data *data = sc->nvm_data;
1989	uint8_t bands[IEEE80211_MODE_BYTES];
1990	size_t ch_num;
1991
1992	memset(bands, 0, sizeof(bands));
1993	/* 1-13: 11b/g channels. */
1994	setbit(bands, IEEE80211_MODE_11B);
1995	setbit(bands, IEEE80211_MODE_11G);
1996	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1997	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
1998
1999	/* 14: 11b channel only. */
2000	clrbit(bands, IEEE80211_MODE_11G);
2001	iwm_add_channel_band(sc, chans, maxchans, nchans,
2002	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2003
2004	if (data->sku_cap_band_52GHz_enable) {
2005		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2006			ch_num = nitems(iwm_nvm_channels);
2007		else
2008			ch_num = nitems(iwm_nvm_channels_8000);
2009		memset(bands, 0, sizeof(bands));
2010		setbit(bands, IEEE80211_MODE_11A);
2011		iwm_add_channel_band(sc, chans, maxchans, nchans,
2012		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2013	}
2014}
2015
2016static void
2017iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2018	const uint16_t *mac_override, const uint16_t *nvm_hw)
2019{
2020	const uint8_t *hw_addr;
2021
2022	if (mac_override) {
2023		static const uint8_t reserved_mac[] = {
2024			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2025		};
2026
2027		hw_addr = (const uint8_t *)(mac_override +
2028				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2029
2030		/*
2031		 * Store the MAC address from MAO section.
2032		 * No byte swapping is required in MAO section
2033		 */
2034		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2035
2036		/*
2037		 * Force the use of the OTP MAC address in case of reserved MAC
2038		 * address in the NVM, or if address is given but invalid.
2039		 */
2040		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2041		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2042		    iwm_is_valid_ether_addr(data->hw_addr) &&
2043		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2044			return;
2045
2046		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2047		    "%s: mac address from nvm override section invalid\n",
2048		    __func__);
2049	}
2050
2051	if (nvm_hw) {
2052		/* read the mac address from WFMP registers */
2053		uint32_t mac_addr0 =
2054		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2055		uint32_t mac_addr1 =
2056		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2057
2058		hw_addr = (const uint8_t *)&mac_addr0;
2059		data->hw_addr[0] = hw_addr[3];
2060		data->hw_addr[1] = hw_addr[2];
2061		data->hw_addr[2] = hw_addr[1];
2062		data->hw_addr[3] = hw_addr[0];
2063
2064		hw_addr = (const uint8_t *)&mac_addr1;
2065		data->hw_addr[4] = hw_addr[1];
2066		data->hw_addr[5] = hw_addr[0];
2067
2068		return;
2069	}
2070
2071	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2072	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2073}
2074
2075static int
2076iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2077	    const uint16_t *phy_sku)
2078{
2079	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2080		return le16_to_cpup(nvm_sw + IWM_SKU);
2081
2082	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2083}
2084
2085static int
2086iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2087{
2088	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2089		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2090	else
2091		return le32_to_cpup((const uint32_t *)(nvm_sw +
2092						IWM_NVM_VERSION_8000));
2093}
2094
2095static int
2096iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2097		  const uint16_t *phy_sku)
2098{
2099        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2100                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2101
2102        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2103}
2104
2105static int
2106iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2107{
2108	int n_hw_addr;
2109
2110	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2111		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2112
2113	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2114
2115        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2116}
2117
2118static void
2119iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2120		  uint32_t radio_cfg)
2121{
2122	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2123		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2124		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2125		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2126		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2127		return;
2128	}
2129
2130	/* set the radio configuration for family 8000 */
2131	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2132	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2133	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2134	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2135	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2136	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2137}
2138
2139static int
2140iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2141		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2142{
2143#ifdef notyet /* for FAMILY 9000 */
2144	if (cfg->mac_addr_from_csr) {
2145		iwm_set_hw_address_from_csr(sc, data);
2146        } else
2147#endif
2148	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2149		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2150
2151		/* The byte order is little endian 16 bit, meaning 214365 */
2152		data->hw_addr[0] = hw_addr[1];
2153		data->hw_addr[1] = hw_addr[0];
2154		data->hw_addr[2] = hw_addr[3];
2155		data->hw_addr[3] = hw_addr[2];
2156		data->hw_addr[4] = hw_addr[5];
2157		data->hw_addr[5] = hw_addr[4];
2158	} else {
2159		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2160	}
2161
2162	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2163		device_printf(sc->sc_dev, "no valid mac address was found\n");
2164		return EINVAL;
2165	}
2166
2167	return 0;
2168}
2169
2170static struct iwm_nvm_data *
2171iwm_parse_nvm_data(struct iwm_softc *sc,
2172		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2173		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2174		   const uint16_t *phy_sku, const uint16_t *regulatory)
2175{
2176	struct iwm_nvm_data *data;
2177	uint32_t sku, radio_cfg;
2178	uint16_t lar_config;
2179
2180	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2181		data = malloc(sizeof(*data) +
2182		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2183		    M_DEVBUF, M_NOWAIT | M_ZERO);
2184	} else {
2185		data = malloc(sizeof(*data) +
2186		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2187		    M_DEVBUF, M_NOWAIT | M_ZERO);
2188	}
2189	if (!data)
2190		return NULL;
2191
2192	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2193
2194	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2195	iwm_set_radio_cfg(sc, data, radio_cfg);
2196
2197	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2198	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2199	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2200	data->sku_cap_11n_enable = 0;
2201
2202	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2203
2204	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2205		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2206				       IWM_NVM_LAR_OFFSET_8000_OLD :
2207				       IWM_NVM_LAR_OFFSET_8000;
2208
2209		lar_config = le16_to_cpup(regulatory + lar_offset);
2210		data->lar_enabled = !!(lar_config &
2211				       IWM_NVM_LAR_ENABLED_8000);
2212	}
2213
2214	/* If no valid mac address was found - bail out */
2215	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2216		free(data, M_DEVBUF);
2217		return NULL;
2218	}
2219
2220	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2221		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2222		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2223	} else {
2224		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2225		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2226	}
2227
2228	return data;
2229}
2230
2231static void
2232iwm_free_nvm_data(struct iwm_nvm_data *data)
2233{
2234	if (data != NULL)
2235		free(data, M_DEVBUF);
2236}
2237
2238static struct iwm_nvm_data *
2239iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2240{
2241	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2242
2243	/* Checking for required sections */
2244	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2245		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2246		    !sections[sc->cfg->nvm_hw_section_num].data) {
2247			device_printf(sc->sc_dev,
2248			    "Can't parse empty OTP/NVM sections\n");
2249			return NULL;
2250		}
2251	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2252		/* SW and REGULATORY sections are mandatory */
2253		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2254		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2255			device_printf(sc->sc_dev,
2256			    "Can't parse empty OTP/NVM sections\n");
2257			return NULL;
2258		}
2259		/* MAC_OVERRIDE or at least HW section must exist */
2260		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2261		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2262			device_printf(sc->sc_dev,
2263			    "Can't parse mac_address, empty sections\n");
2264			return NULL;
2265		}
2266
2267		/* PHY_SKU section is mandatory in B0 */
2268		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2269			device_printf(sc->sc_dev,
2270			    "Can't parse phy_sku in B0, empty sections\n");
2271			return NULL;
2272		}
2273	} else {
2274		panic("unknown device family %d\n", sc->cfg->device_family);
2275	}
2276
2277	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2278	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2279	calib = (const uint16_t *)
2280	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2281	regulatory = (const uint16_t *)
2282	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2283	mac_override = (const uint16_t *)
2284	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2285	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2286
2287	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2288	    phy_sku, regulatory);
2289}
2290
2291static int
2292iwm_nvm_init(struct iwm_softc *sc)
2293{
2294	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2295	int i, ret, section;
2296	uint32_t size_read = 0;
2297	uint8_t *nvm_buffer, *temp;
2298	uint16_t len;
2299
2300	memset(nvm_sections, 0, sizeof(nvm_sections));
2301
2302	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2303		return EINVAL;
2304
2305	/* load NVM values from nic */
2306	/* Read From FW NVM */
2307	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2308
2309	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2310	if (!nvm_buffer)
2311		return ENOMEM;
2312	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2313		/* we override the constness for initial read */
2314		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2315					   &len, size_read);
2316		if (ret)
2317			continue;
2318		size_read += len;
2319		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2320		if (!temp) {
2321			ret = ENOMEM;
2322			break;
2323		}
2324		memcpy(temp, nvm_buffer, len);
2325
2326		nvm_sections[section].data = temp;
2327		nvm_sections[section].length = len;
2328	}
2329	if (!size_read)
2330		device_printf(sc->sc_dev, "OTP is blank\n");
2331	free(nvm_buffer, M_DEVBUF);
2332
2333	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2334	if (!sc->nvm_data)
2335		return EINVAL;
2336	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2337		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2338
2339	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2340		if (nvm_sections[i].data != NULL)
2341			free(nvm_sections[i].data, M_DEVBUF);
2342	}
2343
2344	return 0;
2345}
2346
2347static int
2348iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2349	const struct iwm_fw_desc *section)
2350{
2351	struct iwm_dma_info *dma = &sc->fw_dma;
2352	uint8_t *v_addr;
2353	bus_addr_t p_addr;
2354	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2355	int ret = 0;
2356
2357	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2358		    "%s: [%d] uCode section being loaded...\n",
2359		    __func__, section_num);
2360
2361	v_addr = dma->vaddr;
2362	p_addr = dma->paddr;
2363
2364	for (offset = 0; offset < section->len; offset += chunk_sz) {
2365		uint32_t copy_size, dst_addr;
2366		int extended_addr = FALSE;
2367
2368		copy_size = MIN(chunk_sz, section->len - offset);
2369		dst_addr = section->offset + offset;
2370
2371		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2372		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2373			extended_addr = TRUE;
2374
2375		if (extended_addr)
2376			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2377					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2378
2379		memcpy(v_addr, (const uint8_t *)section->data + offset,
2380		    copy_size);
2381		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2382		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2383						   copy_size);
2384
2385		if (extended_addr)
2386			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2387					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2388
2389		if (ret) {
2390			device_printf(sc->sc_dev,
2391			    "%s: Could not load the [%d] uCode section\n",
2392			    __func__, section_num);
2393			break;
2394		}
2395	}
2396
2397	return ret;
2398}
2399
2400/*
2401 * ucode
2402 */
2403static int
2404iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2405			     bus_addr_t phy_addr, uint32_t byte_cnt)
2406{
2407	sc->sc_fw_chunk_done = 0;
2408
2409	if (!iwm_nic_lock(sc))
2410		return EBUSY;
2411
2412	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2413	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2414
2415	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2416	    dst_addr);
2417
2418	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2419	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2420
2421	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2422	    (iwm_get_dma_hi_addr(phy_addr)
2423	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2424
2425	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2426	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2427	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2428	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2429
2430	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2431	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2432	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2433	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2434
2435	iwm_nic_unlock(sc);
2436
2437	/* wait up to 5s for this segment to load */
2438	msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2439
2440	if (!sc->sc_fw_chunk_done) {
2441		device_printf(sc->sc_dev,
2442		    "fw chunk addr 0x%x len %d failed to load\n",
2443		    dst_addr, byte_cnt);
2444		return ETIMEDOUT;
2445	}
2446
2447	return 0;
2448}
2449
2450static int
2451iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2452	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2453{
2454	int shift_param;
2455	int i, ret = 0, sec_num = 0x1;
2456	uint32_t val, last_read_idx = 0;
2457
2458	if (cpu == 1) {
2459		shift_param = 0;
2460		*first_ucode_section = 0;
2461	} else {
2462		shift_param = 16;
2463		(*first_ucode_section)++;
2464	}
2465
2466	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2467		last_read_idx = i;
2468
2469		/*
2470		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2471		 * CPU1 to CPU2.
2472		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2473		 * CPU2 non paged to CPU2 paging sec.
2474		 */
2475		if (!image->sec[i].data ||
2476		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2477		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2478			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2479				    "Break since Data not valid or Empty section, sec = %d\n",
2480				    i);
2481			break;
2482		}
2483		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2484		if (ret)
2485			return ret;
2486
2487		/* Notify the ucode of the loaded section number and status */
2488		if (iwm_nic_lock(sc)) {
2489			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2490			val = val | (sec_num << shift_param);
2491			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2492			sec_num = (sec_num << 1) | 0x1;
2493			iwm_nic_unlock(sc);
2494		}
2495	}
2496
2497	*first_ucode_section = last_read_idx;
2498
2499	iwm_enable_interrupts(sc);
2500
2501	if (iwm_nic_lock(sc)) {
2502		if (cpu == 1)
2503			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2504		else
2505			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2506		iwm_nic_unlock(sc);
2507	}
2508
2509	return 0;
2510}
2511
2512static int
2513iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2514	const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2515{
2516	int shift_param;
2517	int i, ret = 0;
2518	uint32_t last_read_idx = 0;
2519
2520	if (cpu == 1) {
2521		shift_param = 0;
2522		*first_ucode_section = 0;
2523	} else {
2524		shift_param = 16;
2525		(*first_ucode_section)++;
2526	}
2527
2528	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2529		last_read_idx = i;
2530
2531		/*
2532		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2533		 * CPU1 to CPU2.
2534		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2535		 * CPU2 non paged to CPU2 paging sec.
2536		 */
2537		if (!image->sec[i].data ||
2538		    image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2539		    image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2540			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2541				    "Break since Data not valid or Empty section, sec = %d\n",
2542				     i);
2543			break;
2544		}
2545
2546		ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2547		if (ret)
2548			return ret;
2549	}
2550
2551	*first_ucode_section = last_read_idx;
2552
2553	return 0;
2554
2555}
2556
2557static int
2558iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2559{
2560	int ret = 0;
2561	int first_ucode_section;
2562
2563	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2564		     image->is_dual_cpus ? "Dual" : "Single");
2565
2566	/* load to FW the binary non secured sections of CPU1 */
2567	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2568	if (ret)
2569		return ret;
2570
2571	if (image->is_dual_cpus) {
2572		/* set CPU2 header address */
2573		if (iwm_nic_lock(sc)) {
2574			iwm_write_prph(sc,
2575				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2576				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2577			iwm_nic_unlock(sc);
2578		}
2579
2580		/* load to FW the binary sections of CPU2 */
2581		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2582						 &first_ucode_section);
2583		if (ret)
2584			return ret;
2585	}
2586
2587	iwm_enable_interrupts(sc);
2588
2589	/* release CPU reset */
2590	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2591
2592	return 0;
2593}
2594
2595int
2596iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2597	const struct iwm_fw_img *image)
2598{
2599	int ret = 0;
2600	int first_ucode_section;
2601
2602	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2603		    image->is_dual_cpus ? "Dual" : "Single");
2604
2605	/* configure the ucode to be ready to get the secured image */
2606	/* release CPU reset */
2607	if (iwm_nic_lock(sc)) {
2608		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2609		    IWM_RELEASE_CPU_RESET_BIT);
2610		iwm_nic_unlock(sc);
2611	}
2612
2613	/* load to FW the binary Secured sections of CPU1 */
2614	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2615	    &first_ucode_section);
2616	if (ret)
2617		return ret;
2618
2619	/* load to FW the binary sections of CPU2 */
2620	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2621	    &first_ucode_section);
2622}
2623
2624/* XXX Get rid of this definition */
2625static inline void
2626iwm_enable_fw_load_int(struct iwm_softc *sc)
2627{
2628	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2629	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2630	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2631}
2632
2633/* XXX Add proper rfkill support code */
2634static int
2635iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2636{
2637	int ret;
2638
2639	/* This may fail if AMT took ownership of the device */
2640	if (iwm_prepare_card_hw(sc)) {
2641		device_printf(sc->sc_dev,
2642		    "%s: Exit HW not ready\n", __func__);
2643		ret = EIO;
2644		goto out;
2645	}
2646
2647	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2648
2649	iwm_disable_interrupts(sc);
2650
2651	/* make sure rfkill handshake bits are cleared */
2652	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2653	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2654	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2655
2656	/* clear (again), then enable host interrupts */
2657	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2658
2659	ret = iwm_nic_init(sc);
2660	if (ret) {
2661		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2662		goto out;
2663	}
2664
2665	/*
2666	 * Now, we load the firmware and don't want to be interrupted, even
2667	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2668	 * FH_TX interrupt which is needed to load the firmware). If the
2669	 * RF-Kill switch is toggled, we will find out after having loaded
2670	 * the firmware and return the proper value to the caller.
2671	 */
2672	iwm_enable_fw_load_int(sc);
2673
2674	/* really make sure rfkill handshake bits are cleared */
2675	/* maybe we should write a few times more?  just to make sure */
2676	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2677	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2678
2679	/* Load the given image to the HW */
2680	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2681		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2682	else
2683		ret = iwm_pcie_load_given_ucode(sc, fw);
2684
2685	/* XXX re-check RF-Kill state */
2686
2687out:
2688	return ret;
2689}
2690
2691static int
2692iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2693{
2694	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2695		.valid = htole32(valid_tx_ant),
2696	};
2697
2698	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2699	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2700}
2701
2702/* iwlwifi: mvm/fw.c */
2703static int
2704iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2705{
2706	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2707	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2708
2709	/* Set parameters */
2710	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2711	phy_cfg_cmd.calib_control.event_trigger =
2712	    sc->sc_default_calib[ucode_type].event_trigger;
2713	phy_cfg_cmd.calib_control.flow_trigger =
2714	    sc->sc_default_calib[ucode_type].flow_trigger;
2715
2716	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2717	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2718	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2719	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2720}
2721
2722static int
2723iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2724{
2725	struct iwm_mvm_alive_data *alive_data = data;
2726	struct iwm_mvm_alive_resp_v3 *palive3;
2727	struct iwm_mvm_alive_resp *palive;
2728	struct iwm_umac_alive *umac;
2729	struct iwm_lmac_alive *lmac1;
2730	struct iwm_lmac_alive *lmac2 = NULL;
2731	uint16_t status;
2732
2733	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2734		palive = (void *)pkt->data;
2735		umac = &palive->umac_data;
2736		lmac1 = &palive->lmac_data[0];
2737		lmac2 = &palive->lmac_data[1];
2738		status = le16toh(palive->status);
2739	} else {
2740		palive3 = (void *)pkt->data;
2741		umac = &palive3->umac_data;
2742		lmac1 = &palive3->lmac_data;
2743		status = le16toh(palive3->status);
2744	}
2745
2746	sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2747	if (lmac2)
2748		sc->error_event_table[1] =
2749			le32toh(lmac2->error_event_table_ptr);
2750	sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2751	sc->umac_error_event_table = le32toh(umac->error_info_addr);
2752	alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2753	alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2754	if (sc->umac_error_event_table)
2755		sc->support_umac_log = TRUE;
2756
2757	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2758		    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2759		    status, lmac1->ver_type, lmac1->ver_subtype);
2760
2761	if (lmac2)
2762		IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2763
2764	IWM_DPRINTF(sc, IWM_DEBUG_FW,
2765		    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2766		    le32toh(umac->umac_major),
2767		    le32toh(umac->umac_minor));
2768
2769	return TRUE;
2770}
2771
2772static int
2773iwm_wait_phy_db_entry(struct iwm_softc *sc,
2774	struct iwm_rx_packet *pkt, void *data)
2775{
2776	struct iwm_phy_db *phy_db = data;
2777
2778	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2779		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2780			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2781			    __func__, pkt->hdr.code);
2782		}
2783		return TRUE;
2784	}
2785
2786	if (iwm_phy_db_set_section(phy_db, pkt)) {
2787		device_printf(sc->sc_dev,
2788		    "%s: iwm_phy_db_set_section failed\n", __func__);
2789	}
2790
2791	return FALSE;
2792}
2793
2794static int
2795iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2796	enum iwm_ucode_type ucode_type)
2797{
2798	struct iwm_notification_wait alive_wait;
2799	struct iwm_mvm_alive_data alive_data;
2800	const struct iwm_fw_img *fw;
2801	enum iwm_ucode_type old_type = sc->cur_ucode;
2802	int error;
2803	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2804
2805	fw = &sc->sc_fw.img[ucode_type];
2806	sc->cur_ucode = ucode_type;
2807	sc->ucode_loaded = FALSE;
2808
2809	memset(&alive_data, 0, sizeof(alive_data));
2810	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2811				   alive_cmd, nitems(alive_cmd),
2812				   iwm_alive_fn, &alive_data);
2813
2814	error = iwm_start_fw(sc, fw);
2815	if (error) {
2816		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2817		sc->cur_ucode = old_type;
2818		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2819		return error;
2820	}
2821
2822	/*
2823	 * Some things may run in the background now, but we
2824	 * just wait for the ALIVE notification here.
2825	 */
2826	IWM_UNLOCK(sc);
2827	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2828				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2829	IWM_LOCK(sc);
2830	if (error) {
2831		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2832			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2833			if (iwm_nic_lock(sc)) {
2834				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2835				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2836				iwm_nic_unlock(sc);
2837			}
2838			device_printf(sc->sc_dev,
2839			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2840			    a, b);
2841		}
2842		sc->cur_ucode = old_type;
2843		return error;
2844	}
2845
2846	if (!alive_data.valid) {
2847		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2848		    __func__);
2849		sc->cur_ucode = old_type;
2850		return EIO;
2851	}
2852
2853	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2854
2855	/*
2856	 * configure and operate fw paging mechanism.
2857	 * driver configures the paging flow only once, CPU2 paging image
2858	 * included in the IWM_UCODE_INIT image.
2859	 */
2860	if (fw->paging_mem_size) {
2861		error = iwm_save_fw_paging(sc, fw);
2862		if (error) {
2863			device_printf(sc->sc_dev,
2864			    "%s: failed to save the FW paging image\n",
2865			    __func__);
2866			return error;
2867		}
2868
2869		error = iwm_send_paging_cmd(sc, fw);
2870		if (error) {
2871			device_printf(sc->sc_dev,
2872			    "%s: failed to send the paging cmd\n", __func__);
2873			iwm_free_fw_paging(sc);
2874			return error;
2875		}
2876	}
2877
2878	if (!error)
2879		sc->ucode_loaded = TRUE;
2880	return error;
2881}
2882
2883/*
2884 * mvm misc bits
2885 */
2886
2887/*
2888 * follows iwlwifi/fw.c
2889 */
2890static int
2891iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2892{
2893	struct iwm_notification_wait calib_wait;
2894	static const uint16_t init_complete[] = {
2895		IWM_INIT_COMPLETE_NOTIF,
2896		IWM_CALIB_RES_NOTIF_PHY_DB
2897	};
2898	int ret;
2899
2900	/* do not operate with rfkill switch turned on */
2901	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2902		device_printf(sc->sc_dev,
2903		    "radio is disabled by hardware switch\n");
2904		return EPERM;
2905	}
2906
2907	iwm_init_notification_wait(sc->sc_notif_wait,
2908				   &calib_wait,
2909				   init_complete,
2910				   nitems(init_complete),
2911				   iwm_wait_phy_db_entry,
2912				   sc->sc_phy_db);
2913
2914	/* Will also start the device */
2915	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2916	if (ret) {
2917		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2918		    ret);
2919		goto error;
2920	}
2921
2922	if (justnvm) {
2923		/* Read nvm */
2924		ret = iwm_nvm_init(sc);
2925		if (ret) {
2926			device_printf(sc->sc_dev, "failed to read nvm\n");
2927			goto error;
2928		}
2929		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2930		goto error;
2931	}
2932
2933	ret = iwm_send_bt_init_conf(sc);
2934	if (ret) {
2935		device_printf(sc->sc_dev,
2936		    "failed to send bt coex configuration: %d\n", ret);
2937		goto error;
2938	}
2939
2940	/* Send TX valid antennas before triggering calibrations */
2941	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2942	if (ret) {
2943		device_printf(sc->sc_dev,
2944		    "failed to send antennas before calibration: %d\n", ret);
2945		goto error;
2946	}
2947
2948	/*
2949	 * Send phy configurations command to init uCode
2950	 * to start the 16.0 uCode init image internal calibrations.
2951	 */
2952	ret = iwm_send_phy_cfg_cmd(sc);
2953	if (ret) {
2954		device_printf(sc->sc_dev,
2955		    "%s: Failed to run INIT calibrations: %d\n",
2956		    __func__, ret);
2957		goto error;
2958	}
2959
2960	/*
2961	 * Nothing to do but wait for the init complete notification
2962	 * from the firmware.
2963	 */
2964	IWM_UNLOCK(sc);
2965	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2966	    IWM_MVM_UCODE_CALIB_TIMEOUT);
2967	IWM_LOCK(sc);
2968
2969
2970	goto out;
2971
2972error:
2973	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2974out:
2975	return ret;
2976}
2977
2978static int
2979iwm_mvm_config_ltr(struct iwm_softc *sc)
2980{
2981	struct iwm_ltr_config_cmd cmd = {
2982		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
2983	};
2984
2985	if (!sc->sc_ltr_enabled)
2986		return 0;
2987
2988	return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
2989}
2990
2991/*
2992 * receive side
2993 */
2994
2995/* (re)stock rx ring, called at init-time and at runtime */
2996static int
2997iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2998{
2999	struct iwm_rx_ring *ring = &sc->rxq;
3000	struct iwm_rx_data *data = &ring->data[idx];
3001	struct mbuf *m;
3002	bus_dmamap_t dmamap;
3003	bus_dma_segment_t seg;
3004	int nsegs, error;
3005
3006	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3007	if (m == NULL)
3008		return ENOBUFS;
3009
3010	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3011	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3012	    &seg, &nsegs, BUS_DMA_NOWAIT);
3013	if (error != 0) {
3014		device_printf(sc->sc_dev,
3015		    "%s: can't map mbuf, error %d\n", __func__, error);
3016		m_freem(m);
3017		return error;
3018	}
3019
3020	if (data->m != NULL)
3021		bus_dmamap_unload(ring->data_dmat, data->map);
3022
3023	/* Swap ring->spare_map with data->map */
3024	dmamap = data->map;
3025	data->map = ring->spare_map;
3026	ring->spare_map = dmamap;
3027
3028	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3029	data->m = m;
3030
3031	/* Update RX descriptor. */
3032	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3033	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3034	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3035	    BUS_DMASYNC_PREWRITE);
3036
3037	return 0;
3038}
3039
3040/* iwlwifi: mvm/rx.c */
3041/*
3042 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3043 * values are reported by the fw as positive values - need to negate
3044 * to obtain their dBM.  Account for missing antennas by replacing 0
3045 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3046 */
3047static int
3048iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3049{
3050	int energy_a, energy_b, energy_c, max_energy;
3051	uint32_t val;
3052
3053	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3054	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3055	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3056	energy_a = energy_a ? -energy_a : -256;
3057	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3058	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3059	energy_b = energy_b ? -energy_b : -256;
3060	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3061	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3062	energy_c = energy_c ? -energy_c : -256;
3063	max_energy = MAX(energy_a, energy_b);
3064	max_energy = MAX(max_energy, energy_c);
3065
3066	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3067	    "energy In A %d B %d C %d , and max %d\n",
3068	    energy_a, energy_b, energy_c, max_energy);
3069
3070	return max_energy;
3071}
3072
3073static void
3074iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3075{
3076	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3077
3078	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3079
3080	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3081}
3082
3083/*
3084 * Retrieve the average noise (in dBm) among receivers.
3085 */
3086static int
3087iwm_get_noise(struct iwm_softc *sc,
3088    const struct iwm_mvm_statistics_rx_non_phy *stats)
3089{
3090	int i, total, nbant, noise;
3091
3092	total = nbant = noise = 0;
3093	for (i = 0; i < 3; i++) {
3094		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3095		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3096		    __func__,
3097		    i,
3098		    noise);
3099
3100		if (noise) {
3101			total += noise;
3102			nbant++;
3103		}
3104	}
3105
3106	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3107	    __func__, nbant, total);
3108#if 0
3109	/* There should be at least one antenna but check anyway. */
3110	return (nbant == 0) ? -127 : (total / nbant) - 107;
3111#else
3112	/* For now, just hard-code it to -96 to be safe */
3113	return (-96);
3114#endif
3115}
3116
3117static void
3118iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3119{
3120	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3121
3122	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3123	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3124}
3125
3126/*
3127 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3128 *
3129 * Handles the actual data of the Rx packet from the fw
3130 */
3131static boolean_t
3132iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3133	boolean_t stolen)
3134{
3135	struct ieee80211com *ic = &sc->sc_ic;
3136	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3137	struct ieee80211_frame *wh;
3138	struct ieee80211_node *ni;
3139	struct ieee80211_rx_stats rxs;
3140	struct iwm_rx_phy_info *phy_info;
3141	struct iwm_rx_mpdu_res_start *rx_res;
3142	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3143	uint32_t len;
3144	uint32_t rx_pkt_status;
3145	int rssi;
3146
3147	phy_info = &sc->sc_last_phy_info;
3148	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3149	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3150	len = le16toh(rx_res->byte_count);
3151	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3152
3153	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3154		device_printf(sc->sc_dev,
3155		    "dsp size out of range [0,20]: %d\n",
3156		    phy_info->cfg_phy_cnt);
3157		goto fail;
3158	}
3159
3160	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3161	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3162		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3163		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3164		goto fail;
3165	}
3166
3167	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3168
3169	/* Map it to relative value */
3170	rssi = rssi - sc->sc_noise;
3171
3172	/* replenish ring for the buffer we're going to feed to the sharks */
3173	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3174		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3175		    __func__);
3176		goto fail;
3177	}
3178
3179	m->m_data = pkt->data + sizeof(*rx_res);
3180	m->m_pkthdr.len = m->m_len = len;
3181
3182	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3183	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3184
3185	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3186
3187	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3188	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3189	    __func__,
3190	    le16toh(phy_info->channel),
3191	    le16toh(phy_info->phy_flags));
3192
3193	/*
3194	 * Populate an RX state struct with the provided information.
3195	 */
3196	bzero(&rxs, sizeof(rxs));
3197	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3198	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3199	rxs.c_ieee = le16toh(phy_info->channel);
3200	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3201		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3202	} else {
3203		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3204	}
3205
3206	/* rssi is in 1/2db units */
3207	rxs.c_rssi = rssi * 2;
3208	rxs.c_nf = sc->sc_noise;
3209	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3210		if (ni)
3211			ieee80211_free_node(ni);
3212		goto fail;
3213	}
3214
3215	if (ieee80211_radiotap_active_vap(vap)) {
3216		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3217
3218		tap->wr_flags = 0;
3219		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3220			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3221		tap->wr_chan_freq = htole16(rxs.c_freq);
3222		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3223		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3224		tap->wr_dbm_antsignal = (int8_t)rssi;
3225		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3226		tap->wr_tsft = phy_info->system_timestamp;
3227		switch (phy_info->rate) {
3228		/* CCK rates. */
3229		case  10: tap->wr_rate =   2; break;
3230		case  20: tap->wr_rate =   4; break;
3231		case  55: tap->wr_rate =  11; break;
3232		case 110: tap->wr_rate =  22; break;
3233		/* OFDM rates. */
3234		case 0xd: tap->wr_rate =  12; break;
3235		case 0xf: tap->wr_rate =  18; break;
3236		case 0x5: tap->wr_rate =  24; break;
3237		case 0x7: tap->wr_rate =  36; break;
3238		case 0x9: tap->wr_rate =  48; break;
3239		case 0xb: tap->wr_rate =  72; break;
3240		case 0x1: tap->wr_rate =  96; break;
3241		case 0x3: tap->wr_rate = 108; break;
3242		/* Unknown rate: should not happen. */
3243		default:  tap->wr_rate =   0;
3244		}
3245	}
3246
3247	IWM_UNLOCK(sc);
3248	if (ni != NULL) {
3249		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3250		ieee80211_input_mimo(ni, m);
3251		ieee80211_free_node(ni);
3252	} else {
3253		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3254		ieee80211_input_mimo_all(ic, m);
3255	}
3256	IWM_LOCK(sc);
3257
3258	return TRUE;
3259
3260fail:
3261	counter_u64_add(ic->ic_ierrors, 1);
3262	return FALSE;
3263}
3264
3265static int
3266iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3267	struct iwm_node *in)
3268{
3269	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3270	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3271	struct ieee80211_node *ni = &in->in_ni;
3272	struct ieee80211vap *vap = ni->ni_vap;
3273	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3274	int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3275	boolean_t rate_matched;
3276	uint8_t tx_resp_rate;
3277
3278	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3279
3280	/* Update rate control statistics. */
3281	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3282	    __func__,
3283	    (int) le16toh(tx_resp->status.status),
3284	    (int) le16toh(tx_resp->status.sequence),
3285	    tx_resp->frame_count,
3286	    tx_resp->bt_kill_count,
3287	    tx_resp->failure_rts,
3288	    tx_resp->failure_frame,
3289	    le32toh(tx_resp->initial_rate),
3290	    (int) le16toh(tx_resp->wireless_media_time));
3291
3292	tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3293
3294	/* For rate control, ignore frames sent at different initial rate */
3295	rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3296
3297	if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3298		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3299		    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3300		    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3301	}
3302
3303	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3304		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
3305	txs->short_retries = tx_resp->failure_rts;
3306	txs->long_retries = tx_resp->failure_frame;
3307	if (status != IWM_TX_STATUS_SUCCESS &&
3308	    status != IWM_TX_STATUS_DIRECT_DONE) {
3309		switch (status) {
3310		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3311			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3312			break;
3313		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3314			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3315			break;
3316		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3317			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3318			break;
3319		default:
3320			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3321			break;
3322		}
3323	} else {
3324		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3325	}
3326
3327	if (rate_matched) {
3328		int rix;
3329		ieee80211_ratectl_tx_complete(ni, txs);
3330
3331		rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3332		new_rate = vap->iv_bss->ni_txrate;
3333		if (new_rate != 0 && new_rate != cur_rate) {
3334			struct iwm_node *in = IWM_NODE(vap->iv_bss);
3335			iwm_setrates(sc, in, rix);
3336			iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3337		}
3338 	}
3339
3340	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3341}
3342
3343static void
3344iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3345{
3346	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3347	int idx = cmd_hdr->idx;
3348	int qid = cmd_hdr->qid;
3349	struct iwm_tx_ring *ring = &sc->txq[qid];
3350	struct iwm_tx_data *txd = &ring->data[idx];
3351	struct iwm_node *in = txd->in;
3352	struct mbuf *m = txd->m;
3353	int status;
3354
3355	KASSERT(txd->done == 0, ("txd not done"));
3356	KASSERT(txd->in != NULL, ("txd without node"));
3357	KASSERT(txd->m != NULL, ("txd without mbuf"));
3358
3359	sc->sc_tx_timer = 0;
3360
3361	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3362
3363	/* Unmap and free mbuf. */
3364	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3365	bus_dmamap_unload(ring->data_dmat, txd->map);
3366
3367	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3368	    "free txd %p, in %p\n", txd, txd->in);
3369	txd->done = 1;
3370	txd->m = NULL;
3371	txd->in = NULL;
3372
3373	ieee80211_tx_complete(&in->in_ni, m, status);
3374
3375	if (--ring->queued < IWM_TX_RING_LOMARK) {
3376		sc->qfullmsk &= ~(1 << ring->qid);
3377		if (sc->qfullmsk == 0) {
3378			iwm_start(sc);
3379		}
3380	}
3381}
3382
3383/*
3384 * transmit side
3385 */
3386
3387/*
3388 * Process a "command done" firmware notification.  This is where we wakeup
3389 * processes waiting for a synchronous command completion.
3390 * from if_iwn
3391 */
3392static void
3393iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3394{
3395	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3396	struct iwm_tx_data *data;
3397
3398	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3399		return;	/* Not a command ack. */
3400	}
3401
3402	/* XXX wide commands? */
3403	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3404	    "cmd notification type 0x%x qid %d idx %d\n",
3405	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3406
3407	data = &ring->data[pkt->hdr.idx];
3408
3409	/* If the command was mapped in an mbuf, free it. */
3410	if (data->m != NULL) {
3411		bus_dmamap_sync(ring->data_dmat, data->map,
3412		    BUS_DMASYNC_POSTWRITE);
3413		bus_dmamap_unload(ring->data_dmat, data->map);
3414		m_freem(data->m);
3415		data->m = NULL;
3416	}
3417	wakeup(&ring->desc[pkt->hdr.idx]);
3418
3419	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3420		device_printf(sc->sc_dev,
3421		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3422		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3423		/* XXX call iwm_force_nmi() */
3424	}
3425
3426	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3427	ring->queued--;
3428	if (ring->queued == 0)
3429		iwm_pcie_clear_cmd_in_flight(sc);
3430}
3431
3432#if 0
3433/*
3434 * necessary only for block ack mode
3435 */
3436void
3437iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3438	uint16_t len)
3439{
3440	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3441	uint16_t w_val;
3442
3443	scd_bc_tbl = sc->sched_dma.vaddr;
3444
3445	len += 8; /* magic numbers came naturally from paris */
3446	len = roundup(len, 4) / 4;
3447
3448	w_val = htole16(sta_id << 12 | len);
3449
3450	/* Update TX scheduler. */
3451	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3452	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3453	    BUS_DMASYNC_PREWRITE);
3454
3455	/* I really wonder what this is ?!? */
3456	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3457		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3458		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3459		    BUS_DMASYNC_PREWRITE);
3460	}
3461}
3462#endif
3463
3464static int
3465iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3466{
3467	int i;
3468
3469	for (i = 0; i < nitems(iwm_rates); i++) {
3470		if (iwm_rates[i].rate == rate)
3471			return (i);
3472	}
3473	/* XXX error? */
3474	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3475	    "%s: couldn't find an entry for rate=%d\n",
3476	    __func__,
3477	    rate);
3478	return (0);
3479}
3480
3481/*
3482 * Fill in the rate related information for a transmit command.
3483 */
3484static const struct iwm_rate *
3485iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3486	struct mbuf *m, struct iwm_tx_cmd *tx)
3487{
3488	struct ieee80211_node *ni = &in->in_ni;
3489	struct ieee80211_frame *wh;
3490	const struct ieee80211_txparam *tp = ni->ni_txparms;
3491	const struct iwm_rate *rinfo;
3492	int type;
3493	int ridx, rate_flags;
3494
3495	wh = mtod(m, struct ieee80211_frame *);
3496	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3497
3498	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3499	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3500
3501	if (type == IEEE80211_FC0_TYPE_MGT ||
3502	    type == IEEE80211_FC0_TYPE_CTL ||
3503	    (m->m_flags & M_EAPOL) != 0) {
3504		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3505		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3506		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3507	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3508		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3509		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3510		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3511	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3512		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3513		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3514		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3515	} else {
3516		/* for data frames, use RS table */
3517		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3518		ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3519		if (ridx == -1)
3520			ridx = 0;
3521
3522		/* This is the index into the programmed table */
3523		tx->initial_rate_index = 0;
3524		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3525	}
3526
3527	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3528	    "%s: frame type=%d txrate %d\n",
3529	        __func__, type, iwm_rates[ridx].rate);
3530
3531	rinfo = &iwm_rates[ridx];
3532
3533	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3534	    __func__, ridx,
3535	    rinfo->rate,
3536	    !! (IWM_RIDX_IS_CCK(ridx))
3537	    );
3538
3539	/* XXX TODO: hard-coded TX antenna? */
3540	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3541	if (IWM_RIDX_IS_CCK(ridx))
3542		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3543	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3544
3545	return rinfo;
3546}
3547
3548#define TB0_SIZE 16
3549static int
3550iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3551{
3552	struct ieee80211com *ic = &sc->sc_ic;
3553	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3554	struct iwm_node *in = IWM_NODE(ni);
3555	struct iwm_tx_ring *ring;
3556	struct iwm_tx_data *data;
3557	struct iwm_tfd *desc;
3558	struct iwm_device_cmd *cmd;
3559	struct iwm_tx_cmd *tx;
3560	struct ieee80211_frame *wh;
3561	struct ieee80211_key *k = NULL;
3562	struct mbuf *m1;
3563	const struct iwm_rate *rinfo;
3564	uint32_t flags;
3565	u_int hdrlen;
3566	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3567	int nsegs;
3568	uint8_t tid, type;
3569	int i, totlen, error, pad;
3570
3571	wh = mtod(m, struct ieee80211_frame *);
3572	hdrlen = ieee80211_anyhdrsize(wh);
3573	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3574	tid = 0;
3575	ring = &sc->txq[ac];
3576	desc = &ring->desc[ring->cur];
3577	memset(desc, 0, sizeof(*desc));
3578	data = &ring->data[ring->cur];
3579
3580	/* Fill out iwm_tx_cmd to send to the firmware */
3581	cmd = &ring->cmd[ring->cur];
3582	cmd->hdr.code = IWM_TX_CMD;
3583	cmd->hdr.flags = 0;
3584	cmd->hdr.qid = ring->qid;
3585	cmd->hdr.idx = ring->cur;
3586
3587	tx = (void *)cmd->data;
3588	memset(tx, 0, sizeof(*tx));
3589
3590	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3591
3592	/* Encrypt the frame if need be. */
3593	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3594		/* Retrieve key for TX && do software encryption. */
3595		k = ieee80211_crypto_encap(ni, m);
3596		if (k == NULL) {
3597			m_freem(m);
3598			return (ENOBUFS);
3599		}
3600		/* 802.11 header may have moved. */
3601		wh = mtod(m, struct ieee80211_frame *);
3602	}
3603
3604	if (ieee80211_radiotap_active_vap(vap)) {
3605		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3606
3607		tap->wt_flags = 0;
3608		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3609		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3610		tap->wt_rate = rinfo->rate;
3611		if (k != NULL)
3612			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3613		ieee80211_radiotap_tx(vap, m);
3614	}
3615
3616
3617	totlen = m->m_pkthdr.len;
3618
3619	flags = 0;
3620	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3621		flags |= IWM_TX_CMD_FLG_ACK;
3622	}
3623
3624	if (type == IEEE80211_FC0_TYPE_DATA
3625	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3626	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3627		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3628	}
3629
3630	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3631	    type != IEEE80211_FC0_TYPE_DATA)
3632		tx->sta_id = sc->sc_aux_sta.sta_id;
3633	else
3634		tx->sta_id = IWM_STATION_ID;
3635
3636	if (type == IEEE80211_FC0_TYPE_MGT) {
3637		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3638
3639		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3640		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3641			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3642		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3643			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3644		} else {
3645			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3646		}
3647	} else {
3648		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3649	}
3650
3651	if (hdrlen & 3) {
3652		/* First segment length must be a multiple of 4. */
3653		flags |= IWM_TX_CMD_FLG_MH_PAD;
3654		pad = 4 - (hdrlen & 3);
3655	} else
3656		pad = 0;
3657
3658	tx->driver_txop = 0;
3659	tx->next_frame_len = 0;
3660
3661	tx->len = htole16(totlen);
3662	tx->tid_tspec = tid;
3663	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3664
3665	/* Set physical address of "scratch area". */
3666	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3667	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3668
3669	/* Copy 802.11 header in TX command. */
3670	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3671
3672	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3673
3674	tx->sec_ctl = 0;
3675	tx->tx_flags |= htole32(flags);
3676
3677	/* Trim 802.11 header. */
3678	m_adj(m, hdrlen);
3679	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3680	    segs, &nsegs, BUS_DMA_NOWAIT);
3681	if (error != 0) {
3682		if (error != EFBIG) {
3683			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3684			    error);
3685			m_freem(m);
3686			return error;
3687		}
3688		/* Too many DMA segments, linearize mbuf. */
3689		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3690		if (m1 == NULL) {
3691			device_printf(sc->sc_dev,
3692			    "%s: could not defrag mbuf\n", __func__);
3693			m_freem(m);
3694			return (ENOBUFS);
3695		}
3696		m = m1;
3697
3698		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3699		    segs, &nsegs, BUS_DMA_NOWAIT);
3700		if (error != 0) {
3701			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3702			    error);
3703			m_freem(m);
3704			return error;
3705		}
3706	}
3707	data->m = m;
3708	data->in = in;
3709	data->done = 0;
3710
3711	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3712	    "sending txd %p, in %p\n", data, data->in);
3713	KASSERT(data->in != NULL, ("node is NULL"));
3714
3715	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3716	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3717	    ring->qid, ring->cur, totlen, nsegs,
3718	    le32toh(tx->tx_flags),
3719	    le32toh(tx->rate_n_flags),
3720	    tx->initial_rate_index
3721	    );
3722
3723	/* Fill TX descriptor. */
3724	desc->num_tbs = 2 + nsegs;
3725
3726	desc->tbs[0].lo = htole32(data->cmd_paddr);
3727	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3728	    (TB0_SIZE << 4);
3729	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3730	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3731	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3732	      + hdrlen + pad - TB0_SIZE) << 4);
3733
3734	/* Other DMA segments are for data payload. */
3735	for (i = 0; i < nsegs; i++) {
3736		seg = &segs[i];
3737		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3738		desc->tbs[i+2].hi_n_len = \
3739		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3740		    | ((seg->ds_len) << 4);
3741	}
3742
3743	bus_dmamap_sync(ring->data_dmat, data->map,
3744	    BUS_DMASYNC_PREWRITE);
3745	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3746	    BUS_DMASYNC_PREWRITE);
3747	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3748	    BUS_DMASYNC_PREWRITE);
3749
3750#if 0
3751	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3752#endif
3753
3754	/* Kick TX ring. */
3755	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3756	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3757
3758	/* Mark TX ring as full if we reach a certain threshold. */
3759	if (++ring->queued > IWM_TX_RING_HIMARK) {
3760		sc->qfullmsk |= 1 << ring->qid;
3761	}
3762
3763	return 0;
3764}
3765
3766static int
3767iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3768    const struct ieee80211_bpf_params *params)
3769{
3770	struct ieee80211com *ic = ni->ni_ic;
3771	struct iwm_softc *sc = ic->ic_softc;
3772	int error = 0;
3773
3774	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3775	    "->%s begin\n", __func__);
3776
3777	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3778		m_freem(m);
3779		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3780		    "<-%s not RUNNING\n", __func__);
3781		return (ENETDOWN);
3782        }
3783
3784	IWM_LOCK(sc);
3785	/* XXX fix this */
3786        if (params == NULL) {
3787		error = iwm_tx(sc, m, ni, 0);
3788	} else {
3789		error = iwm_tx(sc, m, ni, 0);
3790	}
3791	if (sc->sc_tx_timer == 0)
3792		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3793	sc->sc_tx_timer = 5;
3794	IWM_UNLOCK(sc);
3795
3796        return (error);
3797}
3798
3799/*
3800 * mvm/tx.c
3801 */
3802
3803/*
3804 * Note that there are transports that buffer frames before they reach
3805 * the firmware. This means that after flush_tx_path is called, the
3806 * queue might not be empty. The race-free way to handle this is to:
3807 * 1) set the station as draining
3808 * 2) flush the Tx path
3809 * 3) wait for the transport queues to be empty
3810 */
3811int
3812iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3813{
3814	int ret;
3815	struct iwm_tx_path_flush_cmd flush_cmd = {
3816		.queues_ctl = htole32(tfd_msk),
3817		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3818	};
3819
3820	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3821	    sizeof(flush_cmd), &flush_cmd);
3822	if (ret)
3823                device_printf(sc->sc_dev,
3824		    "Flushing tx queue failed: %d\n", ret);
3825	return ret;
3826}
3827
3828/*
3829 * BEGIN mvm/quota.c
3830 */
3831
3832static int
3833iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3834{
3835	struct iwm_time_quota_cmd cmd;
3836	int i, idx, ret, num_active_macs, quota, quota_rem;
3837	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3838	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3839	uint16_t id;
3840
3841	memset(&cmd, 0, sizeof(cmd));
3842
3843	/* currently, PHY ID == binding ID */
3844	if (ivp) {
3845		id = ivp->phy_ctxt->id;
3846		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3847		colors[id] = ivp->phy_ctxt->color;
3848
3849		if (1)
3850			n_ifs[id] = 1;
3851	}
3852
3853	/*
3854	 * The FW's scheduling session consists of
3855	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3856	 * equally between all the bindings that require quota
3857	 */
3858	num_active_macs = 0;
3859	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3860		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3861		num_active_macs += n_ifs[i];
3862	}
3863
3864	quota = 0;
3865	quota_rem = 0;
3866	if (num_active_macs) {
3867		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3868		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3869	}
3870
3871	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3872		if (colors[i] < 0)
3873			continue;
3874
3875		cmd.quotas[idx].id_and_color =
3876			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3877
3878		if (n_ifs[i] <= 0) {
3879			cmd.quotas[idx].quota = htole32(0);
3880			cmd.quotas[idx].max_duration = htole32(0);
3881		} else {
3882			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3883			cmd.quotas[idx].max_duration = htole32(0);
3884		}
3885		idx++;
3886	}
3887
3888	/* Give the remainder of the session to the first binding */
3889	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3890
3891	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3892	    sizeof(cmd), &cmd);
3893	if (ret)
3894		device_printf(sc->sc_dev,
3895		    "%s: Failed to send quota: %d\n", __func__, ret);
3896	return ret;
3897}
3898
3899/*
3900 * END mvm/quota.c
3901 */
3902
3903/*
3904 * ieee80211 routines
3905 */
3906
3907/*
3908 * Change to AUTH state in 80211 state machine.  Roughly matches what
3909 * Linux does in bss_info_changed().
3910 */
3911static int
3912iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3913{
3914	struct ieee80211_node *ni;
3915	struct iwm_node *in;
3916	struct iwm_vap *iv = IWM_VAP(vap);
3917	uint32_t duration;
3918	int error;
3919
3920	/*
3921	 * XXX i have a feeling that the vap node is being
3922	 * freed from underneath us. Grr.
3923	 */
3924	ni = ieee80211_ref_node(vap->iv_bss);
3925	in = IWM_NODE(ni);
3926	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3927	    "%s: called; vap=%p, bss ni=%p\n",
3928	    __func__,
3929	    vap,
3930	    ni);
3931	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
3932	    __func__, ether_sprintf(ni->ni_bssid));
3933
3934	in->in_assoc = 0;
3935	iv->iv_auth = 1;
3936
3937	/*
3938	 * Firmware bug - it'll crash if the beacon interval is less
3939	 * than 16. We can't avoid connecting at all, so refuse the
3940	 * station state change, this will cause net80211 to abandon
3941	 * attempts to connect to this AP, and eventually wpa_s will
3942	 * blacklist the AP...
3943	 */
3944	if (ni->ni_intval < 16) {
3945		device_printf(sc->sc_dev,
3946		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3947		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3948		error = EINVAL;
3949		goto out;
3950	}
3951
3952	error = iwm_allow_mcast(vap, sc);
3953	if (error) {
3954		device_printf(sc->sc_dev,
3955		    "%s: failed to set multicast\n", __func__);
3956		goto out;
3957	}
3958
3959	/*
3960	 * This is where it deviates from what Linux does.
3961	 *
3962	 * Linux iwlwifi doesn't reset the nic each time, nor does it
3963	 * call ctxt_add() here.  Instead, it adds it during vap creation,
3964	 * and always does a mac_ctx_changed().
3965	 *
3966	 * The openbsd port doesn't attempt to do that - it reset things
3967	 * at odd states and does the add here.
3968	 *
3969	 * So, until the state handling is fixed (ie, we never reset
3970	 * the NIC except for a firmware failure, which should drag
3971	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3972	 * contexts that are required), let's do a dirty hack here.
3973	 */
3974	if (iv->is_uploaded) {
3975		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3976			device_printf(sc->sc_dev,
3977			    "%s: failed to update MAC\n", __func__);
3978			goto out;
3979		}
3980	} else {
3981		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3982			device_printf(sc->sc_dev,
3983			    "%s: failed to add MAC\n", __func__);
3984			goto out;
3985		}
3986	}
3987	sc->sc_firmware_state = 1;
3988
3989	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3990	    in->in_ni.ni_chan, 1, 1)) != 0) {
3991		device_printf(sc->sc_dev,
3992		    "%s: failed update phy ctxt\n", __func__);
3993		goto out;
3994	}
3995	iv->phy_ctxt = &sc->sc_phyctxt[0];
3996
3997	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
3998		device_printf(sc->sc_dev,
3999		    "%s: binding update cmd\n", __func__);
4000		goto out;
4001	}
4002	sc->sc_firmware_state = 2;
4003	/*
4004	 * Authentication becomes unreliable when powersaving is left enabled
4005	 * here. Powersaving will be activated again when association has
4006	 * finished or is aborted.
4007	 */
4008	iv->ps_disabled = TRUE;
4009	error = iwm_mvm_power_update_mac(sc);
4010	iv->ps_disabled = FALSE;
4011	if (error != 0) {
4012		device_printf(sc->sc_dev,
4013		    "%s: failed to update power management\n",
4014		    __func__);
4015		goto out;
4016	}
4017	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4018		device_printf(sc->sc_dev,
4019		    "%s: failed to add sta\n", __func__);
4020		goto out;
4021	}
4022	sc->sc_firmware_state = 3;
4023
4024	/*
4025	 * Prevent the FW from wandering off channel during association
4026	 * by "protecting" the session with a time event.
4027	 */
4028	/* XXX duration is in units of TU, not MS */
4029	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4030	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4031
4032	error = 0;
4033out:
4034	if (error != 0)
4035		iv->iv_auth = 0;
4036	ieee80211_free_node(ni);
4037	return (error);
4038}
4039
4040static struct ieee80211_node *
4041iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4042{
4043	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4044	    M_NOWAIT | M_ZERO);
4045}
4046
4047static uint8_t
4048iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4049{
4050	uint8_t plcp = rate_n_flags & 0xff;
4051	int i;
4052
4053	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4054		if (iwm_rates[i].plcp == plcp)
4055			return iwm_rates[i].rate;
4056	}
4057	return 0;
4058}
4059
4060uint8_t
4061iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4062{
4063	int i;
4064	uint8_t rval;
4065
4066	for (i = 0; i < rs->rs_nrates; i++) {
4067		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4068		if (rval == iwm_rates[ridx].rate)
4069			return rs->rs_rates[i];
4070	}
4071
4072	return 0;
4073}
4074
4075static int
4076iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4077{
4078	int i;
4079
4080	for (i = 0; i <= IWM_RIDX_MAX; i++) {
4081		if (iwm_rates[i].rate == rate)
4082			return i;
4083	}
4084
4085	device_printf(sc->sc_dev,
4086	    "%s: WARNING: device rate for %u not found!\n",
4087	    __func__, rate);
4088
4089	return -1;
4090}
4091
4092
4093static void
4094iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4095{
4096	struct ieee80211_node *ni = &in->in_ni;
4097	struct iwm_lq_cmd *lq = &in->in_lq;
4098	struct ieee80211_rateset *rs = &ni->ni_rates;
4099	int nrates = rs->rs_nrates;
4100	int i, ridx, tab = 0;
4101//	int txant = 0;
4102
4103	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4104
4105	if (nrates > nitems(lq->rs_table)) {
4106		device_printf(sc->sc_dev,
4107		    "%s: node supports %d rates, driver handles "
4108		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4109		return;
4110	}
4111	if (nrates == 0) {
4112		device_printf(sc->sc_dev,
4113		    "%s: node supports 0 rates, odd!\n", __func__);
4114		return;
4115	}
4116	nrates = imin(rix + 1, nrates);
4117
4118	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4119	    "%s: nrates=%d\n", __func__, nrates);
4120
4121	/* then construct a lq_cmd based on those */
4122	memset(lq, 0, sizeof(*lq));
4123	lq->sta_id = IWM_STATION_ID;
4124
4125	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4126	if (ni->ni_flags & IEEE80211_NODE_HT)
4127		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4128
4129	/*
4130	 * are these used? (we don't do SISO or MIMO)
4131	 * need to set them to non-zero, though, or we get an error.
4132	 */
4133	lq->single_stream_ant_msk = 1;
4134	lq->dual_stream_ant_msk = 1;
4135
4136	/*
4137	 * Build the actual rate selection table.
4138	 * The lowest bits are the rates.  Additionally,
4139	 * CCK needs bit 9 to be set.  The rest of the bits
4140	 * we add to the table select the tx antenna
4141	 * Note that we add the rates in the highest rate first
4142	 * (opposite of ni_rates).
4143	 */
4144	for (i = 0; i < nrates; i++) {
4145		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4146		int nextant;
4147
4148		/* Map 802.11 rate to HW rate index. */
4149		ridx = iwm_rate2ridx(sc, rate);
4150		if (ridx == -1)
4151			continue;
4152
4153#if 0
4154		if (txant == 0)
4155			txant = iwm_mvm_get_valid_tx_ant(sc);
4156		nextant = 1<<(ffs(txant)-1);
4157		txant &= ~nextant;
4158#else
4159		nextant = iwm_mvm_get_valid_tx_ant(sc);
4160#endif
4161		tab = iwm_rates[ridx].plcp;
4162		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4163		if (IWM_RIDX_IS_CCK(ridx))
4164			tab |= IWM_RATE_MCS_CCK_MSK;
4165		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4166		    "station rate i=%d, rate=%d, hw=%x\n",
4167		    i, iwm_rates[ridx].rate, tab);
4168		lq->rs_table[i] = htole32(tab);
4169	}
4170	/* then fill the rest with the lowest possible rate */
4171	for (i = nrates; i < nitems(lq->rs_table); i++) {
4172		KASSERT(tab != 0, ("invalid tab"));
4173		lq->rs_table[i] = htole32(tab);
4174	}
4175}
4176
4177static int
4178iwm_media_change(struct ifnet *ifp)
4179{
4180	struct ieee80211vap *vap = ifp->if_softc;
4181	struct ieee80211com *ic = vap->iv_ic;
4182	struct iwm_softc *sc = ic->ic_softc;
4183	int error;
4184
4185	error = ieee80211_media_change(ifp);
4186	if (error != ENETRESET)
4187		return error;
4188
4189	IWM_LOCK(sc);
4190	if (ic->ic_nrunning > 0) {
4191		iwm_stop(sc);
4192		iwm_init(sc);
4193	}
4194	IWM_UNLOCK(sc);
4195	return error;
4196}
4197
4198static void
4199iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4200{
4201	struct iwm_vap *ivp = IWM_VAP(vap);
4202	int error;
4203
4204	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
4205	sc->sc_tx_timer = 0;
4206
4207	ivp->iv_auth = 0;
4208	if (sc->sc_firmware_state == 3) {
4209		iwm_xmit_queue_drain(sc);
4210//		iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4211		error = iwm_mvm_rm_sta(sc, vap, TRUE);
4212		if (error) {
4213			device_printf(sc->sc_dev,
4214			    "%s: Failed to remove station: %d\n",
4215			    __func__, error);
4216		}
4217	}
4218	if (sc->sc_firmware_state == 3) {
4219		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4220		if (error) {
4221			device_printf(sc->sc_dev,
4222			    "%s: Failed to change mac context: %d\n",
4223			    __func__, error);
4224		}
4225	}
4226	if (sc->sc_firmware_state == 3) {
4227		error = iwm_mvm_sf_update(sc, vap, FALSE);
4228		if (error) {
4229			device_printf(sc->sc_dev,
4230			    "%s: Failed to update smart FIFO: %d\n",
4231			    __func__, error);
4232		}
4233	}
4234	if (sc->sc_firmware_state == 3) {
4235		error = iwm_mvm_rm_sta_id(sc, vap);
4236		if (error) {
4237			device_printf(sc->sc_dev,
4238			    "%s: Failed to remove station id: %d\n",
4239			    __func__, error);
4240		}
4241	}
4242	if (sc->sc_firmware_state == 3) {
4243		error = iwm_mvm_update_quotas(sc, NULL);
4244		if (error) {
4245			device_printf(sc->sc_dev,
4246			    "%s: Failed to update PHY quota: %d\n",
4247			    __func__, error);
4248		}
4249	}
4250	if (sc->sc_firmware_state == 3) {
4251		/* XXX Might need to specify bssid correctly. */
4252		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4253		if (error) {
4254			device_printf(sc->sc_dev,
4255			    "%s: Failed to change mac context: %d\n",
4256			    __func__, error);
4257		}
4258	}
4259	if (sc->sc_firmware_state == 3) {
4260		sc->sc_firmware_state = 2;
4261	}
4262	if (sc->sc_firmware_state > 1) {
4263		error = iwm_mvm_binding_remove_vif(sc, ivp);
4264		if (error) {
4265			device_printf(sc->sc_dev,
4266			    "%s: Failed to remove channel ctx: %d\n",
4267			    __func__, error);
4268		}
4269	}
4270	if (sc->sc_firmware_state > 1) {
4271		sc->sc_firmware_state = 1;
4272	}
4273	ivp->phy_ctxt = NULL;
4274	if (sc->sc_firmware_state > 0) {
4275		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4276		if (error) {
4277			device_printf(sc->sc_dev,
4278			    "%s: Failed to change mac context: %d\n",
4279			    __func__, error);
4280		}
4281	}
4282	if (sc->sc_firmware_state > 0) {
4283		error = iwm_mvm_power_update_mac(sc);
4284		if (error != 0) {
4285			device_printf(sc->sc_dev,
4286			    "%s: failed to update power management\n",
4287			    __func__);
4288		}
4289	}
4290	sc->sc_firmware_state = 0;
4291}
4292
4293static int
4294iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4295{
4296	struct iwm_vap *ivp = IWM_VAP(vap);
4297	struct ieee80211com *ic = vap->iv_ic;
4298	struct iwm_softc *sc = ic->ic_softc;
4299	struct iwm_node *in;
4300	int error;
4301
4302	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4303	    "switching state %s -> %s arg=0x%x\n",
4304	    ieee80211_state_name[vap->iv_state],
4305	    ieee80211_state_name[nstate],
4306	    arg);
4307
4308	IEEE80211_UNLOCK(ic);
4309	IWM_LOCK(sc);
4310
4311	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4312	    (nstate == IEEE80211_S_AUTH ||
4313	     nstate == IEEE80211_S_ASSOC ||
4314	     nstate == IEEE80211_S_RUN)) {
4315		/* Stop blinking for a scan, when authenticating. */
4316		iwm_led_blink_stop(sc);
4317	}
4318
4319	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4320		iwm_mvm_led_disable(sc);
4321		/* disable beacon filtering if we're hopping out of RUN */
4322		iwm_mvm_disable_beacon_filter(sc);
4323		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4324			in->in_assoc = 0;
4325	}
4326
4327	if ((vap->iv_state == IEEE80211_S_AUTH ||
4328	     vap->iv_state == IEEE80211_S_ASSOC ||
4329	     vap->iv_state == IEEE80211_S_RUN) &&
4330	    (nstate == IEEE80211_S_INIT ||
4331	     nstate == IEEE80211_S_SCAN ||
4332	     nstate == IEEE80211_S_AUTH)) {
4333		iwm_mvm_stop_session_protection(sc, ivp);
4334	}
4335
4336	if ((vap->iv_state == IEEE80211_S_RUN ||
4337	     vap->iv_state == IEEE80211_S_ASSOC) &&
4338	    nstate == IEEE80211_S_INIT) {
4339		/*
4340		 * In this case, iv_newstate() wants to send an 80211 frame on
4341		 * the network that we are leaving. So we need to call it,
4342		 * before tearing down all the firmware state.
4343		 */
4344		IWM_UNLOCK(sc);
4345		IEEE80211_LOCK(ic);
4346		ivp->iv_newstate(vap, nstate, arg);
4347		IEEE80211_UNLOCK(ic);
4348		IWM_LOCK(sc);
4349		iwm_bring_down_firmware(sc, vap);
4350		IWM_UNLOCK(sc);
4351		IEEE80211_LOCK(ic);
4352		return 0;
4353	}
4354
4355	switch (nstate) {
4356	case IEEE80211_S_INIT:
4357	case IEEE80211_S_SCAN:
4358		break;
4359
4360	case IEEE80211_S_AUTH:
4361		iwm_bring_down_firmware(sc, vap);
4362		if ((error = iwm_auth(vap, sc)) != 0) {
4363			device_printf(sc->sc_dev,
4364			    "%s: could not move to auth state: %d\n",
4365			    __func__, error);
4366			iwm_bring_down_firmware(sc, vap);
4367			IWM_UNLOCK(sc);
4368			IEEE80211_LOCK(ic);
4369			return 1;
4370		}
4371		break;
4372
4373	case IEEE80211_S_ASSOC:
4374		/*
4375		 * EBS may be disabled due to previous failures reported by FW.
4376		 * Reset EBS status here assuming environment has been changed.
4377		 */
4378		sc->last_ebs_successful = TRUE;
4379		break;
4380
4381	case IEEE80211_S_RUN:
4382		in = IWM_NODE(vap->iv_bss);
4383		/* Update the association state, now we have it all */
4384		/* (eg associd comes in at this point */
4385		error = iwm_mvm_update_sta(sc, in);
4386		if (error != 0) {
4387			device_printf(sc->sc_dev,
4388			    "%s: failed to update STA\n", __func__);
4389			IWM_UNLOCK(sc);
4390			IEEE80211_LOCK(ic);
4391			return error;
4392		}
4393		in->in_assoc = 1;
4394		error = iwm_mvm_mac_ctxt_changed(sc, vap);
4395		if (error != 0) {
4396			device_printf(sc->sc_dev,
4397			    "%s: failed to update MAC: %d\n", __func__, error);
4398		}
4399
4400		iwm_mvm_sf_update(sc, vap, FALSE);
4401		iwm_mvm_enable_beacon_filter(sc, ivp);
4402		iwm_mvm_power_update_mac(sc);
4403		iwm_mvm_update_quotas(sc, ivp);
4404		{
4405		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4406		iwm_setrates(sc, in, rix);
4407
4408		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4409			device_printf(sc->sc_dev,
4410			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4411		}
4412
4413		iwm_mvm_led_enable(sc);
4414		}
4415		break;
4416
4417	default:
4418		break;
4419	}
4420	IWM_UNLOCK(sc);
4421	IEEE80211_LOCK(ic);
4422
4423	return (ivp->iv_newstate(vap, nstate, arg));
4424}
4425
4426void
4427iwm_endscan_cb(void *arg, int pending)
4428{
4429	struct iwm_softc *sc = arg;
4430	struct ieee80211com *ic = &sc->sc_ic;
4431
4432	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4433	    "%s: scan ended\n",
4434	    __func__);
4435
4436	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4437}
4438
4439static int
4440iwm_send_bt_init_conf(struct iwm_softc *sc)
4441{
4442	struct iwm_bt_coex_cmd bt_cmd;
4443
4444	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4445	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4446
4447	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4448	    &bt_cmd);
4449}
4450
4451static boolean_t
4452iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4453{
4454	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4455	boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4456					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4457
4458	if (iwm_lar_disable)
4459		return FALSE;
4460
4461	/*
4462	 * Enable LAR only if it is supported by the FW (TLV) &&
4463	 * enabled in the NVM
4464	 */
4465	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4466		return nvm_lar && tlv_lar;
4467	else
4468		return tlv_lar;
4469}
4470
4471static boolean_t
4472iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4473{
4474	return fw_has_api(&sc->sc_fw.ucode_capa,
4475			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4476	       fw_has_capa(&sc->sc_fw.ucode_capa,
4477			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4478}
4479
4480static int
4481iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4482{
4483	struct iwm_mcc_update_cmd mcc_cmd;
4484	struct iwm_host_cmd hcmd = {
4485		.id = IWM_MCC_UPDATE_CMD,
4486		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4487		.data = { &mcc_cmd },
4488	};
4489	int ret;
4490#ifdef IWM_DEBUG
4491	struct iwm_rx_packet *pkt;
4492	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4493	struct iwm_mcc_update_resp *mcc_resp;
4494	int n_channels;
4495	uint16_t mcc;
4496#endif
4497	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
4498	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4499
4500	if (!iwm_mvm_is_lar_supported(sc)) {
4501		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4502		    __func__);
4503		return 0;
4504	}
4505
4506	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4507	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4508	if (iwm_mvm_is_wifi_mcc_supported(sc))
4509		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4510	else
4511		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4512
4513	if (resp_v2)
4514		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4515	else
4516		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4517
4518	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4519	    "send MCC update to FW with '%c%c' src = %d\n",
4520	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4521
4522	ret = iwm_send_cmd(sc, &hcmd);
4523	if (ret)
4524		return ret;
4525
4526#ifdef IWM_DEBUG
4527	pkt = hcmd.resp_pkt;
4528
4529	/* Extract MCC response */
4530	if (resp_v2) {
4531		mcc_resp = (void *)pkt->data;
4532		mcc = mcc_resp->mcc;
4533		n_channels =  le32toh(mcc_resp->n_channels);
4534	} else {
4535		mcc_resp_v1 = (void *)pkt->data;
4536		mcc = mcc_resp_v1->mcc;
4537		n_channels =  le32toh(mcc_resp_v1->n_channels);
4538	}
4539
4540	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4541	if (mcc == 0)
4542		mcc = 0x3030;  /* "00" - world */
4543
4544	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4545	    "regulatory domain '%c%c' (%d channels available)\n",
4546	    mcc >> 8, mcc & 0xff, n_channels);
4547#endif
4548	iwm_free_resp(sc, &hcmd);
4549
4550	return 0;
4551}
4552
4553static void
4554iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4555{
4556	struct iwm_host_cmd cmd = {
4557		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4558		.len = { sizeof(uint32_t), },
4559		.data = { &backoff, },
4560	};
4561
4562	if (iwm_send_cmd(sc, &cmd) != 0) {
4563		device_printf(sc->sc_dev,
4564		    "failed to change thermal tx backoff\n");
4565	}
4566}
4567
4568static int
4569iwm_init_hw(struct iwm_softc *sc)
4570{
4571	struct ieee80211com *ic = &sc->sc_ic;
4572	int error, i, ac;
4573
4574	sc->sf_state = IWM_SF_UNINIT;
4575
4576	if ((error = iwm_start_hw(sc)) != 0) {
4577		printf("iwm_start_hw: failed %d\n", error);
4578		return error;
4579	}
4580
4581	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4582		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4583		return error;
4584	}
4585
4586	/*
4587	 * should stop and start HW since that INIT
4588	 * image just loaded
4589	 */
4590	iwm_stop_device(sc);
4591	sc->sc_ps_disabled = FALSE;
4592	if ((error = iwm_start_hw(sc)) != 0) {
4593		device_printf(sc->sc_dev, "could not initialize hardware\n");
4594		return error;
4595	}
4596
4597	/* omstart, this time with the regular firmware */
4598	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4599	if (error) {
4600		device_printf(sc->sc_dev, "could not load firmware\n");
4601		goto error;
4602	}
4603
4604	error = iwm_mvm_sf_update(sc, NULL, FALSE);
4605	if (error)
4606		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4607
4608	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4609		device_printf(sc->sc_dev, "bt init conf failed\n");
4610		goto error;
4611	}
4612
4613	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4614	if (error != 0) {
4615		device_printf(sc->sc_dev, "antenna config failed\n");
4616		goto error;
4617	}
4618
4619	/* Send phy db control command and then phy db calibration */
4620	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4621		goto error;
4622
4623	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4624		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4625		goto error;
4626	}
4627
4628	/* Add auxiliary station for scanning */
4629	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4630		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4631		goto error;
4632	}
4633
4634	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4635		/*
4636		 * The channel used here isn't relevant as it's
4637		 * going to be overwritten in the other flows.
4638		 * For now use the first channel we have.
4639		 */
4640		if ((error = iwm_mvm_phy_ctxt_add(sc,
4641		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4642			goto error;
4643	}
4644
4645	/* Initialize tx backoffs to the minimum. */
4646	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4647		iwm_mvm_tt_tx_backoff(sc, 0);
4648
4649	if (iwm_mvm_config_ltr(sc) != 0)
4650		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4651
4652	error = iwm_mvm_power_update_device(sc);
4653	if (error)
4654		goto error;
4655
4656	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4657		goto error;
4658
4659	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4660		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4661			goto error;
4662	}
4663
4664	/* Enable Tx queues. */
4665	for (ac = 0; ac < WME_NUM_AC; ac++) {
4666		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4667		    iwm_mvm_ac_to_tx_fifo[ac]);
4668		if (error)
4669			goto error;
4670	}
4671
4672	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4673		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4674		goto error;
4675	}
4676
4677	return 0;
4678
4679 error:
4680	iwm_stop_device(sc);
4681	return error;
4682}
4683
4684/* Allow multicast from our BSSID. */
4685static int
4686iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4687{
4688	struct ieee80211_node *ni = vap->iv_bss;
4689	struct iwm_mcast_filter_cmd *cmd;
4690	size_t size;
4691	int error;
4692
4693	size = roundup(sizeof(*cmd), 4);
4694	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4695	if (cmd == NULL)
4696		return ENOMEM;
4697	cmd->filter_own = 1;
4698	cmd->port_id = 0;
4699	cmd->count = 0;
4700	cmd->pass_all = 1;
4701	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4702
4703	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4704	    IWM_CMD_SYNC, size, cmd);
4705	free(cmd, M_DEVBUF);
4706
4707	return (error);
4708}
4709
4710/*
4711 * ifnet interfaces
4712 */
4713
4714static void
4715iwm_init(struct iwm_softc *sc)
4716{
4717	int error;
4718
4719	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4720		return;
4721	}
4722	sc->sc_generation++;
4723	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4724
4725	if ((error = iwm_init_hw(sc)) != 0) {
4726		printf("iwm_init_hw failed %d\n", error);
4727		iwm_stop(sc);
4728		return;
4729	}
4730
4731	/*
4732	 * Ok, firmware loaded and we are jogging
4733	 */
4734	sc->sc_flags |= IWM_FLAG_HW_INITED;
4735}
4736
4737static int
4738iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4739{
4740	struct iwm_softc *sc;
4741	int error;
4742
4743	sc = ic->ic_softc;
4744
4745	IWM_LOCK(sc);
4746	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4747		IWM_UNLOCK(sc);
4748		return (ENXIO);
4749	}
4750	error = mbufq_enqueue(&sc->sc_snd, m);
4751	if (error) {
4752		IWM_UNLOCK(sc);
4753		return (error);
4754	}
4755	iwm_start(sc);
4756	IWM_UNLOCK(sc);
4757	return (0);
4758}
4759
4760/*
4761 * Dequeue packets from sendq and call send.
4762 */
4763static void
4764iwm_start(struct iwm_softc *sc)
4765{
4766	struct ieee80211_node *ni;
4767	struct mbuf *m;
4768	int ac = 0;
4769
4770	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4771	while (sc->qfullmsk == 0 &&
4772		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4773		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4774		if (iwm_tx(sc, m, ni, ac) != 0) {
4775			if_inc_counter(ni->ni_vap->iv_ifp,
4776			    IFCOUNTER_OERRORS, 1);
4777			ieee80211_free_node(ni);
4778			continue;
4779		}
4780		if (sc->sc_tx_timer == 0) {
4781			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4782			    sc);
4783		}
4784		sc->sc_tx_timer = 15;
4785	}
4786	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4787}
4788
4789static void
4790iwm_stop(struct iwm_softc *sc)
4791{
4792
4793	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4794	sc->sc_flags |= IWM_FLAG_STOPPED;
4795	sc->sc_generation++;
4796	iwm_led_blink_stop(sc);
4797	sc->sc_tx_timer = 0;
4798	iwm_stop_device(sc);
4799	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4800}
4801
4802static void
4803iwm_watchdog(void *arg)
4804{
4805	struct iwm_softc *sc = arg;
4806	struct ieee80211com *ic = &sc->sc_ic;
4807
4808	if (sc->sc_attached == 0)
4809		return;
4810
4811	if (sc->sc_tx_timer > 0) {
4812		if (--sc->sc_tx_timer == 0) {
4813			device_printf(sc->sc_dev, "device timeout\n");
4814#ifdef IWM_DEBUG
4815			iwm_nic_error(sc);
4816#endif
4817			ieee80211_restart_all(ic);
4818			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4819			return;
4820		}
4821		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4822	}
4823}
4824
4825static void
4826iwm_parent(struct ieee80211com *ic)
4827{
4828	struct iwm_softc *sc = ic->ic_softc;
4829	int startall = 0;
4830
4831	IWM_LOCK(sc);
4832	if (ic->ic_nrunning > 0) {
4833		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4834			iwm_init(sc);
4835			startall = 1;
4836		}
4837	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4838		iwm_stop(sc);
4839	IWM_UNLOCK(sc);
4840	if (startall)
4841		ieee80211_start_all(ic);
4842}
4843
4844/*
4845 * The interrupt side of things
4846 */
4847
4848/*
4849 * error dumping routines are from iwlwifi/mvm/utils.c
4850 */
4851
4852/*
4853 * Note: This structure is read from the device with IO accesses,
4854 * and the reading already does the endian conversion. As it is
4855 * read with uint32_t-sized accesses, any members with a different size
4856 * need to be ordered correctly though!
4857 */
4858struct iwm_error_event_table {
4859	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4860	uint32_t error_id;		/* type of error */
4861	uint32_t trm_hw_status0;	/* TRM HW status */
4862	uint32_t trm_hw_status1;	/* TRM HW status */
4863	uint32_t blink2;		/* branch link */
4864	uint32_t ilink1;		/* interrupt link */
4865	uint32_t ilink2;		/* interrupt link */
4866	uint32_t data1;		/* error-specific data */
4867	uint32_t data2;		/* error-specific data */
4868	uint32_t data3;		/* error-specific data */
4869	uint32_t bcon_time;		/* beacon timer */
4870	uint32_t tsf_low;		/* network timestamp function timer */
4871	uint32_t tsf_hi;		/* network timestamp function timer */
4872	uint32_t gp1;		/* GP1 timer register */
4873	uint32_t gp2;		/* GP2 timer register */
4874	uint32_t fw_rev_type;	/* firmware revision type */
4875	uint32_t major;		/* uCode version major */
4876	uint32_t minor;		/* uCode version minor */
4877	uint32_t hw_ver;		/* HW Silicon version */
4878	uint32_t brd_ver;		/* HW board version */
4879	uint32_t log_pc;		/* log program counter */
4880	uint32_t frame_ptr;		/* frame pointer */
4881	uint32_t stack_ptr;		/* stack pointer */
4882	uint32_t hcmd;		/* last host command header */
4883	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4884				 * rxtx_flag */
4885	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4886				 * host_flag */
4887	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4888				 * enc_flag */
4889	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4890				 * time_flag */
4891	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4892				 * wico interrupt */
4893	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
4894	uint32_t wait_event;		/* wait event() caller address */
4895	uint32_t l2p_control;	/* L2pControlField */
4896	uint32_t l2p_duration;	/* L2pDurationField */
4897	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4898	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4899	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4900				 * (LMPM_PMG_SEL) */
4901	uint32_t u_timestamp;	/* indicate when the date and time of the
4902				 * compilation */
4903	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
4904} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4905
4906/*
4907 * UMAC error struct - relevant starting from family 8000 chip.
4908 * Note: This structure is read from the device with IO accesses,
4909 * and the reading already does the endian conversion. As it is
4910 * read with u32-sized accesses, any members with a different size
4911 * need to be ordered correctly though!
4912 */
4913struct iwm_umac_error_event_table {
4914	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4915	uint32_t error_id;	/* type of error */
4916	uint32_t blink1;	/* branch link */
4917	uint32_t blink2;	/* branch link */
4918	uint32_t ilink1;	/* interrupt link */
4919	uint32_t ilink2;	/* interrupt link */
4920	uint32_t data1;		/* error-specific data */
4921	uint32_t data2;		/* error-specific data */
4922	uint32_t data3;		/* error-specific data */
4923	uint32_t umac_major;
4924	uint32_t umac_minor;
4925	uint32_t frame_pointer;	/* core register 27*/
4926	uint32_t stack_pointer;	/* core register 28 */
4927	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
4928	uint32_t nic_isr_pref;	/* ISR status register */
4929} __packed;
4930
4931#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4932#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4933
4934#ifdef IWM_DEBUG
4935struct {
4936	const char *name;
4937	uint8_t num;
4938} advanced_lookup[] = {
4939	{ "NMI_INTERRUPT_WDG", 0x34 },
4940	{ "SYSASSERT", 0x35 },
4941	{ "UCODE_VERSION_MISMATCH", 0x37 },
4942	{ "BAD_COMMAND", 0x38 },
4943	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4944	{ "FATAL_ERROR", 0x3D },
4945	{ "NMI_TRM_HW_ERR", 0x46 },
4946	{ "NMI_INTERRUPT_TRM", 0x4C },
4947	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4948	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4949	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4950	{ "NMI_INTERRUPT_HOST", 0x66 },
4951	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4952	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4953	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4954	{ "ADVANCED_SYSASSERT", 0 },
4955};
4956
4957static const char *
4958iwm_desc_lookup(uint32_t num)
4959{
4960	int i;
4961
4962	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4963		if (advanced_lookup[i].num == num)
4964			return advanced_lookup[i].name;
4965
4966	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4967	return advanced_lookup[i].name;
4968}
4969
4970static void
4971iwm_nic_umac_error(struct iwm_softc *sc)
4972{
4973	struct iwm_umac_error_event_table table;
4974	uint32_t base;
4975
4976	base = sc->umac_error_event_table;
4977
4978	if (base < 0x800000) {
4979		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4980		    base);
4981		return;
4982	}
4983
4984	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4985		device_printf(sc->sc_dev, "reading errlog failed\n");
4986		return;
4987	}
4988
4989	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4990		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4991		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4992		    sc->sc_flags, table.valid);
4993	}
4994
4995	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4996		iwm_desc_lookup(table.error_id));
4997	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4998	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4999	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5000	    table.ilink1);
5001	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5002	    table.ilink2);
5003	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5004	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5005	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5006	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5007	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5008	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5009	    table.frame_pointer);
5010	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5011	    table.stack_pointer);
5012	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5013	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5014	    table.nic_isr_pref);
5015}
5016
5017/*
5018 * Support for dumping the error log seemed like a good idea ...
5019 * but it's mostly hex junk and the only sensible thing is the
5020 * hw/ucode revision (which we know anyway).  Since it's here,
5021 * I'll just leave it in, just in case e.g. the Intel guys want to
5022 * help us decipher some "ADVANCED_SYSASSERT" later.
5023 */
5024static void
5025iwm_nic_error(struct iwm_softc *sc)
5026{
5027	struct iwm_error_event_table table;
5028	uint32_t base;
5029
5030	device_printf(sc->sc_dev, "dumping device error log\n");
5031	base = sc->error_event_table[0];
5032	if (base < 0x800000) {
5033		device_printf(sc->sc_dev,
5034		    "Invalid error log pointer 0x%08x\n", base);
5035		return;
5036	}
5037
5038	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5039		device_printf(sc->sc_dev, "reading errlog failed\n");
5040		return;
5041	}
5042
5043	if (!table.valid) {
5044		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5045		return;
5046	}
5047
5048	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5049		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5050		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5051		    sc->sc_flags, table.valid);
5052	}
5053
5054	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5055	    iwm_desc_lookup(table.error_id));
5056	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5057	    table.trm_hw_status0);
5058	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5059	    table.trm_hw_status1);
5060	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5061	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5062	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5063	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5064	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5065	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5066	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5067	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5068	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5069	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5070	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5071	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5072	    table.fw_rev_type);
5073	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5074	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5075	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5076	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5077	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5078	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5079	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5080	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5081	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5082	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5083	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5084	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5085	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5086	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5087	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5088	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5089	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5090	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5091	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5092
5093	if (sc->umac_error_event_table)
5094		iwm_nic_umac_error(sc);
5095}
5096#endif
5097
5098static void
5099iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5100{
5101	struct ieee80211com *ic = &sc->sc_ic;
5102	struct iwm_cmd_response *cresp;
5103	struct mbuf *m1;
5104	uint32_t offset = 0;
5105	uint32_t maxoff = IWM_RBUF_SIZE;
5106	uint32_t nextoff;
5107	boolean_t stolen = FALSE;
5108
5109#define HAVEROOM(a)	\
5110    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5111
5112	while (HAVEROOM(offset)) {
5113		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5114		    offset);
5115		int qid, idx, code, len;
5116
5117		qid = pkt->hdr.qid;
5118		idx = pkt->hdr.idx;
5119
5120		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5121
5122		/*
5123		 * randomly get these from the firmware, no idea why.
5124		 * they at least seem harmless, so just ignore them for now
5125		 */
5126		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5127		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5128			break;
5129		}
5130
5131		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5132		    "rx packet qid=%d idx=%d type=%x\n",
5133		    qid & ~0x80, pkt->hdr.idx, code);
5134
5135		len = iwm_rx_packet_len(pkt);
5136		len += sizeof(uint32_t); /* account for status word */
5137		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5138
5139		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5140
5141		switch (code) {
5142		case IWM_REPLY_RX_PHY_CMD:
5143			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5144			break;
5145
5146		case IWM_REPLY_RX_MPDU_CMD: {
5147			/*
5148			 * If this is the last frame in the RX buffer, we
5149			 * can directly feed the mbuf to the sharks here.
5150			 */
5151			struct iwm_rx_packet *nextpkt = mtodoff(m,
5152			    struct iwm_rx_packet *, nextoff);
5153			if (!HAVEROOM(nextoff) ||
5154			    (nextpkt->hdr.code == 0 &&
5155			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5156			     nextpkt->hdr.idx == 0) ||
5157			    (nextpkt->len_n_flags ==
5158			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5159				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5160					stolen = FALSE;
5161					/* Make sure we abort the loop */
5162					nextoff = maxoff;
5163				}
5164				break;
5165			}
5166
5167			/*
5168			 * Use m_copym instead of m_split, because that
5169			 * makes it easier to keep a valid rx buffer in
5170			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5171			 *
5172			 * We need to start m_copym() at offset 0, to get the
5173			 * M_PKTHDR flag preserved.
5174			 */
5175			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5176			if (m1) {
5177				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5178					stolen = TRUE;
5179				else
5180					m_freem(m1);
5181			}
5182			break;
5183		}
5184
5185		case IWM_TX_CMD:
5186			iwm_mvm_rx_tx_cmd(sc, pkt);
5187			break;
5188
5189		case IWM_MISSED_BEACONS_NOTIFICATION: {
5190			struct iwm_missed_beacons_notif *resp;
5191			int missed;
5192
5193			/* XXX look at mac_id to determine interface ID */
5194			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5195
5196			resp = (void *)pkt->data;
5197			missed = le32toh(resp->consec_missed_beacons);
5198
5199			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5200			    "%s: MISSED_BEACON: mac_id=%d, "
5201			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5202			    "num_rx=%d\n",
5203			    __func__,
5204			    le32toh(resp->mac_id),
5205			    le32toh(resp->consec_missed_beacons_since_last_rx),
5206			    le32toh(resp->consec_missed_beacons),
5207			    le32toh(resp->num_expected_beacons),
5208			    le32toh(resp->num_recvd_beacons));
5209
5210			/* Be paranoid */
5211			if (vap == NULL)
5212				break;
5213
5214			/* XXX no net80211 locking? */
5215			if (vap->iv_state == IEEE80211_S_RUN &&
5216			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5217				if (missed > vap->iv_bmissthreshold) {
5218					/* XXX bad locking; turn into task */
5219					IWM_UNLOCK(sc);
5220					ieee80211_beacon_miss(ic);
5221					IWM_LOCK(sc);
5222				}
5223			}
5224
5225			break;
5226		}
5227
5228		case IWM_MFUART_LOAD_NOTIFICATION:
5229			break;
5230
5231		case IWM_MVM_ALIVE:
5232			break;
5233
5234		case IWM_CALIB_RES_NOTIF_PHY_DB:
5235			break;
5236
5237		case IWM_STATISTICS_NOTIFICATION:
5238			iwm_mvm_handle_rx_statistics(sc, pkt);
5239			break;
5240
5241		case IWM_NVM_ACCESS_CMD:
5242		case IWM_MCC_UPDATE_CMD:
5243			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5244				memcpy(sc->sc_cmd_resp,
5245				    pkt, sizeof(sc->sc_cmd_resp));
5246			}
5247			break;
5248
5249		case IWM_MCC_CHUB_UPDATE_CMD: {
5250			struct iwm_mcc_chub_notif *notif;
5251			notif = (void *)pkt->data;
5252
5253			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5254			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5255			sc->sc_fw_mcc[2] = '\0';
5256			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5257			    "fw source %d sent CC '%s'\n",
5258			    notif->source_id, sc->sc_fw_mcc);
5259			break;
5260		}
5261
5262		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5263		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5264				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5265			struct iwm_dts_measurement_notif_v1 *notif;
5266
5267			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5268				device_printf(sc->sc_dev,
5269				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5270				break;
5271			}
5272			notif = (void *)pkt->data;
5273			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5274			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5275			    notif->temp);
5276			break;
5277		}
5278
5279		case IWM_PHY_CONFIGURATION_CMD:
5280		case IWM_TX_ANT_CONFIGURATION_CMD:
5281		case IWM_ADD_STA:
5282		case IWM_MAC_CONTEXT_CMD:
5283		case IWM_REPLY_SF_CFG_CMD:
5284		case IWM_POWER_TABLE_CMD:
5285		case IWM_LTR_CONFIG:
5286		case IWM_PHY_CONTEXT_CMD:
5287		case IWM_BINDING_CONTEXT_CMD:
5288		case IWM_TIME_EVENT_CMD:
5289		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5290		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5291		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5292		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5293		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5294		case IWM_REPLY_BEACON_FILTERING_CMD:
5295		case IWM_MAC_PM_POWER_TABLE:
5296		case IWM_TIME_QUOTA_CMD:
5297		case IWM_REMOVE_STA:
5298		case IWM_TXPATH_FLUSH:
5299		case IWM_LQ_CMD:
5300		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5301				 IWM_FW_PAGING_BLOCK_CMD):
5302		case IWM_BT_CONFIG:
5303		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5304			cresp = (void *)pkt->data;
5305			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5306				memcpy(sc->sc_cmd_resp,
5307				    pkt, sizeof(*pkt)+sizeof(*cresp));
5308			}
5309			break;
5310
5311		/* ignore */
5312		case IWM_PHY_DB_CMD:
5313			break;
5314
5315		case IWM_INIT_COMPLETE_NOTIF:
5316			break;
5317
5318		case IWM_SCAN_OFFLOAD_COMPLETE:
5319			iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5320			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5321				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5322				ieee80211_runtask(ic, &sc->sc_es_task);
5323			}
5324			break;
5325
5326		case IWM_SCAN_ITERATION_COMPLETE: {
5327			struct iwm_lmac_scan_complete_notif *notif;
5328			notif = (void *)pkt->data;
5329			break;
5330		}
5331
5332		case IWM_SCAN_COMPLETE_UMAC:
5333			iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5334			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5335				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5336				ieee80211_runtask(ic, &sc->sc_es_task);
5337			}
5338			break;
5339
5340		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5341			struct iwm_umac_scan_iter_complete_notif *notif;
5342			notif = (void *)pkt->data;
5343
5344			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5345			    "complete, status=0x%x, %d channels scanned\n",
5346			    notif->status, notif->scanned_channels);
5347			break;
5348		}
5349
5350		case IWM_REPLY_ERROR: {
5351			struct iwm_error_resp *resp;
5352			resp = (void *)pkt->data;
5353
5354			device_printf(sc->sc_dev,
5355			    "firmware error 0x%x, cmd 0x%x\n",
5356			    le32toh(resp->error_type),
5357			    resp->cmd_id);
5358			break;
5359		}
5360
5361		case IWM_TIME_EVENT_NOTIFICATION:
5362			iwm_mvm_rx_time_event_notif(sc, pkt);
5363			break;
5364
5365		/*
5366		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5367		 * messages. Just ignore them for now.
5368		 */
5369		case IWM_DEBUG_LOG_MSG:
5370			break;
5371
5372		case IWM_MCAST_FILTER_CMD:
5373			break;
5374
5375		case IWM_SCD_QUEUE_CFG: {
5376			struct iwm_scd_txq_cfg_rsp *rsp;
5377			rsp = (void *)pkt->data;
5378
5379			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5380			    "queue cfg token=0x%x sta_id=%d "
5381			    "tid=%d scd_queue=%d\n",
5382			    rsp->token, rsp->sta_id, rsp->tid,
5383			    rsp->scd_queue);
5384			break;
5385		}
5386
5387		default:
5388			device_printf(sc->sc_dev,
5389			    "frame %d/%d %x UNHANDLED (this should "
5390			    "not happen)\n", qid & ~0x80, idx,
5391			    pkt->len_n_flags);
5392			break;
5393		}
5394
5395		/*
5396		 * Why test bit 0x80?  The Linux driver:
5397		 *
5398		 * There is one exception:  uCode sets bit 15 when it
5399		 * originates the response/notification, i.e. when the
5400		 * response/notification is not a direct response to a
5401		 * command sent by the driver.  For example, uCode issues
5402		 * IWM_REPLY_RX when it sends a received frame to the driver;
5403		 * it is not a direct response to any driver command.
5404		 *
5405		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5406		 * uses a slightly different format for pkt->hdr, and "qid"
5407		 * is actually the upper byte of a two-byte field.
5408		 */
5409		if (!(qid & (1 << 7)))
5410			iwm_cmd_done(sc, pkt);
5411
5412		offset = nextoff;
5413	}
5414	if (stolen)
5415		m_freem(m);
5416#undef HAVEROOM
5417}
5418
5419/*
5420 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5421 * Basic structure from if_iwn
5422 */
5423static void
5424iwm_notif_intr(struct iwm_softc *sc)
5425{
5426	uint16_t hw;
5427
5428	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5429	    BUS_DMASYNC_POSTREAD);
5430
5431	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5432
5433	/*
5434	 * Process responses
5435	 */
5436	while (sc->rxq.cur != hw) {
5437		struct iwm_rx_ring *ring = &sc->rxq;
5438		struct iwm_rx_data *data = &ring->data[ring->cur];
5439
5440		bus_dmamap_sync(ring->data_dmat, data->map,
5441		    BUS_DMASYNC_POSTREAD);
5442
5443		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5444		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5445		iwm_handle_rxb(sc, data->m);
5446
5447		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5448	}
5449
5450	/*
5451	 * Tell the firmware that it can reuse the ring entries that
5452	 * we have just processed.
5453	 * Seems like the hardware gets upset unless we align
5454	 * the write by 8??
5455	 */
5456	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5457	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5458}
5459
5460static void
5461iwm_intr(void *arg)
5462{
5463	struct iwm_softc *sc = arg;
5464	int handled = 0;
5465	int r1, r2, rv = 0;
5466	int isperiodic = 0;
5467
5468	IWM_LOCK(sc);
5469#ifndef __HAIKU__
5470	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5471
5472	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5473		uint32_t *ict = sc->ict_dma.vaddr;
5474		int tmp;
5475
5476		tmp = htole32(ict[sc->ict_cur]);
5477		if (!tmp)
5478			goto out_ena;
5479
5480		/*
5481		 * ok, there was something.  keep plowing until we have all.
5482		 */
5483		r1 = r2 = 0;
5484		while (tmp) {
5485			r1 |= tmp;
5486			ict[sc->ict_cur] = 0;
5487			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5488			tmp = htole32(ict[sc->ict_cur]);
5489		}
5490
5491		/* this is where the fun begins.  don't ask */
5492		if (r1 == 0xffffffff)
5493			r1 = 0;
5494
5495		/* i am not expected to understand this */
5496		if (r1 & 0xc0000)
5497			r1 |= 0x8000;
5498		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5499	} else {
5500		r1 = IWM_READ(sc, IWM_CSR_INT);
5501		/* "hardware gone" (where, fishing?) */
5502		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5503			goto out;
5504		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5505	}
5506	if (r1 == 0 && r2 == 0) {
5507		goto out_ena;
5508	}
5509#else
5510	r1 = atomic_get((int32 *)&sc->sc_intr_status_1);
5511	r2 = atomic_get((int32 *)&sc->sc_intr_status_2);
5512#endif
5513
5514	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5515
5516	/* Safely ignore these bits for debug checks below */
5517	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5518
5519	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5520		int i;
5521		struct ieee80211com *ic = &sc->sc_ic;
5522		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5523
5524#ifdef IWM_DEBUG
5525		iwm_nic_error(sc);
5526#endif
5527		/* Dump driver status (TX and RX rings) while we're here. */
5528		device_printf(sc->sc_dev, "driver status:\n");
5529		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5530			struct iwm_tx_ring *ring = &sc->txq[i];
5531			device_printf(sc->sc_dev,
5532			    "  tx ring %2d: qid=%-2d cur=%-3d "
5533			    "queued=%-3d\n",
5534			    i, ring->qid, ring->cur, ring->queued);
5535		}
5536		device_printf(sc->sc_dev,
5537		    "  rx ring: cur=%d\n", sc->rxq.cur);
5538		device_printf(sc->sc_dev,
5539		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5540
5541		/* Reset our firmware state tracking. */
5542		sc->sc_firmware_state = 0;
5543		/* Don't stop the device; just do a VAP restart */
5544		IWM_UNLOCK(sc);
5545
5546		if (vap == NULL) {
5547			printf("%s: null vap\n", __func__);
5548			return;
5549		}
5550
5551		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5552		    "restarting\n", __func__, vap->iv_state);
5553
5554		ieee80211_restart_all(ic);
5555		return;
5556	}
5557
5558	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5559		handled |= IWM_CSR_INT_BIT_HW_ERR;
5560		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5561		iwm_stop(sc);
5562		rv = 1;
5563		goto out;
5564	}
5565
5566	/* firmware chunk loaded */
5567	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5568		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5569		handled |= IWM_CSR_INT_BIT_FH_TX;
5570		sc->sc_fw_chunk_done = 1;
5571		wakeup(&sc->sc_fw);
5572	}
5573
5574	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5575		handled |= IWM_CSR_INT_BIT_RF_KILL;
5576		if (iwm_check_rfkill(sc)) {
5577			device_printf(sc->sc_dev,
5578			    "%s: rfkill switch, disabling interface\n",
5579			    __func__);
5580			iwm_stop(sc);
5581		}
5582	}
5583
5584	/*
5585	 * The Linux driver uses periodic interrupts to avoid races.
5586	 * We cargo-cult like it's going out of fashion.
5587	 */
5588	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5589		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5590		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5591		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5592			IWM_WRITE_1(sc,
5593			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5594		isperiodic = 1;
5595	}
5596
5597	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5598		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5599		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5600
5601		iwm_notif_intr(sc);
5602
5603		/* enable periodic interrupt, see above */
5604		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5605			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5606			    IWM_CSR_INT_PERIODIC_ENA);
5607	}
5608
5609	if (__predict_false(r1 & ~handled))
5610		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5611		    "%s: unhandled interrupts: %x\n", __func__, r1);
5612	rv = 1;
5613
5614 out_ena:
5615	iwm_restore_interrupts(sc);
5616 out:
5617	IWM_UNLOCK(sc);
5618	return;
5619}
5620
5621/*
5622 * Autoconf glue-sniffing
5623 */
5624#define	PCI_VENDOR_INTEL		0x8086
5625#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5626#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5627#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5628#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5629#define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
5630#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5631#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5632#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5633#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5634#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5635#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5636#define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5637
5638static const struct iwm_devices {
5639	uint16_t		device;
5640	const struct iwm_cfg	*cfg;
5641} iwm_devices[] = {
5642	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5643	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5644	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5645	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5646	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
5647	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5648	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5649	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5650	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5651	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5652	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5653	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5654};
5655
5656static int
5657iwm_probe(device_t dev)
5658{
5659	int i;
5660
5661	for (i = 0; i < nitems(iwm_devices); i++) {
5662		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5663		    pci_get_device(dev) == iwm_devices[i].device) {
5664			device_set_desc(dev, iwm_devices[i].cfg->name);
5665			return (BUS_PROBE_DEFAULT);
5666		}
5667	}
5668
5669	return (ENXIO);
5670}
5671
5672static int
5673iwm_dev_check(device_t dev)
5674{
5675	struct iwm_softc *sc;
5676	uint16_t devid;
5677	int i;
5678
5679	sc = device_get_softc(dev);
5680
5681	devid = pci_get_device(dev);
5682	for (i = 0; i < nitems(iwm_devices); i++) {
5683		if (iwm_devices[i].device == devid) {
5684			sc->cfg = iwm_devices[i].cfg;
5685			return (0);
5686		}
5687	}
5688	device_printf(dev, "unknown adapter type\n");
5689	return ENXIO;
5690}
5691
5692/* PCI registers */
5693#define PCI_CFG_RETRY_TIMEOUT	0x041
5694
5695static int
5696iwm_pci_attach(device_t dev)
5697{
5698	struct iwm_softc *sc;
5699	int count, error, rid;
5700	uint16_t reg;
5701
5702	sc = device_get_softc(dev);
5703
5704	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5705	 * PCI Tx retries from interfering with C3 CPU state */
5706	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5707
5708	/* Enable bus-mastering and hardware bug workaround. */
5709	pci_enable_busmaster(dev);
5710	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5711	/* if !MSI */
5712	if (reg & PCIM_STATUS_INTxSTATE) {
5713		reg &= ~PCIM_STATUS_INTxSTATE;
5714	}
5715	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5716
5717	rid = PCIR_BAR(0);
5718	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5719	    RF_ACTIVE);
5720	if (sc->sc_mem == NULL) {
5721		device_printf(sc->sc_dev, "can't map mem space\n");
5722		return (ENXIO);
5723	}
5724	sc->sc_st = rman_get_bustag(sc->sc_mem);
5725	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5726
5727	/* Install interrupt handler. */
5728	count = 1;
5729	rid = 0;
5730	if (pci_alloc_msi(dev, &count) == 0)
5731		rid = 1;
5732	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5733	    (rid != 0 ? 0 : RF_SHAREABLE));
5734	if (sc->sc_irq == NULL) {
5735		device_printf(dev, "can't map interrupt\n");
5736			return (ENXIO);
5737	}
5738	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5739	    NULL, iwm_intr, sc, &sc->sc_ih);
5740	if (sc->sc_ih == NULL) {
5741		device_printf(dev, "can't establish interrupt");
5742			return (ENXIO);
5743	}
5744	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5745
5746	return (0);
5747}
5748
5749static void
5750iwm_pci_detach(device_t dev)
5751{
5752	struct iwm_softc *sc = device_get_softc(dev);
5753
5754	if (sc->sc_irq != NULL) {
5755		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5756		bus_release_resource(dev, SYS_RES_IRQ,
5757		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5758		pci_release_msi(dev);
5759        }
5760	if (sc->sc_mem != NULL)
5761		bus_release_resource(dev, SYS_RES_MEMORY,
5762		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5763}
5764
5765
5766
5767static int
5768iwm_attach(device_t dev)
5769{
5770	struct iwm_softc *sc = device_get_softc(dev);
5771	struct ieee80211com *ic = &sc->sc_ic;
5772	int error;
5773	int txq_i, i;
5774
5775	sc->sc_dev = dev;
5776	sc->sc_attached = 1;
5777	IWM_LOCK_INIT(sc);
5778	mbufq_init(&sc->sc_snd, ifqmaxlen);
5779	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5780	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5781	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5782
5783	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5784	if (sc->sc_notif_wait == NULL) {
5785		device_printf(dev, "failed to init notification wait struct\n");
5786		goto fail;
5787	}
5788
5789	sc->sf_state = IWM_SF_UNINIT;
5790
5791	/* Init phy db */
5792	sc->sc_phy_db = iwm_phy_db_init(sc);
5793	if (!sc->sc_phy_db) {
5794		device_printf(dev, "Cannot init phy_db\n");
5795		goto fail;
5796	}
5797
5798	/* Set EBS as successful as long as not stated otherwise by the FW. */
5799	sc->last_ebs_successful = TRUE;
5800
5801	/* PCI attach */
5802	error = iwm_pci_attach(dev);
5803	if (error != 0)
5804		goto fail;
5805
5806	sc->sc_wantresp = -1;
5807
5808	/* Match device id */
5809	error = iwm_dev_check(dev);
5810	if (error != 0)
5811		goto fail;
5812
5813	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5814	/*
5815	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5816	 * changed, and now the revision step also includes bit 0-1 (no more
5817	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5818	 * in the old format.
5819	 */
5820	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5821		int ret;
5822		uint32_t hw_step;
5823
5824		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5825				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5826
5827		if (iwm_prepare_card_hw(sc) != 0) {
5828			device_printf(dev, "could not initialize hardware\n");
5829			goto fail;
5830		}
5831
5832		/*
5833		 * In order to recognize C step the driver should read the
5834		 * chip version id located at the AUX bus MISC address.
5835		 */
5836		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5837			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5838		DELAY(2);
5839
5840		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5841				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5842				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5843				   25000);
5844		if (!ret) {
5845			device_printf(sc->sc_dev,
5846			    "Failed to wake up the nic\n");
5847			goto fail;
5848		}
5849
5850		if (iwm_nic_lock(sc)) {
5851			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5852			hw_step |= IWM_ENABLE_WFPM;
5853			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5854			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5855			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5856			if (hw_step == 0x3)
5857				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5858						(IWM_SILICON_C_STEP << 2);
5859			iwm_nic_unlock(sc);
5860		} else {
5861			device_printf(sc->sc_dev, "Failed to lock the nic\n");
5862			goto fail;
5863		}
5864	}
5865
5866	/* special-case 7265D, it has the same PCI IDs. */
5867	if (sc->cfg == &iwm7265_cfg &&
5868	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5869		sc->cfg = &iwm7265d_cfg;
5870	}
5871
5872	/* Allocate DMA memory for firmware transfers. */
5873	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5874		device_printf(dev, "could not allocate memory for firmware\n");
5875		goto fail;
5876	}
5877
5878	/* Allocate "Keep Warm" page. */
5879	if ((error = iwm_alloc_kw(sc)) != 0) {
5880		device_printf(dev, "could not allocate keep warm page\n");
5881		goto fail;
5882	}
5883
5884	/* We use ICT interrupts */
5885	if ((error = iwm_alloc_ict(sc)) != 0) {
5886		device_printf(dev, "could not allocate ICT table\n");
5887		goto fail;
5888	}
5889
5890	/* Allocate TX scheduler "rings". */
5891	if ((error = iwm_alloc_sched(sc)) != 0) {
5892		device_printf(dev, "could not allocate TX scheduler rings\n");
5893		goto fail;
5894	}
5895
5896	/* Allocate TX rings */
5897	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5898		if ((error = iwm_alloc_tx_ring(sc,
5899		    &sc->txq[txq_i], txq_i)) != 0) {
5900			device_printf(dev,
5901			    "could not allocate TX ring %d\n",
5902			    txq_i);
5903			goto fail;
5904		}
5905	}
5906
5907	/* Allocate RX ring. */
5908	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5909		device_printf(dev, "could not allocate RX ring\n");
5910		goto fail;
5911	}
5912
5913	/* Clear pending interrupts. */
5914	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5915
5916	ic->ic_softc = sc;
5917	ic->ic_name = device_get_nameunit(sc->sc_dev);
5918	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5919	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5920
5921	/* Set device capabilities. */
5922	ic->ic_caps =
5923	    IEEE80211_C_STA |
5924	    IEEE80211_C_WPA |		/* WPA/RSN */
5925	    IEEE80211_C_WME |
5926	    IEEE80211_C_PMGT |
5927	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5928	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5929//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5930	    ;
5931	/* Advertise full-offload scanning */
5932	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
5933	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5934		sc->sc_phyctxt[i].id = i;
5935		sc->sc_phyctxt[i].color = 0;
5936		sc->sc_phyctxt[i].ref = 0;
5937		sc->sc_phyctxt[i].channel = NULL;
5938	}
5939
5940	/* Default noise floor */
5941	sc->sc_noise = -96;
5942
5943	/* Max RSSI */
5944	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5945
5946#ifdef IWM_DEBUG
5947	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5948	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5949	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5950#endif
5951
5952	error = iwm_read_firmware(sc);
5953	if (error) {
5954		goto fail;
5955	} else if (sc->sc_fw.fw_fp == NULL) {
5956		/*
5957		 * XXX Add a solution for properly deferring firmware load
5958		 *     during bootup.
5959		 */
5960		goto fail;
5961	} else {
5962		sc->sc_preinit_hook.ich_func = iwm_preinit;
5963		sc->sc_preinit_hook.ich_arg = sc;
5964		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5965			device_printf(dev,
5966			    "config_intrhook_establish failed\n");
5967			goto fail;
5968		}
5969	}
5970
5971	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5972	    "<-%s\n", __func__);
5973
5974	return 0;
5975
5976	/* Free allocated memory if something failed during attachment. */
5977fail:
5978	iwm_detach_local(sc, 0);
5979
5980	return ENXIO;
5981}
5982
5983static int
5984iwm_is_valid_ether_addr(uint8_t *addr)
5985{
5986	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5987
5988	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5989		return (FALSE);
5990
5991	return (TRUE);
5992}
5993
5994static int
5995iwm_wme_update(struct ieee80211com *ic)
5996{
5997#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5998	struct iwm_softc *sc = ic->ic_softc;
5999	struct chanAccParams chp;
6000	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6001	struct iwm_vap *ivp = IWM_VAP(vap);
6002	struct iwm_node *in;
6003	struct wmeParams tmp[WME_NUM_AC];
6004	int aci, error;
6005
6006	if (vap == NULL)
6007		return (0);
6008
6009	ieee80211_wme_ic_getparams(ic, &chp);
6010
6011	IEEE80211_LOCK(ic);
6012	for (aci = 0; aci < WME_NUM_AC; aci++)
6013		tmp[aci] = chp.cap_wmeParams[aci];
6014	IEEE80211_UNLOCK(ic);
6015
6016	IWM_LOCK(sc);
6017	for (aci = 0; aci < WME_NUM_AC; aci++) {
6018		const struct wmeParams *ac = &tmp[aci];
6019		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6020		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6021		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6022		ivp->queue_params[aci].edca_txop =
6023		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6024	}
6025	ivp->have_wme = TRUE;
6026	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6027		in = IWM_NODE(vap->iv_bss);
6028		if (in->in_assoc) {
6029			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6030				device_printf(sc->sc_dev,
6031				    "%s: failed to update MAC\n", __func__);
6032			}
6033		}
6034	}
6035	IWM_UNLOCK(sc);
6036
6037	return (0);
6038#undef IWM_EXP2
6039}
6040
6041static void
6042iwm_preinit(void *arg)
6043{
6044	struct iwm_softc *sc = arg;
6045	device_t dev = sc->sc_dev;
6046	struct ieee80211com *ic = &sc->sc_ic;
6047	int error;
6048
6049	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6050	    "->%s\n", __func__);
6051
6052	IWM_LOCK(sc);
6053	if ((error = iwm_start_hw(sc)) != 0) {
6054		device_printf(dev, "could not initialize hardware\n");
6055		IWM_UNLOCK(sc);
6056		goto fail;
6057	}
6058
6059	error = iwm_run_init_mvm_ucode(sc, 1);
6060	iwm_stop_device(sc);
6061	if (error) {
6062		IWM_UNLOCK(sc);
6063		goto fail;
6064	}
6065#ifndef __HAIKU__ /* This printf causes a KDL. Not sure why... */
6066	device_printf(dev,
6067	    "hw rev 0x%x, fw ver %s, address %s\n",
6068	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6069	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6070#endif
6071
6072	/* not all hardware can do 5GHz band */
6073	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6074		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6075		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6076	IWM_UNLOCK(sc);
6077
6078	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6079	    ic->ic_channels);
6080
6081	/*
6082	 * At this point we've committed - if we fail to do setup,
6083	 * we now also have to tear down the net80211 state.
6084	 */
6085	ieee80211_ifattach(ic);
6086	ic->ic_vap_create = iwm_vap_create;
6087	ic->ic_vap_delete = iwm_vap_delete;
6088	ic->ic_raw_xmit = iwm_raw_xmit;
6089	ic->ic_node_alloc = iwm_node_alloc;
6090	ic->ic_scan_start = iwm_scan_start;
6091	ic->ic_scan_end = iwm_scan_end;
6092	ic->ic_update_mcast = iwm_update_mcast;
6093	ic->ic_getradiocaps = iwm_init_channel_map;
6094	ic->ic_set_channel = iwm_set_channel;
6095	ic->ic_scan_curchan = iwm_scan_curchan;
6096	ic->ic_scan_mindwell = iwm_scan_mindwell;
6097	ic->ic_wme.wme_update = iwm_wme_update;
6098	ic->ic_parent = iwm_parent;
6099	ic->ic_transmit = iwm_transmit;
6100	iwm_radiotap_attach(sc);
6101	if (bootverbose)
6102		ieee80211_announce(ic);
6103
6104	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6105	    "<-%s\n", __func__);
6106	config_intrhook_disestablish(&sc->sc_preinit_hook);
6107
6108	return;
6109fail:
6110	config_intrhook_disestablish(&sc->sc_preinit_hook);
6111	iwm_detach_local(sc, 0);
6112}
6113
6114/*
6115 * Attach the interface to 802.11 radiotap.
6116 */
6117static void
6118iwm_radiotap_attach(struct iwm_softc *sc)
6119{
6120        struct ieee80211com *ic = &sc->sc_ic;
6121
6122	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6123	    "->%s begin\n", __func__);
6124        ieee80211_radiotap_attach(ic,
6125            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6126                IWM_TX_RADIOTAP_PRESENT,
6127            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6128                IWM_RX_RADIOTAP_PRESENT);
6129	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6130	    "->%s end\n", __func__);
6131}
6132
6133static struct ieee80211vap *
6134iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6135    enum ieee80211_opmode opmode, int flags,
6136    const uint8_t bssid[IEEE80211_ADDR_LEN],
6137    const uint8_t mac[IEEE80211_ADDR_LEN])
6138{
6139	struct iwm_vap *ivp;
6140	struct ieee80211vap *vap;
6141
6142	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6143		return NULL;
6144	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6145	vap = &ivp->iv_vap;
6146	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6147	vap->iv_bmissthreshold = 10;            /* override default */
6148	/* Override with driver methods. */
6149	ivp->iv_newstate = vap->iv_newstate;
6150	vap->iv_newstate = iwm_newstate;
6151
6152	ivp->id = IWM_DEFAULT_MACID;
6153	ivp->color = IWM_DEFAULT_COLOR;
6154
6155	ivp->have_wme = FALSE;
6156	ivp->ps_disabled = FALSE;
6157
6158	ieee80211_ratectl_init(vap);
6159	/* Complete setup. */
6160	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6161	    mac);
6162	ic->ic_opmode = opmode;
6163
6164	return vap;
6165}
6166
6167static void
6168iwm_vap_delete(struct ieee80211vap *vap)
6169{
6170	struct iwm_vap *ivp = IWM_VAP(vap);
6171
6172	ieee80211_ratectl_deinit(vap);
6173	ieee80211_vap_detach(vap);
6174	free(ivp, M_80211_VAP);
6175}
6176
6177static void
6178iwm_xmit_queue_drain(struct iwm_softc *sc)
6179{
6180	struct mbuf *m;
6181	struct ieee80211_node *ni;
6182
6183	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6184		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6185		ieee80211_free_node(ni);
6186		m_freem(m);
6187	}
6188}
6189
6190st