1/*-
2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org>
4 * Copyright (c) 2008 Sam Leffler, Errno Consulting
5 * Copyright (c) 2011 Intel Corporation
6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
24 * adapters.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/iwn/if_iwn.c 337949 2018-08-17 03:01:01Z kevans $");
29
30#include "opt_wlan.h"
31#include "opt_iwn.h"
32
33#include <sys/param.h>
34#include <sys/sockio.h>
35#include <sys/sysctl.h>
36#include <sys/mbuf.h>
37#include <sys/kernel.h>
38#include <sys/socket.h>
39#include <sys/systm.h>
40#include <sys/malloc.h>
41#include <sys/bus.h>
42#include <sys/conf.h>
43#include <sys/rman.h>
44#include <sys/endian.h>
45#include <sys/firmware.h>
46#include <sys/limits.h>
47#include <sys/module.h>
48#include <sys/priv.h>
49#include <sys/queue.h>
50#include <sys/taskqueue.h>
51
52#include <machine/bus.h>
53#include <machine/resource.h>
54#include <machine/clock.h>
55
56#include <dev/pci/pcireg.h>
57#include <dev/pci/pcivar.h>
58
59#include <net/if.h>
60#include <net/if_var.h>
61#include <net/if_dl.h>
62#include <net/if_media.h>
63
64#include <netinet/in.h>
65#include <netinet/if_ether.h>
66
67#include <net80211/ieee80211_var.h>
68#include <net80211/ieee80211_radiotap.h>
69#include <net80211/ieee80211_regdomain.h>
70#include <net80211/ieee80211_ratectl.h>
71
72#include <dev/iwn/if_iwnreg.h>
73#include <dev/iwn/if_iwnvar.h>
74#include <dev/iwn/if_iwn_devid.h>
75#include <dev/iwn/if_iwn_chip_cfg.h>
76#include <dev/iwn/if_iwn_debug.h>
77#include <dev/iwn/if_iwn_ioctl.h>
78
79struct iwn_ident {
80	uint16_t	vendor;
81	uint16_t	device;
82	const char	*name;
83};
84
85static const struct iwn_ident iwn_ident_table[] = {
86	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205"		},
87	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000"		},
88	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000"		},
89	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205"		},
90	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250"	},
91	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250"	},
92	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030"		},
93	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030"		},
94	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230"		},
95	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230"		},
96	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150"	},
97	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150"	},
98	{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN"	},
99	{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN"	},
100	/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
101	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230"		},
102	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230"		},
103	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130"		},
104	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130"		},
105	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100"		},
106	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100"		},
107	{ 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105"		},
108	{ 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105"		},
109	{ 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135"		},
110	{ 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135"		},
111	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965"		},
112	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300"		},
113	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200"		},
114	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965"		},
115	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965"		},
116	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100"			},
117	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965"		},
118	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300"		},
119	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300"		},
120	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100"			},
121	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300"		},
122	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200"		},
123	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350"			},
124	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350"			},
125	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150"			},
126	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150"			},
127	{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235"		},
128	{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235"		},
129	{ 0, 0, NULL }
130};
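
/*
 * iwn_probe() matches on PCI vendor/device id only; per-subdevice
 * differences (see the XXX note above) are handled later in
 * iwn_config_specific().
 */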
131
132static int	iwn_probe(device_t);
133static int	iwn_attach(device_t);
134static int	iwn4965_attach(struct iwn_softc *, uint16_t);
135static int	iwn5000_attach(struct iwn_softc *, uint16_t);
136static int	iwn_config_specific(struct iwn_softc *, uint16_t);
137static void	iwn_radiotap_attach(struct iwn_softc *);
138static void	iwn_sysctlattach(struct iwn_softc *);
139static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
140		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
141		    const uint8_t [IEEE80211_ADDR_LEN],
142		    const uint8_t [IEEE80211_ADDR_LEN]);
143static void	iwn_vap_delete(struct ieee80211vap *);
144static int	iwn_detach(device_t);
145static int	iwn_shutdown(device_t);
146static int	iwn_suspend(device_t);
147static int	iwn_resume(device_t);
148static int	iwn_nic_lock(struct iwn_softc *);
149static int	iwn_eeprom_lock(struct iwn_softc *);
150static int	iwn_init_otprom(struct iwn_softc *);
151static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
152static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
153static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
154		    void **, bus_size_t, bus_size_t);
155static void	iwn_dma_contig_free(struct iwn_dma_info *);
156static int	iwn_alloc_sched(struct iwn_softc *);
157static void	iwn_free_sched(struct iwn_softc *);
158static int	iwn_alloc_kw(struct iwn_softc *);
159static void	iwn_free_kw(struct iwn_softc *);
160static int	iwn_alloc_ict(struct iwn_softc *);
161static void	iwn_free_ict(struct iwn_softc *);
162static int	iwn_alloc_fwmem(struct iwn_softc *);
163static void	iwn_free_fwmem(struct iwn_softc *);
164static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
165static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
166static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
167static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
168		    int);
169static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
170static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
171static void	iwn5000_ict_reset(struct iwn_softc *);
172static int	iwn_read_eeprom(struct iwn_softc *,
173		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
174static void	iwn4965_read_eeprom(struct iwn_softc *);
175#ifdef	IWN_DEBUG
176static void	iwn4965_print_power_group(struct iwn_softc *, int);
177#endif
178static void	iwn5000_read_eeprom(struct iwn_softc *);
179static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
180static void	iwn_read_eeprom_band(struct iwn_softc *, int, int, int *,
181		    struct ieee80211_channel[]);
182static void	iwn_read_eeprom_ht40(struct iwn_softc *, int, int, int *,
183		    struct ieee80211_channel[]);
184static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
185static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
186		    struct ieee80211_channel *);
187static void	iwn_getradiocaps(struct ieee80211com *, int, int *,
188		    struct ieee80211_channel[]);
189static int	iwn_setregdomain(struct ieee80211com *,
190		    struct ieee80211_regdomain *, int,
191		    struct ieee80211_channel[]);
192static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
193static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
194		    const uint8_t mac[IEEE80211_ADDR_LEN]);
195static void	iwn_newassoc(struct ieee80211_node *, int);
196static int	iwn_media_change(struct ifnet *);
197static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
198static void	iwn_calib_timeout(void *);
199static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
200		    struct iwn_rx_data *);
201static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
202		    struct iwn_rx_data *);
203static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
204		    struct iwn_rx_data *);
205static void	iwn5000_rx_calib_results(struct iwn_softc *,
206		    struct iwn_rx_desc *, struct iwn_rx_data *);
207static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
208		    struct iwn_rx_data *);
209static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
210		    struct iwn_rx_data *);
211static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
212		    struct iwn_rx_data *);
213static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
214		    uint8_t);
215static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, int, void *);
216static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
217static void	iwn_notif_intr(struct iwn_softc *);
218static void	iwn_wakeup_intr(struct iwn_softc *);
219static void	iwn_rftoggle_intr(struct iwn_softc *);
220static void	iwn_fatal_intr(struct iwn_softc *);
221static void	iwn_intr(void *);
222static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
223		    uint16_t);
224static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
225		    uint16_t);
226#ifdef notyet
227static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
228#endif
229static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
230		    struct ieee80211_node *);
231static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
232		    struct ieee80211_node *,
233		    const struct ieee80211_bpf_params *params);
234static void	iwn_xmit_task(void *arg0, int pending);
235static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
236		    const struct ieee80211_bpf_params *);
237static int	iwn_transmit(struct ieee80211com *, struct mbuf *);
238static void	iwn_scan_timeout(void *);
239static void	iwn_watchdog(void *);
240static int	iwn_ioctl(struct ieee80211com *, u_long , void *);
241static void	iwn_parent(struct ieee80211com *);
242static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
243static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
244		    int);
245static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
246		    int);
247static int	iwn_set_link_quality(struct iwn_softc *,
248		    struct ieee80211_node *);
249static int	iwn_add_broadcast_node(struct iwn_softc *, int);
250static int	iwn_updateedca(struct ieee80211com *);
251static void	iwn_update_mcast(struct ieee80211com *);
252static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
253static int	iwn_set_critical_temp(struct iwn_softc *);
254static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
255static void	iwn4965_power_calibration(struct iwn_softc *, int);
256static int	iwn4965_set_txpower(struct iwn_softc *,
257		    struct ieee80211_channel *, int);
258static int	iwn5000_set_txpower(struct iwn_softc *,
259		    struct ieee80211_channel *, int);
260static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
261static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
262static int	iwn_get_noise(const struct iwn_rx_general_stats *);
263static int	iwn4965_get_temperature(struct iwn_softc *);
264static int	iwn5000_get_temperature(struct iwn_softc *);
265static int	iwn_init_sensitivity(struct iwn_softc *);
266static void	iwn_collect_noise(struct iwn_softc *,
267		    const struct iwn_rx_general_stats *);
268static int	iwn4965_init_gains(struct iwn_softc *);
269static int	iwn5000_init_gains(struct iwn_softc *);
270static int	iwn4965_set_gains(struct iwn_softc *);
271static int	iwn5000_set_gains(struct iwn_softc *);
272static void	iwn_tune_sensitivity(struct iwn_softc *,
273		    const struct iwn_rx_stats *);
274static void	iwn_save_stats_counters(struct iwn_softc *,
275		    const struct iwn_stats *);
276static int	iwn_send_sensitivity(struct iwn_softc *);
277static void	iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *);
278static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
279static int	iwn_send_btcoex(struct iwn_softc *);
280static int	iwn_send_advanced_btcoex(struct iwn_softc *);
281static int	iwn5000_runtime_calib(struct iwn_softc *);
282static int	iwn_config(struct iwn_softc *);
283static int	iwn_scan(struct iwn_softc *, struct ieee80211vap *,
284		    struct ieee80211_scan_state *, struct ieee80211_channel *);
285static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
286static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
287static int	iwn_ampdu_rx_start(struct ieee80211_node *,
288		    struct ieee80211_rx_ampdu *, int, int, int);
289static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
290		    struct ieee80211_rx_ampdu *);
291static int	iwn_addba_request(struct ieee80211_node *,
292		    struct ieee80211_tx_ampdu *, int, int, int);
293static int	iwn_addba_response(struct ieee80211_node *,
294		    struct ieee80211_tx_ampdu *, int, int, int);
295static int	iwn_ampdu_tx_start(struct ieee80211com *,
296		    struct ieee80211_node *, uint8_t);
297static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
298		    struct ieee80211_tx_ampdu *);
299static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
300		    struct ieee80211_node *, int, uint8_t, uint16_t);
301static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
302		    uint8_t, uint16_t);
303static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
304		    struct ieee80211_node *, int, uint8_t, uint16_t);
305static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
306		    uint8_t, uint16_t);
307static int	iwn5000_query_calibration(struct iwn_softc *);
308static int	iwn5000_send_calibration(struct iwn_softc *);
309static int	iwn5000_send_wimax_coex(struct iwn_softc *);
310static int	iwn5000_crystal_calib(struct iwn_softc *);
311static int	iwn5000_temp_offset_calib(struct iwn_softc *);
312static int	iwn5000_temp_offset_calibv2(struct iwn_softc *);
313static int	iwn4965_post_alive(struct iwn_softc *);
314static int	iwn5000_post_alive(struct iwn_softc *);
315static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
316		    int);
317static int	iwn4965_load_firmware(struct iwn_softc *);
318static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
319		    const uint8_t *, int);
320static int	iwn5000_load_firmware(struct iwn_softc *);
321static int	iwn_read_firmware_leg(struct iwn_softc *,
322		    struct iwn_fw_info *);
323static int	iwn_read_firmware_tlv(struct iwn_softc *,
324		    struct iwn_fw_info *, uint16_t);
325static int	iwn_read_firmware(struct iwn_softc *);
326static void	iwn_unload_firmware(struct iwn_softc *);
327static int	iwn_clock_wait(struct iwn_softc *);
328static int	iwn_apm_init(struct iwn_softc *);
329static void	iwn_apm_stop_master(struct iwn_softc *);
330static void	iwn_apm_stop(struct iwn_softc *);
331static int	iwn4965_nic_config(struct iwn_softc *);
332static int	iwn5000_nic_config(struct iwn_softc *);
333static int	iwn_hw_prepare(struct iwn_softc *);
334static int	iwn_hw_init(struct iwn_softc *);
335static void	iwn_hw_stop(struct iwn_softc *);
336static void	iwn_radio_on(void *, int);
337static void	iwn_radio_off(void *, int);
338static void	iwn_panicked(void *, int);
339static void	iwn_init_locked(struct iwn_softc *);
340static void	iwn_init(struct iwn_softc *);
341static void	iwn_stop_locked(struct iwn_softc *);
342static void	iwn_stop(struct iwn_softc *);
343static void	iwn_scan_start(struct ieee80211com *);
344static void	iwn_scan_end(struct ieee80211com *);
345static void	iwn_set_channel(struct ieee80211com *);
346static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
347static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
348#ifdef	IWN_DEBUG
349static char	*iwn_get_csr_string(int);
350static void	iwn_debug_register(struct iwn_softc *);
351#endif
352
353static device_method_t iwn_methods[] = {
354	/* Device interface */
355	DEVMETHOD(device_probe,		iwn_probe),
356	DEVMETHOD(device_attach,	iwn_attach),
357	DEVMETHOD(device_detach,	iwn_detach),
358	DEVMETHOD(device_shutdown,	iwn_shutdown),
359	DEVMETHOD(device_suspend,	iwn_suspend),
360	DEVMETHOD(device_resume,	iwn_resume),
361
362	DEVMETHOD_END
363};
364
365static driver_t iwn_driver = {
366	"iwn",
367	iwn_methods,
368	sizeof(struct iwn_softc)
369};
370static devclass_t iwn_devclass;
371
372DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
373
374MODULE_VERSION(iwn, 1);
375
376MODULE_DEPEND(iwn, firmware, 1, 1, 1);
377MODULE_DEPEND(iwn, pci, 1, 1, 1);
378MODULE_DEPEND(iwn, wlan, 1, 1, 1);
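
/*
 * The adapter firmware is loaded at run time through firmware(9), using
 * the image name chosen in iwn4965_attach()/iwn_config_specific()
 * (sc->fwname); the matching iwn*fw firmware module therefore has to be
 * available when the interface is first brought up.
 */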
379
380static d_ioctl_t iwn_cdev_ioctl;
381static d_open_t iwn_cdev_open;
382static d_close_t iwn_cdev_close;
383
384static struct cdevsw iwn_cdevsw = {
385	.d_version = D_VERSION,
386	.d_flags = 0,
387	.d_open = iwn_cdev_open,
388	.d_close = iwn_cdev_close,
389	.d_ioctl = iwn_cdev_ioctl,
390	.d_name = "iwn",
391};
392
393static int
394iwn_probe(device_t dev)
395{
396	const struct iwn_ident *ident;
397
398	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
399		if (pci_get_vendor(dev) == ident->vendor &&
400		    pci_get_device(dev) == ident->device) {
401			device_set_desc(dev, ident->name);
402			return (BUS_PROBE_DEFAULT);
403		}
404	}
405	return ENXIO;
406}
407
408static int
409iwn_is_3stream_device(struct iwn_softc *sc)
410{
411	/* XXX for now only 5300, until the 5350 can be tested */
412	if (sc->hw_type == IWN_HW_REV_TYPE_5300)
413		return (1);
414	return (0);
415}
416
417static int
418iwn_attach(device_t dev)
419{
420	struct iwn_softc *sc = device_get_softc(dev);
421	struct ieee80211com *ic;
422	int i, error, rid;
423
424	sc->sc_dev = dev;
425
426#ifdef	IWN_DEBUG
427	error = resource_int_value(device_get_name(sc->sc_dev),
428	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
429	if (error != 0)
430		sc->sc_debug = 0;
431#else
432	sc->sc_debug = 0;
433#endif
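	/*
	 * When IWN_DEBUG is compiled in, the initial debug mask can be
	 * seeded from a device hint (e.g. hint.iwn.0.debug="0x1" in
	 * loader.conf(5)) and adjusted at run time through the
	 * dev.iwn.N.debug sysctl created in iwn_sysctlattach().
	 */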
434
435	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);
436
437	/*
438	 * Get the offset of the PCI Express Capability Structure in PCI
439	 * Configuration Space.
440	 */
441	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
442	if (error != 0) {
443		device_printf(dev, "PCIe capability structure not found!\n");
444		return error;
445	}
446
447	/* Clear device-specific "PCI retry timeout" register (41h). */
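	/*
	 * (Also done in iwn_resume(); as in Linux iwlwifi, this reportedly
	 * keeps PCI Tx retries from interfering with the C3 CPU state.)
	 */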
448	pci_write_config(dev, 0x41, 0, 1);
449
450	/* Enable bus-mastering. */
451	pci_enable_busmaster(dev);
452
453	rid = PCIR_BAR(0);
454	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
455	    RF_ACTIVE);
456	if (sc->mem == NULL) {
457		device_printf(dev, "can't map mem space\n");
458		error = ENOMEM;
459		return error;
460	}
461	sc->sc_st = rman_get_bustag(sc->mem);
462	sc->sc_sh = rman_get_bushandle(sc->mem);
463
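	/*
	 * Prefer a single MSI vector (interrupt resource id 1); if MSI
	 * allocation fails, fall back to the legacy INTx line at rid 0,
	 * which must be shareable.
	 */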
464	i = 1;
465	rid = 0;
466	if (pci_alloc_msi(dev, &i) == 0)
467		rid = 1;
468	/* Install interrupt handler. */
469	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
470	    (rid != 0 ? 0 : RF_SHAREABLE));
471	if (sc->irq == NULL) {
472		device_printf(dev, "can't map interrupt\n");
473		error = ENOMEM;
474		goto fail;
475	}
476
477	IWN_LOCK_INIT(sc);
478
479	/* Read hardware revision and attach. */
480	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
481	    & IWN_HW_REV_TYPE_MASK;
482	sc->subdevice_id = pci_get_subdevice(dev);
483
484	/*
	 * The 4965 and the 5000 series (and later) use different methods,
	 * so set those up first.
487	 */
488	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
489		error = iwn4965_attach(sc, pci_get_device(dev));
490	else
491		error = iwn5000_attach(sc, pci_get_device(dev));
492	if (error != 0) {
493		device_printf(dev, "could not attach device, error %d\n",
494		    error);
495		goto fail;
496	}
497
498	/*
499	 * Next, let's setup the various parameters of each NIC.
500	 */
501	error = iwn_config_specific(sc, pci_get_device(dev));
502	if (error != 0) {
503		device_printf(dev, "could not attach device, error %d\n",
504		    error);
505		goto fail;
506	}
507
508	if ((error = iwn_hw_prepare(sc)) != 0) {
509		device_printf(dev, "hardware not ready, error %d\n", error);
510		goto fail;
511	}
512
513	/* Allocate DMA memory for firmware transfers. */
514	if ((error = iwn_alloc_fwmem(sc)) != 0) {
515		device_printf(dev,
516		    "could not allocate memory for firmware, error %d\n",
517		    error);
518		goto fail;
519	}
520
521	/* Allocate "Keep Warm" page. */
522	if ((error = iwn_alloc_kw(sc)) != 0) {
523		device_printf(dev,
524		    "could not allocate keep warm page, error %d\n", error);
525		goto fail;
526	}
527
528	/* Allocate ICT table for 5000 Series. */
529	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
530	    (error = iwn_alloc_ict(sc)) != 0) {
531		device_printf(dev, "could not allocate ICT table, error %d\n",
532		    error);
533		goto fail;
534	}
535
536	/* Allocate TX scheduler "rings". */
537	if ((error = iwn_alloc_sched(sc)) != 0) {
538		device_printf(dev,
539		    "could not allocate TX scheduler rings, error %d\n", error);
540		goto fail;
541	}
542
543	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
544	for (i = 0; i < sc->ntxqs; i++) {
545		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
546			device_printf(dev,
547			    "could not allocate TX ring %d, error %d\n", i,
548			    error);
549			goto fail;
550		}
551	}
552
553	/* Allocate RX ring. */
554	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
555		device_printf(dev, "could not allocate RX ring, error %d\n",
556		    error);
557		goto fail;
558	}
559
560	/* Clear pending interrupts. */
561	IWN_WRITE(sc, IWN_INT, 0xffffffff);
562
563	ic = &sc->sc_ic;
564	ic->ic_softc = sc;
565	ic->ic_name = device_get_nameunit(dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only one, but not used */
567	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
568
569	/* Set device capabilities. */
570	ic->ic_caps =
571		  IEEE80211_C_STA		/* station mode supported */
572		| IEEE80211_C_MONITOR		/* monitor mode supported */
573#if 0
574		| IEEE80211_C_BGSCAN		/* background scanning */
575#endif
576		| IEEE80211_C_TXPMGT		/* tx power management */
577		| IEEE80211_C_SHSLOT		/* short slot time supported */
578		| IEEE80211_C_WPA
579		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
580#if 0
581		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
582#endif
583		| IEEE80211_C_WME		/* WME */
584		| IEEE80211_C_PMGT		/* Station-side power mgmt */
585		;
586
587	/* Read MAC address, channels, etc from EEPROM. */
588	if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
589		device_printf(dev, "could not read EEPROM, error %d\n",
590		    error);
591		goto fail;
592	}
593
594	/* Count the number of available chains. */
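	/*
	 * Each set bit in the 3-bit antenna mask (A, B, C) adds one chain;
	 * e.g. a TX mask of IWN_ANT_AB would give a 2Tx configuration.
	 */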
595	sc->ntxchains =
596	    ((sc->txchainmask >> 2) & 1) +
597	    ((sc->txchainmask >> 1) & 1) +
598	    ((sc->txchainmask >> 0) & 1);
599	sc->nrxchains =
600	    ((sc->rxchainmask >> 2) & 1) +
601	    ((sc->rxchainmask >> 1) & 1) +
602	    ((sc->rxchainmask >> 0) & 1);
603	if (bootverbose) {
604		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
605		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
606		    ic->ic_macaddr, ":");
607	}
608
609	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
610		ic->ic_rxstream = sc->nrxchains;
611		ic->ic_txstream = sc->ntxchains;
612
613		/*
		 * Some of the 3-antenna devices (e.g., the 4965) only support
		 * 2x2 operation, so correct the number of streams if
		 * it's not a 3-stream device.
617		 */
618		if (! iwn_is_3stream_device(sc)) {
619			if (ic->ic_rxstream > 2)
620				ic->ic_rxstream = 2;
621			if (ic->ic_txstream > 2)
622				ic->ic_txstream = 2;
623		}
624
625		ic->ic_htcaps =
626			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
627			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
628			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
629			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
630#ifdef notyet
631			| IEEE80211_HTCAP_GREENFIELD
632#if IWN_RBUF_SIZE == 8192
633			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
634#else
635			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
636#endif
637#endif
638			/* s/w capabilities */
639			| IEEE80211_HTC_HT		/* HT operation */
640			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
641#ifdef notyet
642			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
643#endif
644			;
645	}
646
647	ieee80211_ifattach(ic);
648	ic->ic_vap_create = iwn_vap_create;
649	ic->ic_ioctl = iwn_ioctl;
650	ic->ic_parent = iwn_parent;
651	ic->ic_vap_delete = iwn_vap_delete;
652	ic->ic_transmit = iwn_transmit;
653	ic->ic_raw_xmit = iwn_raw_xmit;
654	ic->ic_node_alloc = iwn_node_alloc;
655	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
656	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
657	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
658	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
659	sc->sc_addba_request = ic->ic_addba_request;
660	ic->ic_addba_request = iwn_addba_request;
661	sc->sc_addba_response = ic->ic_addba_response;
662	ic->ic_addba_response = iwn_addba_response;
663	sc->sc_addba_stop = ic->ic_addba_stop;
664	ic->ic_addba_stop = iwn_ampdu_tx_stop;
665	ic->ic_newassoc = iwn_newassoc;
666	ic->ic_wme.wme_update = iwn_updateedca;
667	ic->ic_update_mcast = iwn_update_mcast;
668	ic->ic_scan_start = iwn_scan_start;
669	ic->ic_scan_end = iwn_scan_end;
670	ic->ic_set_channel = iwn_set_channel;
671	ic->ic_scan_curchan = iwn_scan_curchan;
672	ic->ic_scan_mindwell = iwn_scan_mindwell;
673	ic->ic_getradiocaps = iwn_getradiocaps;
674	ic->ic_setregdomain = iwn_setregdomain;
675
676	iwn_radiotap_attach(sc);
677
678	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
679	callout_init_mtx(&sc->scan_timeout, &sc->sc_mtx, 0);
680	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
681	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
682	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
683	TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc);
684	TASK_INIT(&sc->sc_xmit_task, 0, iwn_xmit_task, sc);
685
686	mbufq_init(&sc->sc_xmit_queue, 1024);
687
688	sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK,
689	    taskqueue_thread_enqueue, &sc->sc_tq);
690	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq");
691	if (error != 0) {
692		device_printf(dev, "can't start threads, error %d\n", error);
693		goto fail;
694	}
695
696	iwn_sysctlattach(sc);
697
698	/*
699	 * Hook our interrupt after all initialization is complete.
700	 */
701	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
702	    NULL, iwn_intr, sc, &sc->sc_ih);
703	if (error != 0) {
704		device_printf(dev, "can't establish interrupt, error %d\n",
705		    error);
706		goto fail;
707	}
708
709#if 0
710	device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n",
711	    __func__,
712	    sizeof(struct iwn_stats),
713	    sizeof(struct iwn_stats_bt));
714#endif
715
716	if (bootverbose)
717		ieee80211_announce(ic);
718	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
719
720	/* Add debug ioctl right at the end */
721	sc->sc_cdev = make_dev(&iwn_cdevsw, device_get_unit(dev),
722	    UID_ROOT, GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
723	if (sc->sc_cdev == NULL) {
724		device_printf(dev, "failed to create debug character device\n");
725	} else {
726		sc->sc_cdev->si_drv1 = sc;
727	}
728	return 0;
729fail:
730	iwn_detach(dev);
731	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
732	return error;
733}
734
735/*
 * Set up the device-specific configuration based on the PCI device id and
 * subdevice id.  pid: PCI device id.
738 */
739static int
740iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
741{
742
743	switch (pid) {
744/* 4965 series */
745	case IWN_DID_4965_1:
746	case IWN_DID_4965_2:
747	case IWN_DID_4965_3:
748	case IWN_DID_4965_4:
749		sc->base_params = &iwn4965_base_params;
750		sc->limits = &iwn4965_sensitivity_limits;
751		sc->fwname = "iwn4965fw";
		/* Override the chain masks; the ROM is known to be broken. */
753		sc->txchainmask = IWN_ANT_AB;
754		sc->rxchainmask = IWN_ANT_ABC;
755		/* Enable normal btcoex */
756		sc->sc_flags |= IWN_FLAG_BTCOEX;
757		break;
758/* 1000 Series */
759	case IWN_DID_1000_1:
760	case IWN_DID_1000_2:
761		switch(sc->subdevice_id) {
762			case	IWN_SDID_1000_1:
763			case	IWN_SDID_1000_2:
764			case	IWN_SDID_1000_3:
765			case	IWN_SDID_1000_4:
766			case	IWN_SDID_1000_5:
767			case	IWN_SDID_1000_6:
768			case	IWN_SDID_1000_7:
769			case	IWN_SDID_1000_8:
770			case	IWN_SDID_1000_9:
771			case	IWN_SDID_1000_10:
772			case	IWN_SDID_1000_11:
773			case	IWN_SDID_1000_12:
774				sc->limits = &iwn1000_sensitivity_limits;
775				sc->base_params = &iwn1000_base_params;
776				sc->fwname = "iwn1000fw";
777				break;
778			default:
779				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
780				    "0x%04x rev %d not supported (subdevice)\n", pid,
781				    sc->subdevice_id,sc->hw_type);
782				return ENOTSUP;
783		}
784		break;
785/* 6x00 Series */
786	case IWN_DID_6x00_2:
787	case IWN_DID_6x00_4:
788	case IWN_DID_6x00_1:
789	case IWN_DID_6x00_3:
790		sc->fwname = "iwn6000fw";
791		sc->limits = &iwn6000_sensitivity_limits;
792		switch(sc->subdevice_id) {
793			case IWN_SDID_6x00_1:
794			case IWN_SDID_6x00_2:
795			case IWN_SDID_6x00_8:
796				//iwl6000_3agn_cfg
797				sc->base_params = &iwn_6000_base_params;
798				break;
799			case IWN_SDID_6x00_3:
800			case IWN_SDID_6x00_6:
801			case IWN_SDID_6x00_9:
				//iwl6000i_2agn_cfg
803			case IWN_SDID_6x00_4:
804			case IWN_SDID_6x00_7:
805			case IWN_SDID_6x00_10:
806				//iwl6000i_2abg_cfg
807			case IWN_SDID_6x00_5:
808				//iwl6000i_2bg_cfg
809				sc->base_params = &iwn_6000i_base_params;
810				sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
811				sc->txchainmask = IWN_ANT_BC;
812				sc->rxchainmask = IWN_ANT_BC;
813				break;
814			default:
815				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
816				    "0x%04x rev %d not supported (subdevice)\n", pid,
817				    sc->subdevice_id,sc->hw_type);
818				return ENOTSUP;
819		}
820		break;
821/* 6x05 Series */
822	case IWN_DID_6x05_1:
823	case IWN_DID_6x05_2:
824		switch(sc->subdevice_id) {
825			case IWN_SDID_6x05_1:
826			case IWN_SDID_6x05_4:
827			case IWN_SDID_6x05_6:
828				//iwl6005_2agn_cfg
829			case IWN_SDID_6x05_2:
830			case IWN_SDID_6x05_5:
831			case IWN_SDID_6x05_7:
832				//iwl6005_2abg_cfg
833			case IWN_SDID_6x05_3:
834				//iwl6005_2bg_cfg
835			case IWN_SDID_6x05_8:
836			case IWN_SDID_6x05_9:
837				//iwl6005_2agn_sff_cfg
838			case IWN_SDID_6x05_10:
839				//iwl6005_2agn_d_cfg
840			case IWN_SDID_6x05_11:
841				//iwl6005_2agn_mow1_cfg
842			case IWN_SDID_6x05_12:
843				//iwl6005_2agn_mow2_cfg
844				sc->fwname = "iwn6000g2afw";
845				sc->limits = &iwn6000_sensitivity_limits;
846				sc->base_params = &iwn_6000g2_base_params;
847				break;
848			default:
849				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
850				    "0x%04x rev %d not supported (subdevice)\n", pid,
851				    sc->subdevice_id,sc->hw_type);
852				return ENOTSUP;
853		}
854		break;
855/* 6x35 Series */
856	case IWN_DID_6035_1:
857	case IWN_DID_6035_2:
858		switch(sc->subdevice_id) {
859			case IWN_SDID_6035_1:
860			case IWN_SDID_6035_2:
861			case IWN_SDID_6035_3:
862			case IWN_SDID_6035_4:
863			case IWN_SDID_6035_5:
864				sc->fwname = "iwn6000g2bfw";
865				sc->limits = &iwn6235_sensitivity_limits;
866				sc->base_params = &iwn_6235_base_params;
867				break;
868			default:
869				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
870				    "0x%04x rev %d not supported (subdevice)\n", pid,
871				    sc->subdevice_id,sc->hw_type);
872				return ENOTSUP;
873		}
874		break;
875/* 6x50 WiFi/WiMax Series */
876	case IWN_DID_6050_1:
877	case IWN_DID_6050_2:
878		switch(sc->subdevice_id) {
879			case IWN_SDID_6050_1:
880			case IWN_SDID_6050_3:
881			case IWN_SDID_6050_5:
882				//iwl6050_2agn_cfg
883			case IWN_SDID_6050_2:
884			case IWN_SDID_6050_4:
885			case IWN_SDID_6050_6:
886				//iwl6050_2abg_cfg
887				sc->fwname = "iwn6050fw";
888				sc->txchainmask = IWN_ANT_AB;
889				sc->rxchainmask = IWN_ANT_AB;
890				sc->limits = &iwn6000_sensitivity_limits;
891				sc->base_params = &iwn_6050_base_params;
892				break;
893			default:
894				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
895				    "0x%04x rev %d not supported (subdevice)\n", pid,
896				    sc->subdevice_id,sc->hw_type);
897				return ENOTSUP;
898		}
899		break;
900/* 6150 WiFi/WiMax Series */
901	case IWN_DID_6150_1:
902	case IWN_DID_6150_2:
903		switch(sc->subdevice_id) {
904			case IWN_SDID_6150_1:
905			case IWN_SDID_6150_3:
906			case IWN_SDID_6150_5:
907				// iwl6150_bgn_cfg
908			case IWN_SDID_6150_2:
909			case IWN_SDID_6150_4:
910			case IWN_SDID_6150_6:
911				//iwl6150_bg_cfg
912				sc->fwname = "iwn6050fw";
913				sc->limits = &iwn6000_sensitivity_limits;
914				sc->base_params = &iwn_6150_base_params;
915				break;
916			default:
917				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
918				    "0x%04x rev %d not supported (subdevice)\n", pid,
919				    sc->subdevice_id,sc->hw_type);
920				return ENOTSUP;
921		}
922		break;
923/* 6030 Series and 1030 Series */
924	case IWN_DID_x030_1:
925	case IWN_DID_x030_2:
926	case IWN_DID_x030_3:
927	case IWN_DID_x030_4:
928		switch(sc->subdevice_id) {
929			case IWN_SDID_x030_1:
930			case IWN_SDID_x030_3:
931			case IWN_SDID_x030_5:
932			// iwl1030_bgn_cfg
933			case IWN_SDID_x030_2:
934			case IWN_SDID_x030_4:
935			case IWN_SDID_x030_6:
936			//iwl1030_bg_cfg
937			case IWN_SDID_x030_7:
938			case IWN_SDID_x030_10:
939			case IWN_SDID_x030_14:
940			//iwl6030_2agn_cfg
941			case IWN_SDID_x030_8:
942			case IWN_SDID_x030_11:
943			case IWN_SDID_x030_15:
944			// iwl6030_2bgn_cfg
945			case IWN_SDID_x030_9:
946			case IWN_SDID_x030_12:
947			case IWN_SDID_x030_16:
948			// iwl6030_2abg_cfg
949			case IWN_SDID_x030_13:
950			//iwl6030_2bg_cfg
951				sc->fwname = "iwn6000g2bfw";
952				sc->limits = &iwn6000_sensitivity_limits;
953				sc->base_params = &iwn_6000g2b_base_params;
954				break;
955			default:
956				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
957				    "0x%04x rev %d not supported (subdevice)\n", pid,
958				    sc->subdevice_id,sc->hw_type);
959				return ENOTSUP;
960		}
961		break;
962/* 130 Series WiFi */
/* XXX: This series will need rate adjustments;
 * see rx_with_siso_diversity in the Linux iwlwifi driver.
965 */
966	case IWN_DID_130_1:
967	case IWN_DID_130_2:
968		switch(sc->subdevice_id) {
969			case IWN_SDID_130_1:
970			case IWN_SDID_130_3:
971			case IWN_SDID_130_5:
972			//iwl130_bgn_cfg
973			case IWN_SDID_130_2:
974			case IWN_SDID_130_4:
975			case IWN_SDID_130_6:
976			//iwl130_bg_cfg
977				sc->fwname = "iwn6000g2bfw";
978				sc->limits = &iwn6000_sensitivity_limits;
979				sc->base_params = &iwn_6000g2b_base_params;
980				break;
981			default:
982				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
983				    "0x%04x rev %d not supported (subdevice)\n", pid,
984				    sc->subdevice_id,sc->hw_type);
985				return ENOTSUP;
986		}
987		break;
988/* 100 Series WiFi */
989	case IWN_DID_100_1:
990	case IWN_DID_100_2:
991		switch(sc->subdevice_id) {
992			case IWN_SDID_100_1:
993			case IWN_SDID_100_2:
994			case IWN_SDID_100_3:
995			case IWN_SDID_100_4:
996			case IWN_SDID_100_5:
997			case IWN_SDID_100_6:
998				sc->limits = &iwn1000_sensitivity_limits;
999				sc->base_params = &iwn1000_base_params;
1000				sc->fwname = "iwn100fw";
1001				break;
1002			default:
1003				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1004				    "0x%04x rev %d not supported (subdevice)\n", pid,
1005				    sc->subdevice_id,sc->hw_type);
1006				return ENOTSUP;
1007		}
1008		break;
1009
1010/* 105 Series */
/* XXX: This series will need rate adjustments;
 * see rx_with_siso_diversity in the Linux iwlwifi driver.
1013 */
1014	case IWN_DID_105_1:
1015	case IWN_DID_105_2:
1016		switch(sc->subdevice_id) {
1017			case IWN_SDID_105_1:
1018			case IWN_SDID_105_2:
1019			case IWN_SDID_105_3:
1020			//iwl105_bgn_cfg
1021			case IWN_SDID_105_4:
1022			//iwl105_bgn_d_cfg
1023				sc->limits = &iwn2030_sensitivity_limits;
1024				sc->base_params = &iwn2000_base_params;
1025				sc->fwname = "iwn105fw";
1026				break;
1027			default:
1028				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1029				    "0x%04x rev %d not supported (subdevice)\n", pid,
1030				    sc->subdevice_id,sc->hw_type);
1031				return ENOTSUP;
1032		}
1033		break;
1034
1035/* 135 Series */
/* XXX: This series will need rate adjustments;
 * see rx_with_siso_diversity in the Linux iwlwifi driver.
1038 */
1039	case IWN_DID_135_1:
1040	case IWN_DID_135_2:
1041		switch(sc->subdevice_id) {
1042			case IWN_SDID_135_1:
1043			case IWN_SDID_135_2:
1044			case IWN_SDID_135_3:
1045				sc->limits = &iwn2030_sensitivity_limits;
1046				sc->base_params = &iwn2030_base_params;
1047				sc->fwname = "iwn135fw";
1048				break;
1049			default:
1050				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1051				    "0x%04x rev %d not supported (subdevice)\n", pid,
1052				    sc->subdevice_id,sc->hw_type);
1053				return ENOTSUP;
1054		}
1055		break;
1056
1057/* 2x00 Series */
1058	case IWN_DID_2x00_1:
1059	case IWN_DID_2x00_2:
1060		switch(sc->subdevice_id) {
1061			case IWN_SDID_2x00_1:
1062			case IWN_SDID_2x00_2:
1063			case IWN_SDID_2x00_3:
1064			//iwl2000_2bgn_cfg
1065			case IWN_SDID_2x00_4:
1066			//iwl2000_2bgn_d_cfg
1067				sc->limits = &iwn2030_sensitivity_limits;
1068				sc->base_params = &iwn2000_base_params;
1069				sc->fwname = "iwn2000fw";
1070				break;
1071			default:
1072				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1073				    "0x%04x rev %d not supported (subdevice) \n",
1074				    pid, sc->subdevice_id, sc->hw_type);
1075				return ENOTSUP;
1076		}
1077		break;
1078/* 2x30 Series */
1079	case IWN_DID_2x30_1:
1080	case IWN_DID_2x30_2:
1081		switch(sc->subdevice_id) {
1082			case IWN_SDID_2x30_1:
1083			case IWN_SDID_2x30_3:
1084			case IWN_SDID_2x30_5:
			//iwl2030_2bgn_cfg
1086			case IWN_SDID_2x30_2:
1087			case IWN_SDID_2x30_4:
1088			case IWN_SDID_2x30_6:
			//iwl2030_2bg_cfg
1090				sc->limits = &iwn2030_sensitivity_limits;
1091				sc->base_params = &iwn2030_base_params;
1092				sc->fwname = "iwn2030fw";
1093				break;
1094			default:
1095				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1096				    "0x%04x rev %d not supported (subdevice)\n", pid,
1097				    sc->subdevice_id,sc->hw_type);
1098				return ENOTSUP;
1099		}
1100		break;
1101/* 5x00 Series */
1102	case IWN_DID_5x00_1:
1103	case IWN_DID_5x00_2:
1104	case IWN_DID_5x00_3:
1105	case IWN_DID_5x00_4:
1106		sc->limits = &iwn5000_sensitivity_limits;
1107		sc->base_params = &iwn5000_base_params;
1108		sc->fwname = "iwn5000fw";
1109		switch(sc->subdevice_id) {
1110			case IWN_SDID_5x00_1:
1111			case IWN_SDID_5x00_2:
1112			case IWN_SDID_5x00_3:
1113			case IWN_SDID_5x00_4:
1114			case IWN_SDID_5x00_9:
1115			case IWN_SDID_5x00_10:
1116			case IWN_SDID_5x00_11:
1117			case IWN_SDID_5x00_12:
1118			case IWN_SDID_5x00_17:
1119			case IWN_SDID_5x00_18:
1120			case IWN_SDID_5x00_19:
1121			case IWN_SDID_5x00_20:
1122			//iwl5100_agn_cfg
1123				sc->txchainmask = IWN_ANT_B;
1124				sc->rxchainmask = IWN_ANT_AB;
1125				break;
1126			case IWN_SDID_5x00_5:
1127			case IWN_SDID_5x00_6:
1128			case IWN_SDID_5x00_13:
1129			case IWN_SDID_5x00_14:
1130			case IWN_SDID_5x00_21:
1131			case IWN_SDID_5x00_22:
1132			//iwl5100_bgn_cfg
1133				sc->txchainmask = IWN_ANT_B;
1134				sc->rxchainmask = IWN_ANT_AB;
1135				break;
1136			case IWN_SDID_5x00_7:
1137			case IWN_SDID_5x00_8:
1138			case IWN_SDID_5x00_15:
1139			case IWN_SDID_5x00_16:
1140			case IWN_SDID_5x00_23:
1141			case IWN_SDID_5x00_24:
1142			//iwl5100_abg_cfg
1143				sc->txchainmask = IWN_ANT_B;
1144				sc->rxchainmask = IWN_ANT_AB;
1145				break;
1146			case IWN_SDID_5x00_25:
1147			case IWN_SDID_5x00_26:
1148			case IWN_SDID_5x00_27:
1149			case IWN_SDID_5x00_28:
1150			case IWN_SDID_5x00_29:
1151			case IWN_SDID_5x00_30:
1152			case IWN_SDID_5x00_31:
1153			case IWN_SDID_5x00_32:
1154			case IWN_SDID_5x00_33:
1155			case IWN_SDID_5x00_34:
1156			case IWN_SDID_5x00_35:
1157			case IWN_SDID_5x00_36:
1158			//iwl5300_agn_cfg
1159				sc->txchainmask = IWN_ANT_ABC;
1160				sc->rxchainmask = IWN_ANT_ABC;
1161				break;
1162			default:
1163				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1164				    "0x%04x rev %d not supported (subdevice)\n", pid,
1165				    sc->subdevice_id,sc->hw_type);
1166				return ENOTSUP;
1167		}
1168		break;
1169/* 5x50 Series */
1170	case IWN_DID_5x50_1:
1171	case IWN_DID_5x50_2:
1172	case IWN_DID_5x50_3:
1173	case IWN_DID_5x50_4:
1174		sc->limits = &iwn5000_sensitivity_limits;
1175		sc->base_params = &iwn5000_base_params;
1176		sc->fwname = "iwn5000fw";
1177		switch(sc->subdevice_id) {
1178			case IWN_SDID_5x50_1:
1179			case IWN_SDID_5x50_2:
1180			case IWN_SDID_5x50_3:
1181			//iwl5350_agn_cfg
1182				sc->limits = &iwn5000_sensitivity_limits;
1183				sc->base_params = &iwn5000_base_params;
1184				sc->fwname = "iwn5000fw";
1185				break;
1186			case IWN_SDID_5x50_4:
1187			case IWN_SDID_5x50_5:
1188			case IWN_SDID_5x50_8:
1189			case IWN_SDID_5x50_9:
1190			case IWN_SDID_5x50_10:
1191			case IWN_SDID_5x50_11:
1192			//iwl5150_agn_cfg
1193			case IWN_SDID_5x50_6:
1194			case IWN_SDID_5x50_7:
1195			case IWN_SDID_5x50_12:
1196			case IWN_SDID_5x50_13:
1197			//iwl5150_abg_cfg
1198				sc->limits = &iwn5000_sensitivity_limits;
1199				sc->fwname = "iwn5150fw";
1200				sc->base_params = &iwn_5x50_base_params;
1201				break;
1202			default:
1203				device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1204				    "0x%04x rev %d not supported (subdevice)\n", pid,
1205				    sc->subdevice_id,sc->hw_type);
1206				return ENOTSUP;
1207		}
1208		break;
1209	default:
1210		device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
1211		    "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
1212		     sc->hw_type);
1213		return ENOTSUP;
1214	}
1215	return 0;
1216}
1217
1218static int
1219iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
1220{
1221	struct iwn_ops *ops = &sc->ops;
1222
1223	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1224	ops->load_firmware = iwn4965_load_firmware;
1225	ops->read_eeprom = iwn4965_read_eeprom;
1226	ops->post_alive = iwn4965_post_alive;
1227	ops->nic_config = iwn4965_nic_config;
1228	ops->update_sched = iwn4965_update_sched;
1229	ops->get_temperature = iwn4965_get_temperature;
1230	ops->get_rssi = iwn4965_get_rssi;
1231	ops->set_txpower = iwn4965_set_txpower;
1232	ops->init_gains = iwn4965_init_gains;
1233	ops->set_gains = iwn4965_set_gains;
1234	ops->add_node = iwn4965_add_node;
1235	ops->tx_done = iwn4965_tx_done;
1236	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
1237	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
1238	sc->ntxqs = IWN4965_NTXQUEUES;
1239	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
1240	sc->ndmachnls = IWN4965_NDMACHNLS;
1241	sc->broadcast_id = IWN4965_ID_BROADCAST;
1242	sc->rxonsz = IWN4965_RXONSZ;
1243	sc->schedsz = IWN4965_SCHEDSZ;
1244	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
1245	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
1246	sc->fwsz = IWN4965_FWSZ;
1247	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
1248	sc->limits = &iwn4965_sensitivity_limits;
1249	sc->fwname = "iwn4965fw";
	/* Override the chain masks; the ROM is known to be broken. */
1251	sc->txchainmask = IWN_ANT_AB;
1252	sc->rxchainmask = IWN_ANT_ABC;
1253	/* Enable normal btcoex */
1254	sc->sc_flags |= IWN_FLAG_BTCOEX;
1255
1256	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
1257
1258	return 0;
1259}
1260
1261static int
1262iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
1263{
1264	struct iwn_ops *ops = &sc->ops;
1265
1266	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1267
1268	ops->load_firmware = iwn5000_load_firmware;
1269	ops->read_eeprom = iwn5000_read_eeprom;
1270	ops->post_alive = iwn5000_post_alive;
1271	ops->nic_config = iwn5000_nic_config;
1272	ops->update_sched = iwn5000_update_sched;
1273	ops->get_temperature = iwn5000_get_temperature;
1274	ops->get_rssi = iwn5000_get_rssi;
1275	ops->set_txpower = iwn5000_set_txpower;
1276	ops->init_gains = iwn5000_init_gains;
1277	ops->set_gains = iwn5000_set_gains;
1278	ops->add_node = iwn5000_add_node;
1279	ops->tx_done = iwn5000_tx_done;
1280	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
1281	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
1282	sc->ntxqs = IWN5000_NTXQUEUES;
1283	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
1284	sc->ndmachnls = IWN5000_NDMACHNLS;
1285	sc->broadcast_id = IWN5000_ID_BROADCAST;
1286	sc->rxonsz = IWN5000_RXONSZ;
1287	sc->schedsz = IWN5000_SCHEDSZ;
1288	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
1289	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
1290	sc->fwsz = IWN5000_FWSZ;
1291	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
1292	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
1293	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
1294
1295	return 0;
1296}
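
/*
 * All generation-specific behaviour is dispatched through sc->ops; e.g.
 * TX completions delivered via the notification interrupt are handed to
 * sc->ops.tx_done(), which resolves to iwn4965_tx_done() or
 * iwn5000_tx_done() depending on which of the attach routines above ran.
 * (Illustrative; the actual call sites are further down in this file.)
 */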
1297
1298/*
1299 * Attach the interface to 802.11 radiotap.
1300 */
1301static void
1302iwn_radiotap_attach(struct iwn_softc *sc)
1303{
1304
1305	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1306	ieee80211_radiotap_attach(&sc->sc_ic,
1307	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
1308		IWN_TX_RADIOTAP_PRESENT,
1309	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
1310		IWN_RX_RADIOTAP_PRESENT);
1311	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1312}
1313
1314static void
1315iwn_sysctlattach(struct iwn_softc *sc)
1316{
1317#ifdef	IWN_DEBUG
1318	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
1319	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
1320
1321	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1322	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
1323		"control debugging printfs");
1324#endif
1325}
1326
1327static struct ieee80211vap *
1328iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1329    enum ieee80211_opmode opmode, int flags,
1330    const uint8_t bssid[IEEE80211_ADDR_LEN],
1331    const uint8_t mac[IEEE80211_ADDR_LEN])
1332{
1333	struct iwn_softc *sc = ic->ic_softc;
1334	struct iwn_vap *ivp;
1335	struct ieee80211vap *vap;
1336
1337	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
1338		return NULL;
1339
1340	ivp = malloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
1341	vap = &ivp->iv_vap;
1342	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
1343	ivp->ctx = IWN_RXON_BSS_CTX;
1344	vap->iv_bmissthreshold = 10;		/* override default */
1345	/* Override with driver methods. */
1346	ivp->iv_newstate = vap->iv_newstate;
1347	vap->iv_newstate = iwn_newstate;
1348	sc->ivap[IWN_RXON_BSS_CTX] = vap;
1349
1350	ieee80211_ratectl_init(vap);
1351	/* Complete setup. */
1352	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status,
1353	    mac);
1354	ic->ic_opmode = opmode;
1355	return vap;
1356}
1357
1358static void
1359iwn_vap_delete(struct ieee80211vap *vap)
1360{
1361	struct iwn_vap *ivp = IWN_VAP(vap);
1362
1363	ieee80211_ratectl_deinit(vap);
1364	ieee80211_vap_detach(vap);
1365	free(ivp, M_80211_VAP);
1366}
1367
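/*
 * Drain the deferred-transmit queue, releasing the node reference that
 * was stashed in each mbuf's rcvif pointer before freeing the mbuf.
 */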
1368static void
1369iwn_xmit_queue_drain(struct iwn_softc *sc)
1370{
1371	struct mbuf *m;
1372	struct ieee80211_node *ni;
1373
1374	IWN_LOCK_ASSERT(sc);
1375	while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
1376		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
1377		ieee80211_free_node(ni);
1378		m_freem(m);
1379	}
1380}
1381
1382static int
1383iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m)
1384{
1385
1386	IWN_LOCK_ASSERT(sc);
1387	return (mbufq_enqueue(&sc->sc_xmit_queue, m));
1388}
1389
1390static int
1391iwn_detach(device_t dev)
1392{
1393	struct iwn_softc *sc = device_get_softc(dev);
1394	int qid;
1395
1396	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1397
1398	if (sc->sc_ic.ic_softc != NULL) {
1399		/* Free the mbuf queue and node references */
1400		IWN_LOCK(sc);
1401		iwn_xmit_queue_drain(sc);
1402		IWN_UNLOCK(sc);
1403
1404		ieee80211_draintask(&sc->sc_ic, &sc->sc_radioon_task);
1405		ieee80211_draintask(&sc->sc_ic, &sc->sc_radiooff_task);
1406		iwn_stop(sc);
1407
1408		taskqueue_drain_all(sc->sc_tq);
1409		taskqueue_free(sc->sc_tq);
1410
1411		callout_drain(&sc->watchdog_to);
1412		callout_drain(&sc->scan_timeout);
1413		callout_drain(&sc->calib_to);
1414		ieee80211_ifdetach(&sc->sc_ic);
1415	}
1416
1417	/* Uninstall interrupt handler. */
1418	if (sc->irq != NULL) {
1419		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
1420		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
1421		    sc->irq);
1422		pci_release_msi(dev);
1423	}
1424
1425	/* Free DMA resources. */
1426	iwn_free_rx_ring(sc, &sc->rxq);
1427	for (qid = 0; qid < sc->ntxqs; qid++)
1428		iwn_free_tx_ring(sc, &sc->txq[qid]);
1429	iwn_free_sched(sc);
1430	iwn_free_kw(sc);
1431	if (sc->ict != NULL)
1432		iwn_free_ict(sc);
1433	iwn_free_fwmem(sc);
1434
1435	if (sc->mem != NULL)
1436		bus_release_resource(dev, SYS_RES_MEMORY,
1437		    rman_get_rid(sc->mem), sc->mem);
1438
1439	if (sc->sc_cdev) {
1440		destroy_dev(sc->sc_cdev);
1441		sc->sc_cdev = NULL;
1442	}
1443
1444	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
1445	IWN_LOCK_DESTROY(sc);
1446	return 0;
1447}
1448
1449static int
1450iwn_shutdown(device_t dev)
1451{
1452	struct iwn_softc *sc = device_get_softc(dev);
1453
1454	iwn_stop(sc);
1455	return 0;
1456}
1457
1458static int
1459iwn_suspend(device_t dev)
1460{
1461	struct iwn_softc *sc = device_get_softc(dev);
1462
1463	ieee80211_suspend_all(&sc->sc_ic);
1464	return 0;
1465}
1466
1467static int
1468iwn_resume(device_t dev)
1469{
1470	struct iwn_softc *sc = device_get_softc(dev);
1471
1472	/* Clear device-specific "PCI retry timeout" register (41h). */
1473	pci_write_config(dev, 0x41, 0, 1);
1474
1475	ieee80211_resume_all(&sc->sc_ic);
1476	return 0;
1477}
1478
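/*
 * Ask the power-management logic for exclusive ("MAC") access to the NIC
 * and poll GP_CNTRL until the grant bit is set (up to ~10ms).  Calls must
 * be balanced with iwn_nic_unlock().
 */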
1479static int
1480iwn_nic_lock(struct iwn_softc *sc)
1481{
1482	int ntries;
1483
1484	/* Request exclusive access to NIC. */
1485	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1486
1487	/* Spin until we actually get the lock. */
1488	for (ntries = 0; ntries < 1000; ntries++) {
1489		if ((IWN_READ(sc, IWN_GP_CNTRL) &
1490		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1491		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
1492			return 0;
1493		DELAY(10);
1494	}
1495	return ETIMEDOUT;
1496}
1497
1498static __inline void
1499iwn_nic_unlock(struct iwn_softc *sc)
1500{
1501	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1502}
1503
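/*
 * Periphery ("prph") registers are reached indirectly through an
 * address/data register pair; the barrier keeps the address write and the
 * subsequent data access ordered.  Callers are expected to hold the NIC
 * lock (iwn_nic_lock()) around these accessors.
 */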
1504static __inline uint32_t
1505iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1506{
1507	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1508	IWN_BARRIER_READ_WRITE(sc);
1509	return IWN_READ(sc, IWN_PRPH_RDATA);
1510}
1511
1512static __inline void
1513iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1514{
1515	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1516	IWN_BARRIER_WRITE(sc);
1517	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1518}
1519
1520static __inline void
1521iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1522{
1523	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1524}
1525
1526static __inline void
1527iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1528{
1529	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1530}
1531
1532static __inline void
1533iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1534    const uint32_t *data, int count)
1535{
1536	for (; count > 0; count--, data++, addr += 4)
1537		iwn_prph_write(sc, addr, *data);
1538}
1539
1540static __inline uint32_t
1541iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1542{
1543	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1544	IWN_BARRIER_READ_WRITE(sc);
1545	return IWN_READ(sc, IWN_MEM_RDATA);
1546}
1547
1548static __inline void
1549iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1550{
1551	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1552	IWN_BARRIER_WRITE(sc);
1553	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1554}
1555
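/*
 * Write a 16-bit value into device memory, which is only 32-bit word
 * addressable: read the containing word, merge the half-word into its
 * upper or lower 16 bits depending on the address, and write it back
 * (e.g. a write to offset 0x1006 modifies the upper half of the word
 * at 0x1004).
 */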
1556static __inline void
1557iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1558{
1559	uint32_t tmp;
1560
1561	tmp = iwn_mem_read(sc, addr & ~3);
1562	if (addr & 3)
1563		tmp = (tmp & 0x0000ffff) | data << 16;
1564	else
1565		tmp = (tmp & 0xffff0000) | data;
1566	iwn_mem_write(sc, addr & ~3, tmp);
1567}
1568
1569static __inline void
1570iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1571    int count)
1572{
1573	for (; count > 0; count--, addr += 4)
1574		*data++ = iwn_mem_read(sc, addr);
1575}
1576
1577static __inline void
1578iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1579    int count)
1580{
1581	for (; count > 0; count--, addr += 4)
1582		iwn_mem_write(sc, addr, val);
1583}
1584
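/*
 * Take the EEPROM access lock (a semaphore also used by the firmware):
 * the request bit is (re)asserted up to 100 times, each time polling up
 * to ~1ms for the grant, and is dropped again by iwn_eeprom_unlock().
 */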
1585static int
1586iwn_eeprom_lock(struct iwn_softc *sc)
1587{
1588	int i, ntries;
1589
1590	for (i = 0; i < 100; i++) {
1591		/* Request exclusive access to EEPROM. */
1592		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1593		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1594
1595		/* Spin until we actually get the lock. */
1596		for (ntries = 0; ntries < 100; ntries++) {
1597			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1598			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1599				return 0;
1600			DELAY(10);
1601		}
1602	}
1603	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
1604	return ETIMEDOUT;
1605}
1606
1607static __inline void
1608iwn_eeprom_unlock(struct iwn_softc *sc)
1609{
1610	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1611}
1612
1613/*
1614 * Initialize access by host to One Time Programmable ROM.
1615 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1616 */
1617static int
1618iwn_init_otprom(struct iwn_softc *sc)
1619{
1620	uint16_t prev, base, next;
1621	int count, error;
1622
1623	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1624
1625	/* Wait for clock stabilization before accessing prph. */
1626	if ((error = iwn_clock_wait(sc)) != 0)
1627		return error;
1628
1629	if ((error = iwn_nic_lock(sc)) != 0)
1630		return error;
1631	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1632	DELAY(5);
1633	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1634	iwn_nic_unlock(sc);
1635
1636	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1637	if (sc->base_params->shadow_ram_support) {
1638		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1639		    IWN_RESET_LINK_PWR_MGMT_DIS);
1640	}
1641	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1642	/* Clear ECC status. */
1643	IWN_SETBITS(sc, IWN_OTP_GP,
1644	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1645
1646	/*
	 * Find the block before the last block (it contains the EEPROM image)
1648	 * for HW without OTP shadow RAM.
1649	 */
1650	if (! sc->base_params->shadow_ram_support) {
1651		/* Switch to absolute addressing mode. */
1652		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1653		base = prev = 0;
1654		for (count = 0; count < sc->base_params->max_ll_items;
1655		    count++) {
1656			error = iwn_read_prom_data(sc, base, &next, 2);
1657			if (error != 0)
1658				return error;
1659			if (next == 0)	/* End of linked-list. */
1660				break;
1661			prev = base;
1662			base = le16toh(next);
1663		}
1664		if (count == 0 || count == sc->base_params->max_ll_items)
1665			return EIO;
1666		/* Skip "next" word. */
1667		sc->prom_base = prev + 1;
1668	}
1669
1670	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1671
1672	return 0;
1673}
1674
1675static int
1676iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1677{
1678	uint8_t *out = data;
1679	uint32_t val, tmp;
1680	int ntries;
1681
1682	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1683
1684	addr += sc->prom_base;
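	/*
	 * The ROM is read 16 bits at a time; the data shows up in the
	 * upper half of the EEPROM register once READ_VALID is set.
	 */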
1685	for (; count > 0; count -= 2, addr++) {
1686		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1687		for (ntries = 0; ntries < 10; ntries++) {
1688			val = IWN_READ(sc, IWN_EEPROM);
1689			if (val & IWN_EEPROM_READ_VALID)
1690				break;
1691			DELAY(5);
1692		}
1693		if (ntries == 10) {
1694			device_printf(sc->sc_dev,
1695			    "timeout reading ROM at 0x%x\n", addr);
1696			return ETIMEDOUT;
1697		}
1698		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1699			/* OTPROM, check for ECC errors. */
1700			tmp = IWN_READ(sc, IWN_OTP_GP);
1701			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1702				device_printf(sc->sc_dev,
1703				    "OTPROM ECC error at 0x%x\n", addr);
1704				return EIO;
1705			}
1706			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1707				/* Correctable ECC error, clear bit. */
1708				IWN_SETBITS(sc, IWN_OTP_GP,
1709				    IWN_OTP_GP_ECC_CORR_STTS);
1710			}
1711		}
1712		*out++ = val >> 16;
1713		if (count > 1)
1714			*out++ = val >> 24;
1715	}
1716
1717	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1718
1719	return 0;
1720}
1721
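/*
 * bus_dmamap_load() callback: record the bus address of the (single)
 * DMA segment for the caller.
 */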
1722static void
1723iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1724{
1725	if (error != 0)
1726		return;
1727	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1728	*(bus_addr_t *)arg = segs[0].ds_addr;
1729}
1730
1731static int
1732iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1733    void **kvap, bus_size_t size, bus_size_t alignment)
1734{
1735	int error;
1736
1737	dma->tag = NULL;
1738	dma->size = size;
1739
1740	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1741	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1742	    1, size, 0, NULL, NULL, &dma->tag);
1743	if (error != 0)
1744		goto fail;
1745
1746	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1747	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1748	if (error != 0)
1749		goto fail;
1750
1751	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1752	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1753	if (error != 0)
1754		goto fail;
1755
1756	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1757
1758	if (kvap != NULL)
1759		*kvap = dma->vaddr;
1760
1761	return 0;
1762
1763fail:	iwn_dma_contig_free(dma);
1764	return error;
1765}
1766
1767static void
1768iwn_dma_contig_free(struct iwn_dma_info *dma)
1769{
1770	if (dma->vaddr != NULL) {
1771		bus_dmamap_sync(dma->tag, dma->map,
1772		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1773		bus_dmamap_unload(dma->tag, dma->map);
1774		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1775		dma->vaddr = NULL;
1776	}
1777	if (dma->tag != NULL) {
1778		bus_dma_tag_destroy(dma->tag);
1779		dma->tag = NULL;
1780	}
1781}
1782
1783static int
1784iwn_alloc_sched(struct iwn_softc *sc)
1785{
1786	/* TX scheduler rings must be aligned on a 1KB boundary. */
1787	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1788	    sc->schedsz, 1024);
1789}
1790
1791static void
1792iwn_free_sched(struct iwn_softc *sc)
1793{
1794	iwn_dma_contig_free(&sc->sched_dma);
1795}
1796
1797static int
1798iwn_alloc_kw(struct iwn_softc *sc)
1799{
1800	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1801	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1802}
1803
1804static void
1805iwn_free_kw(struct iwn_softc *sc)
1806{
1807	iwn_dma_contig_free(&sc->kw_dma);
1808}
1809
1810static int
1811iwn_alloc_ict(struct iwn_softc *sc)
1812{
1813	/* ICT table must be aligned on a 4KB boundary. */
1814	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1815	    IWN_ICT_SIZE, 4096);
1816}
1817
1818static void
1819iwn_free_ict(struct iwn_softc *sc)
1820{
1821	iwn_dma_contig_free(&sc->ict_dma);
1822}
1823
1824static int
1825iwn_alloc_fwmem(struct iwn_softc *sc)
1826{
1827	/* Must be aligned on a 16-byte boundary. */
1828	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1829}
1830
1831static void
1832iwn_free_fwmem(struct iwn_softc *sc)
1833{
1834	iwn_dma_contig_free(&sc->fw_dma);
1835}
1836
1837static int
1838iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1839{
1840	bus_size_t size;
1841	int i, error;
1842
1843	ring->cur = 0;
1844
1845	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1846
1847	/* Allocate RX descriptors (256-byte aligned). */
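	/* Each descriptor is just the 32-bit (paddr >> 8) of an RX buffer. */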
1848	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1849	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1850	    size, 256);
1851	if (error != 0) {
1852		device_printf(sc->sc_dev,
1853		    "%s: could not allocate RX ring DMA memory, error %d\n",
1854		    __func__, error);
1855		goto fail;
1856	}
1857
1858	/* Allocate RX status area (16-byte aligned). */
1859	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1860	    sizeof (struct iwn_rx_status), 16);
1861	if (error != 0) {
1862		device_printf(sc->sc_dev,
1863		    "%s: could not allocate RX status DMA memory, error %d\n",
1864		    __func__, error);
1865		goto fail;
1866	}
1867
1868	/* Create RX buffer DMA tag. */
1869	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1870	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1871	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1872	if (error != 0) {
1873		device_printf(sc->sc_dev,
1874		    "%s: could not create RX buf DMA tag, error %d\n",
1875		    __func__, error);
1876		goto fail;
1877	}
1878
1879	/*
1880	 * Allocate and map RX buffers.
1881	 */
1882	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1883		struct iwn_rx_data *data = &ring->data[i];
1884		bus_addr_t paddr;
1885
1886		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1887		if (error != 0) {
1888			device_printf(sc->sc_dev,
1889			    "%s: could not create RX buf DMA map, error %d\n",
1890			    __func__, error);
1891			goto fail;
1892		}
1893
1894		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1895		    IWN_RBUF_SIZE);
1896		if (data->m == NULL) {
1897			device_printf(sc->sc_dev,
1898			    "%s: could not allocate RX mbuf\n", __func__);
1899			error = ENOBUFS;
1900			goto fail;
1901		}
1902
1903		error = bus_dmamap_load(ring->data_dmat, data->map,
1904		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1905		    &paddr, BUS_DMA_NOWAIT);
1906		if (error != 0 && error != EFBIG) {
1907			device_printf(sc->sc_dev,
1908			    "%s: can't map mbuf, error %d\n", __func__,
1909			    error);
1910			goto fail;
1911		}
1912
1913		bus_dmamap_sync(ring->data_dmat, data->map,
1914		    BUS_DMASYNC_PREREAD);
1915
1916		/* Set physical address of RX buffer (256-byte aligned). */
1917		ring->desc[i] = htole32(paddr >> 8);
1918	}
1919
1920	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1921	    BUS_DMASYNC_PREWRITE);
1922
1923	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
1924
1925	return 0;
1926
1927fail:	iwn_free_rx_ring(sc, ring);
1928
1929	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
1930
1931	return error;
1932}
1933
1934static void
1935iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1936{
1937	int ntries;
1938
1939	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
1940
1941	if (iwn_nic_lock(sc) == 0) {
1942		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1943		for (ntries = 0; ntries < 1000; ntries++) {
1944			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1945			    IWN_FH_RX_STATUS_IDLE)
1946				break;
1947			DELAY(10);
1948		}
1949		iwn_nic_unlock(sc);
1950	}
1951	ring->cur = 0;
1952	sc->last_rx_valid = 0;
1953}
1954
1955static void
1956iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1957{
1958	int i;
1959
1960	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1961
1962	iwn_dma_contig_free(&ring->desc_dma);
1963	iwn_dma_contig_free(&ring->stat_dma);
1964
1965	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1966		struct iwn_rx_data *data = &ring->data[i];
1967
1968		if (data->m != NULL) {
1969			bus_dmamap_sync(ring->data_dmat, data->map,
1970			    BUS_DMASYNC_POSTREAD);
1971			bus_dmamap_unload(ring->data_dmat, data->map);
1972			m_freem(data->m);
1973			data->m = NULL;
1974		}
1975		if (data->map != NULL)
1976			bus_dmamap_destroy(ring->data_dmat, data->map);
1977	}
1978	if (ring->data_dmat != NULL) {
1979		bus_dma_tag_destroy(ring->data_dmat);
1980		ring->data_dmat = NULL;
1981	}
1982}
1983
1984static int
1985iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1986{
1987	bus_addr_t paddr;
1988	bus_size_t size;
1989	int i, error;
1990
1991	ring->qid = qid;
1992	ring->queued = 0;
1993	ring->cur = 0;
1994
1995	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1996
1997	/* Allocate TX descriptors (256-byte aligned). */
1998	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1999	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
2000	    size, 256);
2001	if (error != 0) {
2002		device_printf(sc->sc_dev,
2003		    "%s: could not allocate TX ring DMA memory, error %d\n",
2004		    __func__, error);
2005		goto fail;
2006	}
2007
2008	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
2009	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
2010	    size, 4);
2011	if (error != 0) {
2012		device_printf(sc->sc_dev,
2013		    "%s: could not allocate TX cmd DMA memory, error %d\n",
2014		    __func__, error);
2015		goto fail;
2016	}
2017
2018	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
2019	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
2020	    IWN_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
2021	if (error != 0) {
2022		device_printf(sc->sc_dev,
2023		    "%s: could not create TX buf DMA tag, error %d\n",
2024		    __func__, error);
2025		goto fail;
2026	}
2027
2028	paddr = ring->cmd_dma.paddr;
2029	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2030		struct iwn_tx_data *data = &ring->data[i];
2031
2032		data->cmd_paddr = paddr;
2033		data->scratch_paddr = paddr + 12;
2034		paddr += sizeof (struct iwn_tx_cmd);
2035
2036		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2037		if (error != 0) {
2038			device_printf(sc->sc_dev,
2039			    "%s: could not create TX buf DMA map, error %d\n",
2040			    __func__, error);
2041			goto fail;
2042		}
2043	}
2044
2045	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2046
2047	return 0;
2048
2049fail:	iwn_free_tx_ring(sc, ring);
2050	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2051	return error;
2052}
2053
2054static void
2055iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2056{
2057	int i;
2058
2059	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
2060
2061	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2062		struct iwn_tx_data *data = &ring->data[i];
2063
2064		if (data->m != NULL) {
2065			bus_dmamap_sync(ring->data_dmat, data->map,
2066			    BUS_DMASYNC_POSTWRITE);
2067			bus_dmamap_unload(ring->data_dmat, data->map);
2068			m_freem(data->m);
2069			data->m = NULL;
2070		}
2071		if (data->ni != NULL) {
2072			ieee80211_free_node(data->ni);
2073			data->ni = NULL;
2074		}
2075	}
2076	/* Clear TX descriptors. */
2077	memset(ring->desc, 0, ring->desc_dma.size);
2078	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2079	    BUS_DMASYNC_PREWRITE);
2080	sc->qfullmsk &= ~(1 << ring->qid);
2081	ring->queued = 0;
2082	ring->cur = 0;
2083}
2084
2085static void
2086iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2087{
2088	int i;
2089
2090	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
2091
2092	iwn_dma_contig_free(&ring->desc_dma);
2093	iwn_dma_contig_free(&ring->cmd_dma);
2094
2095	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2096		struct iwn_tx_data *data = &ring->data[i];
2097
2098		if (data->m != NULL) {
2099			bus_dmamap_sync(ring->data_dmat, data->map,
2100			    BUS_DMASYNC_POSTWRITE);
2101			bus_dmamap_unload(ring->data_dmat, data->map);
2102			m_freem(data->m);
2103		}
2104		if (data->map != NULL)
2105			bus_dmamap_destroy(ring->data_dmat, data->map);
2106	}
2107	if (ring->data_dmat != NULL) {
2108		bus_dma_tag_destroy(ring->data_dmat);
2109		ring->data_dmat = NULL;
2110	}
2111}
2112
2113static void
2114iwn5000_ict_reset(struct iwn_softc *sc)
2115{
2116	/* Disable interrupts. */
2117	IWN_WRITE(sc, IWN_INT_MASK, 0);
2118
2119	/* Reset ICT table. */
2120	memset(sc->ict, 0, IWN_ICT_SIZE);
2121	sc->ict_cur = 0;
2122
2123	bus_dmamap_sync(sc->ict_dma.tag, sc->ict_dma.map,
2124	    BUS_DMASYNC_PREWRITE);
2125
2126	/* Set physical address of ICT table (4KB aligned). */
2127	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
2128	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
2129	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
2130
2131	/* Enable periodic RX interrupt. */
2132	sc->int_mask |= IWN_INT_RX_PERIODIC;
2133	/* Switch to ICT interrupt mode in driver. */
2134	sc->sc_flags |= IWN_FLAG_USE_ICT;
2135
2136	/* Re-enable interrupts. */
2137	IWN_WRITE(sc, IWN_INT, 0xffffffff);
2138	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2139}
2140
2141static int
2142iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
2143{
2144	struct iwn_ops *ops = &sc->ops;
2145	uint16_t val;
2146	int error;
2147
2148	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2149
2150	/* Check whether adapter has an EEPROM or an OTPROM. */
2151	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
2152	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
2153		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
2154	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
2155	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
2156
2157	/* Adapter has to be powered on for EEPROM access to work. */
2158	if ((error = iwn_apm_init(sc)) != 0) {
2159		device_printf(sc->sc_dev,
2160		    "%s: could not power ON adapter, error %d\n", __func__,
2161		    error);
2162		return error;
2163	}
2164
2165	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
2166		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
2167		return EIO;
2168	}
2169	if ((error = iwn_eeprom_lock(sc)) != 0) {
2170		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
2171		    __func__, error);
2172		return error;
2173	}
2174	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
2175		if ((error = iwn_init_otprom(sc)) != 0) {
2176			device_printf(sc->sc_dev,
2177			    "%s: could not initialize OTPROM, error %d\n",
2178			    __func__, error);
2179			return error;
2180		}
2181	}
2182
2183	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
2184	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
2185	/* Check if HT support is bonded out. */
2186	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
2187		sc->sc_flags |= IWN_FLAG_HAS_11N;
2188
2189	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
2190	sc->rfcfg = le16toh(val);
2191	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
2192	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
2193	if (sc->txchainmask == 0)
2194		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
2195	if (sc->rxchainmask == 0)
2196		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
2197
2198	/* Read MAC address. */
2199	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
2200
2201	/* Read adapter-specific information from EEPROM. */
2202	ops->read_eeprom(sc);
2203
2204	iwn_apm_stop(sc);	/* Power OFF adapter. */
2205
2206	iwn_eeprom_unlock(sc);
2207
2208	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2209
2210	return 0;
2211}
2212
2213static void
2214iwn4965_read_eeprom(struct iwn_softc *sc)
2215{
2216	uint32_t addr;
2217	uint16_t val;
2218	int i;
2219
2220	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2221
2222	/* Read regulatory domain (4 ASCII characters). */
2223	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
2224
2225	/* Read the list of authorized channels (20MHz & 40MHz). */
2226	for (i = 0; i < IWN_NBANDS - 1; i++) {
2227		addr = iwn4965_regulatory_bands[i];
2228		iwn_read_eeprom_channels(sc, i, addr);
2229	}
2230
2231	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
2232	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
2233	sc->maxpwr2GHz = val & 0xff;
2234	sc->maxpwr5GHz = val >> 8;
2235	/* Check that EEPROM values are within valid range. */
2236	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
2237		sc->maxpwr5GHz = 38;
2238	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
2239		sc->maxpwr2GHz = 38;
2240	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
2241	    sc->maxpwr2GHz, sc->maxpwr5GHz);
2242
2243	/* Read samples for each TX power group. */
2244	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
2245	    sizeof sc->bands);
2246
2247	/* Read voltage at which samples were taken. */
2248	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
2249	sc->eeprom_voltage = (int16_t)le16toh(val);
2250	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
2251	    sc->eeprom_voltage);
2252
2253#ifdef IWN_DEBUG
2254	/* Print samples. */
2255	if (sc->sc_debug & IWN_DEBUG_ANY) {
2256		for (i = 0; i < IWN_NBANDS - 1; i++)
2257			iwn4965_print_power_group(sc, i);
2258	}
2259#endif
2260
2261	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2262}
2263
2264#ifdef IWN_DEBUG
2265static void
2266iwn4965_print_power_group(struct iwn_softc *sc, int i)
2267{
2268	struct iwn4965_eeprom_band *band = &sc->bands[i];
2269	struct iwn4965_eeprom_chan_samples *chans = band->chans;
2270	int j, c;
2271
2272	printf("===band %d===\n", i);
2273	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
2274	printf("chan1 num=%d\n", chans[0].num);
2275	for (c = 0; c < 2; c++) {
2276		for (j = 0; j < IWN_NSAMPLES; j++) {
2277			printf("chain %d, sample %d: temp=%d gain=%d "
2278			    "power=%d pa_det=%d\n", c, j,
2279			    chans[0].samples[c][j].temp,
2280			    chans[0].samples[c][j].gain,
2281			    chans[0].samples[c][j].power,
2282			    chans[0].samples[c][j].pa_det);
2283		}
2284	}
2285	printf("chan2 num=%d\n", chans[1].num);
2286	for (c = 0; c < 2; c++) {
2287		for (j = 0; j < IWN_NSAMPLES; j++) {
2288			printf("chain %d, sample %d: temp=%d gain=%d "
2289			    "power=%d pa_det=%d\n", c, j,
2290			    chans[1].samples[c][j].temp,
2291			    chans[1].samples[c][j].gain,
2292			    chans[1].samples[c][j].power,
2293			    chans[1].samples[c][j].pa_det);
2294		}
2295	}
2296}
2297#endif
2298
2299static void
2300iwn5000_read_eeprom(struct iwn_softc *sc)
2301{
2302	struct iwn5000_eeprom_calib_hdr hdr;
2303	int32_t volt;
2304	uint32_t base, addr;
2305	uint16_t val;
2306	int i;
2307
2308	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2309
2310	/* Read regulatory domain (4 ASCII characters). */
2311	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2312	base = le16toh(val);
2313	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
2314	    sc->eeprom_domain, 4);
2315
2316	/* Read the list of authorized channels (20MHz & 40MHz). */
2317	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
2319		iwn_read_eeprom_channels(sc, i, addr);
2320	}
2321
2322	/* Read enhanced TX power information for 6000 Series. */
2323	if (sc->base_params->enhanced_TX_power)
2324		iwn_read_eeprom_enhinfo(sc);
2325
2326	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
2327	base = le16toh(val);
2328	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
2329	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2330	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
2331	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
2332	sc->calib_ver = hdr.version;
2333
2334	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
2335		sc->eeprom_voltage = le16toh(hdr.volt);
2336		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp_high = le16toh(val);
2338		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2339		sc->eeprom_temp = le16toh(val);
2340	}
2341
2342	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
2343		/* Compute temperature offset. */
2344		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2345		sc->eeprom_temp = le16toh(val);
2346		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2347		volt = le16toh(val);
2348		sc->temp_off = sc->eeprom_temp - (volt / -5);
2349		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
2350		    sc->eeprom_temp, volt, sc->temp_off);
2351	} else {
2352		/* Read crystal calibration. */
2353		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
2354		    &sc->eeprom_crystal, sizeof (uint32_t));
2355		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
2356		    le32toh(sc->eeprom_crystal));
2357	}
2358
2359	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2360
2361}
2362
2363/*
2364 * Translate EEPROM flags to net80211.
2365 */
2366static uint32_t
2367iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
2368{
2369	uint32_t nflags;
2370
2371	nflags = 0;
2372	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
2373		nflags |= IEEE80211_CHAN_PASSIVE;
2374	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
2375		nflags |= IEEE80211_CHAN_NOADHOC;
2376	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
2377		nflags |= IEEE80211_CHAN_DFS;
2378		/* XXX apparently IBSS may still be marked */
2379		nflags |= IEEE80211_CHAN_NOADHOC;
2380	}
2381
2382	return nflags;
2383}
2384
2385static void
2386iwn_read_eeprom_band(struct iwn_softc *sc, int n, int maxchans, int *nchans,
2387    struct ieee80211_channel chans[])
2388{
2389	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2390	const struct iwn_chan_band *band = &iwn_bands[n];
2391	uint8_t bands[IEEE80211_MODE_BYTES];
2392	uint8_t chan;
2393	int i, error, nflags;
2394
2395	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2396
2397	memset(bands, 0, sizeof(bands));
2398	if (n == 0) {
2399		setbit(bands, IEEE80211_MODE_11B);
2400		setbit(bands, IEEE80211_MODE_11G);
2401		if (sc->sc_flags & IWN_FLAG_HAS_11N)
2402			setbit(bands, IEEE80211_MODE_11NG);
2403	} else {
2404		setbit(bands, IEEE80211_MODE_11A);
2405		if (sc->sc_flags & IWN_FLAG_HAS_11N)
2406			setbit(bands, IEEE80211_MODE_11NA);
2407	}
2408
2409	for (i = 0; i < band->nchan; i++) {
2410		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2411			DPRINTF(sc, IWN_DEBUG_RESET,
2412			    "skip chan %d flags 0x%x maxpwr %d\n",
2413			    band->chan[i], channels[i].flags,
2414			    channels[i].maxpwr);
2415			continue;
2416		}
2417
2418		chan = band->chan[i];
2419		nflags = iwn_eeprom_channel_flags(&channels[i]);
2420		error = ieee80211_add_channel(chans, maxchans, nchans,
2421		    chan, 0, channels[i].maxpwr, nflags, bands);
2422		if (error != 0)
2423			break;
2424
2425		/* Save maximum allowed TX power for this channel. */
2426		/* XXX wrong */
2427		sc->maxpwr[chan] = channels[i].maxpwr;
2428
2429		DPRINTF(sc, IWN_DEBUG_RESET,
2430		    "add chan %d flags 0x%x maxpwr %d\n", chan,
2431		    channels[i].flags, channels[i].maxpwr);
2432	}
2433
2434	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2435
2436}
2437
2438static void
2439iwn_read_eeprom_ht40(struct iwn_softc *sc, int n, int maxchans, int *nchans,
2440    struct ieee80211_channel chans[])
2441{
2442	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2443	const struct iwn_chan_band *band = &iwn_bands[n];
2444	uint8_t chan;
2445	int i, error, nflags;
2446
2447	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
2448
2449	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
2450		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
2451		return;
2452	}
2453
2454	for (i = 0; i < band->nchan; i++) {
2455		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2456			DPRINTF(sc, IWN_DEBUG_RESET,
2457			    "skip chan %d flags 0x%x maxpwr %d\n",
2458			    band->chan[i], channels[i].flags,
2459			    channels[i].maxpwr);
2460			continue;
2461		}
2462
2463		chan = band->chan[i];
2464		nflags = iwn_eeprom_channel_flags(&channels[i]);
2465		nflags |= (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A);
2466		error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
2467		    chan, channels[i].maxpwr, nflags);
2468		switch (error) {
2469		case EINVAL:
2470			device_printf(sc->sc_dev,
2471			    "%s: no entry for channel %d\n", __func__, chan);
2472			continue;
2473		case ENOENT:
2474			DPRINTF(sc, IWN_DEBUG_RESET,
2475			    "%s: skip chan %d, extension channel not found\n",
2476			    __func__, chan);
2477			continue;
2478		case ENOBUFS:
2479			device_printf(sc->sc_dev,
2480			    "%s: channel table is full!\n", __func__);
2481			break;
2482		case 0:
2483			DPRINTF(sc, IWN_DEBUG_RESET,
2484			    "add ht40 chan %d flags 0x%x maxpwr %d\n",
2485			    chan, channels[i].flags, channels[i].maxpwr);
2486			/* FALLTHROUGH */
2487		default:
2488			break;
2489		}
2490	}
2491
2492	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2493
2494}
2495
2496static void
2497iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2498{
2499	struct ieee80211com *ic = &sc->sc_ic;
2500
2501	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
2502	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
2503
2504	if (n < 5) {
2505		iwn_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans,
2506		    ic->ic_channels);
2507	} else {
2508		iwn_read_eeprom_ht40(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans,
2509		    ic->ic_channels);
2510	}
2511	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
2512}
2513
2514static struct iwn_eeprom_chan *
2515iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
2516{
2517	int band, chan, i, j;
2518
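	/* Bands 0-4 hold 20MHz channels; 5 and 6 hold HT40 (2GHz/5GHz). */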
2519	if (IEEE80211_IS_CHAN_HT40(c)) {
2520		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
2521		if (IEEE80211_IS_CHAN_HT40D(c))
2522			chan = c->ic_extieee;
2523		else
2524			chan = c->ic_ieee;
2525		for (i = 0; i < iwn_bands[band].nchan; i++) {
2526			if (iwn_bands[band].chan[i] == chan)
2527				return &sc->eeprom_channels[band][i];
2528		}
2529	} else {
2530		for (j = 0; j < 5; j++) {
2531			for (i = 0; i < iwn_bands[j].nchan; i++) {
2532				if (iwn_bands[j].chan[i] == c->ic_ieee &&
2533				    ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1)
2534					return &sc->eeprom_channels[j][i];
2535			}
2536		}
2537	}
2538	return NULL;
2539}
2540
2541static void
2542iwn_getradiocaps(struct ieee80211com *ic,
2543    int maxchans, int *nchans, struct ieee80211_channel chans[])
2544{
2545	struct iwn_softc *sc = ic->ic_softc;
2546	int i;
2547
2548	/* Parse the list of authorized channels. */
2549	for (i = 0; i < 5 && *nchans < maxchans; i++)
2550		iwn_read_eeprom_band(sc, i, maxchans, nchans, chans);
2551	for (i = 5; i < IWN_NBANDS - 1 && *nchans < maxchans; i++)
2552		iwn_read_eeprom_ht40(sc, i, maxchans, nchans, chans);
2553}
2554
2555/*
2556 * Enforce flags read from EEPROM.
2557 */
2558static int
2559iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
2560    int nchan, struct ieee80211_channel chans[])
2561{
2562	struct iwn_softc *sc = ic->ic_softc;
2563	int i;
2564
2565	for (i = 0; i < nchan; i++) {
2566		struct ieee80211_channel *c = &chans[i];
2567		struct iwn_eeprom_chan *channel;
2568
2569		channel = iwn_find_eeprom_channel(sc, c);
2570		if (channel == NULL) {
2571			ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
2572			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2573			return EINVAL;
2574		}
2575		c->ic_flags |= iwn_eeprom_channel_flags(channel);
2576	}
2577
2578	return 0;
2579}
2580
2581static void
2582iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2583{
2584	struct iwn_eeprom_enhinfo enhinfo[35];
2585	struct ieee80211com *ic = &sc->sc_ic;
2586	struct ieee80211_channel *c;
2587	uint16_t val, base;
2588	int8_t maxpwr;
2589	uint8_t flags;
2590	int i, j;
2591
2592	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2593
2594	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2595	base = le16toh(val);
2596	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2597	    enhinfo, sizeof enhinfo);
2598
2599	for (i = 0; i < nitems(enhinfo); i++) {
2600		flags = enhinfo[i].flags;
2601		if (!(flags & IWN_ENHINFO_VALID))
2602			continue;	/* Skip invalid entries. */
2603
2604		maxpwr = 0;
2605		if (sc->txchainmask & IWN_ANT_A)
2606			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2607		if (sc->txchainmask & IWN_ANT_B)
2608			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2609		if (sc->txchainmask & IWN_ANT_C)
2610			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2611		if (sc->ntxchains == 2)
2612			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2613		else if (sc->ntxchains == 3)
2614			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2615
2616		for (j = 0; j < ic->ic_nchans; j++) {
2617			c = &ic->ic_channels[j];
2618			if ((flags & IWN_ENHINFO_5GHZ)) {
2619				if (!IEEE80211_IS_CHAN_A(c))
2620					continue;
2621			} else if ((flags & IWN_ENHINFO_OFDM)) {
2622				if (!IEEE80211_IS_CHAN_G(c))
2623					continue;
2624			} else if (!IEEE80211_IS_CHAN_B(c))
2625				continue;
2626			if ((flags & IWN_ENHINFO_HT40)) {
2627				if (!IEEE80211_IS_CHAN_HT40(c))
2628					continue;
2629			} else {
2630				if (IEEE80211_IS_CHAN_HT40(c))
2631					continue;
2632			}
2633			if (enhinfo[i].chan != 0 &&
2634			    enhinfo[i].chan != c->ic_ieee)
2635				continue;
2636
2637			DPRINTF(sc, IWN_DEBUG_RESET,
2638			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2639			    c->ic_flags, maxpwr / 2);
2640			c->ic_maxregpower = maxpwr / 2;
2641			c->ic_maxpower = maxpwr;
2642		}
2643	}
2644
2645	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2646
2647}
2648
2649static struct ieee80211_node *
2650iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2651{
	return malloc(sizeof (struct iwn_node), M_80211_NODE,
	    M_NOWAIT | M_ZERO);
2653}
2654
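/*
 * Map a legacy net80211 rate (in 500kbps units) to the PLCP signal
 * value expected by the firmware: OFDM rates use the 802.11a signal
 * encoding (e.g. 48, i.e. 24Mbps, maps to 0x9), CCK rates use the
 * rate in 100kbps units.
 */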
2655static __inline int
2656rate2plcp(int rate)
2657{
2658	switch (rate & 0xff) {
2659	case 12:	return 0xd;
2660	case 18:	return 0xf;
2661	case 24:	return 0x5;
2662	case 36:	return 0x7;
2663	case 48:	return 0x9;
2664	case 72:	return 0xb;
2665	case 96:	return 0x1;
2666	case 108:	return 0x3;
2667	case 2:		return 10;
2668	case 4:		return 20;
2669	case 11:	return 55;
2670	case 22:	return 110;
2671	}
2672	return 0;
2673}
2674
2675static int
2676iwn_get_1stream_tx_antmask(struct iwn_softc *sc)
2677{
2678
2679	return IWN_LSB(sc->txchainmask);
2680}
2681
2682static int
2683iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
2684{
2685	int tx;
2686
2687	/*
2688	 * The '2 stream' setup is a bit .. odd.
2689	 *
 * For NICs that support only one antenna, default to IWN_ANT_AB or
 * the firmware panics (e.g. the Intel 5100).
 *
 * For NICs that support two antennas, we use ANT_AB.
 *
 * For NICs that support three antennas, we use the two that
 * aren't the default one.
2697	 *
2698	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
2699	 * this to only one antenna.
2700	 */
2701
2702	/* Default - transmit on the other antennas */
2703	tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));
2704
2705	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
2706	if (tx == 0)
2707		tx = IWN_ANT_AB;
2708
2709	/*
2710	 * If the NIC is a two-stream TX NIC, configure the TX mask to
2711	 * the default chainmask
2712	 */
2713	else if (sc->ntxchains == 2)
2714		tx = sc->txchainmask;
2715
2716	return (tx);
2717}
2718
2721/*
2722 * Calculate the required PLCP value from the given rate,
2723 * to the given node.
2724 *
2725 * This will take the node configuration (eg 11n, rate table
2726 * setup, etc) into consideration.
2727 */
2728static uint32_t
2729iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2730    uint8_t rate)
2731{
2732	struct ieee80211com *ic = ni->ni_ic;
2733	uint32_t plcp = 0;
2734	int ridx;
2735
2736	/*
2737	 * If it's an MCS rate, let's set the plcp correctly
2738	 * and set the relevant flags based on the node config.
2739	 */
2740	if (rate & IEEE80211_RATE_MCS) {
2741		/*
2742		 * Set the initial PLCP value to be between 0->31 for
2743		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2744		 * flag.
2745		 */
2746		plcp = IEEE80211_RV(rate) | IWN_RFLAG_MCS;
2747
2748		/*
2749		 * XXX the following should only occur if both
2750		 * the local configuration _and_ the remote node
2751		 * advertise these capabilities.  Thus this code
2752		 * may need fixing!
2753		 */
2754
2755		/*
2756		 * Set the channel width and guard interval.
2757		 */
2758		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2759			plcp |= IWN_RFLAG_HT40;
2760			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2761				plcp |= IWN_RFLAG_SGI;
2762		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2763			plcp |= IWN_RFLAG_SGI;
2764		}
2765
2766		/*
2767		 * Ensure the selected rate matches the link quality
2768		 * table entries being used.
2769		 */
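		/* MCS 0-7 use one stream, MCS 8-15 two, MCS 16+ three. */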
2770		if (rate > 0x8f)
2771			plcp |= IWN_RFLAG_ANT(sc->txchainmask);
2772		else if (rate > 0x87)
2773			plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc));
2774		else
2775			plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2776	} else {
2777		/*
2778		 * Set the initial PLCP - fine for both
2779		 * OFDM and CCK rates.
2780		 */
2781		plcp = rate2plcp(rate);
2782
2783		/* Set CCK flag if it's CCK */
2784
2785		/* XXX It would be nice to have a method
2786		 * to map the ridx -> phy table entry
2787		 * so we could just query that, rather than
2788		 * this hack to check against IWN_RIDX_OFDM6.
2789		 */
2790		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2791		    rate & IEEE80211_RATE_VAL);
2792		if (ridx < IWN_RIDX_OFDM6 &&
2793		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2794			plcp |= IWN_RFLAG_CCK;
2795
2796		/* Set antenna configuration */
2797		/* XXX TODO: is this the right antenna to use for legacy? */
2798		plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2799	}
2800
2801	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2802	    __func__,
2803	    rate,
2804	    plcp);
2805
2806	return (htole32(plcp));
2807}
2808
2809static void
2810iwn_newassoc(struct ieee80211_node *ni, int isnew)
2811{
2812	/* Doesn't do anything at the moment */
2813}
2814
2815static int
2816iwn_media_change(struct ifnet *ifp)
2817{
2818	int error;
2819
2820	error = ieee80211_media_change(ifp);
2821	/* NB: only the fixed rate can change and that doesn't need a reset */
2822	return (error == ENETRESET ? 0 : error);
2823}
2824
2825static int
2826iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2827{
2828	struct iwn_vap *ivp = IWN_VAP(vap);
2829	struct ieee80211com *ic = vap->iv_ic;
2830	struct iwn_softc *sc = ic->ic_softc;
2831	int error = 0;
2832
2833	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2834
2835	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2836	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2837
2838	IEEE80211_UNLOCK(ic);
2839	IWN_LOCK(sc);
2840	callout_stop(&sc->calib_to);
2841
2842	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
2843
2844	switch (nstate) {
2845	case IEEE80211_S_ASSOC:
2846		if (vap->iv_state != IEEE80211_S_RUN)
2847			break;
2848		/* FALLTHROUGH */
2849	case IEEE80211_S_AUTH:
2850		if (vap->iv_state == IEEE80211_S_AUTH)
2851			break;
2852
2853		/*
2854		 * !AUTH -> AUTH transition requires state reset to handle
2855		 * reassociations correctly.
2856		 */
2857		sc->rxon->associd = 0;
2858		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
2859		sc->calib.state = IWN_CALIB_STATE_INIT;
2860
2861		/* Wait until we hear a beacon before we transmit */
2862		if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan))
2863			sc->sc_beacon_wait = 1;
2864
2865		if ((error = iwn_auth(sc, vap)) != 0) {
2866			device_printf(sc->sc_dev,
2867			    "%s: could not move to auth state\n", __func__);
2868		}
2869		break;
2870
2871	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition: just restart the timers.
		 */
2875		if (vap->iv_state == IEEE80211_S_RUN) {
2876			sc->calib_cnt = 0;
2877			break;
2878		}
2879
2880		/* Wait until we hear a beacon before we transmit */
2881		if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan))
2882			sc->sc_beacon_wait = 1;
2883
2884		/*
2885		 * !RUN -> RUN requires setting the association id
2886		 * which is done with a firmware cmd.  We also defer
2887		 * starting the timers until that work is done.
2888		 */
2889		if ((error = iwn_run(sc, vap)) != 0) {
2890			device_printf(sc->sc_dev,
2891			    "%s: could not move to run state\n", __func__);
2892		}
2893		break;
2894
2895	case IEEE80211_S_INIT:
2896		sc->calib.state = IWN_CALIB_STATE_INIT;
2897		/*
2898		 * Purge the xmit queue so we don't have old frames
2899		 * during a new association attempt.
2900		 */
2901		sc->sc_beacon_wait = 0;
2902		iwn_xmit_queue_drain(sc);
2903		break;
2904
2905	default:
2906		break;
2907	}
2908	IWN_UNLOCK(sc);
2909	IEEE80211_LOCK(ic);
	if (error != 0) {
2911		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2912		return error;
2913	}
2914
2915	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2916
2917	return ivp->iv_newstate(vap, nstate, arg);
2918}
2919
2920static void
2921iwn_calib_timeout(void *arg)
2922{
2923	struct iwn_softc *sc = arg;
2924
2925	IWN_LOCK_ASSERT(sc);
2926
2927	/* Force automatic TX power calibration every 60 secs. */
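	/* The callout fires every 500ms, so 120 ticks is one minute. */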
2928	if (++sc->calib_cnt >= 120) {
2929		uint32_t flags = 0;
2930
2931		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2932		    "sending request for statistics");
2933		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2934		    sizeof flags, 1);
2935		sc->calib_cnt = 0;
2936	}
2937	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2938	    sc);
2939}
2940
2941/*
2942 * Process an RX_PHY firmware notification.  This is usually immediately
2943 * followed by an MPDU_RX_DONE notification.
2944 */
2945static void
2946iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2947    struct iwn_rx_data *data)
2948{
2949	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2950
2951	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2952	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2953
2954	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2955	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2956	sc->last_rx_valid = 1;
2957}
2958
2959/*
2960 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2961 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2962 */
2963static void
2964iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2965    struct iwn_rx_data *data)
2966{
2967	struct iwn_ops *ops = &sc->ops;
2968	struct ieee80211com *ic = &sc->sc_ic;
2969	struct iwn_rx_ring *ring = &sc->rxq;
2970	struct ieee80211_frame *wh;
2971	struct ieee80211_node *ni;
2972	struct mbuf *m, *m1;
2973	struct iwn_rx_stat *stat;
2974	caddr_t head;
2975	bus_addr_t paddr;
2976	uint32_t flags;
2977	int error, len, rssi, nf;
2978
2979	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2980
2981	if (desc->type == IWN_MPDU_RX_DONE) {
2982		/* Check for prior RX_PHY notification. */
2983		if (!sc->last_rx_valid) {
2984			DPRINTF(sc, IWN_DEBUG_ANY,
2985			    "%s: missing RX_PHY\n", __func__);
2986			return;
2987		}
2988		stat = &sc->last_rx_stat;
2989	} else
2990		stat = (struct iwn_rx_stat *)(desc + 1);
2991
2992	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2993
2994	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2995		device_printf(sc->sc_dev,
2996		    "%s: invalid RX statistic header, len %d\n", __func__,
2997		    stat->cfg_phy_len);
2998		return;
2999	}
3000	if (desc->type == IWN_MPDU_RX_DONE) {
3001		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
3002		head = (caddr_t)(mpdu + 1);
3003		len = le16toh(mpdu->len);
3004	} else {
3005		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
3006		len = le16toh(stat->len);
3007	}
3008
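	/* The firmware appends a 32-bit status word right after the frame. */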
3009	flags = le32toh(*(uint32_t *)(head + len));
3010
3011	/* Discard frames with a bad FCS early. */
3012	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
3013		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
3014		    __func__, flags);
3015		counter_u64_add(ic->ic_ierrors, 1);
3016		return;
3017	}
3018	/* Discard frames that are too short. */
3019	if (len < sizeof (struct ieee80211_frame_ack)) {
3020		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
3021		    __func__, len);
3022		counter_u64_add(ic->ic_ierrors, 1);
3023		return;
3024	}
3025
3026	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
3027	if (m1 == NULL) {
3028		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
3029		    __func__);
3030		counter_u64_add(ic->ic_ierrors, 1);
3031		return;
3032	}
3033	bus_dmamap_unload(ring->data_dmat, data->map);
3034
3035	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
3036	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3037	if (error != 0 && error != EFBIG) {
3038		device_printf(sc->sc_dev,
3039		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
3040		m_freem(m1);
3041
3042		/* Try to reload the old mbuf. */
3043		error = bus_dmamap_load(ring->data_dmat, data->map,
3044		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
3045		    &paddr, BUS_DMA_NOWAIT);
3046		if (error != 0 && error != EFBIG) {
3047			panic("%s: could not load old RX mbuf", __func__);
3048		}
3049		bus_dmamap_sync(ring->data_dmat, data->map,
3050		    BUS_DMASYNC_PREREAD);
3051		/* Physical address may have changed. */
3052		ring->desc[ring->cur] = htole32(paddr >> 8);
3053		bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3054		    BUS_DMASYNC_PREWRITE);
3055		counter_u64_add(ic->ic_ierrors, 1);
3056		return;
3057	}
3058
3059	bus_dmamap_sync(ring->data_dmat, data->map,
3060	    BUS_DMASYNC_PREREAD);
3061
3062	m = data->m;
3063	data->m = m1;
3064	/* Update RX descriptor. */
3065	ring->desc[ring->cur] = htole32(paddr >> 8);
3066	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3067	    BUS_DMASYNC_PREWRITE);
3068
3069	/* Finalize mbuf. */
3070	m->m_data = head;
3071	m->m_pkthdr.len = m->m_len = len;
3072
3073	/* Grab a reference to the source node. */
3074	wh = mtod(m, struct ieee80211_frame *);
3075	if (len >= sizeof(struct ieee80211_frame_min))
3076		ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3077	else
3078		ni = NULL;
3079	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
3080	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
3081
3082	rssi = ops->get_rssi(sc, stat);
3083
3084	if (ieee80211_radiotap_active(ic)) {
3085		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
3086
3087		tap->wr_flags = 0;
3088		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
3089			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3090		tap->wr_dbm_antsignal = (int8_t)rssi;
3091		tap->wr_dbm_antnoise = (int8_t)nf;
3092		tap->wr_tsft = stat->tstamp;
3093		switch (stat->rate) {
3094		/* CCK rates. */
3095		case  10: tap->wr_rate =   2; break;
3096		case  20: tap->wr_rate =   4; break;
3097		case  55: tap->wr_rate =  11; break;
3098		case 110: tap->wr_rate =  22; break;
3099		/* OFDM rates. */
3100		case 0xd: tap->wr_rate =  12; break;
3101		case 0xf: tap->wr_rate =  18; break;
3102		case 0x5: tap->wr_rate =  24; break;
3103		case 0x7: tap->wr_rate =  36; break;
3104		case 0x9: tap->wr_rate =  48; break;
3105		case 0xb: tap->wr_rate =  72; break;
3106		case 0x1: tap->wr_rate =  96; break;
3107		case 0x3: tap->wr_rate = 108; break;
3108		/* Unknown rate: should not happen. */
3109		default:  tap->wr_rate =   0;
3110		}
3111	}
3112
3113	/*
3114	 * If it's a beacon and we're waiting, then do the
3115	 * wakeup.  This should unblock raw_xmit/start.
3116	 */
3117	if (sc->sc_beacon_wait) {
3118		uint8_t type, subtype;
3119		/* NB: Re-assign wh */
3120		wh = mtod(m, struct ieee80211_frame *);
3121		type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3122		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3123		/*
3124		 * This assumes at this point we've received our own
3125		 * beacon.
3126		 */
3127		DPRINTF(sc, IWN_DEBUG_TRACE,
3128		    "%s: beacon_wait, type=%d, subtype=%d\n",
3129		    __func__, type, subtype);
3130		if (type == IEEE80211_FC0_TYPE_MGT &&
3131		    subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
3132			DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3133			    "%s: waking things up\n", __func__);
3134			/* queue taskqueue to transmit! */
3135			taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task);
3136		}
3137	}
3138
3139	IWN_UNLOCK(sc);
3140
3141	/* Send the frame to the 802.11 layer. */
3142	if (ni != NULL) {
3143		if (ni->ni_flags & IEEE80211_NODE_HT)
3144			m->m_flags |= M_AMPDU;
3145		(void)ieee80211_input(ni, m, rssi - nf, nf);
3146		/* Node is no longer needed. */
3147		ieee80211_free_node(ni);
3148	} else
3149		(void)ieee80211_input_all(ic, m, rssi - nf, nf);
3150
3151	IWN_LOCK(sc);
3152
3153	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3154
3155}
3156
3157/* Process an incoming Compressed BlockAck. */
3158static void
3159iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3160    struct iwn_rx_data *data)
3161{
3162	struct iwn_ops *ops = &sc->ops;
3163	struct iwn_node *wn;
3164	struct ieee80211_node *ni;
3165	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
3166	struct iwn_tx_ring *txq;
3167	struct iwn_tx_data *txdata;
3168	struct ieee80211_tx_ampdu *tap;
3169	struct mbuf *m;
3170	uint64_t bitmap;
3171	uint16_t ssn;
3172	uint8_t tid;
3173	int ackfailcnt = 0, i, lastidx, qid, *res, shift;
3174	int tx_ok = 0, tx_err = 0;
3175
3176	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__);
3177
3178	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3179
3180	qid = le16toh(ba->qid);
3181	txq = &sc->txq[ba->qid];
3182	tap = sc->qid2tap[ba->qid];
3183	tid = tap->txa_tid;
3184	wn = (void *)tap->txa_ni;
3185
3186	res = NULL;
3187	ssn = 0;
3188	if (!IEEE80211_AMPDU_RUNNING(tap)) {
3189		res = tap->txa_private;
3190		ssn = tap->txa_start & 0xfff;
3191	}
3192
3193	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
3194		txdata = &txq->data[txq->read];
3195
3196		/* Unmap and free mbuf. */
3197		bus_dmamap_sync(txq->data_dmat, txdata->map,
3198		    BUS_DMASYNC_POSTWRITE);
3199		bus_dmamap_unload(txq->data_dmat, txdata->map);
3200		m = txdata->m, txdata->m = NULL;
3201		ni = txdata->ni, txdata->ni = NULL;
3202
3203		KASSERT(ni != NULL, ("no node"));
3204		KASSERT(m != NULL, ("no mbuf"));
3205
3206		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3207		ieee80211_tx_complete(ni, m, 1);
3208
3209		txq->queued--;
3210		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
3211	}
3212
3213	if (txq->queued == 0 && res != NULL) {
3214		iwn_nic_lock(sc);
3215		ops->ampdu_tx_stop(sc, qid, tid, ssn);
3216		iwn_nic_unlock(sc);
3217		sc->qid2tap[qid] = NULL;
3218		free(res, M_DEVBUF);
3219		return;
3220	}
3221
3222	if (wn->agg[tid].bitmap == 0)
3223		return;
3224
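	/*
	 * Compute how far the firmware's ACK bitmap must be shifted to
	 * line up with the bitmap we recorded at transmit time.
	 */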
3225	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
3226	if (shift < 0)
3227		shift += 0x100;
3228
3229	if (wn->agg[tid].nframes > (64 - shift))
3230		return;
3231
3232	/*
3233	 * Walk the bitmap and calculate how many successful and failed
3234	 * attempts are made.
3235	 *
3236	 * Yes, the rate control code doesn't know these are A-MPDU
3237	 * subframes and that it's okay to fail some of these.
3238	 */
3239	ni = tap->txa_ni;
3240	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
3241	for (i = 0; bitmap; i++) {
3242		if ((bitmap & 1) == 0) {
			tx_err++;
3244			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3245			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3246		} else {
			tx_ok++;
3248			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3249			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3250		}
3251		bitmap >>= 1;
3252	}
3253
3254	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3255	    "->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err);
3256
3257}
3258
3259/*
3260 * Process a CALIBRATION_RESULT notification sent by the initialization
3261 * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
3262 */
3263static void
3264iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3265    struct iwn_rx_data *data)
3266{
3267	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
3268	int len, idx = -1;
3269
3270	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3271
3272	/* Runtime firmware should not send such a notification. */
	if (sc->sc_flags & IWN_FLAG_CALIB_DONE) {
		DPRINTF(sc, IWN_DEBUG_TRACE,
		    "->%s received after calib done\n", __func__);
3276		return;
3277	}
3278	len = (le32toh(desc->len) & 0x3fff) - 4;
3279	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3280
3281	switch (calib->code) {
3282	case IWN5000_PHY_CALIB_DC:
3283		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
3284			idx = 0;
3285		break;
3286	case IWN5000_PHY_CALIB_LO:
3287		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
3288			idx = 1;
3289		break;
3290	case IWN5000_PHY_CALIB_TX_IQ:
3291		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
3292			idx = 2;
3293		break;
3294	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
3295		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
3296			idx = 3;
3297		break;
3298	case IWN5000_PHY_CALIB_BASE_BAND:
3299		if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
3300			idx = 4;
3301		break;
3302	}
3303	if (idx == -1)	/* Ignore other results. */
3304		return;
3305
3306	/* Save calibration result. */
3307	if (sc->calibcmd[idx].buf != NULL)
3308		free(sc->calibcmd[idx].buf, M_DEVBUF);
3309	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
3310	if (sc->calibcmd[idx].buf == NULL) {
3311		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3312		    "not enough memory for calibration result %d\n",
3313		    calib->code);
3314		return;
3315	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "saving calibration result idx=%d, code=%d len=%d\n",
	    idx, calib->code, len);
3318	sc->calibcmd[idx].len = len;
3319	memcpy(sc->calibcmd[idx].buf, calib, len);
3320}
3321
3322static void
3323iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
3324    struct iwn_stats *stats, int len)
3325{
3326	struct iwn_stats_bt *stats_bt;
3327	struct iwn_stats *lstats;
3328
	/*
	 * First, check whether the length matches the bluetooth or the
	 * normal statistics layout.
	 *
	 * If it's the normal layout, just copy it and return.
	 * Otherwise we have to convert things.
	 */
3335
3336	if (len == sizeof(struct iwn_stats) + 4) {
3337		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3338		sc->last_stat_valid = 1;
3339		return;
3340	}
3341
	/*
	 * If it's not the bluetooth size either, log it and just copy
	 * the normal-sized statistics anyway.
	 */
3345	if (len != sizeof(struct iwn_stats_bt) + 4) {
3346		DPRINTF(sc, IWN_DEBUG_STATS,
3347		    "%s: size of rx statistics (%d) not an expected size!\n",
3348		    __func__,
3349		    len);
3350		memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3351		sc->last_stat_valid = 1;
3352		return;
3353	}
3354
3355	/*
3356	 * Ok. Time to copy.
3357	 */
3358	stats_bt = (struct iwn_stats_bt *) stats;
3359	lstats = &sc->last_stat;
3360
3361	/* flags */
3362	lstats->flags = stats_bt->flags;
3363	/* rx_bt */
3364	memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
3365	    sizeof(struct iwn_rx_phy_stats));
3366	memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
3367	    sizeof(struct iwn_rx_phy_stats));
3368	memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
3369	    sizeof(struct iwn_rx_general_stats));
3370	memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
3371	    sizeof(struct iwn_rx_ht_phy_stats));
3372	/* tx */
3373	memcpy(&lstats->tx, &stats_bt->tx,
3374	    sizeof(struct iwn_tx_stats));
3375	/* general */
3376	memcpy(&lstats->general, &stats_bt->general,
3377	    sizeof(struct iwn_general_stats));
3378
3379	/* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
3380	sc->last_stat_valid = 1;
3381}
3382
3383/*
3384 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
3385 * The latter is sent by the firmware after each received beacon.
3386 */
3387static void
3388iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3389    struct iwn_rx_data *data)
3390{
3391	struct iwn_ops *ops = &sc->ops;
3392	struct ieee80211com *ic = &sc->sc_ic;
3393	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3394	struct iwn_calib_state *calib = &sc->calib;
3395	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
3396	struct iwn_stats *lstats;
3397	int temp;
3398
3399	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3400
3401	/* Ignore statistics received during a scan. */
3402	if (vap->iv_state != IEEE80211_S_RUN ||
	    (ic->ic_flags & IEEE80211_F_SCAN)) {
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
		    __func__);
3406		return;
3407	}
3408
3409	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3410
3411	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS,
3412	    "%s: received statistics, cmd %d, len %d\n",
3413	    __func__, desc->type, le16toh(desc->len));
3414	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
3415
3416	/*
3417	 * Collect/track general statistics for reporting.
3418	 *
3419	 * This takes care of ensuring that the bluetooth sized message
3420	 * will be correctly converted to the legacy sized message.
3421	 */
3422	iwn_stats_update(sc, calib, stats, le16toh(desc->len));
3423
3424	/*
3425	 * And now, let's take a reference of it to use!
3426	 */
3427	lstats = &sc->last_stat;
3428
3429	/* Test if temperature has changed. */
3430	if (lstats->general.temp != sc->rawtemp) {
3431		/* Convert "raw" temperature to degC. */
3432		sc->rawtemp = stats->general.temp;
3433		temp = ops->get_temperature(sc);
3434		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
3435		    __func__, temp);
3436
3437		/* Update TX power if need be (4965AGN only). */
3438		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3439			iwn4965_power_calibration(sc, temp);
3440	}
3441
3442	if (desc->type != IWN_BEACON_STATISTICS)
3443		return;	/* Reply to a statistics request. */
3444
3445	sc->noise = iwn_get_noise(&lstats->rx.general);
3446	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
3447
3448	/* Test that RSSI and noise are present in stats report. */
3449	if (le32toh(lstats->rx.general.flags) != 1) {
3450		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
3451		    "received statistics without RSSI");
3452		return;
3453	}
3454
3455	if (calib->state == IWN_CALIB_STATE_ASSOC)
3456		iwn_collect_noise(sc, &lstats->rx.general);
3457	else if (calib->state == IWN_CALIB_STATE_RUN) {
3458		iwn_tune_sensitivity(sc, &lstats->rx);
3459		/*
3460		 * XXX TODO: Only run the RX recovery if we're associated!
3461		 */
3462		iwn_check_rx_recovery(sc, lstats);
3463		iwn_save_stats_counters(sc, lstats);
3464	}
3465
3466	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3467}
3468
3469/*
3470 * Save the relevant statistic counters for the next calibration
3471 * pass.
3472 */
3473static void
3474iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs)
3475{
3476	struct iwn_calib_state *calib = &sc->calib;
3477
3478	/* Save counters values for next call. */
3479	calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp);
3480	calib->fa_cck = le32toh(rs->rx.cck.fa);
3481	calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp);
3482	calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp);
3483	calib->fa_ofdm = le32toh(rs->rx.ofdm.fa);
3484
3485	/* Last time we received these tick values */
3486	sc->last_calib_ticks = ticks;
3487}
3488
3489/*
 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
 * and 5000 adapters have different, incompatible TX status formats.
3492 */
3493static void
3494iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3495    struct iwn_rx_data *data)
3496{
3497	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
3498	struct iwn_tx_ring *ring;
3499	int qid;
3500
3501	qid = desc->qid & 0xf;
3502	ring = &sc->txq[qid];
3503
3504	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3505	    "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3506	    __func__, desc->qid, desc->idx,
3507	    stat->rtsfailcnt,
3508	    stat->ackfailcnt,
3509	    stat->btkillcnt,
3510	    stat->rate, le16toh(stat->duration),
3511	    le32toh(stat->status));
3512
3513	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3514	if (qid >= sc->firstaggqueue) {
3515		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3516		    stat->ackfailcnt, &stat->status);
3517	} else {
3518		iwn_tx_done(sc, desc, stat->ackfailcnt,
3519		    le32toh(stat->status) & 0xff);
3520	}
3521}
3522
3523static void
3524iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3525    struct iwn_rx_data *data)
3526{
3527	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
3528	struct iwn_tx_ring *ring;
3529	int qid;
3530
3531	qid = desc->qid & 0xf;
3532	ring = &sc->txq[qid];
3533
3534	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3535	    "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3536	    __func__, desc->qid, desc->idx,
3537	    stat->rtsfailcnt,
3538	    stat->ackfailcnt,
3539	    stat->btkillcnt,
3540	    stat->rate, le16toh(stat->duration),
3541	    le32toh(stat->status));
3542
3543#ifdef notyet
3544	/* Reset TX scheduler slot. */
3545	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
3546#endif
3547
3548	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3549	if (qid >= sc->firstaggqueue) {
3550		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3551		    stat->ackfailcnt, &stat->status);
3552	} else {
3553		iwn_tx_done(sc, desc, stat->ackfailcnt,
3554		    le16toh(stat->status) & 0xff);
3555	}
3556}
3557
3558/*
3559 * Adapter-independent backend for TX_DONE firmware notifications.
3560 */
3561static void
3562iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
3563    uint8_t status)
3564{
3565	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
3566	struct iwn_tx_data *data = &ring->data[desc->idx];
3567	struct mbuf *m;
3568	struct ieee80211_node *ni;
3569	struct ieee80211vap *vap;
3570
3571	KASSERT(data->ni != NULL, ("no node"));
3572
3573	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3574
3575	/* Unmap and free mbuf. */
3576	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
3577	bus_dmamap_unload(ring->data_dmat, data->map);
3578	m = data->m, data->m = NULL;
3579	ni = data->ni, data->ni = NULL;
3580	vap = ni->ni_vap;
3581
3582	/*
3583	 * Update rate control statistics for the node.
3584	 */
3585	if (status & IWN_TX_FAIL)
3586		ieee80211_ratectl_tx_complete(vap, ni,
3587		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3588	else
3589		ieee80211_ratectl_tx_complete(vap, ni,
3590		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3591
3592	/*
3593	 * Channels marked for "radar" require traffic to be received
3594	 * to unlock before we can transmit.  Until traffic is seen
3595	 * any attempt to transmit is returned immediately with status
3596	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
3597	 * happen on the first authenticate after scanning.  To work around
3598	 * this we ignore a failure of this sort in AUTH state so the
3599	 * 802.11 layer will fall back to using a timeout to wait for
3600	 * the AUTH reply.  This allows the firmware time to see
3601	 * traffic so a subsequent retry of AUTH succeeds.  It's
3602	 * unclear why the firmware does not maintain state for
3603	 * channels recently visited as this would allow immediate
3604	 * use of the channel after a scan (where we see traffic).
3605	 */
3606	if (status == IWN_TX_FAIL_TX_LOCKED &&
3607	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
3608		ieee80211_tx_complete(ni, m, 0);
3609	else
3610		ieee80211_tx_complete(ni, m,
3611		    (status & IWN_TX_FAIL) != 0);
3612
3613	sc->sc_tx_timer = 0;
3614	if (--ring->queued < IWN_TX_RING_LOMARK)
3615		sc->qfullmsk &= ~(1 << ring->qid);
3616
3617	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3618}
3619
3620/*
3621 * Process a "command done" firmware notification.  This is where we wake up
3622 * processes waiting for a synchronous command completion.
3623 */
3624static void
3625iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
3626{
3627	struct iwn_tx_ring *ring;
3628	struct iwn_tx_data *data;
3629	int cmd_queue_num;
3630
3631	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
3632		cmd_queue_num = IWN_PAN_CMD_QUEUE;
3633	else
3634		cmd_queue_num = IWN_CMD_QUEUE_NUM;
3635
3636	if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
3637		return;	/* Not a command ack. */
3638
3639	ring = &sc->txq[cmd_queue_num];
3640	data = &ring->data[desc->idx];
3641
3642	/* If the command was mapped in an mbuf, free it. */
3643	if (data->m != NULL) {
3644		bus_dmamap_sync(ring->data_dmat, data->map,
3645		    BUS_DMASYNC_POSTWRITE);
3646		bus_dmamap_unload(ring->data_dmat, data->map);
3647		m_freem(data->m);
3648		data->m = NULL;
3649	}
3650	wakeup(&ring->desc[desc->idx]);
3651}
3652
3653static void
3654iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
3655    int ackfailcnt, void *stat)
3656{
3657	struct iwn_ops *ops = &sc->ops;
3658	struct iwn_tx_ring *ring = &sc->txq[qid];
3659	struct iwn_tx_data *data;
3660	struct mbuf *m;
3661	struct iwn_node *wn;
3662	struct ieee80211_node *ni;
3663	struct ieee80211_tx_ampdu *tap;
3664	uint64_t bitmap;
3665	uint32_t *status = stat;
3666	uint16_t *aggstatus = stat;
3667	uint16_t ssn;
3668	uint8_t tid;
3669	int bit, i, lastidx, *res, seqno, shift, start;
3670
3671	/* XXX TODO: status is le16 field! Grr */
3672
3673	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3674	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n",
3675	    __func__,
3676	    nframes,
3677	    *status);
3678
3679	tap = sc->qid2tap[qid];
3680	tid = tap->txa_tid;
3681	wn = (void *)tap->txa_ni;
3682	ni = tap->txa_ni;
3683
3684	/*
3685	 * XXX TODO: ACK and RTS failures would be nice here!
3686	 */
3687
3688	/*
3689	 * A-MPDU single frame status - if we failed to transmit it
3690	 * in A-MPDU, then it may be a permanent failure.
3691	 *
3692	 * XXX TODO: check what the Linux iwlwifi driver does here;
3693	 * there's some permanent and temporary failures that may be
3694	 * handled differently.
3695	 */
3696	if (nframes == 1) {
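		/*
		 * The low byte of the status word is 1 or 2 when the frame
		 * was delivered; any other value is treated as a failure
		 * and reported to the rate control code below.
		 */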
3697		if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
3698#ifdef	NOT_YET
3699			printf("ieee80211_send_bar()\n");
3700#endif
3701			/*
3702			 * If we completely fail a transmit, make sure a
3703			 * notification is pushed up to the rate control
3704			 * layer.
3705			 */
3706			ieee80211_ratectl_tx_complete(ni->ni_vap,
3707			    ni,
3708			    IEEE80211_RATECTL_TX_FAILURE,
3709			    &ackfailcnt,
3710			    NULL);
3711		} else {
3712			/*
3713			 * If nframes=1, then we won't be getting a BA for
3714			 * this frame.  Ensure that we correctly update the
3715			 * rate control code with how many retries were
3716			 * needed to send it.
3717			 */
3718			ieee80211_ratectl_tx_complete(ni->ni_vap,
3719			    ni,
3720			    IEEE80211_RATECTL_TX_SUCCESS,
3721			    &ackfailcnt,
3722			    NULL);
3723		}
3724	}
3725
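	/*
	 * Build a bitmap of the frames that actually made it onto the air
	 * within this aggregation window.  Each frame contributes two
	 * 16-bit words: the first flags failed attempts (bits 0x0c), which
	 * are skipped, and the low byte of the second gives the frame's
	 * TX ring slot.  The shift/start bookkeeping keeps the bitmap
	 * anchored at the lowest slot seen and copes with the ring index
	 * wrapping at 256; the result is stored in wn->agg[tid] for use
	 * when the compressed block-ack arrives.
	 */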
3726	bitmap = 0;
3727	start = idx;
3728	for (i = 0; i < nframes; i++) {
3729		if (le16toh(aggstatus[i * 2]) & 0xc)
3730			continue;
3731
3732		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
3733		bit = idx - start;
3734		shift = 0;
3735		if (bit >= 64) {
3736			shift = 0x100 - idx + start;
3737			bit = 0;
3738			start = idx;
3739		} else if (bit <= -64)
3740			bit = 0x100 - start + idx;
3741		else if (bit < 0) {
3742			shift = start - idx;
3743			start = idx;
3744			bit = 0;
3745		}
3746		bitmap = bitmap << shift;
3747		bitmap |= 1ULL << bit;
3748	}
3749	tap = sc->qid2tap[qid];
3750	tid = tap->txa_tid;
3751	wn = (void *)tap->txa_ni;
3752	wn->agg[tid].bitmap = bitmap;
3753	wn->agg[tid].startidx = start;
3754	wn->agg[tid].nframes = nframes;
3755
3756	res = NULL;
3757	ssn = 0;
3758	if (!IEEE80211_AMPDU_RUNNING(tap)) {
3759		res = tap->txa_private;
3760		ssn = tap->txa_start & 0xfff;
3761	}
3762
3763	/* This is going nframes DWORDS into the descriptor? */
3764	seqno = le32toh(*(status + nframes)) & 0xfff;
3765	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
3766		data = &ring->data[ring->read];
3767
3768		/* Unmap and free mbuf. */
3769		bus_dmamap_sync(ring->data_dmat, data->map,
3770		    BUS_DMASYNC_POSTWRITE);
3771		bus_dmamap_unload(ring->data_dmat, data->map);
3772		m = data->m, data->m = NULL;
3773		ni = data->ni, data->ni = NULL;
3774
3775		KASSERT(ni != NULL, ("no node"));
3776		KASSERT(m != NULL, ("no mbuf"));
3777		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3778		ieee80211_tx_complete(ni, m, 1);
3779
3780		ring->queued--;
3781		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
3782	}
3783
3784	if (ring->queued == 0 && res != NULL) {
3785		iwn_nic_lock(sc);
3786		ops->ampdu_tx_stop(sc, qid, tid, ssn);
3787		iwn_nic_unlock(sc);
3788		sc->qid2tap[qid] = NULL;
3789		free(res, M_DEVBUF);
3790		return;
3791	}
3792
3793	sc->sc_tx_timer = 0;
3794	if (ring->queued < IWN_TX_RING_LOMARK)
3795		sc->qfullmsk &= ~(1 << ring->qid);
3796
3797	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3798}
3799
3800/*
3801 * Process an INT_FH_RX or INT_SW_RX interrupt.
3802 */
3803static void
3804iwn_notif_intr(struct iwn_softc *sc)
3805{
3806	struct iwn_ops *ops = &sc->ops;
3807	struct ieee80211com *ic = &sc->sc_ic;
3808	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3809	uint16_t hw;
3810
3811	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
3812	    BUS_DMASYNC_POSTREAD);
3813
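	/*
	 * The firmware advances closed_count in the RX status area as it
	 * completes receive buffers; process every entry from rxq.cur up
	 * to (but not including) that index.
	 */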
3814	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
3815	while (sc->rxq.cur != hw) {
3816		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
3817		struct iwn_rx_desc *desc;
3818
3819		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3820		    BUS_DMASYNC_POSTREAD);
3821		desc = mtod(data->m, struct iwn_rx_desc *);
3822
3823		DPRINTF(sc, IWN_DEBUG_RECV,
3824		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
3825		    __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
3826		    desc->type, iwn_intr_str(desc->type),
3827		    le16toh(desc->len));
3828
3829		if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF))	/* Reply to a command. */
3830			iwn_cmd_done(sc, desc);
3831
3832		switch (desc->type) {
3833		case IWN_RX_PHY:
3834			iwn_rx_phy(sc, desc, data);
3835			break;
3836
3837		case IWN_RX_DONE:		/* 4965AGN only. */
3838		case IWN_MPDU_RX_DONE:
3839			/* An 802.11 frame has been received. */
3840			iwn_rx_done(sc, desc, data);
3841			break;
3842
3843		case IWN_RX_COMPRESSED_BA:
3844			/* A Compressed BlockAck has been received. */
3845			iwn_rx_compressed_ba(sc, desc, data);
3846			break;
3847
3848		case IWN_TX_DONE:
3849			/* An 802.11 frame has been transmitted. */
3850			ops->tx_done(sc, desc, data);
3851			break;
3852
3853		case IWN_RX_STATISTICS:
3854		case IWN_BEACON_STATISTICS:
3855			iwn_rx_statistics(sc, desc, data);
3856			break;
3857
3858		case IWN_BEACON_MISSED:
3859		{
3860			struct iwn_beacon_missed *miss =
3861			    (struct iwn_beacon_missed *)(desc + 1);
3862			int misses;
3863
3864			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3865			    BUS_DMASYNC_POSTREAD);
3866			misses = le32toh(miss->consecutive);
3867
3868			DPRINTF(sc, IWN_DEBUG_STATE,
3869			    "%s: beacons missed %d/%d\n", __func__,
3870			    misses, le32toh(miss->total));
3871			/*
3872			 * If more than 5 consecutive beacons are missed,
3873			 * reinitialize the sensitivity state machine.
3874			 */
3875			if (vap->iv_state == IEEE80211_S_RUN &&
3876			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3877				if (misses > 5)
3878					(void)iwn_init_sensitivity(sc);
3879				if (misses >= vap->iv_bmissthreshold) {
3880					IWN_UNLOCK(sc);
3881					ieee80211_beacon_miss(ic);
3882					IWN_LOCK(sc);
3883				}
3884			}
3885			break;
3886		}
3887		case IWN_UC_READY:
3888		{
3889			struct iwn_ucode_info *uc =
3890			    (struct iwn_ucode_info *)(desc + 1);
3891
3892			/* The microcontroller is ready. */
3893			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3894			    BUS_DMASYNC_POSTREAD);
3895			DPRINTF(sc, IWN_DEBUG_RESET,
3896			    "microcode alive notification version=%d.%d "
3897			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3898			    uc->subtype, le32toh(uc->valid));
3899
3900			if (le32toh(uc->valid) != 1) {
3901				device_printf(sc->sc_dev,
3902				    "microcontroller initialization failed");
3903				break;
3904			}
3905			if (uc->subtype == IWN_UCODE_INIT) {
3906				/* Save microcontroller report. */
3907				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3908			}
3909			/* Save the address of the error log in SRAM. */
3910			sc->errptr = le32toh(uc->errptr);
3911			break;
3912		}
3913		case IWN_STATE_CHANGED:
3914		{
3915			/*
3916			 * A state change notification lets us note a hardware
3917			 * switch change.  However, we handle this in iwn_intr
3918			 * since we get both the enable and disable interrupts there.
3919			 */
3920			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3921			    BUS_DMASYNC_POSTREAD);
3922#ifdef	IWN_DEBUG
3923			uint32_t *status = (uint32_t *)(desc + 1);
3924			DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
3925			    "state changed to %x\n",
3926			    le32toh(*status));
3927#endif
3928			break;
3929		}
3930		case IWN_START_SCAN:
3931		{
3932			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3933			    BUS_DMASYNC_POSTREAD);
3934#ifdef	IWN_DEBUG
3935			struct iwn_start_scan *scan =
3936			    (struct iwn_start_scan *)(desc + 1);
3937			DPRINTF(sc, IWN_DEBUG_ANY,
3938			    "%s: scanning channel %d status %x\n",
3939			    __func__, scan->chan, le32toh(scan->status));
3940#endif
3941			break;
3942		}
3943		case IWN_STOP_SCAN:
3944		{
3945			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3946			    BUS_DMASYNC_POSTREAD);
3947#ifdef	IWN_DEBUG
3948			struct iwn_stop_scan *scan =
3949			    (struct iwn_stop_scan *)(desc + 1);
3950			DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
3951			    "scan finished nchan=%d status=%d chan=%d\n",
3952			    scan->nchan, scan->status, scan->chan);
3953#endif
3954			sc->sc_is_scanning = 0;
3955			callout_stop(&sc->scan_timeout);
3956			IWN_UNLOCK(sc);
3957			ieee80211_scan_next(vap);
3958			IWN_LOCK(sc);
3959			break;
3960		}
3961		case IWN5000_CALIBRATION_RESULT:
3962			iwn5000_rx_calib_results(sc, desc, data);
3963			break;
3964
3965		case IWN5000_CALIBRATION_DONE:
3966			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3967			wakeup(sc);
3968			break;
3969		}
3970
3971		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3972	}
3973
3974	/* Tell the firmware what we have processed. */
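	/* The value written back is kept 8-aligned (hence the & ~7). */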
3975	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3976	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3977}
3978
3979/*
3980 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3981 * from power-down sleep mode.
3982 */
3983static void
3984iwn_wakeup_intr(struct iwn_softc *sc)
3985{
3986	int qid;
3987
3988	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3989	    __func__);
3990
3991	/* Wakeup RX and TX rings. */
3992	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3993	for (qid = 0; qid < sc->ntxqs; qid++) {
3994		struct iwn_tx_ring *ring = &sc->txq[qid];
3995		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3996	}
3997}
3998
3999static void
4000iwn_rftoggle_intr(struct iwn_softc *sc)
4001{
4002	struct ieee80211com *ic = &sc->sc_ic;
4003	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
4004
4005	IWN_LOCK_ASSERT(sc);
4006
4007	device_printf(sc->sc_dev, "RF switch: radio %s\n",
4008	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
4009	if (tmp & IWN_GP_CNTRL_RFKILL)
4010		ieee80211_runtask(ic, &sc->sc_radioon_task);
4011	else
4012		ieee80211_runtask(ic, &sc->sc_radiooff_task);
4013}
4014
4015/*
4016 * Dump the error log of the firmware when a firmware panic occurs.  Although
4017 * we can't debug the firmware because it is neither open source nor free, it
4018 * can help us to identify certain classes of problems.
4019 */
4020static void
4021iwn_fatal_intr(struct iwn_softc *sc)
4022{
4023	struct iwn_fw_dump dump;
4024	int i;
4025
4026	IWN_LOCK_ASSERT(sc);
4027
4028	/* Force a complete recalibration on next init. */
4029	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
4030
4031	/* Check that the error log address is valid. */
4032	if (sc->errptr < IWN_FW_DATA_BASE ||
4033	    sc->errptr + sizeof (dump) >
4034	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
4035		printf("%s: bad firmware error log address 0x%08x\n", __func__,
4036		    sc->errptr);
4037		return;
4038	}
4039	if (iwn_nic_lock(sc) != 0) {
4040		printf("%s: could not read firmware error log\n", __func__);
4041		return;
4042	}
4043	/* Read firmware error log from SRAM. */
4044	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
4045	    sizeof (dump) / sizeof (uint32_t));
4046	iwn_nic_unlock(sc);
4047
4048	if (dump.valid == 0) {
4049		printf("%s: firmware error log is empty\n", __func__);
4050		return;
4051	}
4052	printf("firmware error log:\n");
4053	printf("  error type      = \"%s\" (0x%08X)\n",
4054	    (dump.id < nitems(iwn_fw_errmsg)) ?
4055		iwn_fw_errmsg[dump.id] : "UNKNOWN",
4056	    dump.id);
4057	printf("  program counter = 0x%08X\n", dump.pc);
4058	printf("  source line     = 0x%08X\n", dump.src_line);
4059	printf("  error data      = 0x%08X%08X\n",
4060	    dump.error_data[0], dump.error_data[1]);
4061	printf("  branch link     = 0x%08X%08X\n",
4062	    dump.branch_link[0], dump.branch_link[1]);
4063	printf("  interrupt link  = 0x%08X%08X\n",
4064	    dump.interrupt_link[0], dump.interrupt_link[1]);
4065	printf("  time            = %u\n", dump.time[0]);
4066
4067	/* Dump driver status (TX and RX rings) while we're here. */
4068	printf("driver status:\n");
4069	for (i = 0; i < sc->ntxqs; i++) {
4070		struct iwn_tx_ring *ring = &sc->txq[i];
4071		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
4072		    i, ring->qid, ring->cur, ring->queued);
4073	}
4074	printf("  rx ring: cur=%d\n", sc->rxq.cur);
4075}
4076
4077static void
4078iwn_intr(void *arg)
4079{
4080	struct iwn_softc *sc = arg;
4081	uint32_t r1, r2, tmp;
4082
4083	IWN_LOCK(sc);
4084
4085	/* Disable interrupts. */
4086	IWN_WRITE(sc, IWN_INT_MASK, 0);
4087
4088	/* Read interrupts from ICT (fast) or from registers (slow). */
4089	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4090		bus_dmamap_sync(sc->ict_dma.tag, sc->ict_dma.map,
4091		    BUS_DMASYNC_POSTREAD);
4092		tmp = 0;
4093		while (sc->ict[sc->ict_cur] != 0) {
4094			tmp |= sc->ict[sc->ict_cur];
4095			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
4096			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
4097		}
4098		tmp = le32toh(tmp);
4099		if (tmp == 0xffffffff)	/* Shouldn't happen. */
4100			tmp = 0;
4101		else if (tmp & 0xc0000)	/* Work around a HW bug. */
4102			tmp |= 0x8000;
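		/*
		 * The ICT table holds a compacted 16-bit image of the
		 * interrupt cause register; expand it back into the
		 * IWN_INT bit layout before testing individual causes.
		 */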
4103		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
4104		r2 = 0;	/* Unused. */
4105	} else {
4106		r1 = IWN_READ(sc, IWN_INT);
4107		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
4108			IWN_UNLOCK(sc);
4109			return;	/* Hardware gone! */
4110		}
4111		r2 = IWN_READ(sc, IWN_FH_INT);
4112	}
4113
4114	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n",
4115	    r1, r2);
4116
4117	if (r1 == 0 && r2 == 0)
4118		goto done;	/* Interrupt not for us. */
4119
4120	/* Acknowledge interrupts. */
4121	IWN_WRITE(sc, IWN_INT, r1);
4122	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
4123		IWN_WRITE(sc, IWN_FH_INT, r2);
4124
4125	if (r1 & IWN_INT_RF_TOGGLED) {
4126		iwn_rftoggle_intr(sc);
4127		goto done;
4128	}
4129	if (r1 & IWN_INT_CT_REACHED) {
4130		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
4131		    __func__);
4132	}
4133	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
4134		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
4135		    __func__);
4136#ifdef	IWN_DEBUG
4137		iwn_debug_register(sc);
4138#endif
4139		/* Dump firmware error log and stop. */
4140		iwn_fatal_intr(sc);
4141
4142		taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task);
4143		goto done;
4144	}
4145	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
4146	    (r2 & IWN_FH_INT_RX)) {
4147		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4148			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
4149				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
4150			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4151			    IWN_INT_PERIODIC_DIS);
4152			iwn_notif_intr(sc);
4153			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
4154				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4155				    IWN_INT_PERIODIC_ENA);
4156			}
4157		} else
4158			iwn_notif_intr(sc);
4159	}
4160
4161	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
4162		if (sc->sc_flags & IWN_FLAG_USE_ICT)
4163			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
4164		wakeup(sc);	/* FH DMA transfer completed. */
4165	}
4166
4167	if (r1 & IWN_INT_ALIVE)
4168		wakeup(sc);	/* Firmware is alive. */
4169
4170	if (r1 & IWN_INT_WAKEUP)
4171		iwn_wakeup_intr(sc);
4172
4173done:
4174	/* Re-enable interrupts. */
4175	if (sc->sc_flags & IWN_FLAG_RUNNING)
4176		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
4177
4178	IWN_UNLOCK(sc);
4179}
4180
4181/*
4182 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
4183 * 5000 adapters use a slightly different format).
4184 */
4185static void
4186iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4187    uint16_t len)
4188{
4189	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
4190
4191	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4192
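	/*
	 * Store the frame length (plus 8 bytes of overhead) in the
	 * scheduler byte-count table.  Entries within the first window
	 * are duplicated past the end of the ring, presumably so the
	 * hardware scheduler sees a contiguous window across the wrap.
	 */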
4193	*w = htole16(len + 8);
4194	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4195	    BUS_DMASYNC_PREWRITE);
4196	if (idx < IWN_SCHED_WINSZ) {
4197		*(w + IWN_TX_RING_COUNT) = *w;
4198		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4199		    BUS_DMASYNC_PREWRITE);
4200	}
4201}
4202
4203static void
4204iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4205    uint16_t len)
4206{
4207	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4208
4209	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4210
4211	*w = htole16(id << 12 | (len + 8));
4212	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4213	    BUS_DMASYNC_PREWRITE);
4214	if (idx < IWN_SCHED_WINSZ) {
4215		*(w + IWN_TX_RING_COUNT) = *w;
4216		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4217		    BUS_DMASYNC_PREWRITE);
4218	}
4219}
4220
4221#ifdef notyet
4222static void
4223iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
4224{
4225	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4226
4227	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4228
4229	*w = (*w & htole16(0xf000)) | htole16(1);
4230	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4231	    BUS_DMASYNC_PREWRITE);
4232	if (idx < IWN_SCHED_WINSZ) {
4233		*(w + IWN_TX_RING_COUNT) = *w;
4234		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4235		    BUS_DMASYNC_PREWRITE);
4236	}
4237}
4238#endif
4239
4240/*
4241 * Check whether OFDM 11g protection will be enabled for the given rate.
4242 *
4243 * The original driver code only enabled protection for OFDM rates.
4244 * It didn't check to see whether it was operating in 11a or 11bg mode.
4245 */
4246static int
4247iwn_check_rate_needs_protection(struct iwn_softc *sc,
4248    struct ieee80211vap *vap, uint8_t rate)
4249{
4250	struct ieee80211com *ic = vap->iv_ic;
4251
4252	/*
4253	 * Not in 2GHz mode? Then there's no need to enable OFDM
4254	 * 11bg protection.
4255	 */
4256	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
4257		return (0);
4258	}
4259
4260	/*
4261	 * 11bg protection not enabled? Then don't use it.
4262	 */
4263	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
4264		return (0);
4265
4266	/*
4267	 * If it's an 11n rate - no protection.
4268	 * We'll do it via a specific 11n check.
4269	 */
4270	if (rate & IEEE80211_RATE_MCS) {
4271		return (0);
4272	}
4273
4274	/*
4275	 * Do a rate table lookup.  If the PHY is CCK,
4276	 * don't do protection.
4277	 */
4278	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
4279		return (0);
4280
4281	/*
4282	 * Yup, enable protection.
4283	 */
4284	return (1);
4285}
4286
4287/*
4288 * Return an index between 0 and IWN_MAX_TX_RETRIES-1 into the link
4289 * quality table that corresponds to the given rate.
4290 */
4291static int
4292iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
4293    uint8_t rate)
4294{
4295	struct ieee80211_rateset *rs;
4296	int is_11n;
4297	int nr;
4298	int i;
4299	uint8_t cmp_rate;
4300
4301	/*
4302	 * Figure out if we're using 11n or not here.
4303	 */
4304	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
4305		is_11n = 1;
4306	else
4307		is_11n = 0;
4308
4309	/*
4310	 * Use the correct rate table.
4311	 */
4312	if (is_11n) {
4313		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
4314		nr = ni->ni_htrates.rs_nrates;
4315	} else {
4316		rs = &ni->ni_rates;
4317		nr = rs->rs_nrates;
4318	}
4319
4320	/*
4321	 * Find the relevant link quality entry in the table.
4322	 */
4323	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
4324		/*
4325		 * The link quality table index starts at 0 == highest
4326		 * rate, so we walk the rate table backwards.
4327		 */
4328		cmp_rate = rs->rs_rates[(nr - 1) - i];
4329		if (rate & IEEE80211_RATE_MCS)
4330			cmp_rate |= IEEE80211_RATE_MCS;
4331
4332#if 0
4333		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
4334		    __func__,
4335		    i,
4336		    nr,
4337		    rate,
4338		    cmp_rate);
4339#endif
4340
4341		if (cmp_rate == rate)
4342			return (i);
4343	}
4344
4345	/* Failed? Start at the end */
4346	return (IWN_MAX_TX_RETRIES - 1);
4347}
4348
4349static int
4350iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
4351{
4352	struct iwn_ops *ops = &sc->ops;
4353	const struct ieee80211_txparam *tp;
4354	struct ieee80211vap *vap = ni->ni_vap;
4355	struct ieee80211com *ic = ni->ni_ic;
4356	struct iwn_node *wn = (void *)ni;
4357	struct iwn_tx_ring *ring;
4358	struct iwn_tx_desc *desc;
4359	struct iwn_tx_data *data;
4360	struct iwn_tx_cmd *cmd;
4361	struct iwn_cmd_data *tx;
4362	struct ieee80211_frame *wh;
4363	struct ieee80211_key *k = NULL;
4364	struct mbuf *m1;
4365	uint32_t flags;
4366	uint16_t qos;
4367	u_int hdrlen;
4368	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4369	uint8_t tid, type;
4370	int ac, i, totlen, error, pad, nsegs = 0, rate;
4371
4372	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4373
4374	IWN_LOCK_ASSERT(sc);
4375
4376	wh = mtod(m, struct ieee80211_frame *);
4377	hdrlen = ieee80211_anyhdrsize(wh);
4378	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4379
4380	/* Select EDCA Access Category and TX ring for this frame. */
4381	if (IEEE80211_QOS_HAS_SEQ(wh)) {
4382		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
4383		tid = qos & IEEE80211_QOS_TID;
4384	} else {
4385		qos = 0;
4386		tid = 0;
4387	}
4388	ac = M_WME_GETAC(m);
4389	if (m->m_flags & M_AMPDU_MPDU) {
4390		uint16_t seqno;
4391		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
4392
4393		if (!IEEE80211_AMPDU_RUNNING(tap)) {
4394			return EINVAL;
4395		}
4396
4397		/*
4398		 * Queue this frame to the hardware ring that we've
4399		 * negotiated AMPDU TX on.
4400		 *
4401		 * Note that the sequence number must match the TX slot
4402		 * being used!
4403		 */
4404		ac = *(int *)tap->txa_private;
4405		seqno = ni->ni_txseqs[tid];
4406		*(uint16_t *)wh->i_seq =
4407		    htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
4408		ring = &sc->txq[ac];
4409		if ((seqno % 256) != ring->cur) {
4410			device_printf(sc->sc_dev,
4411			    "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
4412			    __func__,
4413			    m,
4414			    seqno,
4415			    seqno % 256,
4416			    ring->cur);
4417		}
4418		ni->ni_txseqs[tid]++;
4419	}
4420	ring = &sc->txq[ac];
4421	desc = &ring->desc[ring->cur];
4422	data = &ring->data[ring->cur];
4423
4424	/* Choose a TX rate index. */
4425	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
4426	if (type == IEEE80211_FC0_TYPE_MGT)
4427		rate = tp->mgmtrate;
4428	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4429		rate = tp->mcastrate;
4430	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
4431		rate = tp->ucastrate;
4432	else if (m->m_flags & M_EAPOL)
4433		rate = tp->mgmtrate;
4434	else {
4435		/* XXX pass pktlen */
4436		(void) ieee80211_ratectl_rate(ni, NULL, 0);
4437		rate = ni->ni_txrate;
4438	}
4439
4440	/* Encrypt the frame if need be. */
4441	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4442		/* Retrieve key for TX. */
4443		k = ieee80211_crypto_encap(ni, m);
4444		if (k == NULL) {
4445			return ENOBUFS;
4446		}
4447		/* 802.11 header may have moved. */
4448		wh = mtod(m, struct ieee80211_frame *);
4449	}
4450	totlen = m->m_pkthdr.len;
4451
4452	if (ieee80211_radiotap_active_vap(vap)) {
4453		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4454
4455		tap->wt_flags = 0;
4456		tap->wt_rate = rate;
4457		if (k != NULL)
4458			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4459
4460		ieee80211_radiotap_tx(vap, m);
4461	}
4462
4463	/* Prepare TX firmware command. */
4464	cmd = &ring->cmd[ring->cur];
4465	cmd->code = IWN_CMD_TX_DATA;
4466	cmd->flags = 0;
4467	cmd->qid = ring->qid;
4468	cmd->idx = ring->cur;
4469
4470	tx = (struct iwn_cmd_data *)cmd->data;
4471	/* NB: No need to clear tx, all fields are reinitialized here. */
4472	tx->scratch = 0;	/* clear "scratch" area */
4473
4474	flags = 0;
4475	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4476		/* Unicast frame, check if an ACK is expected. */
4477		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
4478		    IEEE80211_QOS_ACKPOLICY_NOACK)
4479			flags |= IWN_TX_NEED_ACK;
4480	}
4481	if ((wh->i_fc[0] &
4482	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
4483	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
4484		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
4485
4486	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
4487		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
4488
4489	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
4490	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4491		/* NB: Group frames are sent using CCK in 802.11b/g. */
4492		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
4493			flags |= IWN_TX_NEED_RTS;
4494		} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
4495			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4496				flags |= IWN_TX_NEED_CTS;
4497			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4498				flags |= IWN_TX_NEED_RTS;
4499		} else if ((rate & IEEE80211_RATE_MCS) &&
4500			(ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
4501			flags |= IWN_TX_NEED_RTS;
4502		}
4503
4504		/* XXX HT protection? */
4505
4506		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
4507			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4508				/* 5000 autoselects RTS/CTS or CTS-to-self. */
4509				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
4510				flags |= IWN_TX_NEED_PROTECTION;
4511			} else
4512				flags |= IWN_TX_FULL_TXOP;
4513		}
4514	}
4515
4516	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4517	    type != IEEE80211_FC0_TYPE_DATA)
4518		tx->id = sc->broadcast_id;
4519	else
4520		tx->id = wn->id;
4521
4522	if (type == IEEE80211_FC0_TYPE_MGT) {
4523		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4524
4525		/* Tell HW to set timestamp in probe responses. */
4526		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4527			flags |= IWN_TX_INSERT_TSTAMP;
4528		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4529		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4530			tx->timeout = htole16(3);
4531		else
4532			tx->timeout = htole16(2);
4533	} else
4534		tx->timeout = htole16(0);
4535
4536	if (hdrlen & 3) {
4537		/* First segment length must be a multiple of 4. */
4538		flags |= IWN_TX_NEED_PADDING;
4539		pad = 4 - (hdrlen & 3);
4540	} else
4541		pad = 0;
4542
4543	tx->len = htole16(totlen);
4544	tx->tid = tid;
4545	tx->rts_ntries = 60;
4546	tx->data_ntries = 15;
4547	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4548	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4549	if (tx->id == sc->broadcast_id) {
4550		/* Group or management frame. */
4551		tx->linkq = 0;
4552	} else {
4553		tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
4554		flags |= IWN_TX_LINKQ;	/* enable MRR */
4555	}
4556
4557	/* Set physical address of "scratch area". */
4558	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4559	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4560
4561	/* Copy 802.11 header in TX command. */
4562	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4563
4564	/* Trim 802.11 header. */
4565	m_adj(m, hdrlen);
4566	tx->security = 0;
4567	tx->flags = htole32(flags);
4568
4569	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4570	    &nsegs, BUS_DMA_NOWAIT);
4571	if (error != 0) {
4572		if (error != EFBIG) {
4573			device_printf(sc->sc_dev,
4574			    "%s: can't map mbuf (error %d)\n", __func__, error);
4575			return error;
4576		}
4577		/* Too many DMA segments, linearize mbuf. */
4578		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4579		if (m1 == NULL) {
4580			device_printf(sc->sc_dev,
4581			    "%s: could not defrag mbuf\n", __func__);
4582			return ENOBUFS;
4583		}
4584		m = m1;
4585
4586		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4587		    segs, &nsegs, BUS_DMA_NOWAIT);
4588		if (error != 0) {
4589			device_printf(sc->sc_dev,
4590			    "%s: can't map mbuf (error %d)\n", __func__, error);
4591			return error;
4592		}
4593	}
4594
4595	data->m = m;
4596	data->ni = ni;
4597
4598	DPRINTF(sc, IWN_DEBUG_XMIT,
4599	    "%s: qid %d idx %d len %d nsegs %d flags 0x%08x rate 0x%04x plcp 0x%08x\n",
4600	    __func__,
4601	    ring->qid,
4602	    ring->cur,
4603	    m->m_pkthdr.len,
4604	    nsegs,
4605	    flags,
4606	    rate,
4607	    tx->rate);
4608
4609	/* Fill TX descriptor. */
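	/*
	 * In each segment entry the low four bits of the length word carry
	 * the high bits of the DMA address (IWN_HIADDR); the byte count is
	 * shifted left by four to make room for them.
	 */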
4610	desc->nsegs = 1;
4611	if (m->m_len != 0)
4612		desc->nsegs += nsegs;
4613	/* First DMA segment is used by the TX command. */
4614	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4615	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
4616	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
4617	/* Other DMA segments are for data payload. */
4618	seg = &segs[0];
4619	for (i = 1; i <= nsegs; i++) {
4620		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4621		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
4622		    seg->ds_len << 4);
4623		seg++;
4624	}
4625
4626	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4627	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
4628	    BUS_DMASYNC_PREWRITE);
4629	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4630	    BUS_DMASYNC_PREWRITE);
4631
4632	/* Update TX scheduler. */
4633	if (ring->qid >= sc->firstaggqueue)
4634		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4635
4636	/* Kick TX ring. */
4637	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4638	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4639
4640	/* Mark TX ring as full if we reach a certain threshold. */
4641	if (++ring->queued > IWN_TX_RING_HIMARK)
4642		sc->qfullmsk |= 1 << ring->qid;
4643
4644	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4645
4646	return 0;
4647}
4648
4649static int
4650iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
4651    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
4652{
4653	struct iwn_ops *ops = &sc->ops;
4654	struct ieee80211vap *vap = ni->ni_vap;
4655	struct iwn_tx_cmd *cmd;
4656	struct iwn_cmd_data *tx;
4657	struct ieee80211_frame *wh;
4658	struct iwn_tx_ring *ring;
4659	struct iwn_tx_desc *desc;
4660	struct iwn_tx_data *data;
4661	struct mbuf *m1;
4662	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4663	uint32_t flags;
4664	u_int hdrlen;
4665	int ac, totlen, error, pad, nsegs = 0, i, rate;
4666	uint8_t type;
4667
4668	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4669
4670	IWN_LOCK_ASSERT(sc);
4671
4672	wh = mtod(m, struct ieee80211_frame *);
4673	hdrlen = ieee80211_anyhdrsize(wh);
4674	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4675
4676	ac = params->ibp_pri & 3;
4677
4678	ring = &sc->txq[ac];
4679	desc = &ring->desc[ring->cur];
4680	data = &ring->data[ring->cur];
4681
4682	/* Choose a TX rate. */
4683	rate = params->ibp_rate0;
4684	totlen = m->m_pkthdr.len;
4685
4686	/* Prepare TX firmware command. */
4687	cmd = &ring->cmd[ring->cur];
4688	cmd->code = IWN_CMD_TX_DATA;
4689	cmd->flags = 0;
4690	cmd->qid = ring->qid;
4691	cmd->idx = ring->cur;
4692
4693	tx = (struct iwn_cmd_data *)cmd->data;
4694	/* NB: No need to clear tx, all fields are reinitialized here. */
4695	tx->scratch = 0;	/* clear "scratch" area */
4696
4697	flags = 0;
4698	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
4699		flags |= IWN_TX_NEED_ACK;
4700	if (params->ibp_flags & IEEE80211_BPF_RTS) {
4701		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4702			/* 5000 autoselects RTS/CTS or CTS-to-self. */
4703			flags &= ~IWN_TX_NEED_RTS;
4704			flags |= IWN_TX_NEED_PROTECTION;
4705		} else
4706			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
4707	}
4708	if (params->ibp_flags & IEEE80211_BPF_CTS) {
4709		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4710			/* 5000 autoselects RTS/CTS or CTS-to-self. */
4711			flags &= ~IWN_TX_NEED_CTS;
4712			flags |= IWN_TX_NEED_PROTECTION;
4713		} else
4714			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
4715	}
4716	if (type == IEEE80211_FC0_TYPE_MGT) {
4717		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4718
4719		/* Tell HW to set timestamp in probe responses. */
4720		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4721			flags |= IWN_TX_INSERT_TSTAMP;
4722
4723		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4724		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4725			tx->timeout = htole16(3);
4726		else
4727			tx->timeout = htole16(2);
4728	} else
4729		tx->timeout = htole16(0);
4730
4731	if (hdrlen & 3) {
4732		/* First segment length must be a multiple of 4. */
4733		flags |= IWN_TX_NEED_PADDING;
4734		pad = 4 - (hdrlen & 3);
4735	} else
4736		pad = 0;
4737
4738	if (ieee80211_radiotap_active_vap(vap)) {
4739		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4740
4741		tap->wt_flags = 0;
4742		tap->wt_rate = rate;
4743
4744		ieee80211_radiotap_tx(vap, m);
4745	}
4746
4747	tx->len = htole16(totlen);
4748	tx->tid = 0;
4749	tx->id = sc->broadcast_id;
4750	tx->rts_ntries = params->ibp_try1;
4751	tx->data_ntries = params->ibp_try0;
4752	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4753	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4754
4755	/* Group or management frame. */
4756	tx->linkq = 0;
4757
4758	/* Set physical address of "scratch area". */
4759	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4760	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4761
4762	/* Copy 802.11 header in TX command. */
4763	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4764
4765	/* Trim 802.11 header. */
4766	m_adj(m, hdrlen);
4767	tx->security = 0;
4768	tx->flags = htole32(flags);
4769
4770	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4771	    &nsegs, BUS_DMA_NOWAIT);
4772	if (error != 0) {
4773		if (error != EFBIG) {
4774			device_printf(sc->sc_dev,
4775			    "%s: can't map mbuf (error %d)\n", __func__, error);
4776			return error;
4777		}
4778		/* Too many DMA segments, linearize mbuf. */
4779		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4780		if (m1 == NULL) {
4781			device_printf(sc->sc_dev,
4782			    "%s: could not defrag mbuf\n", __func__);
4783			return ENOBUFS;
4784		}
4785		m = m1;
4786
4787		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4788		    segs, &nsegs, BUS_DMA_NOWAIT);
4789		if (error != 0) {
4790			device_printf(sc->sc_dev,
4791			    "%s: can't map mbuf (error %d)\n", __func__, error);
4792			return error;
4793		}
4794	}
4795
4796	data->m = m;
4797	data->ni = ni;
4798
4799	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
4800	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
4801
4802	/* Fill TX descriptor. */
4803	desc->nsegs = 1;
4804	if (m->m_len != 0)
4805		desc->nsegs += nsegs;
4806	/* First DMA segment is used by the TX command. */
4807	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4808	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
4809	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
4810	/* Other DMA segments are for data payload. */
4811	seg = &segs[0];
4812	for (i = 1; i <= nsegs; i++) {
4813		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4814		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
4815		    seg->ds_len << 4);
4816		seg++;
4817	}
4818
4819	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4820	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
4821	    BUS_DMASYNC_PREWRITE);
4822	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4823	    BUS_DMASYNC_PREWRITE);
4824
4825	/* Update TX scheduler. */
4826	if (ring->qid >= sc->firstaggqueue)
4827		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4828
4829	/* Kick TX ring. */
4830	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4831	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4832
4833	/* Mark TX ring as full if we reach a certain threshold. */
4834	if (++ring->queued > IWN_TX_RING_HIMARK)
4835		sc->qfullmsk |= 1 << ring->qid;
4836
4837	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4838
4839	return 0;
4840}
4841
4842static void
4843iwn_xmit_task(void *arg0, int pending)
4844{
4845	struct iwn_softc *sc = arg0;
4846	struct ieee80211_node *ni;
4847	struct mbuf *m;
4848	int error;
4849	struct ieee80211_bpf_params p;
4850	int have_p;
4851
4852	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
4853
4854	IWN_LOCK(sc);
4855	/*
4856	 * Dequeue frames, attempt to transmit each one,
4857	 * then clear the beacon-wait flag when we're done.
4858	 */
4859	while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
4860		have_p = 0;
4861		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4862
4863		/* Get xmit params if appropriate */
4864		if (ieee80211_get_xmit_params(m, &p) == 0)
4865			have_p = 1;
4866
4867		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n",
4868		    __func__, m, have_p);
4869
4870		/* If we have xmit params, use them */
4871		if (have_p)
4872			error = iwn_tx_data_raw(sc, m, ni, &p);
4873		else
4874			error = iwn_tx_data(sc, m, ni);
4875
4876		if (error != 0) {
4877			if_inc_counter(ni->ni_vap->iv_ifp,
4878			    IFCOUNTER_OERRORS, 1);
4879			ieee80211_free_node(ni);
4880			m_freem(m);
4881		}
4882	}
4883
4884	sc->sc_beacon_wait = 0;
4885	IWN_UNLOCK(sc);
4886}
4887
4888/*
4889 * raw frame xmit - free node/reference if failed.
4890 */
4891static int
4892iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4893    const struct ieee80211_bpf_params *params)
4894{
4895	struct ieee80211com *ic = ni->ni_ic;
4896	struct iwn_softc *sc = ic->ic_softc;
4897	int error = 0;
4898
4899	DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4900
4901	IWN_LOCK(sc);
4902	if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
4903		m_freem(m);
4904		IWN_UNLOCK(sc);
4905		return (ENETDOWN);
4906	}
4907
4908	/* queue frame if we have to */
4909	if (sc->sc_beacon_wait) {
4910		if (iwn_xmit_queue_enqueue(sc, m) != 0) {
4911			m_freem(m);
4912			IWN_UNLOCK(sc);
4913			return (ENOBUFS);
4914		}
4915		/* Queued, so just return OK */
4916		IWN_UNLOCK(sc);
4917		return (0);
4918	}
4919
4920	if (params == NULL) {
4921		/*
4922		 * Legacy path; interpret frame contents to decide
4923		 * precisely how to send the frame.
4924		 */
4925		error = iwn_tx_data(sc, m, ni);
4926	} else {
4927		/*
4928		 * Caller supplied explicit parameters to use in
4929		 * sending the frame.
4930		 */
4931		error = iwn_tx_data_raw(sc, m, ni, params);
4932	}
4933	if (error == 0)
4934		sc->sc_tx_timer = 5;
4935	else
4936		m_freem(m);
4937
4938	IWN_UNLOCK(sc);
4939
4940	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__);
4941
4942	return (error);
4943}
4944
4945/*
4946 * transmit - don't free mbuf if failed; don't free node ref if failed.
4947 */
4948static int
4949iwn_transmit(struct ieee80211com *ic, struct mbuf *m)
4950{
4951	struct iwn_softc *sc = ic->ic_softc;
4952	struct ieee80211_node *ni;
4953	int error;
4954
4955	ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4956
4957	IWN_LOCK(sc);
4958	if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0 || sc->sc_beacon_wait) {
4959		IWN_UNLOCK(sc);
4960		return (ENXIO);
4961	}
4962
4963	if (sc->qfullmsk) {
4964		IWN_UNLOCK(sc);
4965		return (ENOBUFS);
4966	}
4967
4968	error = iwn_tx_data(sc, m, ni);
4969	if (!error)
4970		sc->sc_tx_timer = 5;
4971	IWN_UNLOCK(sc);
4972	return (error);
4973}
4974
4975static void
4976iwn_scan_timeout(void *arg)
4977{
4978	struct iwn_softc *sc = arg;
4979	struct ieee80211com *ic = &sc->sc_ic;
4980
4981	ic_printf(ic, "scan timeout\n");
4982	ieee80211_restart_all(ic);
4983}
4984
4985static void
4986iwn_watchdog(void *arg)
4987{
4988	struct iwn_softc *sc = arg;
4989	struct ieee80211com *ic = &sc->sc_ic;
4990
4991	IWN_LOCK_ASSERT(sc);
4992
4993	KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running"));
4994
4995	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4996
4997	if (sc->sc_tx_timer > 0) {
4998		if (--sc->sc_tx_timer == 0) {
4999			ic_printf(ic, "device timeout\n");
5000			ieee80211_restart_all(ic);
5001			return;
5002		}
5003	}
5004	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
5005}
5006
5007static int
5008iwn_cdev_open(struct cdev *dev, int flags, int type, struct thread *td)
5009{
5010
5011	return (0);
5012}
5013
5014static int
5015iwn_cdev_close(struct cdev *dev, int flags, int type, struct thread *td)
5016{
5017
5018	return (0);
5019}
5020
5021static int
5022iwn_cdev_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
5023    struct thread *td)
5024{
5025	int rc;
5026	struct iwn_softc *sc = dev->si_drv1;
5027	struct iwn_ioctl_data *d;
5028
5029	rc = priv_check(td, PRIV_DRIVER);
5030	if (rc != 0)
5031		return (rc);
5032
5033	switch (cmd) {
5034	case SIOCGIWNSTATS:
5035		d = (struct iwn_ioctl_data *) data;
5036		IWN_LOCK(sc);
5037		/* XXX validate permissions/memory/etc? */
5038		rc = copyout(&sc->last_stat, d->dst_addr, sizeof(struct iwn_stats));
5039		IWN_UNLOCK(sc);
5040		break;
5041	case SIOCZIWNSTATS:
5042		IWN_LOCK(sc);
5043		memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
5044		IWN_UNLOCK(sc);
5045		break;
5046	default:
5047		rc = EINVAL;
5048		break;
5049	}
5050	return (rc);
5051}
5052
5053static int
5054iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
5055{
5056
5057	return (ENOTTY);
5058}
5059
5060static void
5061iwn_parent(struct ieee80211com *ic)
5062{
5063	struct iwn_softc *sc = ic->ic_softc;
5064	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5065	int startall = 0, stop = 0;
5066
5067	IWN_LOCK(sc);
5068	if (ic->ic_nrunning > 0) {
5069		if (!(sc->sc_flags & IWN_FLAG_RUNNING)) {
5070			iwn_init_locked(sc);
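			/*
			 * IWN_GP_CNTRL_RFKILL is set when the RF switch
			 * allows the radio to operate (see iwn_rftoggle_intr).
			 */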
5071			if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
5072				startall = 1;
5073			else
5074				stop = 1;
5075		}
5076	} else if (sc->sc_flags & IWN_FLAG_RUNNING)
5077		iwn_stop_locked(sc);
5078	IWN_UNLOCK(sc);
5079	if (startall)
5080		ieee80211_start_all(ic);
5081	else if (vap != NULL && stop)
5082		ieee80211_stop(vap);
5083}
5084
5085/*
5086 * Send a command to the firmware.
5087 */
5088static int
5089iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
5090{
5091	struct iwn_tx_ring *ring;
5092	struct iwn_tx_desc *desc;
5093	struct iwn_tx_data *data;
5094	struct iwn_tx_cmd *cmd;
5095	struct mbuf *m;
5096	bus_addr_t paddr;
5097	int totlen, error;
5098	int cmd_queue_num;
5099
5100	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5101
5102	if (async == 0)
5103		IWN_LOCK_ASSERT(sc);
5104
5105	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
5106		cmd_queue_num = IWN_PAN_CMD_QUEUE;
5107	else
5108		cmd_queue_num = IWN_CMD_QUEUE_NUM;
5109
5110	ring = &sc->txq[cmd_queue_num];
5111	desc = &ring->desc[ring->cur];
5112	data = &ring->data[ring->cur];
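	/* Total length is the 4-byte command header plus the payload. */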
5113	totlen = 4 + size;
5114
5115	if (size > sizeof cmd->data) {
5116		/* Command is too large to fit in a descriptor. */
5117		if (totlen > MCLBYTES)
5118			return EINVAL;
5119		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
5120		if (m == NULL)
5121			return ENOMEM;
5122		cmd = mtod(m, struct iwn_tx_cmd *);
5123		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
5124		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
5125		if (error != 0) {
5126			m_freem(m);
5127			return error;
5128		}
5129		data->m = m;
5130	} else {
5131		cmd = &ring->cmd[ring->cur];
5132		paddr = data->cmd_paddr;
5133	}
5134
5135	cmd->code = code;
5136	cmd->flags = 0;
5137	cmd->qid = ring->qid;
5138	cmd->idx = ring->cur;
5139	memcpy(cmd->data, buf, size);
5140
5141	desc->nsegs = 1;
5142	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
5143	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
5144
5145	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
5146	    __func__, iwn_intr_str(cmd->code), cmd->code,
5147	    cmd->flags, cmd->qid, cmd->idx);
5148
5149	if (size > sizeof cmd->data) {
5150		bus_dmamap_sync(ring->data_dmat, data->map,
5151		    BUS_DMASYNC_PREWRITE);
5152	} else {
5153		bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5154		    BUS_DMASYNC_PREWRITE);
5155	}
5156	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5157	    BUS_DMASYNC_PREWRITE);
5158
5159	/* Kick command ring. */
5160	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
5161	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5162
5163	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5164
5165	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
5166}
5167
5168static int
5169iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5170{
5171	struct iwn4965_node_info hnode;
5172	caddr_t src, dst;
5173
5174	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5175
5176	/*
5177	 * We use the node structure for 5000 Series internally (it is
5178	 * a superset of the one for 4965AGN). We thus copy the common
5179	 * fields before sending the command.
5180	 */
5181	src = (caddr_t)node;
5182	dst = (caddr_t)&hnode;
5183	memcpy(dst, src, 48);
5184	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
5185	memcpy(dst + 48, src + 72, 20);
5186	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
5187}
5188
5189static int
5190iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5191{
5192
5193	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5194
5195	/* Direct mapping. */
5196	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
5197}
5198
5199static int
5200iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
5201{
5202	struct iwn_node *wn = (void *)ni;
5203	struct ieee80211_rateset *rs;
5204	struct iwn_cmd_link_quality linkq;
5205	int i, rate, txrate;
5206	int is_11n;
5207
5208	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5209
5210	memset(&linkq, 0, sizeof linkq);
5211	linkq.id = wn->id;
5212	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5213	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5214
5215	linkq.ampdu_max = 32;		/* XXX negotiated? */
5216	linkq.ampdu_threshold = 3;
5217	linkq.ampdu_limit = htole16(4000);	/* 4ms */
5218
5219	DPRINTF(sc, IWN_DEBUG_XMIT,
5220	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
5221	    __func__,
5222	    linkq.antmsk_1stream,
5223	    linkq.antmsk_2stream,
5224	    sc->ntxchains);
5225
5226	/*
5227	 * Are we using 11n rates? Ensure the channel is
5228	 * 11n _and_ we have some 11n rates, or don't
5229	 * try.
5230	 */
5231	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
5232		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
5233		is_11n = 1;
5234	} else {
5235		rs = &ni->ni_rates;
5236		is_11n = 0;
5237	}
5238
5239	/* Start at highest available bit-rate. */
5240	/*
5241	 * XXX this is all very dirty!
5242	 */
5243	if (is_11n)
5244		txrate = ni->ni_htrates.rs_nrates - 1;
5245	else
5246		txrate = rs->rs_nrates - 1;
5247	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
5248		uint32_t plcp;
5249
5250		/*
5251		 * XXX TODO: ensure the last two slots are the two lowest
5252		 * rate entries, just for now.
5253		 */
5254		if (i == 14 || i == 15)
5255			txrate = 0;
5256
5257		if (is_11n)
5258			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
5259		else
5260			rate = IEEE80211_RV(rs->rs_rates[txrate]);
5261
5262		/* Do rate -> PLCP config mapping */
5263		plcp = iwn_rate_to_plcp(sc, ni, rate);
5264		linkq.retry[i] = plcp;
5265		DPRINTF(sc, IWN_DEBUG_XMIT,
5266		    "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
5267		    __func__,
5268		    i,
5269		    txrate,
5270		    rate,
5271		    le32toh(plcp));
5272
5273		/*
5274		 * The mimo field holds the first table index at which
5275		 * that entry and all subsequent entries no longer use
5276		 * MIMO rates.
5277		 *
5278		 * Since we're filling linkq from 0..15 and we're filling
5279		 * from the highest MCS rates to the lowest rates, if we
5280		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
5281		 * the next entry.)  That way if the next entry is a non-MIMO
5282		 * entry, we're already pointing at it.
5283		 */
5284		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
5285		    IEEE80211_RV(le32toh(plcp)) > 7)
5286			linkq.mimo = i + 1;
5287
5288		/* Next retry at immediate lower bit-rate. */
5289		if (txrate > 0)
5290			txrate--;
5291	}
5292	/*
5293	 * If we reached the end of the list and indeed we hit
5294	 * all MIMO rates (eg 5300 doing MCS23-15) then yes,
5295	 * set mimo to 15.  Setting it to 16 panics the firmware.
5296	 */
5297	if (linkq.mimo > 15)
5298		linkq.mimo = 15;
5299
5300	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);
5301
5302	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5303
5304	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
5305}
5306
5307/*
5308 * Broadcast node is used to send group-addressed and management frames.
5309 */
5310static int
5311iwn_add_broadcast_node(struct iwn_softc *sc, int async)
5312{
5313	struct iwn_ops *ops = &sc->ops;
5314	struct ieee80211com *ic = &sc->sc_ic;
5315	struct iwn_node_info node;
5316	struct iwn_cmd_link_quality linkq;
5317	uint8_t txant;
5318	int i, error;
5319
5320	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5321
5322	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5323
5324	memset(&node, 0, sizeof node);
5325	IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
5326	node.id = sc->broadcast_id;
5327	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
5328	if ((error = ops->add_node(sc, &node, async)) != 0)
5329		return error;
5330
5331	/* Use the first valid TX antenna. */
5332	txant = IWN_LSB(sc->txchainmask);
5333
5334	memset(&linkq, 0, sizeof linkq);
5335	linkq.id = sc->broadcast_id;
5336	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5337	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5338	linkq.ampdu_max = 64;
5339	linkq.ampdu_threshold = 3;
5340	linkq.ampdu_limit = htole16(4000);	/* 4ms */
5341
5342	/* Use lowest mandatory bit-rate. */
5343	/* XXX rate table lookup? */
5344	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
5345		linkq.retry[0] = htole32(0xd);
5346	else
5347		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
5348	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
5349	/* Use same bit-rate for all TX retries. */
5350	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
5351		linkq.retry[i] = linkq.retry[0];
5352	}
5353
5354	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5355
5356	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
5357}
5358
5359static int
5360iwn_updateedca(struct ieee80211com *ic)
5361{
5362#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
5363	struct iwn_softc *sc = ic->ic_softc;
5364	struct iwn_edca_params cmd;
5365	int aci;
5366
5367	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5368
5369	memset(&cmd, 0, sizeof cmd);
5370	cmd.flags = htole32(IWN_EDCA_UPDATE);
5371
5372	IEEE80211_LOCK(ic);
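	/*
	 * The WME txopLimit is expressed in units of 32 microseconds;
	 * IEEE80211_TXOP_TO_US() converts it to microseconds for the
	 * firmware.
	 */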
5373	for (aci = 0; aci < WME_NUM_AC; aci++) {
5374		const struct wmeParams *ac =
5375		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
5376		cmd.ac[aci].aifsn = ac->wmep_aifsn;
5377		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
5378		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
5379		cmd.ac[aci].txoplimit =
5380		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
5381	}
5382	IEEE80211_UNLOCK(