1/******************************************************************************
2
3  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5  802.11 status code portion of this file from ethereal-0.10.6:
6    Copyright 2000, Axis Communications AB
7    Ethereal - Network traffic analyzer
8    By Gerald Combs <gerald@ethereal.com>
9    Copyright 1998 Gerald Combs
10
11  This program is free software; you can redistribute it and/or modify it
12  under the terms of version 2 of the GNU General Public License as
13  published by the Free Software Foundation.
14
15  This program is distributed in the hope that it will be useful, but WITHOUT
16  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  more details.
19
20  You should have received a copy of the GNU General Public License along with
21  this program; if not, write to the Free Software Foundation, Inc., 59
22  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23
24  The full GNU General Public License is included in this distribution in the
25  file called LICENSE.
26
27  Contact Information:
28  Intel Linux Wireless <ilw@linux.intel.com>
29  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <net/cfg80211-wext.h>
36#include "ipw2200.h"
37#include "ipw.h"
38
39
40#ifndef KBUILD_EXTMOD
41#define VK "k"
42#else
43#define VK
44#endif
45
46#ifdef CONFIG_IPW2200_DEBUG
47#define VD "d"
48#else
49#define VD
50#endif
51
52#ifdef CONFIG_IPW2200_MONITOR
53#define VM "m"
54#else
55#define VM
56#endif
57
58#ifdef CONFIG_IPW2200_PROMISCUOUS
59#define VP "p"
60#else
61#define VP
62#endif
63
64#ifdef CONFIG_IPW2200_RADIOTAP
65#define VR "r"
66#else
67#define VR
68#endif
69
70#ifdef CONFIG_IPW2200_QOS
71#define VQ "q"
72#else
73#define VQ
74#endif
75
76#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
78#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
79#define DRV_VERSION     IPW2200_VERSION
80
81#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82
83MODULE_DESCRIPTION(DRV_DESCRIPTION);
84MODULE_VERSION(DRV_VERSION);
85MODULE_AUTHOR(DRV_COPYRIGHT);
86MODULE_LICENSE("GPL");
87MODULE_FIRMWARE("ipw2200-ibss.fw");
88#ifdef CONFIG_IPW2200_MONITOR
89MODULE_FIRMWARE("ipw2200-sniffer.fw");
90#endif
91MODULE_FIRMWARE("ipw2200-bss.fw");
92
93static int cmdlog = 0;
94static int debug = 0;
95static int default_channel = 0;
96static int network_mode = 0;
97
98static u32 ipw_debug_level;
99static int associate;
100static int auto_create = 1;
101static int led_support = 1;
102static int disable = 0;
103static int bt_coexist = 0;
104static int hwcrypto = 0;
105static int roaming = 1;
106static const char ipw_modes[] = {
107	'a', 'b', 'g', '?'
108};
109static int antenna = CFG_SYS_ANTENNA_BOTH;
110
111#ifdef CONFIG_IPW2200_PROMISCUOUS
112static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
113#endif
114
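/* Bit rates advertised for the hardware; struct ieee80211_rate.bitrate is in
 * units of 100 kb/s, so 10 = 1 Mb/s and 540 = 54 Mb/s.  The first four
 * entries are the 2.4 GHz CCK (802.11b) rates, the remaining eight are the
 * OFDM rates used by 802.11a/g (see the a/bg rate aliases below). */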
115static struct ieee80211_rate ipw2200_rates[] = {
116	{ .bitrate = 10 },
117	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120	{ .bitrate = 60 },
121	{ .bitrate = 90 },
122	{ .bitrate = 120 },
123	{ .bitrate = 180 },
124	{ .bitrate = 240 },
125	{ .bitrate = 360 },
126	{ .bitrate = 480 },
127	{ .bitrate = 540 }
128};
129
130#define ipw2200_a_rates		(ipw2200_rates + 4)
131#define ipw2200_num_a_rates	8
132#define ipw2200_bg_rates	(ipw2200_rates + 0)
133#define ipw2200_num_bg_rates	12
134
/* Ugly macro to convert literal channel numbers into their MHz equivalents.
 * There are certainly some conditions that will break this (like feeding it '30'),
 * but they shouldn't arise since nothing talks on channel 30. */
138#define ieee80211chan2mhz(x) \
139	(((x) <= 14) ? \
140	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141	((x) + 1000) * 5)
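/* For reference, the values implied by the macro above: channel 1 -> 2412 MHz,
 * channel 14 -> 2484 MHz, channel 36 -> (36 + 1000) * 5 = 5180 MHz. */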
142
143#ifdef CONFIG_IPW2200_QOS
144static int qos_enable = 0;
145static int qos_burst_enable = 0;
146static int qos_no_ack_mask = 0;
147static int burst_duration_CCK = 0;
148static int burst_duration_OFDM = 0;
149
150static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152	 QOS_TX3_CW_MIN_OFDM},
153	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154	 QOS_TX3_CW_MAX_OFDM},
155	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159};
160
161static struct libipw_qos_parameters def_qos_parameters_CCK = {
162	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163	 QOS_TX3_CW_MIN_CCK},
164	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165	 QOS_TX3_CW_MAX_CCK},
166	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169	 QOS_TX3_TXOP_LIMIT_CCK}
170};
171
172static struct libipw_qos_parameters def_parameters_OFDM = {
173	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174	 DEF_TX3_CW_MIN_OFDM},
175	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176	 DEF_TX3_CW_MAX_OFDM},
177	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181};
182
183static struct libipw_qos_parameters def_parameters_CCK = {
184	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185	 DEF_TX3_CW_MIN_CCK},
186	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187	 DEF_TX3_CW_MAX_CCK},
188	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191	 DEF_TX3_TXOP_LIMIT_CCK}
192};
193
194static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195
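/* Maps the eight 802.11e user priorities (0..7, used as the array index)
 * onto the four hardware TX queues. */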
196static int from_priority_to_tx_queue[] = {
197	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199};
200
201static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202
203static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204				       *qos_param);
205static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206				     *qos_param);
207#endif				/* CONFIG_IPW2200_QOS */
208
209static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210static void ipw_remove_current_network(struct ipw_priv *priv);
211static void ipw_rx(struct ipw_priv *priv);
212static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213				struct clx2_tx_queue *txq, int qindex);
214static int ipw_queue_reset(struct ipw_priv *priv);
215
216static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217			     int len, int sync);
218
219static void ipw_tx_queue_free(struct ipw_priv *);
220
221static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223static void ipw_rx_queue_replenish(void *);
224static int ipw_up(struct ipw_priv *);
225static void ipw_bg_up(struct work_struct *work);
226static void ipw_down(struct ipw_priv *);
227static void ipw_bg_down(struct work_struct *work);
228static int ipw_config(struct ipw_priv *);
229static int init_supported_rates(struct ipw_priv *priv,
230				struct ipw_supported_rates *prates);
231static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232static void ipw_send_wep_keys(struct ipw_priv *, int);
233
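/* Format one hex-dump line into buf: an 8-digit hex offset, up to 16 data
 * bytes as two groups of eight hex values, then the same bytes as ASCII with
 * non-printable characters shown as '.'.  Returns the number of characters
 * written. */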
234static int snprint_line(char *buf, size_t count,
235			const u8 * data, u32 len, u32 ofs)
236{
237	int out, i, j, l;
238	char c;
239
240	out = snprintf(buf, count, "%08X", ofs);
241
242	for (l = 0, i = 0; i < 2; i++) {
243		out += snprintf(buf + out, count - out, " ");
244		for (j = 0; j < 8 && l < len; j++, l++)
245			out += snprintf(buf + out, count - out, "%02X ",
246					data[(i * 8 + j)]);
247		for (; j < 8; j++)
248			out += snprintf(buf + out, count - out, "   ");
249	}
250
251	out += snprintf(buf + out, count - out, " ");
252	for (l = 0, i = 0; i < 2; i++) {
253		out += snprintf(buf + out, count - out, " ");
254		for (j = 0; j < 8 && l < len; j++, l++) {
255			c = data[(i * 8 + j)];
256			if (!isascii(c) || !isprint(c))
257				c = '.';
258
259			out += snprintf(buf + out, count - out, "%c", c);
260		}
261
262		for (; j < 8; j++)
263			out += snprintf(buf + out, count - out, " ");
264	}
265
266	return out;
267}
268
269static void printk_buf(int level, const u8 * data, u32 len)
270{
271	char line[81];
272	u32 ofs = 0;
273	if (!(ipw_debug_level & level))
274		return;
275
276	while (len) {
277		snprint_line(line, sizeof(line), &data[ofs],
278			     min(len, 16U), ofs);
279		printk(KERN_DEBUG "%s\n", line);
280		ofs += 16;
281		len -= min(len, 16U);
282	}
283}
284
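/* Hex-dump an arbitrary buffer into 'output' using snprint_line() for each
 * 16-byte chunk; returns the total number of characters written. */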
285static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286{
287	size_t out = size;
288	u32 ofs = 0;
289	int total = 0;
290
291	while (size && len) {
292		out = snprint_line(output, size, &data[ofs],
293				   min_t(size_t, len, 16U), ofs);
294
295		ofs += 16;
296		output += out;
297		size -= out;
298		len -= min_t(size_t, len, 16U);
299		total += out;
300	}
301	return total;
302}
303
/* alias for 32-bit indirect read (for SRAM/reg above 4K); plain alias, no debug output */
305static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307
/* alias for 8-bit indirect read (for SRAM/reg above 4K); plain alias, no debug output */
309static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311
312/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315{
316	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317		     __LINE__, (u32) (b), (u32) (c));
318	_ipw_write_reg8(a, b, c);
319}
320
321/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324{
325	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326		     __LINE__, (u32) (b), (u32) (c));
327	_ipw_write_reg16(a, b, c);
328}
329
330/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333{
334	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335		     __LINE__, (u32) (b), (u32) (c));
336	_ipw_write_reg32(a, b, c);
337}
338
339/* 8-bit direct write (low 4K) */
340static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341		u8 val)
342{
343	writeb(val, ipw->hw_base + ofs);
344}
345
346/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347#define ipw_write8(ipw, ofs, val) do { \
348	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349			__LINE__, (u32)(ofs), (u32)(val)); \
350	_ipw_write8(ipw, ofs, val); \
351} while (0)
352
353/* 16-bit direct write (low 4K) */
354static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355		u16 val)
356{
357	writew(val, ipw->hw_base + ofs);
358}
359
360/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361#define ipw_write16(ipw, ofs, val) do { \
362	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363			__LINE__, (u32)(ofs), (u32)(val)); \
364	_ipw_write16(ipw, ofs, val); \
365} while (0)
366
367/* 32-bit direct write (low 4K) */
368static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369		u32 val)
370{
371	writel(val, ipw->hw_base + ofs);
372}
373
374/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375#define ipw_write32(ipw, ofs, val) do { \
376	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377			__LINE__, (u32)(ofs), (u32)(val)); \
378	_ipw_write32(ipw, ofs, val); \
379} while (0)
380
381/* 8-bit direct read (low 4K) */
382static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383{
384	return readb(ipw->hw_base + ofs);
385}
386
387/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388#define ipw_read8(ipw, ofs) ({ \
389	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390			(u32)(ofs)); \
391	_ipw_read8(ipw, ofs); \
392})
393
394/* 16-bit direct read (low 4K) */
395static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396{
397	return readw(ipw->hw_base + ofs);
398}
399
400/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401#define ipw_read16(ipw, ofs) ({ \
402	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403			(u32)(ofs)); \
404	_ipw_read16(ipw, ofs); \
405})
406
407/* 32-bit direct read (low 4K) */
408static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409{
410	return readl(ipw->hw_base + ofs);
411}
412
413/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414#define ipw_read32(ipw, ofs) ({ \
415	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416			(u32)(ofs)); \
417	_ipw_read32(ipw, ofs); \
418})
419
420static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422#define ipw_read_indirect(a, b, c, d) ({ \
423	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424			__LINE__, (u32)(b), (u32)(d)); \
425	_ipw_read_indirect(a, b, c, d); \
426})
427
/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
429static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430				int num);
431#define ipw_write_indirect(a, b, c, d) do { \
432	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433			__LINE__, (u32)(b), (u32)(d)); \
434	_ipw_write_indirect(a, b, c, d); \
435} while (0)
436
437/* 32-bit indirect write (above 4K) */
438static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439{
440	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
443}
444
445/* 8-bit indirect write (above 4K) */
446static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447{
448	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
449	u32 dif_len = reg - aligned_addr;
450
451	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454}
455
456/* 16-bit indirect write (above 4K) */
457static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458{
459	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
460	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461
462	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465}
466
467/* 8-bit indirect read (above 4K) */
468static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469{
470	u32 word;
471	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474	return (word >> ((reg & 0x3) * 8)) & 0xff;
475}
476
477/* 32-bit indirect read (above 4K) */
478static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479{
480	u32 value;
481
482	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483
484	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487	return value;
488}
489
490/* General purpose, no alignment requirement, iterative (multi-byte) read, */
491/*    for area above 1st 4K of SRAM/reg space */
492static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493			       int num)
494{
495	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
496	u32 dif_len = addr - aligned_addr;
497	u32 i;
498
499	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500
501	if (num <= 0) {
502		return;
503	}
504
505	/* Read the first dword (or portion) byte by byte */
506	if (unlikely(dif_len)) {
507		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508		/* Start reading at aligned_addr + dif_len */
509		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511		aligned_addr += 4;
512	}
513
514	/* Read all of the middle dwords as dwords, with auto-increment */
515	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518
519	/* Read the last dword (or portion) byte by byte */
520	if (unlikely(num)) {
521		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522		for (i = 0; num > 0; i++, num--)
523			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524	}
525}
526
527/* General purpose, no alignment requirement, iterative (multi-byte) write, */
528/*    for area above 1st 4K of SRAM/reg space */
529static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530				int num)
531{
532	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
533	u32 dif_len = addr - aligned_addr;
534	u32 i;
535
536	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537
538	if (num <= 0) {
539		return;
540	}
541
542	/* Write the first dword (or portion) byte by byte */
543	if (unlikely(dif_len)) {
544		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545		/* Start writing at aligned_addr + dif_len */
546		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548		aligned_addr += 4;
549	}
550
551	/* Write all of the middle dwords as dwords, with auto-increment */
552	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555
556	/* Write the last dword (or portion) byte by byte */
557	if (unlikely(num)) {
558		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559		for (i = 0; num > 0; i++, num--, buf++)
560			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561	}
562}
563
564/* General purpose, no alignment requirement, iterative (multi-byte) write, */
565/*    for 1st 4K of SRAM/regs space */
566static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567			     int num)
568{
569	memcpy_toio((priv->hw_base + addr), buf, num);
570}
571
572/* Set bit(s) in low 4K of SRAM/regs */
573static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574{
575	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576}
577
578/* Clear bit(s) in low 4K of SRAM/regs */
579static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580{
581	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582}
583
584static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585{
586	if (priv->status & STATUS_INT_ENABLED)
587		return;
588	priv->status |= STATUS_INT_ENABLED;
589	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590}
591
592static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593{
594	if (!(priv->status & STATUS_INT_ENABLED))
595		return;
596	priv->status &= ~STATUS_INT_ENABLED;
597	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598}
599
600static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601{
602	unsigned long flags;
603
604	spin_lock_irqsave(&priv->irq_lock, flags);
605	__ipw_enable_interrupts(priv);
606	spin_unlock_irqrestore(&priv->irq_lock, flags);
607}
608
609static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610{
611	unsigned long flags;
612
613	spin_lock_irqsave(&priv->irq_lock, flags);
614	__ipw_disable_interrupts(priv);
615	spin_unlock_irqrestore(&priv->irq_lock, flags);
616}
617
618static char *ipw_error_desc(u32 val)
619{
620	switch (val) {
621	case IPW_FW_ERROR_OK:
622		return "ERROR_OK";
623	case IPW_FW_ERROR_FAIL:
624		return "ERROR_FAIL";
625	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626		return "MEMORY_UNDERFLOW";
627	case IPW_FW_ERROR_MEMORY_OVERFLOW:
628		return "MEMORY_OVERFLOW";
629	case IPW_FW_ERROR_BAD_PARAM:
630		return "BAD_PARAM";
631	case IPW_FW_ERROR_BAD_CHECKSUM:
632		return "BAD_CHECKSUM";
633	case IPW_FW_ERROR_NMI_INTERRUPT:
634		return "NMI_INTERRUPT";
635	case IPW_FW_ERROR_BAD_DATABASE:
636		return "BAD_DATABASE";
637	case IPW_FW_ERROR_ALLOC_FAIL:
638		return "ALLOC_FAIL";
639	case IPW_FW_ERROR_DMA_UNDERRUN:
640		return "DMA_UNDERRUN";
641	case IPW_FW_ERROR_DMA_STATUS:
642		return "DMA_STATUS";
643	case IPW_FW_ERROR_DINO_ERROR:
644		return "DINO_ERROR";
645	case IPW_FW_ERROR_EEPROM_ERROR:
646		return "EEPROM_ERROR";
647	case IPW_FW_ERROR_SYSASSERT:
648		return "SYSASSERT";
649	case IPW_FW_ERROR_FATAL_ERROR:
650		return "FATAL_ERROR";
651	default:
652		return "UNKNOWN_ERROR";
653	}
654}
655
656static void ipw_dump_error_log(struct ipw_priv *priv,
657			       struct ipw_fw_error *error)
658{
659	u32 i;
660
661	if (!error) {
662		IPW_ERROR("Error allocating and capturing error log.  "
663			  "Nothing to dump.\n");
664		return;
665	}
666
667	IPW_ERROR("Start IPW Error Log Dump:\n");
668	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669		  error->status, error->config);
670
671	for (i = 0; i < error->elem_len; i++)
672		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
673			  ipw_error_desc(error->elem[i].desc),
674			  error->elem[i].time,
675			  error->elem[i].blink1,
676			  error->elem[i].blink2,
677			  error->elem[i].link1,
678			  error->elem[i].link2, error->elem[i].data);
679	for (i = 0; i < error->log_len; i++)
680		IPW_ERROR("%i\t0x%08x\t%i\n",
681			  error->log[i].time,
682			  error->log[i].data, error->log[i].event);
683}
684
685static inline int ipw_is_init(struct ipw_priv *priv)
686{
687	return (priv->status & STATUS_INIT) ? 1 : 0;
688}
689
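/* Read one device "ordinal" (a firmware-exported value/statistic) into *val.
 * On entry *len is the size of the caller's buffer; on success it is updated
 * to the number of bytes actually stored.  A minimal usage sketch, mirroring
 * the sysfs handlers further below:
 *
 *	u32 tmp = 0, len = sizeof(u32);
 *
 *	if (!ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		;	/* tmp now holds the value, len the stored size */
 */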
690static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691{
692	u32 addr, field_info, field_len, field_count, total_len;
693
694	IPW_DEBUG_ORD("ordinal = %i\n", ord);
695
696	if (!priv || !val || !len) {
697		IPW_DEBUG_ORD("Invalid argument\n");
698		return -EINVAL;
699	}
700
701	/* verify device ordinal tables have been initialized */
702	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703		IPW_DEBUG_ORD("Access ordinals before initialization\n");
704		return -EINVAL;
705	}
706
707	switch (IPW_ORD_TABLE_ID_MASK & ord) {
708	case IPW_ORD_TABLE_0_MASK:
709		/*
710		 * TABLE 0: Direct access to a table of 32 bit values
711		 *
712		 * This is a very simple table with the data directly
713		 * read from the table
714		 */
715
716		/* remove the table id from the ordinal */
717		ord &= IPW_ORD_TABLE_VALUE_MASK;
718
719		/* boundary check */
720		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) larger than "
				      "max (%i)\n", ord, priv->table0_len);
723			return -EINVAL;
724		}
725
726		/* verify we have enough room to store the value */
727		if (*len < sizeof(u32)) {
728			IPW_DEBUG_ORD("ordinal buffer length too small, "
729				      "need %zd\n", sizeof(u32));
730			return -EINVAL;
731		}
732
733		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734			      ord, priv->table0_addr + (ord << 2));
735
736		*len = sizeof(u32);
737		ord <<= 2;
738		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739		break;
740
741	case IPW_ORD_TABLE_1_MASK:
742		/*
743		 * TABLE 1: Indirect access to a table of 32 bit values
744		 *
745		 * This is a fairly large table of u32 values each
746		 * representing starting addr for the data (which is
747		 * also a u32)
748		 */
749
750		/* remove the table id from the ordinal */
751		ord &= IPW_ORD_TABLE_VALUE_MASK;
752
753		/* boundary check */
754		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too large\n");
756			return -EINVAL;
757		}
758
759		/* verify we have enough room to store the value */
760		if (*len < sizeof(u32)) {
761			IPW_DEBUG_ORD("ordinal buffer length too small, "
762				      "need %zd\n", sizeof(u32));
763			return -EINVAL;
764		}
765
766		*((u32 *) val) =
767		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768		*len = sizeof(u32);
769		break;
770
771	case IPW_ORD_TABLE_2_MASK:
772		/*
773		 * TABLE 2: Indirect access to a table of variable sized values
774		 *
		 * This table consists of six values, each containing
		 *     - a dword containing the starting offset of the data
		 *     - a dword containing the length in the first 16 bits
		 *       and the count in the second 16 bits
779		 */
780
781		/* remove the table id from the ordinal */
782		ord &= IPW_ORD_TABLE_VALUE_MASK;
783
784		/* boundary check */
785		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too large\n");
787			return -EINVAL;
788		}
789
790		/* get the address of statistic */
791		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792
793		/* get the second DW of statistics ;
794		 * two 16-bit words - first is length, second is count */
795		field_info =
796		    ipw_read_reg32(priv,
797				   priv->table2_addr + (ord << 3) +
798				   sizeof(u32));
799
800		/* get each entry length */
801		field_len = *((u16 *) & field_info);
802
803		/* get number of entries */
804		field_count = *(((u16 *) & field_info) + 1);
805
806		/* abort if not enough memory */
807		total_len = field_len * field_count;
808		if (total_len > *len) {
809			*len = total_len;
810			return -EINVAL;
811		}
812
813		*len = total_len;
814		if (!total_len)
815			return 0;
816
817		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818			      "field_info = 0x%08x\n",
819			      addr, total_len, field_info);
820		ipw_read_indirect(priv, addr, val, total_len);
821		break;
822
823	default:
824		IPW_DEBUG_ORD("Invalid ordinal!\n");
825		return -EINVAL;
826
827	}
828
829	return 0;
830}
831
832static void ipw_init_ordinals(struct ipw_priv *priv)
833{
834	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835	priv->table0_len = ipw_read32(priv, priv->table0_addr);
836
837	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838		      priv->table0_addr, priv->table0_len);
839
840	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842
843	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844		      priv->table1_addr, priv->table1_len);
845
846	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* only the low 16 bits hold the length */
849
850	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851		      priv->table2_addr, priv->table2_len);
852
853}
854
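/* Scrub a value read from IPW_EVENT_REG before the LED code writes it back:
 * clear the standby-start bit and any ODMA/IDMA/ADMA gate bits that happen
 * to be set. */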
855static u32 ipw_register_toggle(u32 reg)
856{
857	reg &= ~IPW_START_STANDBY;
858	if (reg & IPW_GATE_ODMA)
859		reg &= ~IPW_GATE_ODMA;
860	if (reg & IPW_GATE_IDMA)
861		reg &= ~IPW_GATE_IDMA;
862	if (reg & IPW_GATE_ADMA)
863		reg &= ~IPW_GATE_ADMA;
864	return reg;
865}
866
867/*
868 * LED behavior:
 * - On radio ON, turn on any LEDs that need to be on during startup
870 * - On initialization, start unassociated blink
871 * - On association, disable unassociated blink
872 * - On disassociation, start unassociated blink
873 * - On radio OFF, turn off any LEDs started during radio on
874 *
875 */
876#define LD_TIME_LINK_ON msecs_to_jiffies(300)
877#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878#define LD_TIME_ACT_ON msecs_to_jiffies(250)
879
880static void ipw_led_link_on(struct ipw_priv *priv)
881{
882	unsigned long flags;
883	u32 led;
884
885	/* If configured to not use LEDs, or nic_type is 1,
886	 * then we don't toggle a LINK led */
887	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888		return;
889
890	spin_lock_irqsave(&priv->lock, flags);
891
892	if (!(priv->status & STATUS_RF_KILL_MASK) &&
893	    !(priv->status & STATUS_LED_LINK_ON)) {
894		IPW_DEBUG_LED("Link LED On\n");
895		led = ipw_read_reg32(priv, IPW_EVENT_REG);
896		led |= priv->led_association_on;
897
898		led = ipw_register_toggle(led);
899
900		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901		ipw_write_reg32(priv, IPW_EVENT_REG, led);
902
903		priv->status |= STATUS_LED_LINK_ON;
904
905		/* If we aren't associated, schedule turning the LED off */
906		if (!(priv->status & STATUS_ASSOCIATED))
907			schedule_delayed_work(&priv->led_link_off,
908					      LD_TIME_LINK_ON);
909	}
910
911	spin_unlock_irqrestore(&priv->lock, flags);
912}
913
914static void ipw_bg_led_link_on(struct work_struct *work)
915{
916	struct ipw_priv *priv =
917		container_of(work, struct ipw_priv, led_link_on.work);
918	mutex_lock(&priv->mutex);
919	ipw_led_link_on(priv);
920	mutex_unlock(&priv->mutex);
921}
922
923static void ipw_led_link_off(struct ipw_priv *priv)
924{
925	unsigned long flags;
926	u32 led;
927
	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
930	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931		return;
932
933	spin_lock_irqsave(&priv->lock, flags);
934
935	if (priv->status & STATUS_LED_LINK_ON) {
936		led = ipw_read_reg32(priv, IPW_EVENT_REG);
937		led &= priv->led_association_off;
938		led = ipw_register_toggle(led);
939
940		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941		ipw_write_reg32(priv, IPW_EVENT_REG, led);
942
943		IPW_DEBUG_LED("Link LED Off\n");
944
945		priv->status &= ~STATUS_LED_LINK_ON;
946
947		/* If we aren't associated and the radio is on, schedule
948		 * turning the LED on (blink while unassociated) */
949		if (!(priv->status & STATUS_RF_KILL_MASK) &&
950		    !(priv->status & STATUS_ASSOCIATED))
951			schedule_delayed_work(&priv->led_link_on,
952					      LD_TIME_LINK_OFF);
953
954	}
955
956	spin_unlock_irqrestore(&priv->lock, flags);
957}
958
959static void ipw_bg_led_link_off(struct work_struct *work)
960{
961	struct ipw_priv *priv =
962		container_of(work, struct ipw_priv, led_link_off.work);
963	mutex_lock(&priv->mutex);
964	ipw_led_link_off(priv);
965	mutex_unlock(&priv->mutex);
966}
967
968static void __ipw_led_activity_on(struct ipw_priv *priv)
969{
970	u32 led;
971
972	if (priv->config & CFG_NO_LED)
973		return;
974
975	if (priv->status & STATUS_RF_KILL_MASK)
976		return;
977
978	if (!(priv->status & STATUS_LED_ACT_ON)) {
979		led = ipw_read_reg32(priv, IPW_EVENT_REG);
980		led |= priv->led_activity_on;
981
982		led = ipw_register_toggle(led);
983
984		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985		ipw_write_reg32(priv, IPW_EVENT_REG, led);
986
987		IPW_DEBUG_LED("Activity LED On\n");
988
989		priv->status |= STATUS_LED_ACT_ON;
990
991		cancel_delayed_work(&priv->led_act_off);
992		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993	} else {
994		/* Reschedule LED off for full time period */
995		cancel_delayed_work(&priv->led_act_off);
996		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997	}
998}
999
1000#if 0
1001void ipw_led_activity_on(struct ipw_priv *priv)
1002{
1003	unsigned long flags;
1004	spin_lock_irqsave(&priv->lock, flags);
1005	__ipw_led_activity_on(priv);
1006	spin_unlock_irqrestore(&priv->lock, flags);
1007}
1008#endif  /*  0  */
1009
1010static void ipw_led_activity_off(struct ipw_priv *priv)
1011{
1012	unsigned long flags;
1013	u32 led;
1014
1015	if (priv->config & CFG_NO_LED)
1016		return;
1017
1018	spin_lock_irqsave(&priv->lock, flags);
1019
1020	if (priv->status & STATUS_LED_ACT_ON) {
1021		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022		led &= priv->led_activity_off;
1023
1024		led = ipw_register_toggle(led);
1025
1026		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028
1029		IPW_DEBUG_LED("Activity LED Off\n");
1030
1031		priv->status &= ~STATUS_LED_ACT_ON;
1032	}
1033
1034	spin_unlock_irqrestore(&priv->lock, flags);
1035}
1036
1037static void ipw_bg_led_activity_off(struct work_struct *work)
1038{
1039	struct ipw_priv *priv =
1040		container_of(work, struct ipw_priv, led_act_off.work);
1041	mutex_lock(&priv->mutex);
1042	ipw_led_activity_off(priv);
1043	mutex_unlock(&priv->mutex);
1044}
1045
1046static void ipw_led_band_on(struct ipw_priv *priv)
1047{
1048	unsigned long flags;
1049	u32 led;
1050
1051	/* Only nic type 1 supports mode LEDs */
1052	if (priv->config & CFG_NO_LED ||
1053	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054		return;
1055
1056	spin_lock_irqsave(&priv->lock, flags);
1057
1058	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059	if (priv->assoc_network->mode == IEEE_A) {
1060		led |= priv->led_ofdm_on;
1061		led &= priv->led_association_off;
1062		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063	} else if (priv->assoc_network->mode == IEEE_G) {
1064		led |= priv->led_ofdm_on;
1065		led |= priv->led_association_on;
1066		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067	} else {
1068		led &= priv->led_ofdm_off;
1069		led |= priv->led_association_on;
1070		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071	}
1072
1073	led = ipw_register_toggle(led);
1074
1075	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077
1078	spin_unlock_irqrestore(&priv->lock, flags);
1079}
1080
1081static void ipw_led_band_off(struct ipw_priv *priv)
1082{
1083	unsigned long flags;
1084	u32 led;
1085
1086	/* Only nic type 1 supports mode LEDs */
1087	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088		return;
1089
1090	spin_lock_irqsave(&priv->lock, flags);
1091
1092	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093	led &= priv->led_ofdm_off;
1094	led &= priv->led_association_off;
1095
1096	led = ipw_register_toggle(led);
1097
1098	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100
1101	spin_unlock_irqrestore(&priv->lock, flags);
1102}
1103
1104static void ipw_led_radio_on(struct ipw_priv *priv)
1105{
1106	ipw_led_link_on(priv);
1107}
1108
1109static void ipw_led_radio_off(struct ipw_priv *priv)
1110{
1111	ipw_led_activity_off(priv);
1112	ipw_led_link_off(priv);
1113}
1114
1115static void ipw_led_link_up(struct ipw_priv *priv)
1116{
1117	/* Set the Link Led on for all nic types */
1118	ipw_led_link_on(priv);
1119}
1120
1121static void ipw_led_link_down(struct ipw_priv *priv)
1122{
1123	ipw_led_activity_off(priv);
1124	ipw_led_link_off(priv);
1125
1126	if (priv->status & STATUS_RF_KILL_MASK)
1127		ipw_led_radio_off(priv);
1128}
1129
1130static void ipw_led_init(struct ipw_priv *priv)
1131{
1132	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133
1134	/* Set the default PINs for the link and activity leds */
1135	priv->led_activity_on = IPW_ACTIVITY_LED;
1136	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137
1138	priv->led_association_on = IPW_ASSOCIATED_LED;
1139	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140
1141	/* Set the default PINs for the OFDM leds */
1142	priv->led_ofdm_on = IPW_OFDM_LED;
1143	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144
1145	switch (priv->nic_type) {
1146	case EEPROM_NIC_TYPE_1:
1147		/* In this NIC type, the LEDs are reversed.... */
1148		priv->led_activity_on = IPW_ASSOCIATED_LED;
1149		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150		priv->led_association_on = IPW_ACTIVITY_LED;
1151		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152
1153		if (!(priv->config & CFG_NO_LED))
1154			ipw_led_band_on(priv);
1155
1156		/* And we don't blink link LEDs for this nic, so
1157		 * just return here */
1158		return;
1159
1160	case EEPROM_NIC_TYPE_3:
1161	case EEPROM_NIC_TYPE_2:
1162	case EEPROM_NIC_TYPE_4:
1163	case EEPROM_NIC_TYPE_0:
1164		break;
1165
1166	default:
1167		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168			       priv->nic_type);
1169		priv->nic_type = EEPROM_NIC_TYPE_0;
1170		break;
1171	}
1172
1173	if (!(priv->config & CFG_NO_LED)) {
1174		if (priv->status & STATUS_ASSOCIATED)
1175			ipw_led_link_on(priv);
1176		else
1177			ipw_led_link_off(priv);
1178	}
1179}
1180
1181static void ipw_led_shutdown(struct ipw_priv *priv)
1182{
1183	ipw_led_activity_off(priv);
1184	ipw_led_link_off(priv);
1185	ipw_led_band_off(priv);
1186	cancel_delayed_work(&priv->led_link_on);
1187	cancel_delayed_work(&priv->led_link_off);
1188	cancel_delayed_work(&priv->led_act_off);
1189}
1190
1191/*
1192 * The following adds a new attribute to the sysfs representation
1193 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194 * used for controlling the debug level.
1195 *
1196 * See the level definitions in ipw for details.
1197 */
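/* Usage sketch from the shell (path as described above; the exact directory
 * name under /sys/bus/pci/drivers/ follows DRV_NAME):
 *
 *	# enable every debug category, then read the current mask back
 *	echo 0xffffffff > /sys/bus/pci/drivers/ipw/debug_level
 *	cat /sys/bus/pci/drivers/ipw/debug_level
 */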
1198static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199{
1200	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201}
1202
1203static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204				 size_t count)
1205{
1206	char *p = (char *)buf;
1207	u32 val;
1208
1209	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210		p++;
1211		if (p[0] == 'x' || p[0] == 'X')
1212			p++;
1213		val = simple_strtoul(p, &p, 16);
1214	} else
1215		val = simple_strtoul(p, &p, 10);
1216	if (p == buf)
1217		printk(KERN_INFO DRV_NAME
1218		       ": %s is not in hex or decimal form.\n", buf);
1219	else
1220		ipw_debug_level = val;
1221
1222	return strnlen(buf, count);
1223}
1224
1225static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226		   show_debug_level, store_debug_level);
1227
1228static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229{
1230	/* length = 1st dword in log */
1231	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232}
1233
1234static void ipw_capture_event_log(struct ipw_priv *priv,
1235				  u32 log_len, struct ipw_event *log)
1236{
1237	u32 base;
1238
1239	if (log_len) {
1240		base = ipw_read32(priv, IPW_EVENT_LOG);
1241		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242				  (u8 *) log, sizeof(*log) * log_len);
1243	}
1244}
1245
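/* Capture the firmware error log into a single allocation: the element array
 * and the event log are carved out of error->payload, with 'elem' first and
 * 'log' immediately after it.  Returns NULL if the (atomic) allocation fails. */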
1246static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247{
1248	struct ipw_fw_error *error;
1249	u32 log_len = ipw_get_event_log_len(priv);
1250	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251	u32 elem_len = ipw_read_reg32(priv, base);
1252
1253	error = kmalloc(sizeof(*error) +
1254			sizeof(*error->elem) * elem_len +
1255			sizeof(*error->log) * log_len, GFP_ATOMIC);
1256	if (!error) {
1257		IPW_ERROR("Memory allocation for firmware error log "
1258			  "failed.\n");
1259		return NULL;
1260	}
1261	error->jiffies = jiffies;
1262	error->status = priv->status;
1263	error->config = priv->config;
1264	error->elem_len = elem_len;
1265	error->log_len = log_len;
1266	error->elem = (struct ipw_error_elem *)error->payload;
1267	error->log = (struct ipw_event *)(error->elem + elem_len);
1268
1269	ipw_capture_event_log(priv, log_len, error->log);
1270
1271	if (elem_len)
1272		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273				  sizeof(*error->elem) * elem_len);
1274
1275	return error;
1276}
1277
1278static ssize_t show_event_log(struct device *d,
1279			      struct device_attribute *attr, char *buf)
1280{
1281	struct ipw_priv *priv = dev_get_drvdata(d);
1282	u32 log_len = ipw_get_event_log_len(priv);
1283	u32 log_size;
1284	struct ipw_event *log;
1285	u32 len = 0, i;
1286
1287	/* not using min() because of its strict type checking */
1288	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289			sizeof(*log) * log_len : PAGE_SIZE;
1290	log = kzalloc(log_size, GFP_KERNEL);
1291	if (!log) {
1292		IPW_ERROR("Unable to allocate memory for log\n");
1293		return 0;
1294	}
1295	log_len = log_size / sizeof(*log);
1296	ipw_capture_event_log(priv, log_len, log);
1297
1298	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299	for (i = 0; i < log_len; i++)
1300		len += snprintf(buf + len, PAGE_SIZE - len,
1301				"\n%08X%08X%08X",
1302				log[i].time, log[i].event, log[i].data);
1303	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304	kfree(log);
1305	return len;
1306}
1307
1308static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1309
1310static ssize_t show_error(struct device *d,
1311			  struct device_attribute *attr, char *buf)
1312{
1313	struct ipw_priv *priv = dev_get_drvdata(d);
1314	u32 len = 0, i;
1315	if (!priv->error)
1316		return 0;
1317	len += snprintf(buf + len, PAGE_SIZE - len,
1318			"%08lX%08X%08X%08X",
1319			priv->error->jiffies,
1320			priv->error->status,
1321			priv->error->config, priv->error->elem_len);
1322	for (i = 0; i < priv->error->elem_len; i++)
1323		len += snprintf(buf + len, PAGE_SIZE - len,
1324				"\n%08X%08X%08X%08X%08X%08X%08X",
1325				priv->error->elem[i].time,
1326				priv->error->elem[i].desc,
1327				priv->error->elem[i].blink1,
1328				priv->error->elem[i].blink2,
1329				priv->error->elem[i].link1,
1330				priv->error->elem[i].link2,
1331				priv->error->elem[i].data);
1332
1333	len += snprintf(buf + len, PAGE_SIZE - len,
1334			"\n%08X", priv->error->log_len);
1335	for (i = 0; i < priv->error->log_len; i++)
1336		len += snprintf(buf + len, PAGE_SIZE - len,
1337				"\n%08X%08X%08X",
1338				priv->error->log[i].time,
1339				priv->error->log[i].event,
1340				priv->error->log[i].data);
1341	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342	return len;
1343}
1344
1345static ssize_t clear_error(struct device *d,
1346			   struct device_attribute *attr,
1347			   const char *buf, size_t count)
1348{
1349	struct ipw_priv *priv = dev_get_drvdata(d);
1350
1351	kfree(priv->error);
1352	priv->error = NULL;
1353	return count;
1354}
1355
1356static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357
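/* Dump the host-command ring buffer, oldest entry first: iteration starts at
 * cmdlog_pos + 1 and wraps modulo cmdlog_len until it reaches cmdlog_pos or
 * the sysfs page fills up. */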
1358static ssize_t show_cmd_log(struct device *d,
1359			    struct device_attribute *attr, char *buf)
1360{
1361	struct ipw_priv *priv = dev_get_drvdata(d);
1362	u32 len = 0, i;
1363	if (!priv->cmdlog)
1364		return 0;
1365	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1367	     i = (i + 1) % priv->cmdlog_len) {
1368		len +=
1369		    snprintf(buf + len, PAGE_SIZE - len,
1370			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372			     priv->cmdlog[i].cmd.len);
1373		len +=
1374		    snprintk_buf(buf + len, PAGE_SIZE - len,
1375				 (u8 *) priv->cmdlog[i].cmd.param,
1376				 priv->cmdlog[i].cmd.len);
1377		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378	}
1379	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380	return len;
1381}
1382
1383static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384
1385#ifdef CONFIG_IPW2200_PROMISCUOUS
1386static void ipw_prom_free(struct ipw_priv *priv);
1387static int ipw_prom_alloc(struct ipw_priv *priv);
1388static ssize_t store_rtap_iface(struct device *d,
1389			 struct device_attribute *attr,
1390			 const char *buf, size_t count)
1391{
1392	struct ipw_priv *priv = dev_get_drvdata(d);
1393	int rc = 0;
1394
1395	if (count < 1)
1396		return -EINVAL;
1397
1398	switch (buf[0]) {
1399	case '0':
1400		if (!rtap_iface)
1401			return count;
1402
1403		if (netif_running(priv->prom_net_dev)) {
1404			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1405			return count;
1406		}
1407
1408		ipw_prom_free(priv);
1409		rtap_iface = 0;
1410		break;
1411
1412	case '1':
1413		if (rtap_iface)
1414			return count;
1415
1416		rc = ipw_prom_alloc(priv);
1417		if (!rc)
1418			rtap_iface = 1;
1419		break;
1420
1421	default:
1422		return -EINVAL;
1423	}
1424
1425	if (rc) {
1426		IPW_ERROR("Failed to register promiscuous network "
1427			  "device (error %d).\n", rc);
1428	}
1429
1430	return count;
1431}
1432
1433static ssize_t show_rtap_iface(struct device *d,
1434			struct device_attribute *attr,
1435			char *buf)
1436{
1437	struct ipw_priv *priv = dev_get_drvdata(d);
1438	if (rtap_iface)
1439		return sprintf(buf, "%s", priv->prom_net_dev->name);
1440	else {
1441		buf[0] = '-';
1442		buf[1] = '1';
1443		buf[2] = '\0';
1444		return 3;
1445	}
1446}
1447
1448static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449		   store_rtap_iface);
1450
1451static ssize_t store_rtap_filter(struct device *d,
1452			 struct device_attribute *attr,
1453			 const char *buf, size_t count)
1454{
1455	struct ipw_priv *priv = dev_get_drvdata(d);
1456
1457	if (!priv->prom_priv) {
1458		IPW_ERROR("Attempting to set filter without "
1459			  "rtap_iface enabled.\n");
1460		return -EPERM;
1461	}
1462
1463	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464
1465	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466		       BIT_ARG16(priv->prom_priv->filter));
1467
1468	return count;
1469}
1470
1471static ssize_t show_rtap_filter(struct device *d,
1472			struct device_attribute *attr,
1473			char *buf)
1474{
1475	struct ipw_priv *priv = dev_get_drvdata(d);
1476	return sprintf(buf, "0x%04X",
1477		       priv->prom_priv ? priv->prom_priv->filter : 0);
1478}
1479
1480static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481		   store_rtap_filter);
1482#endif
1483
1484static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485			     char *buf)
1486{
1487	struct ipw_priv *priv = dev_get_drvdata(d);
1488	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489}
1490
1491static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492			      const char *buf, size_t count)
1493{
1494	struct ipw_priv *priv = dev_get_drvdata(d);
1495	struct net_device *dev = priv->net_dev;
1496	char buffer[] = "00000000";
1497	unsigned long len =
1498	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499	unsigned long val;
1500	char *p = buffer;
1501
1502	IPW_DEBUG_INFO("enter\n");
1503
1504	strncpy(buffer, buf, len);
1505	buffer[len] = 0;
1506
1507	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508		p++;
1509		if (p[0] == 'x' || p[0] == 'X')
1510			p++;
1511		val = simple_strtoul(p, &p, 16);
1512	} else
1513		val = simple_strtoul(p, &p, 10);
1514	if (p == buffer) {
1515		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516	} else {
1517		priv->ieee->scan_age = val;
1518		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519	}
1520
1521	IPW_DEBUG_INFO("exit\n");
1522	return len;
1523}
1524
1525static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526
1527static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528			char *buf)
1529{
1530	struct ipw_priv *priv = dev_get_drvdata(d);
1531	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532}
1533
1534static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535			 const char *buf, size_t count)
1536{
1537	struct ipw_priv *priv = dev_get_drvdata(d);
1538
1539	IPW_DEBUG_INFO("enter\n");
1540
1541	if (count == 0)
1542		return 0;
1543
	if (buf[0] == '0') {
1545		IPW_DEBUG_LED("Disabling LED control.\n");
1546		priv->config |= CFG_NO_LED;
1547		ipw_led_shutdown(priv);
1548	} else {
1549		IPW_DEBUG_LED("Enabling LED control.\n");
1550		priv->config &= ~CFG_NO_LED;
1551		ipw_led_init(priv);
1552	}
1553
1554	IPW_DEBUG_INFO("exit\n");
1555	return count;
1556}
1557
1558static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559
1560static ssize_t show_status(struct device *d,
1561			   struct device_attribute *attr, char *buf)
1562{
1563	struct ipw_priv *p = dev_get_drvdata(d);
1564	return sprintf(buf, "0x%08x\n", (int)p->status);
1565}
1566
1567static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568
1569static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570			char *buf)
1571{
1572	struct ipw_priv *p = dev_get_drvdata(d);
1573	return sprintf(buf, "0x%08x\n", (int)p->config);
1574}
1575
1576static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577
1578static ssize_t show_nic_type(struct device *d,
1579			     struct device_attribute *attr, char *buf)
1580{
1581	struct ipw_priv *priv = dev_get_drvdata(d);
1582	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583}
1584
1585static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586
1587static ssize_t show_ucode_version(struct device *d,
1588				  struct device_attribute *attr, char *buf)
1589{
1590	u32 len = sizeof(u32), tmp = 0;
1591	struct ipw_priv *p = dev_get_drvdata(d);
1592
1593	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594		return 0;
1595
1596	return sprintf(buf, "0x%08x\n", tmp);
1597}
1598
1599static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600
1601static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602			char *buf)
1603{
1604	u32 len = sizeof(u32), tmp = 0;
1605	struct ipw_priv *p = dev_get_drvdata(d);
1606
1607	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608		return 0;
1609
1610	return sprintf(buf, "0x%08x\n", tmp);
1611}
1612
1613static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614
1615/*
1616 * Add a device attribute to view/control the delay between eeprom
1617 * operations.
1618 */
1619static ssize_t show_eeprom_delay(struct device *d,
1620				 struct device_attribute *attr, char *buf)
1621{
1622	struct ipw_priv *p = dev_get_drvdata(d);
1623	int n = p->eeprom_delay;
1624	return sprintf(buf, "%i\n", n);
1625}
1626static ssize_t store_eeprom_delay(struct device *d,
1627				  struct device_attribute *attr,
1628				  const char *buf, size_t count)
1629{
1630	struct ipw_priv *p = dev_get_drvdata(d);
1631	sscanf(buf, "%i", &p->eeprom_delay);
1632	return strnlen(buf, count);
1633}
1634
1635static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636		   show_eeprom_delay, store_eeprom_delay);
1637
1638static ssize_t show_command_event_reg(struct device *d,
1639				      struct device_attribute *attr, char *buf)
1640{
1641	u32 reg = 0;
1642	struct ipw_priv *p = dev_get_drvdata(d);
1643
1644	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645	return sprintf(buf, "0x%08x\n", reg);
1646}
1647static ssize_t store_command_event_reg(struct device *d,
1648				       struct device_attribute *attr,
1649				       const char *buf, size_t count)
1650{
1651	u32 reg;
1652	struct ipw_priv *p = dev_get_drvdata(d);
1653
1654	sscanf(buf, "%x", &reg);
1655	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656	return strnlen(buf, count);
1657}
1658
1659static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660		   show_command_event_reg, store_command_event_reg);
1661
1662static ssize_t show_mem_gpio_reg(struct device *d,
1663				 struct device_attribute *attr, char *buf)
1664{
1665	u32 reg = 0;
1666	struct ipw_priv *p = dev_get_drvdata(d);
1667
1668	reg = ipw_read_reg32(p, 0x301100);
1669	return sprintf(buf, "0x%08x\n", reg);
1670}
1671static ssize_t store_mem_gpio_reg(struct device *d,
1672				  struct device_attribute *attr,
1673				  const char *buf, size_t count)
1674{
1675	u32 reg;
1676	struct ipw_priv *p = dev_get_drvdata(d);
1677
1678	sscanf(buf, "%x", &reg);
1679	ipw_write_reg32(p, 0x301100, reg);
1680	return strnlen(buf, count);
1681}
1682
1683static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684		   show_mem_gpio_reg, store_mem_gpio_reg);
1685
1686static ssize_t show_indirect_dword(struct device *d,
1687				   struct device_attribute *attr, char *buf)
1688{
1689	u32 reg = 0;
1690	struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692	if (priv->status & STATUS_INDIRECT_DWORD)
1693		reg = ipw_read_reg32(priv, priv->indirect_dword);
1694	else
1695		reg = 0;
1696
1697	return sprintf(buf, "0x%08x\n", reg);
1698}
1699static ssize_t store_indirect_dword(struct device *d,
1700				    struct device_attribute *attr,
1701				    const char *buf, size_t count)
1702{
1703	struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705	sscanf(buf, "%x", &priv->indirect_dword);
1706	priv->status |= STATUS_INDIRECT_DWORD;
1707	return strnlen(buf, count);
1708}
1709
1710static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711		   show_indirect_dword, store_indirect_dword);
1712
1713static ssize_t show_indirect_byte(struct device *d,
1714				  struct device_attribute *attr, char *buf)
1715{
1716	u8 reg = 0;
1717	struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719	if (priv->status & STATUS_INDIRECT_BYTE)
1720		reg = ipw_read_reg8(priv, priv->indirect_byte);
1721	else
1722		reg = 0;
1723
1724	return sprintf(buf, "0x%02x\n", reg);
1725}
1726static ssize_t store_indirect_byte(struct device *d,
1727				   struct device_attribute *attr,
1728				   const char *buf, size_t count)
1729{
1730	struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732	sscanf(buf, "%x", &priv->indirect_byte);
1733	priv->status |= STATUS_INDIRECT_BYTE;
1734	return strnlen(buf, count);
1735}
1736
1737static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738		   show_indirect_byte, store_indirect_byte);
1739
1740static ssize_t show_direct_dword(struct device *d,
1741				 struct device_attribute *attr, char *buf)
1742{
1743	u32 reg = 0;
1744	struct ipw_priv *priv = dev_get_drvdata(d);
1745
1746	if (priv->status & STATUS_DIRECT_DWORD)
1747		reg = ipw_read32(priv, priv->direct_dword);
1748	else
1749		reg = 0;
1750
1751	return sprintf(buf, "0x%08x\n", reg);
1752}
1753static ssize_t store_direct_dword(struct device *d,
1754				  struct device_attribute *attr,
1755				  const char *buf, size_t count)
1756{
1757	struct ipw_priv *priv = dev_get_drvdata(d);
1758
1759	sscanf(buf, "%x", &priv->direct_dword);
1760	priv->status |= STATUS_DIRECT_DWORD;
1761	return strnlen(buf, count);
1762}
1763
1764static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765		   show_direct_dword, store_direct_dword);
1766
1767static int rf_kill_active(struct ipw_priv *priv)
1768{
1769	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770		priv->status |= STATUS_RF_KILL_HW;
1771		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772	} else {
1773		priv->status &= ~STATUS_RF_KILL_HW;
1774		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775	}
1776
1777	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778}
1779
1780static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781			    char *buf)
1782{
1783	/* 0 - RF kill not enabled
1784	   1 - SW based RF kill active (sysfs)
1785	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
1787	struct ipw_priv *priv = dev_get_drvdata(d);
1788	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789	    (rf_kill_active(priv) ? 0x2 : 0x0);
1790	return sprintf(buf, "%i\n", val);
1791}
1792
1793static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794{
1795	if ((disable_radio ? 1 : 0) ==
1796	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797		return 0;
1798
1799	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1800			  disable_radio ? "OFF" : "ON");
1801
1802	if (disable_radio) {
1803		priv->status |= STATUS_RF_KILL_SW;
1804
1805		cancel_delayed_work(&priv->request_scan);
1806		cancel_delayed_work(&priv->request_direct_scan);
1807		cancel_delayed_work(&priv->request_passive_scan);
1808		cancel_delayed_work(&priv->scan_event);
1809		schedule_work(&priv->down);
1810	} else {
1811		priv->status &= ~STATUS_RF_KILL_SW;
1812		if (rf_kill_active(priv)) {
1813			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814					  "disabled by HW switch\n");
1815			/* Make sure the RF_KILL check timer is running */
1816			cancel_delayed_work(&priv->rf_kill);
1817			schedule_delayed_work(&priv->rf_kill,
1818					      round_jiffies_relative(2 * HZ));
1819		} else
1820			schedule_work(&priv->up);
1821	}
1822
1823	return 1;
1824}
1825
1826static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827			     const char *buf, size_t count)
1828{
1829	struct ipw_priv *priv = dev_get_drvdata(d);
1830
1831	ipw_radio_kill_sw(priv, buf[0] == '1');
1832
1833	return count;
1834}
1835
1836static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1837
1838static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839			       char *buf)
1840{
1841	struct ipw_priv *priv = dev_get_drvdata(d);
1842	int pos = 0, len = 0;
1843	if (priv->config & CFG_SPEED_SCAN) {
1844		while (priv->speed_scan[pos] != 0)
1845			len += sprintf(&buf[len], "%d ",
1846				       priv->speed_scan[pos++]);
1847		return len + sprintf(&buf[len], "\n");
1848	}
1849
1850	return sprintf(buf, "0\n");
1851}
1852
1853static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854				const char *buf, size_t count)
1855{
1856	struct ipw_priv *priv = dev_get_drvdata(d);
1857	int channel, pos = 0;
1858	const char *p = buf;
1859
1860	/* list of space separated channels to scan, optionally ending with 0 */
1861	while ((channel = simple_strtol(p, NULL, 0))) {
1862		if (pos == MAX_SPEED_SCAN - 1) {
1863			priv->speed_scan[pos] = 0;
1864			break;
1865		}
1866
1867		if (libipw_is_valid_channel(priv->ieee, channel))
1868			priv->speed_scan[pos++] = channel;
1869		else
1870			IPW_WARNING("Skipping invalid channel request: %d\n",
1871				    channel);
1872		p = strchr(p, ' ');
1873		if (!p)
1874			break;
1875		while (*p == ' ' || *p == '\t')
1876			p++;
1877	}
1878
1879	if (pos == 0)
1880		priv->config &= ~CFG_SPEED_SCAN;
1881	else {
1882		priv->speed_scan_pos = 0;
1883		priv->config |= CFG_SPEED_SCAN;
1884	}
1885
1886	return count;
1887}
1888
1889static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890		   store_speed_scan);
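/*
 * Illustrative usage sketch (added, not part of the driver): speed_scan
 * accepts a space-separated channel list, validated against the current
 * geography, and reads back either the stored list or "0" when speed
 * scanning is disabled.  The sysfs path is an assumption, as above:
 *
 *	# echo "1 6 11 0" > .../speed_scan	<- scan only channels 1, 6, 11
 *	# cat .../speed_scan
 *	1 6 11
 *	# echo 0 > .../speed_scan		<- clears CFG_SPEED_SCAN
 */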
1891
1892static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893			      char *buf)
1894{
1895	struct ipw_priv *priv = dev_get_drvdata(d);
1896	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897}
1898
1899static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900			       const char *buf, size_t count)
1901{
1902	struct ipw_priv *priv = dev_get_drvdata(d);
1903	if (buf[0] == '1')
1904		priv->config |= CFG_NET_STATS;
1905	else
1906		priv->config &= ~CFG_NET_STATS;
1907
1908	return count;
1909}
1910
1911static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912		   show_net_stats, store_net_stats);
1913
1914static ssize_t show_channels(struct device *d,
1915			     struct device_attribute *attr,
1916			     char *buf)
1917{
1918	struct ipw_priv *priv = dev_get_drvdata(d);
1919	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920	int len = 0, i;
1921
1922	len = sprintf(&buf[len],
1923		      "Displaying %d channels in 2.4GHz band "
1924		      "(802.11bg):\n", geo->bg_channels);
1925
1926	for (i = 0; i < geo->bg_channels; i++) {
1927		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928			       geo->bg[i].channel,
1929			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930			       " (radar spectrum)" : "",
1931			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933			       ? "" : ", IBSS",
1934			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935			       "passive only" : "active/passive",
1936			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937			       "B" : "B/G");
1938	}
1939
1940	len += sprintf(&buf[len],
1941		       "Displaying %d channels in 5.2GHz band "
1942		       "(802.11a):\n", geo->a_channels);
1943	for (i = 0; i < geo->a_channels; i++) {
1944		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945			       geo->a[i].channel,
1946			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947			       " (radar spectrum)" : "",
1948			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950			       ? "" : ", IBSS",
1951			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952			       "passive only" : "active/passive");
1953	}
1954
1955	return len;
1956}
1957
1958static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959
1960static void notify_wx_assoc_event(struct ipw_priv *priv)
1961{
1962	union iwreq_data wrqu;
1963	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964	if (priv->status & STATUS_ASSOCIATED)
1965		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966	else
1967		eth_zero_addr(wrqu.ap_addr.sa_data);
1968	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969}
1970
1971static void ipw_irq_tasklet(struct ipw_priv *priv)
1972{
1973	u32 inta, inta_mask, handled = 0;
1974	unsigned long flags;
1975	int rc = 0;
1976
1977	spin_lock_irqsave(&priv->irq_lock, flags);
1978
1979	inta = ipw_read32(priv, IPW_INTA_RW);
1980	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1981
1982	if (inta == 0xFFFFFFFF) {
1983		/* Hardware disappeared */
1984		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1985		/* Only handle the cached INTA values */
1986		inta = 0;
1987	}
1988	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1989
1990	/* Add any cached INTA values that need to be handled */
1991	inta |= priv->isr_inta;
1992
1993	spin_unlock_irqrestore(&priv->irq_lock, flags);
1994
1995	spin_lock_irqsave(&priv->lock, flags);
1996
1997	/* handle all of the causes of the interrupt */
1998	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1999		ipw_rx(priv);
2000		handled |= IPW_INTA_BIT_RX_TRANSFER;
2001	}
2002
2003	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2004		IPW_DEBUG_HC("Command completed.\n");
2005		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2006		priv->status &= ~STATUS_HCMD_ACTIVE;
2007		wake_up_interruptible(&priv->wait_command_queue);
2008		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2009	}
2010
2011	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2012		IPW_DEBUG_TX("TX_QUEUE_1\n");
2013		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2014		handled |= IPW_INTA_BIT_TX_QUEUE_1;
2015	}
2016
2017	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2018		IPW_DEBUG_TX("TX_QUEUE_2\n");
2019		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2020		handled |= IPW_INTA_BIT_TX_QUEUE_2;
2021	}
2022
2023	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2024		IPW_DEBUG_TX("TX_QUEUE_3\n");
2025		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2026		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2027	}
2028
2029	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2030		IPW_DEBUG_TX("TX_QUEUE_4\n");
2031		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2032		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2033	}
2034
2035	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2036		IPW_WARNING("STATUS_CHANGE\n");
2037		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2038	}
2039
2040	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2041		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
2042		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2043	}
2044
2045	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2046		IPW_WARNING("HOST_CMD_DONE\n");
2047		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2048	}
2049
2050	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2051		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2052		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2053	}
2054
2055	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2056		IPW_WARNING("PHY_OFF_DONE\n");
2057		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2058	}
2059
2060	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2061		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2062		priv->status |= STATUS_RF_KILL_HW;
2063		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2064		wake_up_interruptible(&priv->wait_command_queue);
2065		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2066		cancel_delayed_work(&priv->request_scan);
2067		cancel_delayed_work(&priv->request_direct_scan);
2068		cancel_delayed_work(&priv->request_passive_scan);
2069		cancel_delayed_work(&priv->scan_event);
2070		schedule_work(&priv->link_down);
2071		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2072		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2073	}
2074
2075	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2076		IPW_WARNING("Firmware error detected.  Restarting.\n");
2077		if (priv->error) {
2078			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2079			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2080				struct ipw_fw_error *error =
2081				    ipw_alloc_error_log(priv);
2082				ipw_dump_error_log(priv, error);
2083				kfree(error);
2084			}
2085		} else {
2086			priv->error = ipw_alloc_error_log(priv);
2087			if (priv->error)
2088				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2089			else
2090				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2091					     "log.\n");
2092			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2093				ipw_dump_error_log(priv, priv->error);
2094		}
2095
2096		/* XXX: If hardware encryption is for WPA/WPA2,
2097		 * we have to notify the supplicant. */
2098		if (priv->ieee->sec.encrypt) {
2099			priv->status &= ~STATUS_ASSOCIATED;
2100			notify_wx_assoc_event(priv);
2101		}
2102
2103		/* Keep the restart process from trying to send host
2104		 * commands by clearing the INIT status bit */
2105		priv->status &= ~STATUS_INIT;
2106
2107		/* Cancel currently queued command. */
2108		priv->status &= ~STATUS_HCMD_ACTIVE;
2109		wake_up_interruptible(&priv->wait_command_queue);
2110
2111		schedule_work(&priv->adapter_restart);
2112		handled |= IPW_INTA_BIT_FATAL_ERROR;
2113	}
2114
2115	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2116		IPW_ERROR("Parity error\n");
2117		handled |= IPW_INTA_BIT_PARITY_ERROR;
2118	}
2119
2120	if (handled != inta) {
2121		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2122	}
2123
2124	spin_unlock_irqrestore(&priv->lock, flags);
2125
2126	/* enable all interrupts */
2127	ipw_enable_interrupts(priv);
2128}
2129
2130#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2131static char *get_cmd_string(u8 cmd)
2132{
2133	switch (cmd) {
2134		IPW_CMD(HOST_COMPLETE);
2135		IPW_CMD(POWER_DOWN);
2136		IPW_CMD(SYSTEM_CONFIG);
2137		IPW_CMD(MULTICAST_ADDRESS);
2138		IPW_CMD(SSID);
2139		IPW_CMD(ADAPTER_ADDRESS);
2140		IPW_CMD(PORT_TYPE);
2141		IPW_CMD(RTS_THRESHOLD);
2142		IPW_CMD(FRAG_THRESHOLD);
2143		IPW_CMD(POWER_MODE);
2144		IPW_CMD(WEP_KEY);
2145		IPW_CMD(TGI_TX_KEY);
2146		IPW_CMD(SCAN_REQUEST);
2147		IPW_CMD(SCAN_REQUEST_EXT);
2148		IPW_CMD(ASSOCIATE);
2149		IPW_CMD(SUPPORTED_RATES);
2150		IPW_CMD(SCAN_ABORT);
2151		IPW_CMD(TX_FLUSH);
2152		IPW_CMD(QOS_PARAMETERS);
2153		IPW_CMD(DINO_CONFIG);
2154		IPW_CMD(RSN_CAPABILITIES);
2155		IPW_CMD(RX_KEY);
2156		IPW_CMD(CARD_DISABLE);
2157		IPW_CMD(SEED_NUMBER);
2158		IPW_CMD(TX_POWER);
2159		IPW_CMD(COUNTRY_INFO);
2160		IPW_CMD(AIRONET_INFO);
2161		IPW_CMD(AP_TX_POWER);
2162		IPW_CMD(CCKM_INFO);
2163		IPW_CMD(CCX_VER_INFO);
2164		IPW_CMD(SET_CALIBRATION);
2165		IPW_CMD(SENSITIVITY_CALIB);
2166		IPW_CMD(RETRY_LIMIT);
2167		IPW_CMD(IPW_PRE_POWER_DOWN);
2168		IPW_CMD(VAP_BEACON_TEMPLATE);
2169		IPW_CMD(VAP_DTIM_PERIOD);
2170		IPW_CMD(EXT_SUPPORTED_RATES);
2171		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2172		IPW_CMD(VAP_QUIET_INTERVALS);
2173		IPW_CMD(VAP_CHANNEL_SWITCH);
2174		IPW_CMD(VAP_MANDATORY_CHANNELS);
2175		IPW_CMD(VAP_CELL_PWR_LIMIT);
2176		IPW_CMD(VAP_CF_PARAM_SET);
2177		IPW_CMD(VAP_SET_BEACONING_STATE);
2178		IPW_CMD(MEASUREMENT);
2179		IPW_CMD(POWER_CAPABILITY);
2180		IPW_CMD(SUPPORTED_CHANNELS);
2181		IPW_CMD(TPC_REPORT);
2182		IPW_CMD(WME_INFO);
2183		IPW_CMD(PRODUCTION_COMMAND);
2184	default:
2185		return "UNKNOWN";
2186	}
2187}
2188
2189#define HOST_COMPLETE_TIMEOUT HZ
2190
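/*
 * Descriptive note (added): host commands are synchronous.  __ipw_send_cmd()
 * sets STATUS_HCMD_ACTIVE, queues the command on the command Tx queue and
 * sleeps on wait_command_queue; the bit is cleared either by the
 * IPW_INTA_BIT_TX_CMD_QUEUE handler in ipw_irq_tasklet() or here on
 * timeout/RF kill, so only one host command is ever in flight at a time.
 */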
2191static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2192{
2193	int rc = 0;
2194	unsigned long flags;
2195	unsigned long now, end;
2196
2197	spin_lock_irqsave(&priv->lock, flags);
2198	if (priv->status & STATUS_HCMD_ACTIVE) {
2199		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2200			  get_cmd_string(cmd->cmd));
2201		spin_unlock_irqrestore(&priv->lock, flags);
2202		return -EAGAIN;
2203	}
2204
2205	priv->status |= STATUS_HCMD_ACTIVE;
2206
2207	if (priv->cmdlog) {
2208		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2209		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2210		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2211		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2212		       cmd->len);
2213		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2214	}
2215
2216	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2217		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2218		     priv->status);
2219
2220#ifndef DEBUG_CMD_WEP_KEY
2221	if (cmd->cmd == IPW_CMD_WEP_KEY)
2222		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2223	else
2224#endif
2225		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2226
2227	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2228	if (rc) {
2229		priv->status &= ~STATUS_HCMD_ACTIVE;
2230		IPW_ERROR("Failed to send %s: Reason %d\n",
2231			  get_cmd_string(cmd->cmd), rc);
2232		spin_unlock_irqrestore(&priv->lock, flags);
2233		goto exit;
2234	}
2235	spin_unlock_irqrestore(&priv->lock, flags);
2236
2237	now = jiffies;
2238	end = now + HOST_COMPLETE_TIMEOUT;
2239again:
2240	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2241					      !(priv->
2242						status & STATUS_HCMD_ACTIVE),
2243					      end - now);
2244	if (rc < 0) {
2245		now = jiffies;
2246		if (time_before(now, end))
2247			goto again;
2248		rc = 0;
2249	}
2250
2251	if (rc == 0) {
2252		spin_lock_irqsave(&priv->lock, flags);
2253		if (priv->status & STATUS_HCMD_ACTIVE) {
2254			IPW_ERROR("Failed to send %s: Command timed out.\n",
2255				  get_cmd_string(cmd->cmd));
2256			priv->status &= ~STATUS_HCMD_ACTIVE;
2257			spin_unlock_irqrestore(&priv->lock, flags);
2258			rc = -EIO;
2259			goto exit;
2260		}
2261		spin_unlock_irqrestore(&priv->lock, flags);
2262	} else
2263		rc = 0;
2264
2265	if (priv->status & STATUS_RF_KILL_HW) {
2266		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2267			  get_cmd_string(cmd->cmd));
2268		rc = -EIO;
2269		goto exit;
2270	}
2271
2272      exit:
2273	if (priv->cmdlog) {
2274		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2275		priv->cmdlog_pos %= priv->cmdlog_len;
2276	}
2277	return rc;
2278}
2279
2280static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2281{
2282	struct host_cmd cmd = {
2283		.cmd = command,
2284	};
2285
2286	return __ipw_send_cmd(priv, &cmd);
2287}
2288
2289static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2290			    void *data)
2291{
2292	struct host_cmd cmd = {
2293		.cmd = command,
2294		.len = len,
2295		.param = data,
2296	};
2297
2298	return __ipw_send_cmd(priv, &cmd);
2299}
2300
2301static int ipw_send_host_complete(struct ipw_priv *priv)
2302{
2303	if (!priv) {
2304		IPW_ERROR("Invalid args\n");
2305		return -1;
2306	}
2307
2308	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2309}
2310
2311static int ipw_send_system_config(struct ipw_priv *priv)
2312{
2313	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2314				sizeof(priv->sys_config),
2315				&priv->sys_config);
2316}
2317
2318static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2319{
2320	if (!priv || !ssid) {
2321		IPW_ERROR("Invalid args\n");
2322		return -1;
2323	}
2324
2325	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2326				ssid);
2327}
2328
2329static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2330{
2331	if (!priv || !mac) {
2332		IPW_ERROR("Invalid args\n");
2333		return -1;
2334	}
2335
2336	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2337		       priv->net_dev->name, mac);
2338
2339	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2340}
2341
2342static void ipw_adapter_restart(void *adapter)
2343{
2344	struct ipw_priv *priv = adapter;
2345
2346	if (priv->status & STATUS_RF_KILL_MASK)
2347		return;
2348
2349	ipw_down(priv);
2350
2351	if (priv->assoc_network &&
2352	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2353		ipw_remove_current_network(priv);
2354
2355	if (ipw_up(priv)) {
2356		IPW_ERROR("Failed to up device\n");
2357		return;
2358	}
2359}
2360
2361static void ipw_bg_adapter_restart(struct work_struct *work)
2362{
2363	struct ipw_priv *priv =
2364		container_of(work, struct ipw_priv, adapter_restart);
2365	mutex_lock(&priv->mutex);
2366	ipw_adapter_restart(priv);
2367	mutex_unlock(&priv->mutex);
2368}
2369
2370static void ipw_abort_scan(struct ipw_priv *priv);
2371
2372#define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2373
2374static void ipw_scan_check(void *data)
2375{
2376	struct ipw_priv *priv = data;
2377
2378	if (priv->status & STATUS_SCAN_ABORTING) {
2379		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2380			       "adapter after (%dms).\n",
2381			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2382		schedule_work(&priv->adapter_restart);
2383	} else if (priv->status & STATUS_SCANNING) {
2384		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2385			       "after (%dms).\n",
2386			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2387		ipw_abort_scan(priv);
2388		schedule_delayed_work(&priv->scan_check, HZ);
2389	}
2390}
2391
2392static void ipw_bg_scan_check(struct work_struct *work)
2393{
2394	struct ipw_priv *priv =
2395		container_of(work, struct ipw_priv, scan_check.work);
2396	mutex_lock(&priv->mutex);
2397	ipw_scan_check(priv);
2398	mutex_unlock(&priv->mutex);
2399}
2400
2401static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2402				     struct ipw_scan_request_ext *request)
2403{
2404	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2405				sizeof(*request), request);
2406}
2407
2408static int ipw_send_scan_abort(struct ipw_priv *priv)
2409{
2410	if (!priv) {
2411		IPW_ERROR("Invalid args\n");
2412		return -1;
2413	}
2414
2415	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2416}
2417
2418static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2419{
2420	struct ipw_sensitivity_calib calib = {
2421		.beacon_rssi_raw = cpu_to_le16(sens),
2422	};
2423
2424	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2425				&calib);
2426}
2427
2428static int ipw_send_associate(struct ipw_priv *priv,
2429			      struct ipw_associate *associate)
2430{
2431	if (!priv || !associate) {
2432		IPW_ERROR("Invalid args\n");
2433		return -1;
2434	}
2435
2436	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2437				associate);
2438}
2439
2440static int ipw_send_supported_rates(struct ipw_priv *priv,
2441				    struct ipw_supported_rates *rates)
2442{
2443	if (!priv || !rates) {
2444		IPW_ERROR("Invalid args\n");
2445		return -1;
2446	}
2447
2448	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2449				rates);
2450}
2451
2452static int ipw_set_random_seed(struct ipw_priv *priv)
2453{
2454	u32 val;
2455
2456	if (!priv) {
2457		IPW_ERROR("Invalid args\n");
2458		return -1;
2459	}
2460
2461	get_random_bytes(&val, sizeof(val));
2462
2463	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2464}
2465
2466static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2467{
2468	__le32 v = cpu_to_le32(phy_off);
2469	if (!priv) {
2470		IPW_ERROR("Invalid args\n");
2471		return -1;
2472	}
2473
2474	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2475}
2476
2477static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2478{
2479	if (!priv || !power) {
2480		IPW_ERROR("Invalid args\n");
2481		return -1;
2482	}
2483
2484	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2485}
2486
2487static int ipw_set_tx_power(struct ipw_priv *priv)
2488{
2489	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2490	struct ipw_tx_power tx_power;
2491	s8 max_power;
2492	int i;
2493
2494	memset(&tx_power, 0, sizeof(tx_power));
2495
2496	/* configure device for 'G' band */
2497	tx_power.ieee_mode = IPW_G_MODE;
2498	tx_power.num_channels = geo->bg_channels;
2499	for (i = 0; i < geo->bg_channels; i++) {
2500		max_power = geo->bg[i].max_power;
2501		tx_power.channels_tx_power[i].channel_number =
2502		    geo->bg[i].channel;
2503		tx_power.channels_tx_power[i].tx_power = max_power ?
2504		    min(max_power, priv->tx_power) : priv->tx_power;
2505	}
2506	if (ipw_send_tx_power(priv, &tx_power))
2507		return -EIO;
2508
2509	/* configure device to also handle 'B' band */
2510	tx_power.ieee_mode = IPW_B_MODE;
2511	if (ipw_send_tx_power(priv, &tx_power))
2512		return -EIO;
2513
2514	/* configure device to also handle 'A' band */
2515	if (priv->ieee->abg_true) {
2516		tx_power.ieee_mode = IPW_A_MODE;
2517		tx_power.num_channels = geo->a_channels;
2518		for (i = 0; i < tx_power.num_channels; i++) {
2519			max_power = geo->a[i].max_power;
2520			tx_power.channels_tx_power[i].channel_number =
2521			    geo->a[i].channel;
2522			tx_power.channels_tx_power[i].tx_power = max_power ?
2523			    min(max_power, priv->tx_power) : priv->tx_power;
2524		}
2525		if (ipw_send_tx_power(priv, &tx_power))
2526			return -EIO;
2527	}
2528	return 0;
2529}
2530
2531static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2532{
2533	struct ipw_rts_threshold rts_threshold = {
2534		.rts_threshold = cpu_to_le16(rts),
2535	};
2536
2537	if (!priv) {
2538		IPW_ERROR("Invalid args\n");
2539		return -1;
2540	}
2541
2542	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2543				sizeof(rts_threshold), &rts_threshold);
2544}
2545
2546static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2547{
2548	struct ipw_frag_threshold frag_threshold = {
2549		.frag_threshold = cpu_to_le16(frag),
2550	};
2551
2552	if (!priv) {
2553		IPW_ERROR("Invalid args\n");
2554		return -1;
2555	}
2556
2557	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2558				sizeof(frag_threshold), &frag_threshold);
2559}
2560
2561static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2562{
2563	__le32 param;
2564
2565	if (!priv) {
2566		IPW_ERROR("Invalid args\n");
2567		return -1;
2568	}
2569
2570	/* If on battery, use power index 3; if on AC, use CAM;
2571	 * otherwise pass the requested level through */
2572	switch (mode) {
2573	case IPW_POWER_BATTERY:
2574		param = cpu_to_le32(IPW_POWER_INDEX_3);
2575		break;
2576	case IPW_POWER_AC:
2577		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2578		break;
2579	default:
2580		param = cpu_to_le32(mode);
2581		break;
2582	}
2583
2584	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2585				&param);
2586}
2587
2588static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2589{
2590	struct ipw_retry_limit retry_limit = {
2591		.short_retry_limit = slimit,
2592		.long_retry_limit = llimit
2593	};
2594
2595	if (!priv) {
2596		IPW_ERROR("Invalid args\n");
2597		return -1;
2598	}
2599
2600	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2601				&retry_limit);
2602}
2603
2604/*
2605 * The IPW device contains a Microwire compatible EEPROM that stores
2606 * various data like the MAC address.  Usually the firmware has exclusive
2607 * access to the eeprom, but during device initialization (before the
2608 * device driver has sent the HostComplete command to the firmware) the
2609 * device driver has read access to the EEPROM by way of indirect addressing
2610 * through a couple of memory mapped registers.
2611 *
2612 * The following is a simplified implementation for pulling data out of the
2613 * eeprom, along with some helper functions to find information in
2614 * the per device private data's copy of the eeprom.
2615 *
2616 * NOTE: To better understand how these functions work (i.e. what is a chip
2617 *       select and why do we have to keep driving the eeprom clock?), read
2618 *       just about any data sheet for a Microwire compatible EEPROM.
2619 */
2620
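/*
 * Illustrative sketch (added, not part of the driver) of how one Microwire
 * READ looks when expressed with the helpers below; the opcode and bit
 * ordering follow eeprom_op() and eeprom_read_u16():
 *
 *	eeprom_cs(priv);				// assert chip select
 *	eeprom_write_bit(priv, 1);			// start bit
 *	eeprom_write_bit(priv, EEPROM_CMD_READ & 2);	// 2-bit opcode, MSB first
 *	eeprom_write_bit(priv, EEPROM_CMD_READ & 1);
 *	//  ... 8 address bits, MSB first ...
 *	//  ... 16 clock pulses, sampling EEPROM_BIT_DO after each one ...
 *	eeprom_disable_cs(priv);			// release chip select
 */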
2621/* write a 32 bit value into the indirect accessor register */
2622static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2623{
2624	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2625
2626	/* the eeprom requires some time to complete the operation */
2627	udelay(p->eeprom_delay);
2628}
2629
2630/* perform a chip select operation */
2631static void eeprom_cs(struct ipw_priv *priv)
2632{
2633	eeprom_write_reg(priv, 0);
2634	eeprom_write_reg(priv, EEPROM_BIT_CS);
2635	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636	eeprom_write_reg(priv, EEPROM_BIT_CS);
2637}
2638
2639/* release the chip select line */
2640static void eeprom_disable_cs(struct ipw_priv *priv)
2641{
2642	eeprom_write_reg(priv, EEPROM_BIT_CS);
2643	eeprom_write_reg(priv, 0);
2644	eeprom_write_reg(priv, EEPROM_BIT_SK);
2645}
2646
2647/* push a single bit down to the eeprom */
2648static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2649{
2650	int d = (bit ? EEPROM_BIT_DI : 0);
2651	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2652	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2653}
2654
2655/* push an opcode followed by an address down to the eeprom */
2656static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2657{
2658	int i;
2659
2660	eeprom_cs(priv);
2661	eeprom_write_bit(priv, 1);
2662	eeprom_write_bit(priv, op & 2);
2663	eeprom_write_bit(priv, op & 1);
2664	for (i = 7; i >= 0; i--) {
2665		eeprom_write_bit(priv, addr & (1 << i));
2666	}
2667}
2668
2669/* pull 16 bits off the eeprom, one bit at a time */
2670static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2671{
2672	int i;
2673	u16 r = 0;
2674
2675	/* Send READ Opcode */
2676	eeprom_op(priv, EEPROM_CMD_READ, addr);
2677
2678	/* Send dummy bit */
2679	eeprom_write_reg(priv, EEPROM_BIT_CS);
2680
2681	/* Read the byte off the eeprom one bit at a time */
2682	/* Read the 16-bit word off the eeprom one bit at a time */
2683		u32 data = 0;
2684		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2685		eeprom_write_reg(priv, EEPROM_BIT_CS);
2686		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2687		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2688	}
2689
2690	/* Send another dummy bit */
2691	eeprom_write_reg(priv, 0);
2692	eeprom_disable_cs(priv);
2693
2694	return r;
2695}
2696
2697/* helper function for pulling the mac address out of the private */
2698/* data's copy of the eeprom data                                 */
2699static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700{
2701	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2702}
2703
2704static void ipw_read_eeprom(struct ipw_priv *priv)
2705{
2706	int i;
2707	__le16 *eeprom = (__le16 *) priv->eeprom;
2708
2709	IPW_DEBUG_TRACE(">>\n");
2710
2711	/* read entire contents of eeprom into private buffer */
2712	for (i = 0; i < 128; i++)
2713		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2714
2715	IPW_DEBUG_TRACE("<<\n");
2716}
2717
2718/*
2719 * Either the device driver (i.e. the host) or the firmware can
2720 * load eeprom data into the designated region in SRAM.  If neither
2721 * happens then the FW will shutdown with a fatal error.
2722 *
2723 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2724 * location in shared SRAM needs to be non-zero.
2725 */
2726static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2727{
2728	int i;
2729
2730	IPW_DEBUG_TRACE(">>\n");
2731
2732	/*
2733	   If the data looks correct, then copy it to our private
2734	   copy.  Otherwise let the firmware know to perform the operation
2735	   on its own.
2736	 */
2737	if (priv->eeprom[EEPROM_VERSION] != 0) {
2738		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2739
2740		/* write the eeprom data to sram */
2741		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2742			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2743
2744		/* Do not load eeprom data on fatal error or suspend */
2745		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2746	} else {
2747		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2748
2749		/* Load eeprom data on fatal error or suspend */
2750		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2751	}
2752
2753	IPW_DEBUG_TRACE("<<\n");
2754}
2755
2756static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2757{
2758	count >>= 2;
2759	if (!count)
2760		return;
2761	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2762	while (count--)
2763		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2764}
2765
2766static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2767{
2768	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2769			CB_NUMBER_OF_ELEMENTS_SMALL *
2770			sizeof(struct command_block));
2771}
2772
2773static int ipw_fw_dma_enable(struct ipw_priv *priv)
2774{				/* start dma engine but no transfers yet */
2775
2776	IPW_DEBUG_FW(">> :\n");
2777
2778	/* Start the dma */
2779	ipw_fw_dma_reset_command_blocks(priv);
2780
2781	/* Write CB base address */
2782	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2783
2784	IPW_DEBUG_FW("<< :\n");
2785	return 0;
2786}
2787
2788static void ipw_fw_dma_abort(struct ipw_priv *priv)
2789{
2790	u32 control = 0;
2791
2792	IPW_DEBUG_FW(">> :\n");
2793
2794	/* set the Stop and Abort bit */
2795	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2796	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2797	priv->sram_desc.last_cb_index = 0;
2798
2799	IPW_DEBUG_FW("<<\n");
2800}
2801
2802static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2803					  struct command_block *cb)
2804{
2805	u32 address =
2806	    IPW_SHARED_SRAM_DMA_CONTROL +
2807	    (sizeof(struct command_block) * index);
2808	IPW_DEBUG_FW(">> :\n");
2809
2810	ipw_write_indirect(priv, address, (u8 *) cb,
2811			   (int)sizeof(struct command_block));
2812
2813	IPW_DEBUG_FW("<< :\n");
2814	return 0;
2815
2816}
2817
2818static int ipw_fw_dma_kick(struct ipw_priv *priv)
2819{
2820	u32 control = 0;
2821	u32 index = 0;
2822
2823	IPW_DEBUG_FW(">> :\n");
2824
2825	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2826		ipw_fw_dma_write_command_block(priv, index,
2827					       &priv->sram_desc.cb_list[index]);
2828
2829	/* Enable the DMA in the CSR register */
2830	ipw_clear_bit(priv, IPW_RESET_REG,
2831		      IPW_RESET_REG_MASTER_DISABLED |
2832		      IPW_RESET_REG_STOP_MASTER);
2833
2834	/* Set the Start bit. */
2835	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2836	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2837
2838	IPW_DEBUG_FW("<< :\n");
2839	return 0;
2840}
2841
2842static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2843{
2844	u32 address;
2845	u32 register_value = 0;
2846	u32 cb_fields_address = 0;
2847
2848	IPW_DEBUG_FW(">> :\n");
2849	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2850	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2851
2852	/* Read the DMA Control register */
2853	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2854	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2855
2856	/* Print the CB values */
2857	cb_fields_address = address;
2858	register_value = ipw_read_reg32(priv, cb_fields_address);
2859	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2860
2861	cb_fields_address += sizeof(u32);
2862	register_value = ipw_read_reg32(priv, cb_fields_address);
2863	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2864
2865	cb_fields_address += sizeof(u32);
2866	register_value = ipw_read_reg32(priv, cb_fields_address);
2867	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2868			  register_value);
2869
2870	cb_fields_address += sizeof(u32);
2871	register_value = ipw_read_reg32(priv, cb_fields_address);
2872	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2873
2874	IPW_DEBUG_FW("<< :\n");
2875}
2876
2877static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2878{
2879	u32 current_cb_address = 0;
2880	u32 current_cb_index = 0;
2881
2882	IPW_DEBUG_FW(">> :\n");
2883	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2884
2885	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2886	    sizeof(struct command_block);
2887
2888	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2889			  current_cb_index, current_cb_address);
2890
2891	IPW_DEBUG_FW("<< :\n");
2892	return current_cb_index;
2893
2894}
2895
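/*
 * Descriptive note (added): each command block written to shared SRAM by the
 * function below carries a control word (transfer flags OR'd with the length),
 * the source and destination addresses, and a status word computed as
 * control ^ src ^ dest.  That XOR is written here as a simple integrity value
 * over the other fields; how the firmware consumes it is not documented in
 * this file.
 */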
2896static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2897					u32 src_address,
2898					u32 dest_address,
2899					u32 length,
2900					int interrupt_enabled, int is_last)
2901{
2902
2903	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2904	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2905	    CB_DEST_SIZE_LONG;
2906	struct command_block *cb;
2907	u32 last_cb_element = 0;
2908
2909	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2910			  src_address, dest_address, length);
2911
2912	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2913		return -1;
2914
2915	last_cb_element = priv->sram_desc.last_cb_index;
2916	cb = &priv->sram_desc.cb_list[last_cb_element];
2917	priv->sram_desc.last_cb_index++;
2918
2919	/* Calculate the new CB control word */
2920	if (interrupt_enabled)
2921		control |= CB_INT_ENABLED;
2922
2923	if (is_last)
2924		control |= CB_LAST_VALID;
2925
2926	control |= length;
2927
2928	/* Calculate the CB Element's checksum value */
2929	cb->status = control ^ src_address ^ dest_address;
2930
2931	/* Copy the Source and Destination addresses */
2932	cb->dest_addr = dest_address;
2933	cb->source_addr = src_address;
2934
2935	/* Copy the Control Word last */
2936	cb->control = control;
2937
2938	return 0;
2939}
2940
2941static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2942				 int nr, u32 dest_address, u32 len)
2943{
2944	int ret, i;
2945	u32 size;
2946
2947	IPW_DEBUG_FW(">>\n");
2948	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2949			  nr, dest_address, len);
2950
2951	for (i = 0; i < nr; i++) {
2952		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2953		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2954						   dest_address +
2955						   i * CB_MAX_LENGTH, size,
2956						   0, 0);
2957		if (ret) {
2958			IPW_DEBUG_FW_INFO(": Failed\n");
2959			return -1;
2960		} else
2961			IPW_DEBUG_FW_INFO(": Added new cb\n");
2962	}
2963
2964	IPW_DEBUG_FW("<<\n");
2965	return 0;
2966}
2967
2968static int ipw_fw_dma_wait(struct ipw_priv *priv)
2969{
2970	u32 current_index = 0, previous_index;
2971	u32 watchdog = 0;
2972
2973	IPW_DEBUG_FW(">> :\n");
2974
2975	current_index = ipw_fw_dma_command_block_index(priv);
2976	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2977			  (int)priv->sram_desc.last_cb_index);
2978
2979	while (current_index < priv->sram_desc.last_cb_index) {
2980		udelay(50);
2981		previous_index = current_index;
2982		current_index = ipw_fw_dma_command_block_index(priv);
2983
2984		if (previous_index < current_index) {
2985			watchdog = 0;
2986			continue;
2987		}
2988		if (++watchdog > 400) {
2989			IPW_DEBUG_FW_INFO("Timeout\n");
2990			ipw_fw_dma_dump_command_block(priv);
2991			ipw_fw_dma_abort(priv);
2992			return -1;
2993		}
2994	}
2995
2996	ipw_fw_dma_abort(priv);
2997
2998	/* Disable the DMA in the CSR register */
2999	ipw_set_bit(priv, IPW_RESET_REG,
3000		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
3001
3002	IPW_DEBUG_FW("<< dmaWaitSync\n");
3003	return 0;
3004}
3005
3006static void ipw_remove_current_network(struct ipw_priv *priv)
3007{
3008	struct list_head *element, *safe;
3009	struct libipw_network *network = NULL;
3010	unsigned long flags;
3011
3012	spin_lock_irqsave(&priv->ieee->lock, flags);
3013	list_for_each_safe(element, safe, &priv->ieee->network_list) {
3014		network = list_entry(element, struct libipw_network, list);
3015		if (ether_addr_equal(network->bssid, priv->bssid)) {
3016			list_del(element);
3017			list_add_tail(&network->list,
3018				      &priv->ieee->network_free_list);
3019		}
3020	}
3021	spin_unlock_irqrestore(&priv->ieee->lock, flags);
3022}
3023
3024/**
3025 * Check that card is still alive.
3026 * Reads debug register from domain0.
3027 * If card is present, pre-defined value should
3028 * be found there.
3029 *
3030 * @param priv
3031 * @return 1 if card is present, 0 otherwise
3032 */
3033static inline int ipw_alive(struct ipw_priv *priv)
3034{
3035	return ipw_read32(priv, 0x90) == 0xd55555d5;
3036}
3037
3038/* timeout in msec, attempted in 10-msec quanta */
3039static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3040			       int timeout)
3041{
3042	int i = 0;
3043
3044	do {
3045		if ((ipw_read32(priv, addr) & mask) == mask)
3046			return i;
3047		mdelay(10);
3048		i += 10;
3049	} while (i < timeout);
3050
3051	return -ETIME;
3052}
3053
3054/* These functions load the firmware and microcode needed to operate the
3055 * ipw hardware.  They assume the buffer holds the complete image and that
3056 * the caller handles memory allocation and cleanup.
3057 */
3058
3059static int ipw_stop_master(struct ipw_priv *priv)
3060{
3061	int rc;
3062
3063	IPW_DEBUG_TRACE(">>\n");
3064	/* stop master. typical delay - 0 */
3065	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3066
3067	/* timeout is in msec, polled in 10-msec quanta */
3068	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3069			  IPW_RESET_REG_MASTER_DISABLED, 100);
3070	if (rc < 0) {
3071		IPW_ERROR("wait for stop master failed after 100ms\n");
3072		return -1;
3073	}
3074
3075	IPW_DEBUG_INFO("stop master %dms\n", rc);
3076
3077	return rc;
3078}
3079
3080static void ipw_arc_release(struct ipw_priv *priv)
3081{
3082	IPW_DEBUG_TRACE(">>\n");
3083	mdelay(5);
3084
3085	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3086
3087	/* no one knows timing, for safety add some delay */
3088	mdelay(5);
3089}
3090
3091struct fw_chunk {
3092	__le32 address;
3093	__le32 length;
3094};
3095
3096static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3097{
3098	int rc = 0, i, addr;
3099	u8 cr = 0;
3100	__le16 *image;
3101
3102	image = (__le16 *) data;
3103
3104	IPW_DEBUG_TRACE(">>\n");
3105
3106	rc = ipw_stop_master(priv);
3107
3108	if (rc < 0)
3109		return rc;
3110
3111	for (addr = IPW_SHARED_LOWER_BOUND;
3112	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3113		ipw_write32(priv, addr, 0);
3114	}
3115
3116	/* no ucode (yet) */
3117	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3118	/* destroy DMA queues */
3119	/* reset sequence */
3120
3121	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3122	ipw_arc_release(priv);
3123	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3124	mdelay(1);
3125
3126	/* reset PHY */
3127	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3128	mdelay(1);
3129
3130	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3131	mdelay(1);
3132
3133	/* enable ucode store */
3134	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3135	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3136	mdelay(1);
3137
3138	/* write ucode */
3139	/**
3140	 * @bug
3141	 * Do NOT set indirect address register once and then
3142	 * store data to indirect data register in the loop.
3143	 * It seems very reasonable, but in this case DINO does not
3144	 * accept the ucode. It is essential to set the address each time.
3145	 */
3146	/* load new ipw uCode */
3147	for (i = 0; i < len / 2; i++)
3148		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3149				le16_to_cpu(image[i]));
3150
3151	/* enable DINO */
3152	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3153	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3154
3155	/* this is where the igx / win driver deviates from the VAP driver. */
3156
3157	/* wait for alive response */
3158	for (i = 0; i < 100; i++) {
3159		/* poll for incoming data */
3160		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3161		if (cr & DINO_RXFIFO_DATA)
3162			break;
3163		mdelay(1);
3164	}
3165
3166	if (cr & DINO_RXFIFO_DATA) {
3167		/* alive_command_response size is NOT a multiple of 4 */
3168		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3169
3170		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3171			response_buffer[i] =
3172			    cpu_to_le32(ipw_read_reg32(priv,
3173						       IPW_BASEBAND_RX_FIFO_READ));
3174		memcpy(&priv->dino_alive, response_buffer,
3175		       sizeof(priv->dino_alive));
3176		if (priv->dino_alive.alive_command == 1
3177		    && priv->dino_alive.ucode_valid == 1) {
3178			rc = 0;
3179			IPW_DEBUG_INFO
3180			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3181			     "of %02d/%02d/%02d %02d:%02d\n",
3182			     priv->dino_alive.software_revision,
3183			     priv->dino_alive.software_revision,
3184			     priv->dino_alive.device_identifier,
3185			     priv->dino_alive.device_identifier,
3186			     priv->dino_alive.time_stamp[0],
3187			     priv->dino_alive.time_stamp[1],
3188			     priv->dino_alive.time_stamp[2],
3189			     priv->dino_alive.time_stamp[3],
3190			     priv->dino_alive.time_stamp[4]);
3191		} else {
3192			IPW_DEBUG_INFO("Microcode is not alive\n");
3193			rc = -EINVAL;
3194		}
3195	} else {
3196		IPW_DEBUG_INFO("No alive response from DINO\n");
3197		rc = -ETIME;
3198	}
3199
3200	/* disable DINO; otherwise, for some reason, the
3201	   firmware has problems getting the alive response */
3202	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3203
3204	return rc;
3205}
3206
3207static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3208{
3209	int ret = -1;
3210	int offset = 0;
3211	struct fw_chunk *chunk;
3212	int total_nr = 0;
3213	int i;
3214	struct pci_pool *pool;
3215	void **virts;
3216	dma_addr_t *phys;
3217
3218	IPW_DEBUG_TRACE(">> :\n");
3219
3220	virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3221			GFP_KERNEL);
3222	if (!virts)
3223		return -ENOMEM;
3224
3225	phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3226			GFP_KERNEL);
3227	if (!phys) {
3228		kfree(virts);
3229		return -ENOMEM;
3230	}
3231	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3232	if (!pool) {
3233		IPW_ERROR("pci_pool_create failed\n");
3234		kfree(phys);
3235		kfree(virts);
3236		return -ENOMEM;
3237	}
3238
3239	/* Start the Dma */
3240	ret = ipw_fw_dma_enable(priv);
3241
3242	/* if the DMA is already active, this would be a bug */
3243	BUG_ON(priv->sram_desc.last_cb_index > 0);
3244
3245	do {
3246		u32 chunk_len;
3247		u8 *start;
3248		int size;
3249		int nr = 0;
3250
3251		chunk = (struct fw_chunk *)(data + offset);
3252		offset += sizeof(struct fw_chunk);
3253		chunk_len = le32_to_cpu(chunk->length);
3254		start = data + offset;
3255
3256		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3257		for (i = 0; i < nr; i++) {
3258			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3259							 &phys[total_nr]);
3260			if (!virts[total_nr]) {
3261				ret = -ENOMEM;
3262				goto out;
3263			}
3264			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3265				     CB_MAX_LENGTH);
3266			memcpy(virts[total_nr], start, size);
3267			start += size;
3268			total_nr++;
3269			/* We don't support fw chunk larger than 64*8K */
3270			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3271		}
3272
3273		/* build DMA packet and queue up for sending */
3274		/* DMA to chunk->address, the chunk->length bytes from data +
3275		 * offset */
3276		/* Dma loading */
3277		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3278					    nr, le32_to_cpu(chunk->address),
3279					    chunk_len);
3280		if (ret) {
3281			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3282			goto out;
3283		}
3284
3285		offset += chunk_len;
3286	} while (offset < len);
3287
3288	/* Run the DMA and wait for the answer */
3289	ret = ipw_fw_dma_kick(priv);
3290	if (ret) {
3291		IPW_ERROR("dmaKick Failed\n");
3292		goto out;
3293	}
3294
3295	ret = ipw_fw_dma_wait(priv);
3296	if (ret) {
3297		IPW_ERROR("dmaWaitSync Failed\n");
3298		goto out;
3299	}
3300 out:
3301	for (i = 0; i < total_nr; i++)
3302		pci_pool_free(pool, virts[i], phys[i]);
3303
3304	pci_pool_destroy(pool);
3305	kfree(phys);
3306	kfree(virts);
3307
3308	return ret;
3309}
3310
3311/* stop nic */
3312static int ipw_stop_nic(struct ipw_priv *priv)
3313{
3314	int rc = 0;
3315
3316	/* stop */
3317	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3318
3319	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3320			  IPW_RESET_REG_MASTER_DISABLED, 500);
3321	if (rc < 0) {
3322		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3323		return rc;
3324	}
3325
3326	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3327
3328	return rc;
3329}
3330
3331static void ipw_start_nic(struct ipw_priv *priv)
3332{
3333	IPW_DEBUG_TRACE(">>\n");
3334
3335	/* prvHwStartNic  release ARC */
3336	ipw_clear_bit(priv, IPW_RESET_REG,
3337		      IPW_RESET_REG_MASTER_DISABLED |
3338		      IPW_RESET_REG_STOP_MASTER |
3339		      CBD_RESET_REG_PRINCETON_RESET);
3340
3341	/* enable power management */
3342	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3343		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3344
3345	IPW_DEBUG_TRACE("<<\n");
3346}
3347
3348static int ipw_init_nic(struct ipw_priv *priv)
3349{
3350	int rc;
3351
3352	IPW_DEBUG_TRACE(">>\n");
3353	/* reset */
3354	/*prvHwInitNic */
3355	/* set "initialization complete" bit to move adapter to D0 state */
3356	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3357
3358	/* low-level PLL activation */
3359	ipw_write32(priv, IPW_READ_INT_REGISTER,
3360		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3361
3362	/* wait for clock stabilization */
3363	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3364			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3365	if (rc < 0)
3366		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3367
3368	/* assert SW reset */
3369	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3370
3371	udelay(10);
3372
3373	/* set "initialization complete" bit to move adapter to D0 state */
3374	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3375
3376	IPW_DEBUG_TRACE("<<\n");
3377	return 0;
3378}
3379
3380/* Call this function from process context; it will sleep in request_firmware.
3381 * Probe is an OK place to call this from.
3382 */
3383static int ipw_reset_nic(struct ipw_priv *priv)
3384{
3385	int rc = 0;
3386	unsigned long flags;
3387
3388	IPW_DEBUG_TRACE(">>\n");
3389
3390	rc = ipw_init_nic(priv);
3391
3392	spin_lock_irqsave(&priv->lock, flags);
3393	/* Clear the 'host command active' bit... */
3394	priv->status &= ~STATUS_HCMD_ACTIVE;
3395	wake_up_interruptible(&priv->wait_command_queue);
3396	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3397	wake_up_interruptible(&priv->wait_state);
3398	spin_unlock_irqrestore(&priv->lock, flags);
3399
3400	IPW_DEBUG_TRACE("<<\n");
3401	return rc;
3402}
3403
3404
3405struct ipw_fw {
3406	__le32 ver;
3407	__le32 boot_size;
3408	__le32 ucode_size;
3409	__le32 fw_size;
3410	u8 data[0];
3411};
3412
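/*
 * Illustrative layout note (added, derived from the structure above and from
 * ipw_load() below): an ipw2200 firmware file is this header followed by
 * three concatenated images,
 *
 *	| ver | boot_size | ucode_size | fw_size | boot img | ucode img | fw img |
 *
 * so the image pointers used in ipw_load() are just running sums of the
 * three size fields, and ipw_get_fw() checks that the file is at least
 * that large.
 */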
3413static int ipw_get_fw(struct ipw_priv *priv,
3414		      const struct firmware **raw, const char *name)
3415{
3416	struct ipw_fw *fw;
3417	int rc;
3418
3419	/* ask firmware_class module to get the boot firmware off disk */
3420	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3421	if (rc < 0) {
3422		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3423		return rc;
3424	}
3425
3426	if ((*raw)->size < sizeof(*fw)) {
3427		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3428		return -EINVAL;
3429	}
3430
3431	fw = (void *)(*raw)->data;
3432
3433	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3434	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3435		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3436			  name, (*raw)->size);
3437		return -EINVAL;
3438	}
3439
3440	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3441		       name,
3442		       le32_to_cpu(fw->ver) >> 16,
3443		       le32_to_cpu(fw->ver) & 0xff,
3444		       (*raw)->size - sizeof(*fw));
3445	return 0;
3446}
3447
3448#define IPW_RX_BUF_SIZE (3000)
3449
3450static void ipw_rx_queue_reset(struct ipw_priv *priv,
3451				      struct ipw_rx_queue *rxq)
3452{
3453	unsigned long flags;
3454	int i;
3455
3456	spin_lock_irqsave(&rxq->lock, flags);
3457
3458	INIT_LIST_HEAD(&rxq->rx_free);
3459	INIT_LIST_HEAD(&rxq->rx_used);
3460
3461	/* Fill the rx_used queue with _all_ of the Rx buffers */
3462	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3463		/* In the reset function, these buffers may have been allocated
3464		 * to an SKB, so we need to unmap and free potential storage */
3465		if (rxq->pool[i].skb != NULL) {
3466			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3467					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3468			dev_kfree_skb(rxq->pool[i].skb);
3469			rxq->pool[i].skb = NULL;
3470		}
3471		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3472	}
3473
3474	/* Set us so that we have processed and used all buffers, but have
3475	 * not restocked the Rx queue with fresh buffers */
3476	rxq->read = rxq->write = 0;
3477	rxq->free_count = 0;
3478	spin_unlock_irqrestore(&rxq->lock, flags);
3479}
3480
3481#ifdef CONFIG_PM
3482static int fw_loaded = 0;
3483static const struct firmware *raw = NULL;
3484
3485static void free_firmware(void)
3486{
3487	if (fw_loaded) {
3488		release_firmware(raw);
3489		raw = NULL;
3490		fw_loaded = 0;
3491	}
3492}
3493#else
3494#define free_firmware() do {} while (0)
3495#endif
3496
3497static int ipw_load(struct ipw_priv *priv)
3498{
3499#ifndef CONFIG_PM
3500	const struct firmware *raw = NULL;
3501#endif
3502	struct ipw_fw *fw;
3503	u8 *boot_img, *ucode_img, *fw_img;
3504	u8 *name = NULL;
3505	int rc = 0, retries = 3;
3506
3507	switch (priv->ieee->iw_mode) {
3508	case IW_MODE_ADHOC:
3509		name = "ipw2200-ibss.fw";
3510		break;
3511#ifdef CONFIG_IPW2200_MONITOR
3512	case IW_MODE_MONITOR:
3513		name = "ipw2200-sniffer.fw";
3514		break;
3515#endif
3516	case IW_MODE_INFRA:
3517		name = "ipw2200-bss.fw";
3518		break;
3519	}
3520
3521	if (!name) {
3522		rc = -EINVAL;
3523		goto error;
3524	}
3525
3526#ifdef CONFIG_PM
3527	if (!fw_loaded) {
3528#endif
3529		rc = ipw_get_fw(priv, &raw, name);
3530		if (rc < 0)
3531			goto error;
3532#ifdef CONFIG_PM
3533	}
3534#endif
3535
3536	fw = (void *)raw->data;
3537	boot_img = &fw->data[0];
3538	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3539	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3540			   le32_to_cpu(fw->ucode_size)];
3541
3542	if (rc < 0)
3543		goto error;
3544
3545	if (!priv->rxq)
3546		priv->rxq = ipw_rx_queue_alloc(priv);
3547	else
3548		ipw_rx_queue_reset(priv, priv->rxq);
3549	if (!priv->rxq) {
3550		IPW_ERROR("Unable to initialize Rx queue\n");
3551		rc = -ENOMEM;
3552		goto error;
3553	}
3554
3555      retry:
3556	/* Ensure interrupts are disabled */
3557	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3558	priv->status &= ~STATUS_INT_ENABLED;
3559
3560	/* ack pending interrupts */
3561	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3562
3563	ipw_stop_nic(priv);
3564
3565	rc = ipw_reset_nic(priv);
3566	if (rc < 0) {
3567		IPW_ERROR("Unable to reset NIC\n");
3568		goto error;
3569	}
3570
3571	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3572			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3573
3574	/* DMA the initial boot firmware into the device */
3575	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3576	if (rc < 0) {
3577		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3578		goto error;
3579	}
3580
3581	/* kick start the device */
3582	ipw_start_nic(priv);
3583
3584	/* wait for the device to finish its initial startup sequence */
3585	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3586			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3587	if (rc < 0) {
3588		IPW_ERROR("device failed to boot initial fw image\n");
3589		goto error;
3590	}
3591	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3592
3593	/* ack fw init done interrupt */
3594	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3595
3596	/* DMA the ucode into the device */
3597	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3598	if (rc < 0) {
3599		IPW_ERROR("Unable to load ucode: %d\n", rc);
3600		goto error;
3601	}
3602
3603	/* stop nic */
3604	ipw_stop_nic(priv);
3605
3606	/* DMA bss firmware into the device */
3607	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3608	if (rc < 0) {
3609		IPW_ERROR("Unable to load firmware: %d\n", rc);
3610		goto error;
3611	}
3612#ifdef CONFIG_PM
3613	fw_loaded = 1;
3614#endif
3615
3616	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3617
3618	rc = ipw_queue_reset(priv);
3619	if (rc < 0) {
3620		IPW_ERROR("Unable to initialize queues\n");
3621		goto error;
3622	}
3623
3624	/* Ensure interrupts are disabled */
3625	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3626	/* ack pending interrupts */
3627	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3628
3629	/* kick start the device */
3630	ipw_start_nic(priv);
3631
3632	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3633		if (retries > 0) {
3634			IPW_WARNING("Parity error.  Retrying init.\n");
3635			retries--;
3636			goto retry;
3637		}
3638
3639		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3640		rc = -EIO;
3641		goto error;
3642	}
3643
3644	/* wait for the device */
3645	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3646			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3647	if (rc < 0) {
3648		IPW_ERROR("device failed to start within 500ms\n");
3649		goto error;
3650	}
3651	IPW_DEBUG_INFO("device response after %dms\n", rc);
3652
3653	/* ack fw init done interrupt */
3654	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3655
3656	/* read eeprom data */
3657	priv->eeprom_delay = 1;
3658	ipw_read_eeprom(priv);
3659	/* initialize the eeprom region of sram */
3660	ipw_eeprom_init_sram(priv);
3661
3662	/* enable interrupts */
3663	ipw_enable_interrupts(priv);
3664
3665	/* Ensure our queue has valid packets */
3666	ipw_rx_queue_replenish(priv);
3667
3668	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3669
3670	/* ack pending interrupts */
3671	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3672
3673#ifndef CONFIG_PM
3674	release_firmware(raw);
3675#endif
3676	return 0;
3677
3678      error:
3679	if (priv->rxq) {
3680		ipw_rx_queue_free(priv, priv->rxq);
3681		priv->rxq = NULL;
3682	}
3683	ipw_tx_queue_free(priv);
3684	release_firmware(raw);
3685#ifdef CONFIG_PM
3686	fw_loaded = 0;
3687	raw = NULL;
3688#endif
3689
3690	return rc;
3691}
3692
3693/**
3694 * DMA services
3695 *
3696 * Theory of operation
3697 *
3698 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3699 * Two empty entries are always kept in the buffer to protect against overflow.
3700 *
3701 * For the Tx queues there are low mark and high mark limits.  If, after
3702 * queuing a packet for Tx, the free space drops below the low mark, the Tx
3703 * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
3704 * free space rises above the high mark, the Tx queue is resumed.
3705 *
3706 * The IPW operates with six queues, one receive queue in the device's
3707 * sram, one transmit queue for sending commands to the device firmware,
3708 * and four transmit queues for data.
3709 *
3710 * The four transmit queues allow for performing quality of service (qos)
3711 * transmissions as per the 802.11 protocol.  Currently Linux does not
3712 * provide a mechanism to the user for utilizing prioritized queues, so
3713 * we only utilize the first data transmit queue (queue1).
3714 */
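/*
 * Worked example (added) of the free-slot arithmetic used below, assuming a
 * queue of 64 entries: with read = 10 and write = 8 the raw difference is 2,
 * minus the 2 reserved entries leaves 0 free slots; with read = 8 and
 * write = 10 the difference is -2, wrapped to 62, minus 2 leaves 60 free
 * slots.  The two reserved entries keep a completely full queue from looking
 * identical to an empty one.
 */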
3715
3716/**
3717 * The driver allocates Rx buffers of IPW_RX_BUF_SIZE bytes (defined above).
3718 */
3719
3720/**
3721 * ipw_rx_queue_space - Return number of free slots available in queue.
3722 */
3723static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3724{
3725	int s = q->read - q->write;
3726	if (s <= 0)
3727		s += RX_QUEUE_SIZE;
3728	/* keep some buffer to not confuse full and empty queue */
3729	s -= 2;
3730	if (s < 0)
3731		s = 0;
3732	return s;
3733}
3734
3735static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3736{
3737	int s = q->last_used - q->first_empty;
3738	if (s <= 0)
3739		s += q->n_bd;
3740	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3741	if (s < 0)
3742		s = 0;
3743	return s;
3744}
3745
3746static inline int ipw_queue_inc_wrap(int index, int n_bd)
3747{
3748	return (++index == n_bd) ? 0 : index;
3749}
3750
3751/**
3752 * Initialize common DMA queue structure
3753 *
3754 * @param q                queue to init
3755 * @param count            Number of BD's to allocate. Should be a power of 2
3756 * @param read_register    Address for 'read' register
3757 *                         (not offset within BAR, full address)
3758 * @param write_register   Address for 'write' register
3759 *                         (not offset within BAR, full address)
3760 * @param base_register    Address for 'base' register
3761 *                         (not offset within BAR, full address)
3762 * @param size             Address for 'size' register
3763 *                         (not offset within BAR, full address)
3764 */
3765static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3766			   int count, u32 read, u32 write, u32 base, u32 size)
3767{
3768	q->n_bd = count;
3769
3770	q->low_mark = q->n_bd / 4;
3771	if (q->low_mark < 4)
3772		q->low_mark = 4;
3773
3774	q->high_mark = q->n_bd / 8;
3775	if (q->high_mark < 2)
3776		q->high_mark = 2;
3777
3778	q->first_empty = q->last_used = 0;
3779	q->reg_r = read;
3780	q->reg_w = write;
3781
3782	ipw_write32(priv, base, q->dma_addr);
3783	ipw_write32(priv, size, count);
3784	ipw_write32(priv, read, 0);
3785	ipw_write32(priv, write, 0);
3786
3787	_ipw_read32(priv, 0x90);	/* dummy read-back, presumably to flush posted writes */
3788}
3789
3790static int ipw_queue_tx_init(struct ipw_priv *priv,
3791			     struct clx2_tx_queue *q,
3792			     int count, u32 read, u32 write, u32 base, u32 size)
3793{
3794	struct pci_dev *dev = priv->pci_dev;
3795
3796	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3797	if (!q->txb) {
3798		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3799		return -ENOMEM;
3800	}
3801
3802	q->bd =
3803	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3804	if (!q->bd) {
3805		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3806			  sizeof(q->bd[0]) * count);
3807		kfree(q->txb);
3808		q->txb = NULL;
3809		return -ENOMEM;
3810	}
3811
3812	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3813	return 0;
3814}
3815
3816/**
3817 * Free one TFD, the one at index txq->q.last_used.
3818 * Does NOT advance any indexes.
3819 *
3820 * @param priv
3821 * @param txq
3822 */
3823static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3824				  struct clx2_tx_queue *txq)
3825{
3826	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3827	struct pci_dev *dev = priv->pci_dev;
3828	int i;
3829
3830	/* classify bd */
3831	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3832		/* nothing to cleanup after for host commands */
3833		return;
3834
3835	/* sanity check */
3836	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3837		IPW_ERROR("Too many chunks: %i\n",
3838			  le32_to_cpu(bd->u.data.num_chunks));
3839		/** @todo issue a fatal error, this is a serious situation */
3840		return;
3841	}
3842
3843	/* unmap chunks if any */
3844	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3845		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3846				 le16_to_cpu(bd->u.data.chunk_len[i]),
3847				 PCI_DMA_TODEVICE);
3848		if (txq->txb[txq->q.last_used]) {
3849			libipw_txb_free(txq->txb[txq->q.last_used]);
3850			txq->txb[txq->q.last_used] = NULL;
3851		}
3852	}
3853}
3854
3855/**
3856 * Deallocate DMA queue.
3857 *
3858 * Empty the queue by removing and destroying all BDs.
3859 * Free all buffers.
3860 *
3861 * @param priv
3862 * @param txq
3863 */
3864static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3865{
3866	struct clx2_queue *q = &txq->q;
3867	struct pci_dev *dev = priv->pci_dev;
3868
3869	if (q->n_bd == 0)
3870		return;
3871
3872	/* first, empty all BD's */
3873	for (; q->first_empty != q->last_used;
3874	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3875		ipw_queue_tx_free_tfd(priv, txq);
3876	}
3877
3878	/* free buffers belonging to queue itself */
3879	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3880			    q->dma_addr);
3881	kfree(txq->txb);
3882
3883	/* 0 fill whole structure */
3884	memset(txq, 0, sizeof(*txq));
3885}
3886
3887/**
3888 * Destroy all DMA queues and structures
3889 *
3890 * @param priv
3891 */
3892static void ipw_tx_queue_free(struct ipw_priv *priv)
3893{
3894	/* Tx CMD queue */
3895	ipw_queue_tx_free(priv, &priv->txq_cmd);
3896
3897	/* Tx queues */
3898	ipw_queue_tx_free(priv, &priv->txq[0]);
3899	ipw_queue_tx_free(priv, &priv->txq[1]);
3900	ipw_queue_tx_free(priv, &priv->txq[2]);
3901	ipw_queue_tx_free(priv, &priv->txq[3]);
3902}
3903
3904static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3905{
3906	/* First 3 bytes are manufacturer */
3907	bssid[0] = priv->mac_addr[0];
3908	bssid[1] = priv->mac_addr[1];
3909	bssid[2] = priv->mac_addr[2];
3910
3911	/* Last bytes are random */
3912	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3913
3914	bssid[0] &= 0xfe;	/* clear multicast bit */
3915	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3916}
3917
3918static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3919{
3920	struct ipw_station_entry entry;
3921	int i;
3922
3923	for (i = 0; i < priv->num_stations; i++) {
3924		if (ether_addr_equal(priv->stations[i], bssid)) {
3925			/* Another node is active in network */
3926			priv->missed_adhoc_beacons = 0;
3927			if (!(priv->config & CFG_STATIC_CHANNEL))
3928				/* when other nodes drop out, we drop out */
3929				priv->config &= ~CFG_ADHOC_PERSIST;
3930
3931			return i;
3932		}
3933	}
3934
3935	if (i == MAX_STATIONS)
3936		return IPW_INVALID_STATION;
3937
3938	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3939
3940	entry.reserved = 0;
3941	entry.support_mode = 0;
3942	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3943	memcpy(priv->stations[i], bssid, ETH_ALEN);
3944	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3945			 &entry, sizeof(entry));
3946	priv->num_stations++;
3947
3948	return i;
3949}
3950
3951static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3952{
3953	int i;
3954
3955	for (i = 0; i < priv->num_stations; i++)
3956		if (ether_addr_equal(priv->stations[i], bssid))
3957			return i;
3958
3959	return IPW_INVALID_STATION;
3960}
3961
3962static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3963{
3964	int err;
3965
3966	if (priv->status & STATUS_ASSOCIATING) {
3967		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3968		schedule_work(&priv->disassociate);
3969		return;
3970	}
3971
3972	if (!(priv->status & STATUS_ASSOCIATED)) {
3973		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3974		return;
3975	}
3976
3977	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3978			"on channel %d.\n",
3979			priv->assoc_request.bssid,
3980			priv->assoc_request.channel);
3981
3982	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3983	priv->status |= STATUS_DISASSOCIATING;
3984
3985	if (quiet)
3986		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3987	else
3988		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3989
3990	err = ipw_send_associate(priv, &priv->assoc_request);
3991	if (err) {
3992		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3993			     "failed.\n");
3994		return;
3995	}
3996
3997}
3998
3999static int ipw_disassociate(void *data)
4000{
4001	struct ipw_priv *priv = data;
4002	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
4003		return 0;
4004	ipw_send_disassociate(data, 0);
4005	netif_carrier_off(priv->net_dev);
4006	return 1;
4007}
4008
4009static void ipw_bg_disassociate(struct work_struct *work)
4010{
4011	struct ipw_priv *priv =
4012		container_of(work, struct ipw_priv, disassociate);
4013	mutex_lock(&priv->mutex);
4014	ipw_disassociate(priv);
4015	mutex_unlock(&priv->mutex);
4016}
4017
4018static void ipw_system_config(struct work_struct *work)
4019{
4020	struct ipw_priv *priv =
4021		container_of(work, struct ipw_priv, system_config);
4022
4023#ifdef CONFIG_IPW2200_PROMISCUOUS
4024	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4025		priv->sys_config.accept_all_data_frames = 1;
4026		priv->sys_config.accept_non_directed_frames = 1;
4027		priv->sys_config.accept_all_mgmt_bcpr = 1;
4028		priv->sys_config.accept_all_mgmt_frames = 1;
4029	}
4030#endif
4031
4032	ipw_send_system_config(priv);
4033}
4034
4035struct ipw_status_code {
4036	u16 status;
4037	const char *reason;
4038};
4039
4040static const struct ipw_status_code ipw_status_codes[] = {
4041	{0x00, "Successful"},
4042	{0x01, "Unspecified failure"},
4043	{0x0A, "Cannot support all requested capabilities in the "
4044	 "Capability information field"},
4045	{0x0B, "Reassociation denied due to inability to confirm that "
4046	 "association exists"},
4047	{0x0C, "Association denied due to reason outside the scope of this "
4048	 "standard"},
4049	{0x0D,
4050	 "Responding station does not support the specified authentication "
4051	 "algorithm"},
4052	{0x0E,
4053	 "Received an Authentication frame with authentication sequence "
4054	 "transaction sequence number out of expected sequence"},
4055	{0x0F, "Authentication rejected because of challenge failure"},
4056	{0x10, "Authentication rejected due to timeout waiting for next "
4057	 "frame in sequence"},
4058	{0x11, "Association denied because AP is unable to handle additional "
4059	 "associated stations"},
4060	{0x12,
4061	 "Association denied due to requesting station not supporting all "
4062	 "of the datarates in the BSSBasicServiceSet Parameter"},
4063	{0x13,
4064	 "Association denied due to requesting station not supporting "
4065	 "short preamble operation"},
4066	{0x14,
4067	 "Association denied due to requesting station not supporting "
4068	 "PBCC encoding"},
4069	{0x15,
4070	 "Association denied due to requesting station not supporting "
4071	 "channel agility"},
4072	{0x19,
4073	 "Association denied due to requesting station not supporting "
4074	 "short slot operation"},
4075	{0x1A,
4076	 "Association denied due to requesting station not supporting "
4077	 "DSSS-OFDM operation"},
4078	{0x28, "Invalid Information Element"},
4079	{0x29, "Group Cipher is not valid"},
4080	{0x2A, "Pairwise Cipher is not valid"},
4081	{0x2B, "AKMP is not valid"},
4082	{0x2C, "Unsupported RSN IE version"},
4083	{0x2D, "Invalid RSN IE Capabilities"},
4084	{0x2E, "Cipher suite is rejected per security policy"},
4085};
4086
4087static const char *ipw_get_status_code(u16 status)
4088{
4089	int i;
4090	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4091		if (ipw_status_codes[i].status == (status & 0xff))
4092			return ipw_status_codes[i].reason;
4093	return "Unknown status value.";
4094}
4095
4096static inline void average_init(struct average *avg)
4097{
4098	memset(avg, 0, sizeof(*avg));
4099}
4100
4101#define DEPTH_RSSI 8
4102#define DEPTH_NOISE 16
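/*
 * exponential_average() below is a simple integer IIR filter: each new
 * sample contributes 1/depth of its value, so with DEPTH_RSSI = 8 a new
 * reading moves the average by roughly (val - prev_avg) / 8 (subject to
 * integer truncation).
 */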
4103static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4104{
4105	return ((depth-1)*prev_avg +  val)/depth;
4106}
4107
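/*
 * Sliding-window average over the last AVG_ENTRIES samples: 'sum' tracks
 * the window total, 'pos' is the next slot to overwrite, and 'init' is set
 * once the window has wrapped so average_value() divides by the full window
 * size instead of the partial fill count.
 */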
4108static void average_add(struct average *avg, s16 val)
4109{
4110	avg->sum -= avg->entries[avg->pos];
4111	avg->sum += val;
4112	avg->entries[avg->pos++] = val;
4113	if (unlikely(avg->pos == AVG_ENTRIES)) {
4114		avg->init = 1;
4115		avg->pos = 0;
4116	}
4117}
4118
4119static s16 average_value(struct average *avg)
4120{
4121	if (!unlikely(avg->init)) {
4122		if (avg->pos)
4123			return avg->sum / avg->pos;
4124		return 0;
4125	}
4126
4127	return avg->sum / AVG_ENTRIES;
4128}
4129
4130static void ipw_reset_stats(struct ipw_priv *priv)
4131{
4132	u32 len = sizeof(u32);
4133
4134	priv->quality = 0;
4135
4136	average_init(&priv->average_missed_beacons);
4137	priv->exp_avg_rssi = -60;
4138	priv->exp_avg_noise = -85 + 0x100;
4139
4140	priv->last_rate = 0;
4141	priv->last_missed_beacons = 0;
4142	priv->last_rx_packets = 0;
4143	priv->last_tx_packets = 0;
4144	priv->last_tx_failures = 0;
4145
4146	/* Firmware managed, reset only when NIC is restarted, so we have to
4147	 * normalize on the current value */
4148	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4149			&priv->last_rx_err, &len);
4150	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4151			&priv->last_tx_failures, &len);
4152
4153	/* Driver managed, reset with each association */
4154	priv->missed_adhoc_beacons = 0;
4155	priv->missed_beacons = 0;
4156	priv->tx_packets = 0;
4157	priv->rx_packets = 0;
4158
4159}
4160
4161static u32 ipw_get_max_rate(struct ipw_priv *priv)
4162{
4163	u32 i = 0x80000000;
4164	u32 mask = priv->rates_mask;
4165	/* If currently associated in B mode, restrict the maximum
4166	 * rate match to B rates */
4167	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4168		mask &= LIBIPW_CCK_RATES_MASK;
4169
4170	/* TODO: Verify that the rate is supported by the current rates
4171	 * list. */
4172
4173	while (i && !(mask & i))
4174		i >>= 1;
4175	switch (i) {
4176	case LIBIPW_CCK_RATE_1MB_MASK:
4177		return 1000000;
4178	case LIBIPW_CCK_RATE_2MB_MASK:
4179		return 2000000;
4180	case LIBIPW_CCK_RATE_5MB_MASK:
4181		return 5500000;
4182	case LIBIPW_OFDM_RATE_6MB_MASK:
4183		return 6000000;
4184	case LIBIPW_OFDM_RATE_9MB_MASK:
4185		return 9000000;
4186	case LIBIPW_CCK_RATE_11MB_MASK:
4187		return 11000000;
4188	case LIBIPW_OFDM_RATE_12MB_MASK:
4189		return 12000000;
4190	case LIBIPW_OFDM_RATE_18MB_MASK:
4191		return 18000000;
4192	case LIBIPW_OFDM_RATE_24MB_MASK:
4193		return 24000000;
4194	case LIBIPW_OFDM_RATE_36MB_MASK:
4195		return 36000000;
4196	case LIBIPW_OFDM_RATE_48MB_MASK:
4197		return 48000000;
4198	case LIBIPW_OFDM_RATE_54MB_MASK:
4199		return 54000000;
4200	}
4201
4202	if (priv->ieee->mode == IEEE_B)
4203		return 11000000;
4204	else
4205		return 54000000;
4206}
4207
4208static u32 ipw_get_current_rate(struct ipw_priv *priv)
4209{
4210	u32 rate, len = sizeof(rate);
4211	int err;
4212
4213	if (!(priv->status & STATUS_ASSOCIATED))
4214		return 0;
4215
4216	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4217		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4218				      &len);
4219		if (err) {
4220			IPW_DEBUG_INFO("failed querying ordinals.\n");
4221			return 0;
4222		}
4223	} else
4224		return ipw_get_max_rate(priv);
4225
4226	switch (rate) {
4227	case IPW_TX_RATE_1MB:
4228		return 1000000;
4229	case IPW_TX_RATE_2MB:
4230		return 2000000;
4231	case IPW_TX_RATE_5MB:
4232		return 5500000;
4233	case IPW_TX_RATE_6MB:
4234		return 6000000;
4235	case IPW_TX_RATE_9MB:
4236		return 9000000;
4237	case IPW_TX_RATE_11MB:
4238		return 11000000;
4239	case IPW_TX_RATE_12MB:
4240		return 12000000;
4241	case IPW_TX_RATE_18MB:
4242		return 18000000;
4243	case IPW_TX_RATE_24MB:
4244		return 24000000;
4245	case IPW_TX_RATE_36MB:
4246		return 36000000;
4247	case IPW_TX_RATE_48MB:
4248		return 48000000;
4249	case IPW_TX_RATE_54MB:
4250		return 54000000;
4251	}
4252
4253	return 0;
4254}
4255
4256#define IPW_STATS_INTERVAL (2 * HZ)
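/* Collect link statistics and derive priv->quality; while we remain
 * associated the work re-arms itself every IPW_STATS_INTERVAL (2 seconds)
 * via schedule_delayed_work() at the end. */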
4257static void ipw_gather_stats(struct ipw_priv *priv)
4258{
4259	u32 rx_err, rx_err_delta, rx_packets_delta;
4260	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4261	u32 missed_beacons_percent, missed_beacons_delta;
4262	u32 quality = 0;
4263	u32 len = sizeof(u32);
4264	s16 rssi;
4265	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4266	    rate_quality;
4267	u32 max_rate;
4268
4269	if (!(priv->status & STATUS_ASSOCIATED)) {
4270		priv->quality = 0;
4271		return;
4272	}
4273
4274	/* Update the statistics */
4275	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4276			&priv->missed_beacons, &len);
4277	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4278	priv->last_missed_beacons = priv->missed_beacons;
4279	if (priv->assoc_request.beacon_interval) {
4280		missed_beacons_percent = missed_beacons_delta *
4281		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4282		    (IPW_STATS_INTERVAL * 10);
4283	} else {
4284		missed_beacons_percent = 0;
4285	}
4286	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4287
4288	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4289	rx_err_delta = rx_err - priv->last_rx_err;
4290	priv->last_rx_err = rx_err;
4291
4292	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4293	tx_failures_delta = tx_failures - priv->last_tx_failures;
4294	priv->last_tx_failures = tx_failures;
4295
4296	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4297	priv->last_rx_packets = priv->rx_packets;
4298
4299	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4300	priv->last_tx_packets = priv->tx_packets;
4301
4302	/* Calculate quality based on the following:
4303	 *
4304	 * Missed beacon: 100% = 0, 0% = 70% missed
4305	 * Rate: 60% = 1Mbs, 100% = Max
4306	 * Rx and Tx errors represent a straight % of total Rx/Tx
4307	 * RSSI: 100% = > -50,  0% = < -80
4308	 * Rx errors: 100% = 0, 0% = 50% missed
4309	 *
4310	 * The lowest computed quality is used.
4311	 *
4312	 */
4313#define BEACON_THRESHOLD 5
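	/* Illustrative example: with BEACON_THRESHOLD = 5, 10% missed beacons
	 * gives beacon_quality = (90 - 5) * 100 / 95 = 89% (integer math). */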
4314	beacon_quality = 100 - missed_beacons_percent;
4315	if (beacon_quality < BEACON_THRESHOLD)
4316		beacon_quality = 0;
4317	else
4318		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4319		    (100 - BEACON_THRESHOLD);
4320	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4321			beacon_quality, missed_beacons_percent);
4322
4323	priv->last_rate = ipw_get_current_rate(priv);
4324	max_rate = ipw_get_max_rate(priv);
4325	rate_quality = priv->last_rate * 40 / max_rate + 60;
4326	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4327			rate_quality, priv->last_rate / 1000000);
4328
4329	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4330		rx_quality = 100 - (rx_err_delta * 100) /
4331		    (rx_packets_delta + rx_err_delta);
4332	else
4333		rx_quality = 100;
4334	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4335			rx_quality, rx_err_delta, rx_packets_delta);
4336
4337	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4338		tx_quality = 100 - (tx_failures_delta * 100) /
4339		    (tx_packets_delta + tx_failures_delta);
4340	else
4341		tx_quality = 100;
4342	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4343			tx_quality, tx_failures_delta, tx_packets_delta);
4344
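	/* Map the averaged RSSI onto a 0-100 scale between worst_rssi and
	 * perfect_rssi; the quadratic term makes the reported quality fall
	 * off faster the further the signal drops below perfect_rssi.  The
	 * result is clamped to [0, 100] just below. */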
4345	rssi = priv->exp_avg_rssi;
4346	signal_quality =
4347	    (100 *
4348	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4349	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4350	     (priv->ieee->perfect_rssi - rssi) *
4351	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4352	      62 * (priv->ieee->perfect_rssi - rssi))) /
4353	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4354	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4355	if (signal_quality > 100)
4356		signal_quality = 100;
4357	else if (signal_quality < 1)
4358		signal_quality = 0;
4359
4360	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4361			signal_quality, rssi);
4362
4363	quality = min(rx_quality, signal_quality);
4364	quality = min(tx_quality, quality);
4365	quality = min(rate_quality, quality);
4366	quality = min(beacon_quality, quality);
4367	if (quality == beacon_quality)
4368		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4369				quality);
4370	if (quality == rate_quality)
4371		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4372				quality);
4373	if (quality == tx_quality)
4374		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4375				quality);
4376	if (quality == rx_quality)
4377		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4378				quality);
4379	if (quality == signal_quality)
4380		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4381				quality);
4382
4383	priv->quality = quality;
4384
4385	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4386}
4387
4388static void ipw_bg_gather_stats(struct work_struct *work)
4389{
4390	struct ipw_priv *priv =
4391		container_of(work, struct ipw_priv, gather_stats.work);
4392	mutex_lock(&priv->mutex);
4393	ipw_gather_stats(priv);
4394	mutex_unlock(&priv->mutex);
4395}
4396
4397/* Missed beacon behavior:
4398 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4399 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4400 * Above disassociate threshold, give up and stop scanning.
4401 * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
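/* Illustrative example (assuming a roaming threshold of 8 and a disassociate
 * threshold of 24): misses at or below 8 just wait (apart from possibly
 * cancelling an in-progress scan), misses 9-24 trigger roaming scans, and
 * more than 24 misses force a disassociation. */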
4402static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4403					    int missed_count)
4404{
4405	priv->notif_missed_beacons = missed_count;
4406
4407	if (missed_count > priv->disassociate_threshold &&
4408	    priv->status & STATUS_ASSOCIATED) {
4409		/* If associated and we've hit the missed
4410		 * beacon threshold, disassociate, turn
4411		 * off roaming, and abort any active scans */
4412		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4413			  IPW_DL_STATE | IPW_DL_ASSOC,
4414			  "Missed beacon: %d - disassociate\n", missed_count);
4415		priv->status &= ~STATUS_ROAMING;
4416		if (priv->status & STATUS_SCANNING) {
4417			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4418				  IPW_DL_STATE,
4419				  "Aborting scan with missed beacon.\n");
4420			schedule_work(&priv->abort_scan);
4421		}
4422
4423		schedule_work(&priv->disassociate);
4424		return;
4425	}
4426
4427	if (priv->status & STATUS_ROAMING) {
4428		/* If we are currently roaming, then just
4429		 * print a debug statement... */
4430		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4431			  "Missed beacon: %d - roam in progress\n",
4432			  missed_count);
4433		return;
4434	}
4435
4436	if (roaming &&
4437	    (missed_count > priv->roaming_threshold &&
4438	     missed_count <= priv->disassociate_threshold)) {
4439		/* If we are not already roaming, set the ROAM
4440		 * bit in the status and kick off a scan.
4441		 * This can happen several times before we reach
4442		 * disassociate_threshold. */
4443		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4444			  "Missed beacon: %d - initiate "
4445			  "roaming\n", missed_count);
4446		if (!(priv->status & STATUS_ROAMING)) {
4447			priv->status |= STATUS_ROAMING;
4448			if (!(priv->status & STATUS_SCANNING))
4449				schedule_delayed_work(&priv->request_scan, 0);
4450		}
4451		return;
4452	}
4453
4454	if (priv->status & STATUS_SCANNING &&
4455	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4456		/* Stop scan to keep fw from getting
4457		 * stuck (only if we aren't roaming --
4458		 * otherwise we'll never scan more than 2 or 3
4459		 * channels..) */
4460		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4461			  "Aborting scan with missed beacon.\n");
4462		schedule_work(&priv->abort_scan);
4463	}
4464
4465	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4466}
4467
4468static void ipw_scan_event(struct work_struct *work)
4469{
4470	union iwreq_data wrqu;
4471
4472	struct ipw_priv *priv =
4473		container_of(work, struct ipw_priv, scan_event.work);
4474
4475	wrqu.data.length = 0;
4476	wrqu.data.flags = 0;
4477	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4478}
4479
4480static void handle_scan_event(struct ipw_priv *priv)
4481{
4482	/* Only userspace-requested scan completion events go out immediately */
4483	if (!priv->user_requested_scan) {
4484		schedule_delayed_work(&priv->scan_event,
4485				      round_jiffies_relative(msecs_to_jiffies(4000)));
4486	} else {
4487		priv->user_requested_scan = 0;
4488		mod_delayed_work(system_wq, &priv->scan_event, 0);
4489	}
4490}
4491
4492/**
4493 * Handle host notification packet.
4494 * Called from interrupt routine
4495 */
4496static void ipw_rx_notification(struct ipw_priv *priv,
4497				       struct ipw_rx_notification *notif)
4498{
4499	u16 size = le16_to_cpu(notif->size);
4500
4501	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4502
4503	switch (notif->subtype) {
4504	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4505			struct notif_association *assoc = &notif->u.assoc;
4506
4507			switch (assoc->state) {
4508			case CMAS_ASSOCIATED:{
4509					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4510						  IPW_DL_ASSOC,
4511						  "associated: '%*pE' %pM\n",
4512						  priv->essid_len, priv->essid,
4513						  priv->bssid);
4514
4515					switch (priv->ieee->iw_mode) {
4516					case IW_MODE_INFRA:
4517						memcpy(priv->ieee->bssid,
4518						       priv->bssid, ETH_ALEN);
4519						break;
4520
4521					case IW_MODE_ADHOC:
4522						memcpy(priv->ieee->bssid,
4523						       priv->bssid, ETH_ALEN);
4524
4525						/* clear out the station table */
4526						priv->num_stations = 0;
4527
4528						IPW_DEBUG_ASSOC
4529						    ("queueing adhoc check\n");
4530						schedule_delayed_work(
4531							&priv->adhoc_check,
4532							le16_to_cpu(priv->
4533							assoc_request.
4534							beacon_interval));
4535						break;
4536					}
4537
4538					priv->status &= ~STATUS_ASSOCIATING;
4539					priv->status |= STATUS_ASSOCIATED;
4540					schedule_work(&priv->system_config);
4541
4542#ifdef CONFIG_IPW2200_QOS
4543#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4544			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4545					if ((priv->status & STATUS_AUTH) &&
4546					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4547					     == IEEE80211_STYPE_ASSOC_RESP)) {
4548						if ((sizeof
4549						     (struct
4550						      libipw_assoc_response)
4551						     <= size)
4552						    && (size <= 2314)) {
4553							struct
4554							libipw_rx_stats
4555							    stats = {
4556								.len = size - 1,
4557							};
4558
4559							IPW_DEBUG_QOS
4560							    ("QoS Associate "
4561							     "size %d\n", size);
4562							libipw_rx_mgt(priv->
4563									 ieee,
4564									 (struct
4565									  libipw_hdr_4addr
4566									  *)
4567									 &notif->u.raw, &stats);
4568						}
4569					}
4570#endif
4571
4572					schedule_work(&priv->link_up);
4573
4574					break;
4575				}
4576
4577			case CMAS_AUTHENTICATED:{
4578					if (priv->
4579					    status & (STATUS_ASSOCIATED |
4580						      STATUS_AUTH)) {
4581						struct notif_authenticate *auth
4582						    = &notif->u.auth;
4583						IPW_DEBUG(IPW_DL_NOTIF |
4584							  IPW_DL_STATE |
4585							  IPW_DL_ASSOC,
4586							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4587							  priv->essid_len,
4588							  priv->essid,
4589							  priv->bssid,
4590							  le16_to_cpu(auth->status),
4591							  ipw_get_status_code
4592							  (le16_to_cpu
4593							   (auth->status)));
4594
4595						priv->status &=
4596						    ~(STATUS_ASSOCIATING |
4597						      STATUS_AUTH |
4598						      STATUS_ASSOCIATED);
4599
4600						schedule_work(&priv->link_down);
4601						break;
4602					}
4603
4604					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4605						  IPW_DL_ASSOC,
4606						  "authenticated: '%*pE' %pM\n",
4607						  priv->essid_len, priv->essid,
4608						  priv->bssid);
4609					break;
4610				}
4611
4612			case CMAS_INIT:{
4613					if (priv->status & STATUS_AUTH) {
4614						struct
4615						    libipw_assoc_response
4616						*resp;
4617						resp =
4618						    (struct
4619						     libipw_assoc_response
4620						     *)&notif->u.raw;
4621						IPW_DEBUG(IPW_DL_NOTIF |
4622							  IPW_DL_STATE |
4623							  IPW_DL_ASSOC,
4624							  "association failed (0x%04X): %s\n",
4625							  le16_to_cpu(resp->status),
4626							  ipw_get_status_code
4627							  (le16_to_cpu
4628							   (resp->status)));
4629					}
4630
4631					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4632						  IPW_DL_ASSOC,
4633						  "disassociated: '%*pE' %pM\n",
4634						  priv->essid_len, priv->essid,
4635						  priv->bssid);
4636
4637					priv->status &=
4638					    ~(STATUS_DISASSOCIATING |
4639					      STATUS_ASSOCIATING |
4640					      STATUS_ASSOCIATED | STATUS_AUTH);
4641					if (priv->assoc_network
4642					    && (priv->assoc_network->
4643						capability &
4644						WLAN_CAPABILITY_IBSS))
4645						ipw_remove_current_network
4646						    (priv);
4647
4648					schedule_work(&priv->link_down);
4649
4650					break;
4651				}
4652
4653			case CMAS_RX_ASSOC_RESP:
4654				break;
4655
4656			default:
4657				IPW_ERROR("assoc: unknown (%d)\n",
4658					  assoc->state);
4659				break;
4660			}
4661
4662			break;
4663		}
4664
4665	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4666			struct notif_authenticate *auth = &notif->u.auth;
4667			switch (auth->state) {
4668			case CMAS_AUTHENTICATED:
4669				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4670					  "authenticated: '%*pE' %pM\n",
4671					  priv->essid_len, priv->essid,
4672					  priv->bssid);
4673				priv->status |= STATUS_AUTH;
4674				break;
4675
4676			case CMAS_INIT:
4677				if (priv->status & STATUS_AUTH) {
4678					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4679						  IPW_DL_ASSOC,
4680						  "authentication failed (0x%04X): %s\n",
4681						  le16_to_cpu(auth->status),
4682						  ipw_get_status_code(le16_to_cpu
4683								      (auth->
4684								       status)));
4685				}
4686				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4687					  IPW_DL_ASSOC,
4688					  "deauthenticated: '%*pE' %pM\n",
4689					  priv->essid_len, priv->essid,
4690					  priv->bssid);
4691
4692				priv->status &= ~(STATUS_ASSOCIATING |
4693						  STATUS_AUTH |
4694						  STATUS_ASSOCIATED);
4695
4696				schedule_work(&priv->link_down);
4697				break;
4698
4699			case CMAS_TX_AUTH_SEQ_1:
4700				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4701					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4702				break;
4703			case CMAS_RX_AUTH_SEQ_2:
4704				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4705					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4706				break;
4707			case CMAS_AUTH_SEQ_1_PASS:
4708				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4709					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4710				break;
4711			case CMAS_AUTH_SEQ_1_FAIL:
4712				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4713					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4714				break;
4715			case CMAS_TX_AUTH_SEQ_3:
4716				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4717					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4718				break;
4719			case CMAS_RX_AUTH_SEQ_4:
4720				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4721					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4722				break;
4723			case CMAS_AUTH_SEQ_2_PASS:
4724				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4725					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4726				break;
4727			case CMAS_AUTH_SEQ_2_FAIL:
4728				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4729					  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4730				break;
4731			case CMAS_TX_ASSOC:
4732				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4733					  IPW_DL_ASSOC, "TX_ASSOC\n");
4734				break;
4735			case CMAS_RX_ASSOC_RESP:
4736				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4737					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4738
4739				break;
4740			case CMAS_ASSOCIATED:
4741				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4742					  IPW_DL_ASSOC, "ASSOCIATED\n");
4743				break;
4744			default:
4745				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4746						auth->state);
4747				break;
4748			}
4749			break;
4750		}
4751
4752	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4753			struct notif_channel_result *x =
4754			    &notif->u.channel_result;
4755
4756			if (size == sizeof(*x)) {
4757				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4758					       x->channel_num);
4759			} else {
4760				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4761					       "(should be %zd)\n",
4762					       size, sizeof(*x));
4763			}
4764			break;
4765		}
4766
4767	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4768			struct notif_scan_complete *x = &notif->u.scan_complete;
4769			if (size == sizeof(*x)) {
4770				IPW_DEBUG_SCAN
4771				    ("Scan completed: type %d, %d channels, "
4772				     "%d status\n", x->scan_type,
4773				     x->num_channels, x->status);
4774			} else {
4775				IPW_ERROR("Scan completed of wrong size %d "
4776					  "(should be %zd)\n",
4777					  size, sizeof(*x));
4778			}
4779
4780			priv->status &=
4781			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4782
4783			wake_up_interruptible(&priv->wait_state);
4784			cancel_delayed_work(&priv->scan_check);
4785
4786			if (priv->status & STATUS_EXIT_PENDING)
4787				break;
4788
4789			priv->ieee->scans++;
4790
4791#ifdef CONFIG_IPW2200_MONITOR
4792			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4793				priv->status |= STATUS_SCAN_FORCED;
4794				schedule_delayed_work(&priv->request_scan, 0);
4795				break;
4796			}
4797			priv->status &= ~STATUS_SCAN_FORCED;
4798#endif				/* CONFIG_IPW2200_MONITOR */
4799
4800			/* Do queued direct scans first */
4801			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4802				schedule_delayed_work(&priv->request_direct_scan, 0);
4803
4804			if (!(priv->status & (STATUS_ASSOCIATED |
4805					      STATUS_ASSOCIATING |
4806					      STATUS_ROAMING |
4807					      STATUS_DISASSOCIATING)))
4808				schedule_work(&priv->associate);
4809			else if (priv->status & STATUS_ROAMING) {
4810				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4811					/* If a scan completed and we are in roam mode, then
4812					 * the scan that completed was the one requested as a
4813					 * result of entering roam... so, schedule the
4814					 * roam work */
4815					schedule_work(&priv->roam);
4816				else
4817					/* Don't schedule if we aborted the scan */
4818					priv->status &= ~STATUS_ROAMING;
4819			} else if (priv->status & STATUS_SCAN_PENDING)
4820				schedule_delayed_work(&priv->request_scan, 0);
4821			else if (priv->config & CFG_BACKGROUND_SCAN
4822				 && priv->status & STATUS_ASSOCIATED)
4823				schedule_delayed_work(&priv->request_scan,
4824						      round_jiffies_relative(HZ));
4825
4826			/* Send an empty event to user space.
4827			 * We don't send the received data on the event because
4828			 * it would require us to do complex transcoding, and
4829			 * we want to minimise the work done in the irq handler.
4830			 * Use a request to extract the data.
4831			 * Also, we generate this even for any scan, regardless
4832			 * of how the scan was initiated. User space can just
4833			 * sync on periodic scan to get fresh data...
4834			 * Jean II */
4835			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4836				handle_scan_event(priv);
4837			break;
4838		}
4839
4840	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4841			struct notif_frag_length *x = &notif->u.frag_len;
4842
4843			if (size == sizeof(*x))
4844				IPW_ERROR("Frag length: %d\n",
4845					  le16_to_cpu(x->frag_length));
4846			else
4847				IPW_ERROR("Frag length of wrong size %d "
4848					  "(should be %zd)\n",
4849					  size, sizeof(*x));
4850			break;
4851		}
4852
4853	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4854			struct notif_link_deterioration *x =
4855			    &notif->u.link_deterioration;
4856
4857			if (size == sizeof(*x)) {
4858				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4859					"link deterioration: type %d, cnt %d\n",
4860					x->silence_notification_type,
4861					x->silence_count);
4862				memcpy(&priv->last_link_deterioration, x,
4863				       sizeof(*x));
4864			} else {
4865				IPW_ERROR("Link Deterioration of wrong size %d "
4866					  "(should be %zd)\n",
4867					  size, sizeof(*x));
4868			}
4869			break;
4870		}
4871
4872	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4873			IPW_ERROR("Dino config\n");
4874			if (priv->hcmd
4875			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4876				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4877
4878			break;
4879		}
4880
4881	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4882			struct notif_beacon_state *x = &notif->u.beacon_state;
4883			if (size != sizeof(*x)) {
4884				IPW_ERROR
4885				    ("Beacon state of wrong size %d (should "
4886				     "be %zd)\n", size, sizeof(*x));
4887				break;
4888			}
4889
4890			if (le32_to_cpu(x->state) ==
4891			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4892				ipw_handle_missed_beacon(priv,
4893							 le32_to_cpu(x->
4894								     number));
4895
4896			break;
4897		}
4898
4899	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4900			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4901			if (size == sizeof(*x)) {
4902				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4903					  "0x%02x station %d\n",
4904					  x->key_state, x->security_type,
4905					  x->station_index);
4906				break;
4907			}
4908
4909			IPW_ERROR
4910			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4911			     size, sizeof(*x));
4912			break;
4913		}
4914
4915	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4916			struct notif_calibration *x = &notif->u.calibration;
4917
4918			if (size == sizeof(*x)) {
4919				memcpy(&priv->calib, x, sizeof(*x));
4920				IPW_DEBUG_INFO("TODO: Calibration\n");
4921				break;
4922			}
4923
4924			IPW_ERROR
4925			    ("Calibration of wrong size %d (should be %zd)\n",
4926			     size, sizeof(*x));
4927			break;
4928		}
4929
4930	case HOST_NOTIFICATION_NOISE_STATS:{
4931			if (size == sizeof(u32)) {
4932				priv->exp_avg_noise =
4933				    exponential_average(priv->exp_avg_noise,
4934				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4935				    DEPTH_NOISE);
4936				break;
4937			}
4938
4939			IPW_ERROR
4940			    ("Noise stat is wrong size %d (should be %zd)\n",
4941			     size, sizeof(u32));
4942			break;
4943		}
4944
4945	default:
4946		IPW_DEBUG_NOTIF("Unknown notification: "
4947				"subtype=%d,flags=0x%2x,size=%d\n",
4948				notif->subtype, notif->flags, size);
4949	}
4950}
4951
4952/**
4953 * Destroy all DMA structures and initialise them again
4954 *
4955 * @param priv
4956 * @return error code
4957 */
4958static int ipw_queue_reset(struct ipw_priv *priv)
4959{
4960	int rc = 0;
4961	/** @todo customize queue sizes */
4962	int nTx = 64, nTxCmd = 8;
4963	ipw_tx_queue_free(priv);
4964	/* Tx CMD queue */
4965	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4966			       IPW_TX_CMD_QUEUE_READ_INDEX,
4967			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4968			       IPW_TX_CMD_QUEUE_BD_BASE,
4969			       IPW_TX_CMD_QUEUE_BD_SIZE);
4970	if (rc) {
4971		IPW_ERROR("Tx Cmd queue init failed\n");
4972		goto error;
4973	}
4974	/* Tx queue(s) */
4975	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4976			       IPW_TX_QUEUE_0_READ_INDEX,
4977			       IPW_TX_QUEUE_0_WRITE_INDEX,
4978			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4979	if (rc) {
4980		IPW_ERROR("Tx 0 queue init failed\n");
4981		goto error;
4982	}
4983	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4984			       IPW_TX_QUEUE_1_READ_INDEX,
4985			       IPW_TX_QUEUE_1_WRITE_INDEX,
4986			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4987	if (rc) {
4988		IPW_ERROR("Tx 1 queue init failed\n");
4989		goto error;
4990	}
4991	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4992			       IPW_TX_QUEUE_2_READ_INDEX,
4993			       IPW_TX_QUEUE_2_WRITE_INDEX,
4994			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4995	if (rc) {
4996		IPW_ERROR("Tx 2 queue init failed\n");
4997		goto error;
4998	}
4999	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5000			       IPW_TX_QUEUE_3_READ_INDEX,
5001			       IPW_TX_QUEUE_3_WRITE_INDEX,
5002			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5003	if (rc) {
5004		IPW_ERROR("Tx 3 queue init failed\n");
5005		goto error;
5006	}
5007	/* statistics */
5008	priv->rx_bufs_min = 0;
5009	priv->rx_pend_max = 0;
5010	return rc;
5011
5012      error:
5013	ipw_tx_queue_free(priv);
5014	return rc;
5015}
5016
5017/**
5018 * Reclaim Tx queue entries that are no longer used by the NIC.
5019 *
5020 * When the FW advances the 'R' index, all entries between the old and
5021 * new 'R' index need to be reclaimed. As a result, some free space is
5022 * created. If there is enough free space (> low mark), wake the Tx queue.
5023 *
5024 * @note Need to protect against garbage in 'R' index
5025 * @param priv
5026 * @param txq
5027 * @param qindex
5028 * @return Number of used entries remaining in the queue
5029 */
5030static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5031				struct clx2_tx_queue *txq, int qindex)
5032{
5033	u32 hw_tail;
5034	int used;
5035	struct clx2_queue *q = &txq->q;
5036
5037	hw_tail = ipw_read32(priv, q->reg_r);
5038	if (hw_tail >= q->n_bd) {
5039		IPW_ERROR
5040		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5041		     hw_tail, q->n_bd);
5042		goto done;
5043	}
5044	for (; q->last_used != hw_tail;
5045	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5046		ipw_queue_tx_free_tfd(priv, txq);
5047		priv->tx_packets++;
5048	}
5049      done:
5050	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5051	    (qindex >= 0))
5052		netif_wake_queue(priv->net_dev);
5053	used = q->first_empty - q->last_used;
5054	if (used < 0)
5055		used += q->n_bd;
5056
5057	return used;
5058}
5059
5060static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5061			     int len, int sync)
5062{
5063	struct clx2_tx_queue *txq = &priv->txq_cmd;
5064	struct clx2_queue *q = &txq->q;
5065	struct tfd_frame *tfd;
5066
5067	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5068		IPW_ERROR("No space for Tx\n");
5069		return -EBUSY;
5070	}
5071
5072	tfd = &txq->bd[q->first_empty];
5073	txq->txb[q->first_empty] = NULL;
5074
5075	memset(tfd, 0, sizeof(*tfd));
5076	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5077	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5078	priv->hcmd_seq++;
5079	tfd->u.cmd.index = hcmd;
5080	tfd->u.cmd.length = len;
5081	memcpy(tfd->u.cmd.payload, buf, len);
5082	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5083	ipw_write32(priv, q->reg_w, q->first_empty);
5084	_ipw_read32(priv, 0x90);
5085
5086	return 0;
5087}
5088
5089/*
5090 * Rx theory of operation
5091 *
5092 * The host allocates 32 DMA target addresses and passes the host address
5093 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5094 * 0 to 31
5095 *
5096 * Rx Queue Indexes
5097 * The host/firmware share two index registers for managing the Rx buffers.
5098 *
5099 * The READ index maps to the first position that the firmware may be writing
5100 * to -- the driver can read up to (but not including) this position and get
5101 * good data.
5102 * The READ index is managed by the firmware once the card is enabled.
5103 *
5104 * The WRITE index maps to the last position the driver has read from -- the
5105 * position preceding WRITE is the last slot in which the firmware can place a packet.
5106 *
5107 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5108 * WRITE = READ.
5109 *
5110 * During initialization the host sets up the READ queue position to the first
5111 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5112 *
5113 * When the firmware places a packet in a buffer it will advance the READ index
5114 * and fire the RX interrupt.  The driver can then query the READ index and
5115 * process as many packets as possible, moving the WRITE index forward as it
5116 * resets the Rx queue buffers with new memory.
5117 *
5118 * The management in the driver is as follows:
5119 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5120 *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5121 *   to replenish the ipw->rxq->rx_free.
5122 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5123 *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5124 *   'processed' and 'read' driver indexes as well)
5125 * + A received packet is processed and handed to the kernel network stack,
5126 *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5127 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5128 *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5129 *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5130 *   were enough free buffers and RX_STALLED is set it is cleared.
5131 *
5132 *
5133 * Driver sequence:
5134 *
5135 * ipw_rx_queue_alloc()       Allocates rx_free
5136 * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5137 *                            ipw_rx_queue_restock
5138 * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5139 *                            queue, updates firmware pointers, and updates
5140 *                            the WRITE index.  If insufficient rx_free buffers
5141 *                            are available, schedules ipw_rx_queue_replenish
5142 *
5143 * -- enable interrupts --
5144 * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5145 *                            READ INDEX, detaching the SKB from the pool.
5146 *                            Moves the packet buffer from queue to rx_used.
5147 *                            Calls ipw_rx_queue_restock to refill any empty
5148 *                            slots.
5149 * ...
5150 *
5151 */
5152
5153/*
5154 * If there are slots in the RX queue that need to be restocked,
5155 * and we have free pre-allocated buffers, fill the ranks as much
5156 * as we can pulling from rx_free.
5157 *
5158 * This moves the 'write' index forward to catch up with 'processed', and
5159 * also updates the memory address in the firmware to reference the new
5160 * target buffer.
5161 */
5162static void ipw_rx_queue_restock(struct ipw_priv *priv)
5163{
5164	struct ipw_rx_queue *rxq = priv->rxq;
5165	struct list_head *element;
5166	struct ipw_rx_mem_buffer *rxb;
5167	unsigned long flags;
5168	int write;
5169
5170	spin_lock_irqsave(&rxq->lock, flags);
5171	write = rxq->write;
5172	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5173		element = rxq->rx_free.next;
5174		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5175		list_del(element);
5176
5177		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5178			    rxb->dma_addr);
5179		rxq->queue[rxq->write] = rxb;
5180		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5181		rxq->free_count--;
5182	}
5183	spin_unlock_irqrestore(&rxq->lock, flags);
5184
5185	/* If the pre-allocated buffer pool is dropping low, schedule to
5186	 * refill it */
5187	if (rxq->free_count <= RX_LOW_WATERMARK)
5188		schedule_work(&priv->rx_replenish);
5189
5190	/* If we've added more space for the firmware to place data, tell it */
5191	if (write != rxq->write)
5192		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5193}
5194
5195/*
5196 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5197 * Also restock the Rx queue via ipw_rx_queue_restock.
5198 *
5199 * This is called as a scheduled work item (except during initialization)
5200 */
5201static void ipw_rx_queue_replenish(void *data)
5202{
5203	struct ipw_priv *priv = data;
5204	struct ipw_rx_queue *rxq = priv->rxq;
5205	struct list_head *element;
5206	struct ipw_rx_mem_buffer *rxb;
5207	unsigned long flags;
5208
5209	spin_lock_irqsave(&rxq->lock, flags);
5210	while (!list_empty(&rxq->rx_used)) {
5211		element = rxq->rx_used.next;
5212		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5213		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5214		if (!rxb->skb) {
5215			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5216			       priv->net_dev->name);
5217			/* We don't reschedule replenish work here -- we will
5218			 * call the restock method and if it still needs
5219			 * more buffers it will schedule replenish */
5220			break;
5221		}
5222		list_del(element);
5223
5224		rxb->dma_addr =
5225		    pci_map_single(priv->pci_dev, rxb->skb->data,
5226				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5227
5228		list_add_tail(&rxb->list, &rxq->rx_free);
5229		rxq->free_count++;
5230	}
5231	spin_unlock_irqrestore(&rxq->lock, flags);
5232
5233	ipw_rx_queue_restock(priv);
5234}
5235
5236static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5237{
5238	struct ipw_priv *priv =
5239		container_of(work, struct ipw_priv, rx_replenish);
5240	mutex_lock(&priv->mutex);
5241	ipw_rx_queue_replenish(priv);
5242	mutex_unlock(&priv->mutex);
5243}
5244
5245/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5246 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5247 * This free routine walks the list of POOL entries and, if SKB is set to
5248 * non-NULL, it is unmapped and freed.
5249 */
5250static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5251{
5252	int i;
5253
5254	if (!rxq)
5255		return;
5256
5257	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5258		if (rxq->pool[i].skb != NULL) {
5259			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5260					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5261			dev_kfree_skb(rxq->pool[i].skb);
5262		}
5263	}
5264
5265	kfree(rxq);
5266}
5267
5268static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5269{
5270	struct ipw_rx_queue *rxq;
5271	int i;
5272
5273	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5274	if (unlikely(!rxq)) {
5275		IPW_ERROR("memory allocation failed\n");
5276		return NULL;
5277	}
5278	spin_lock_init(&rxq->lock);
5279	INIT_LIST_HEAD(&rxq->rx_free);
5280	INIT_LIST_HEAD(&rxq->rx_used);
5281
5282	/* Fill the rx_used queue with _all_ of the Rx buffers */
5283	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5284		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5285
5286	/* Set us so that we have processed and used all buffers, but have
5287	 * not restocked the Rx queue with fresh buffers */
5288	rxq->read = rxq->write = 0;
5289	rxq->free_count = 0;
5290
5291	return rxq;
5292}
5293
5294static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5295{
5296	rate &= ~LIBIPW_BASIC_RATE_MASK;
5297	if (ieee_mode == IEEE_A) {
5298		switch (rate) {
5299		case LIBIPW_OFDM_RATE_6MB:
5300			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5301			    1 : 0;
5302		case LIBIPW_OFDM_RATE_9MB:
5303			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5304			    1 : 0;
5305		case LIBIPW_OFDM_RATE_12MB:
5306			return priv->
5307			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5308		case LIBIPW_OFDM_RATE_18MB:
5309			return priv->
5310			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5311		case LIBIPW_OFDM_RATE_24MB:
5312			return priv->
5313			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5314		case LIBIPW_OFDM_RATE_36MB:
5315			return priv->
5316			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5317		case LIBIPW_OFDM_RATE_48MB:
5318			return priv->
5319			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5320		case LIBIPW_OFDM_RATE_54MB:
5321			return priv->
5322			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5323		default:
5324			return 0;
5325		}
5326	}
5327
5328	/* B and G mixed */
5329	switch (rate) {
5330	case LIBIPW_CCK_RATE_1MB:
5331		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5332	case LIBIPW_CCK_RATE_2MB:
5333		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5334	case LIBIPW_CCK_RATE_5MB:
5335		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5336	case LIBIPW_CCK_RATE_11MB:
5337		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5338	}
5339
5340	/* If we are limited to B modulations, bail at this point */
5341	if (ieee_mode == IEEE_B)
5342		return 0;
5343
5344	/* G */
5345	switch (rate) {
5346	case LIBIPW_OFDM_RATE_6MB:
5347		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5348	case LIBIPW_OFDM_RATE_9MB:
5349		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5350	case LIBIPW_OFDM_RATE_12MB:
5351		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5352	case LIBIPW_OFDM_RATE_18MB:
5353		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5354	case LIBIPW_OFDM_RATE_24MB:
5355		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5356	case LIBIPW_OFDM_RATE_36MB:
5357		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5358	case LIBIPW_OFDM_RATE_48MB:
5359		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5360	case LIBIPW_OFDM_RATE_54MB:
5361		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5362	}
5363
5364	return 0;
5365}
5366
5367static int ipw_compatible_rates(struct ipw_priv *priv,
5368				const struct libipw_network *network,
5369				struct ipw_supported_rates *rates)
5370{
5371	int num_rates, i;
5372
5373	memset(rates, 0, sizeof(*rates));
5374	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5375	rates->num_rates = 0;
5376	for (i = 0; i < num_rates; i++) {
5377		if (!ipw_is_rate_in_mask(priv, network->mode,
5378					 network->rates[i])) {
5379
5380			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5381				IPW_DEBUG_SCAN("Adding masked mandatory "
5382					       "rate %02X\n",
5383					       network->rates[i]);
5384				rates->supported_rates[rates->num_rates++] =
5385				    network->rates[i];
5386				continue;
5387			}
5388
5389			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5390				       network->rates[i], priv->rates_mask);
5391			continue;
5392		}
5393
5394		rates->supported_rates[rates->num_rates++] = network->rates[i];
5395	}
5396
5397	num_rates = min(network->rates_ex_len,
5398			(u8) (IPW_MAX_RATES - num_rates));
5399	for (i = 0; i < num_rates; i++) {
5400		if (!ipw_is_rate_in_mask(priv, network->mode,
5401					 network->rates_ex[i])) {
5402			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5403				IPW_DEBUG_SCAN("Adding masked mandatory "
5404					       "rate %02X\n",
5405					       network->rates_ex[i]);
5406				rates->supported_rates[rates->num_rates++] =
5407				    network->rates_ex[i];
5408				continue;
5409			}
5410
5411			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5412				       network->rates_ex[i], priv->rates_mask);
5413			continue;
5414		}
5415
5416		rates->supported_rates[rates->num_rates++] =
5417		    network->rates_ex[i];
5418	}
5419
5420	return 1;
5421}
5422
5423static void ipw_copy_rates(struct ipw_supported_rates *dest,
5424				  const struct ipw_supported_rates *src)
5425{
5426	u8 i;
5427	for (i = 0; i < src->num_rates; i++)
5428		dest->supported_rates[i] = src->supported_rates[i];
5429	dest->num_rates = src->num_rates;
5430}
5431
5432/* TODO: Look at sniffed packets in the air to determine if the basic rate
5433 * mask should ever be used -- right now all callers to add the scan rates are
5434 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5435static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5436				   u8 modulation, u32 rate_mask)
5437{
5438	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5439	    LIBIPW_BASIC_RATE_MASK : 0;
5440
5441	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5442		rates->supported_rates[rates->num_rates++] =
5443		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5444
5445	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5446		rates->supported_rates[rates->num_rates++] =
5447		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5448
5449	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5450		rates->supported_rates[rates->num_rates++] = basic_mask |
5451		    LIBIPW_CCK_RATE_5MB;
5452
5453	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5454		rates->supported_rates[rates->num_rates++] = basic_mask |
5455		    LIBIPW_CCK_RATE_11MB;
5456}
5457
5458static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5459				    u8 modulation, u32 rate_mask)
5460{
5461	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5462	    LIBIPW_BASIC_RATE_MASK : 0;
5463
5464	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5465		rates->supported_rates[rates->num_rates++] = basic_mask |
5466		    LIBIPW_OFDM_RATE_6MB;
5467
5468	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5469		rates->supported_rates[rates->num_rates++] =
5470		    LIBIPW_OFDM_RATE_9MB;
5471
5472	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5473		rates->supported_rates[rates->num_rates++] = basic_mask |
5474		    LIBIPW_OFDM_RATE_12MB;
5475
5476	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5477		rates->supported_rates[rates->num_rates++] =
5478		    LIBIPW_OFDM_RATE_18MB;
5479
5480	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5481		rates->supported_rates[rates->num_rates++] = basic_mask |
5482		    LIBIPW_OFDM_RATE_24MB;
5483
5484	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5485		rates->supported_rates[rates->num_rates++] =
5486		    LIBIPW_OFDM_RATE_36MB;
5487
5488	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5489		rates->supported_rates[rates->num_rates++] =
5490		    LIBIPW_OFDM_RATE_48MB;
5491
5492	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5493		rates->supported_rates[rates->num_rates++] =
5494		    LIBIPW_OFDM_RATE_54MB;
5495}
5496
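/* A candidate network from the scan results, paired with the set of rates
 * that both the driver and that network support. */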
5497struct ipw_network_match {
5498	struct libipw_network *network;
5499	struct ipw_supported_rates rates;
5500};
5501
5502static int ipw_find_adhoc_network(struct ipw_priv *priv,
5503				  struct ipw_network_match *match,
5504				  struct libipw_network *network,
5505				  int roaming)
5506{
5507	struct ipw_supported_rates rates;
5508
5509	/* Verify that this network's capability is compatible with the
5510	 * current mode (AdHoc or Infrastructure) */
5511	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5512	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5513		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5514				network->ssid_len, network->ssid,
5515				network->bssid);
5516		return 0;
5517	}
5518
5519	if (unlikely(roaming)) {
5520		/* If we are roaming, then check whether this is a valid
5521		 * network to try to roam to */
5522		if ((network->ssid_len != match->network->ssid_len) ||
5523		    memcmp(network->ssid, match->network->ssid,
5524			   network->ssid_len)) {
5525			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5526					network->ssid_len, network->ssid,
5527					network->bssid);
5528			return 0;
5529		}
5530	} else {
5531		/* If an ESSID has been configured then compare the broadcast
5532		 * ESSID to ours */
5533		if ((priv->config & CFG_STATIC_ESSID) &&
5534		    ((network->ssid_len != priv->essid_len) ||
5535		     memcmp(network->ssid, priv->essid,
5536			    min(network->ssid_len, priv->essid_len)))) {
5537			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5538					network->ssid_len, network->ssid,
5539					network->bssid, priv->essid_len,
5540					priv->essid);
5541			return 0;
5542		}
5543	}
5544
5545	/* IBSS merges move towards the network with the larger TSF; skip
5546	 * this network if its timestamp is behind the current match's. */
5547
5548	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5549		IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
5550				match->network->ssid_len, match->network->ssid);
5551		return 0;
5552	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5553		IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
5554				match->network->ssid_len, match->network->ssid);
5555		return 0;
5556	}
5557
5558	/* Now go through and see if the requested network is valid... */
5559	if (priv->ieee->scan_age != 0 &&
5560	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5561		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5562				network->ssid_len, network->ssid,
5563				network->bssid,
5564				jiffies_to_msecs(jiffies -
5565						 network->last_scanned));
5566		return 0;
5567	}
5568
5569	if ((priv->config & CFG_STATIC_CHANNEL) &&
5570	    (network->channel != priv->channel)) {
5571		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5572				network->ssid_len, network->ssid,
5573				network->bssid,
5574				network->channel, priv->channel);
5575		return 0;
5576	}
5577
5578	/* Verify privacy compatibility */
5579	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5580	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5581		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5582				network->ssid_len, network->ssid,
5583				network->bssid,
5584				priv->
5585				capability & CAP_PRIVACY_ON ? "on" : "off",
5586				network->
5587				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5588				"off");
5589		return 0;
5590	}
5591
5592	if (ether_addr_equal(network->bssid, priv->bssid)) {
5593		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5594				network->ssid_len, network->ssid,
5595				network->bssid, priv->bssid);
5596		return 0;
5597	}
5598
5599	/* Filter out any incompatible freq / mode combinations */
5600	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5601		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5602				network->ssid_len, network->ssid,
5603				network->bssid);
5604		return 0;
5605	}
5606
5607	/* Ensure that the rates supported by the driver are compatible with
5608	 * this AP, including verification of basic rates (mandatory) */
5609	if (!ipw_compatible_rates(priv, network, &rates)) {
5610		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5611				network->ssid_len, network->ssid,
5612				network->bssid);
5613		return 0;
5614	}
5615
5616	if (rates.num_rates == 0) {
5617		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5618				network->ssid_len, network->ssid,
5619				network->bssid);
5620		return 0;
5621	}
5622
5623	/* TODO: Perform any further minimal comparative tests.  We do not
5624	 * want to put too much policy logic here; intelligent scan selection
5625	 * should occur within a generic IEEE 802.11 user space tool.  */
5626
5627	/* Set up 'new' AP to this network */
5628	ipw_copy_rates(&match->rates, &rates);
5629	match->network = network;
5630	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5631			network->ssid_len, network->ssid, network->bssid);
5632
5633	return 1;
5634}
5635
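/*
 * Work handler: while associated in ad-hoc mode, walk the scanned network
 * list looking for a better IBSS with our ESSID.  If one is found, drop
 * the current network and disassociate so a new association attempt can
 * merge with it.
 */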
5636static void ipw_merge_adhoc_network(struct work_struct *work)
5637{
5638	struct ipw_priv *priv =
5639		container_of(work, struct ipw_priv, merge_networks);
5640	struct libipw_network *network = NULL;
5641	struct ipw_network_match match = {
5642		.network = priv->assoc_network
5643	};
5644
5645	if ((priv->status & STATUS_ASSOCIATED) &&
5646	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5647		/* First pass through ROAM process -- look for a better
5648		 * network */
5649		unsigned long flags;
5650
5651		spin_lock_irqsave(&priv->ieee->lock, flags);
5652		list_for_each_entry(network, &priv->ieee->network_list, list) {
5653			if (network != priv->assoc_network)
5654				ipw_find_adhoc_network(priv, &match, network,
5655						       1);
5656		}
5657		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5658
5659		if (match.network == priv->assoc_network) {
5660			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5661					"merge to.\n");
5662			return;
5663		}
5664
5665		mutex_lock(&priv->mutex);
5666		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5667			IPW_DEBUG_MERGE("remove network %*pE\n",
5668					priv->essid_len, priv->essid);
5669			ipw_remove_current_network(priv);
5670		}
5671
5672		ipw_disassociate(priv);
5673		priv->assoc_network = match.network;
5674		mutex_unlock(&priv->mutex);
5675		return;
5676	}
5677}
5678
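/*
 * Check whether 'network' is a better association candidate than the
 * current 'match' (capability, ESSID, signal strength, age, channel,
 * privacy and rate compatibility).  If it is, update 'match' and
 * return 1.
 */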
5679static int ipw_best_network(struct ipw_priv *priv,
5680			    struct ipw_network_match *match,
5681			    struct libipw_network *network, int roaming)
5682{
5683	struct ipw_supported_rates rates;
5684
5685	/* Verify that this network's capability is compatible with the
5686	 * current mode (AdHoc or Infrastructure) */
5687	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5688	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5689	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5690	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5691		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5692				network->ssid_len, network->ssid,
5693				network->bssid);
5694		return 0;
5695	}
5696
5697	if (unlikely(roaming)) {
5698		/* If we are roaming, then check that this is a valid
5699		 * network to try and roam to */
5700		if ((network->ssid_len != match->network->ssid_len) ||
5701		    memcmp(network->ssid, match->network->ssid,
5702			   network->ssid_len)) {
5703			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5704					network->ssid_len, network->ssid,
5705					network->bssid);
5706			return 0;
5707		}
5708	} else {
5709		/* If an ESSID has been configured then compare the broadcast
5710		 * ESSID to ours */
5711		if ((priv->config & CFG_STATIC_ESSID) &&
5712		    ((network->ssid_len != priv->essid_len) ||
5713		     memcmp(network->ssid, priv->essid,
5714			    min(network->ssid_len, priv->essid_len)))) {
5715			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5716					network->ssid_len, network->ssid,
5717					network->bssid, priv->essid_len,
5718					priv->essid);
5719			return 0;
5720		}
5721	}
5722
5723	/* If the old network rate is better than this one, don't bother
5724	 * testing everything else. */
5725	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5726		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5727				network->ssid_len, network->ssid,
5728				network->bssid, match->network->ssid_len,
5729				match->network->ssid, match->network->bssid);
5730		return 0;
5731	}
5732
5733	/* If this network has already had an association attempt within the
5734	 * last 3 seconds, do not try and associate again... */
5735	if (network->last_associate &&
5736	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5737		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5738				network->ssid_len, network->ssid,
5739				network->bssid,
5740				jiffies_to_msecs(jiffies -
5741						 network->last_associate));
5742		return 0;
5743	}
5744
5745	/* Now go through and see if the requested network is valid... */
5746	if (priv->ieee->scan_age != 0 &&
5747	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5748		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5749				network->ssid_len, network->ssid,
5750				network->bssid,
5751				jiffies_to_msecs(jiffies -
5752						 network->last_scanned));
5753		return 0;
5754	}
5755
5756	if ((priv->config & CFG_STATIC_CHANNEL) &&
5757	    (network->channel != priv->channel)) {
5758		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5759				network->ssid_len, network->ssid,
5760				network->bssid,
5761				network->channel, priv->channel);
5762		return 0;
5763	}
5764
5765	/* Verify privacy compatibility */
5766	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5767	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5768		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5769				network->ssid_len, network->ssid,
5770				network->bssid,
5771				priv->capability & CAP_PRIVACY_ON ? "on" :
5772				"off",
5773				network->capability &
5774				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5775		return 0;
5776	}
5777
5778	if ((priv->config & CFG_STATIC_BSSID) &&
5779	    !ether_addr_equal(network->bssid, priv->bssid)) {
5780		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5781				network->ssid_len, network->ssid,
5782				network->bssid, priv->bssid);
5783		return 0;
5784	}
5785
5786	/* Filter out any incompatible freq / mode combinations */
5787	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5788		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5789				network->ssid_len, network->ssid,
5790				network->bssid);
5791		return 0;
5792	}
5793
5794	/* Filter out invalid channel in current GEO */
5795	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5796		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5797				network->ssid_len, network->ssid,
5798				network->bssid);
5799		return 0;
5800	}
5801
5802	/* Ensure that the rates supported by the driver are compatible with
5803	 * this AP, including verification of basic rates (mandatory) */
5804	if (!ipw_compatible_rates(priv, network, &rates)) {
5805		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5806				network->ssid_len, network->ssid,
5807				network->bssid);
5808		return 0;
5809	}
5810
5811	if (rates.num_rates == 0) {
5812		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5813				network->ssid_len, network->ssid,
5814				network->bssid);
5815		return 0;
5816	}
5817
5818	/* TODO: Perform any further minimal comparative tests.  We do not
5819	 * want to put too much policy logic here; intelligent scan selection
5820	 * should occur within a generic IEEE 802.11 user space tool.  */
5821
5822	/* Set up 'new' AP to this network */
5823	ipw_copy_rates(&match->rates, &rates);
5824	match->network = network;
5825
5826	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5827			network->ssid_len, network->ssid, network->bssid);
5828
5829	return 1;
5830}
5831
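/*
 * Populate 'network' with the parameters of a new ad-hoc network based on
 * the current configuration: pick a band/channel the firmware can use,
 * generate a BSSID and fill in the SSID, capability bits and rate sets.
 */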
5832static void ipw_adhoc_create(struct ipw_priv *priv,
5833			     struct libipw_network *network)
5834{
5835	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5836	int i;
5837
5838	/*
5839	 * For the purposes of scanning, we can set our wireless mode
5840	 * to trigger scans across combinations of bands, but when it
5841	 * comes to creating a new ad-hoc network, we have to tell the FW
5842	 * exactly which band to use.
5843	 *
5844	 * We also have the possibility of an invalid channel for the
5845	 * chosen band.  Attempting to create a new ad-hoc network
5846	 * with an invalid channel for wireless mode will trigger a
5847	 * FW fatal error.
5848	 *
5849	 */
5850	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5851	case LIBIPW_52GHZ_BAND:
5852		network->mode = IEEE_A;
5853		i = libipw_channel_to_index(priv->ieee, priv->channel);
5854		BUG_ON(i == -1);
5855		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5856			IPW_WARNING("Overriding invalid channel\n");
5857			priv->channel = geo->a[0].channel;
5858		}
5859		break;
5860
5861	case LIBIPW_24GHZ_BAND:
5862		if (priv->ieee->mode & IEEE_G)
5863			network->mode = IEEE_G;
5864		else
5865			network->mode = IEEE_B;
5866		i = libipw_channel_to_index(priv->ieee, priv->channel);
5867		BUG_ON(i == -1);
5868		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5869			IPW_WARNING("Overriding invalid channel\n");
5870			priv->channel = geo->bg[0].channel;
5871		}
5872		break;
5873
5874	default:
5875		IPW_WARNING("Overriding invalid channel\n");
5876		if (priv->ieee->mode & IEEE_A) {
5877			network->mode = IEEE_A;
5878			priv->channel = geo->a[0].channel;
5879		} else if (priv->ieee->mode & IEEE_G) {
5880			network->mode = IEEE_G;
5881			priv->channel = geo->bg[0].channel;
5882		} else {
5883			network->mode = IEEE_B;
5884			priv->channel = geo->bg[0].channel;
5885		}
5886		break;
5887	}
5888
5889	network->channel = priv->channel;
5890	priv->config |= CFG_ADHOC_PERSIST;
5891	ipw_create_bssid(priv, network->bssid);
5892	network->ssid_len = priv->essid_len;
5893	memcpy(network->ssid, priv->essid, priv->essid_len);
5894	memset(&network->stats, 0, sizeof(network->stats));
5895	network->capability = WLAN_CAPABILITY_IBSS;
5896	if (!(priv->config & CFG_PREAMBLE_LONG))
5897		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5898	if (priv->capability & CAP_PRIVACY_ON)
5899		network->capability |= WLAN_CAPABILITY_PRIVACY;
5900	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5901	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5902	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5903	memcpy(network->rates_ex,
5904	       &priv->rates.supported_rates[network->rates_len],
5905	       network->rates_ex_len);
5906	network->last_scanned = 0;
5907	network->flags = 0;
5908	network->last_associate = 0;
5909	network->time_stamp[0] = 0;
5910	network->time_stamp[1] = 0;
5911	network->beacon_interval = 100;	/* Default */
5912	network->listen_interval = 10;	/* Default */
5913	network->atim_window = 0;	/* Default */
5914	network->wpa_ie_len = 0;
5915	network->rsn_ie_len = 0;
5916}
5917
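/*
 * Download the transmit key for the given key index and security type to
 * the firmware via the TGI_TX_KEY host command.  Does nothing if no key
 * is configured at that index.
 */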
5918static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5919{
5920	struct ipw_tgi_tx_key key;
5921
5922	if (!(priv->ieee->sec.flags & (1 << index)))
5923		return;
5924
5925	key.key_id = index;
5926	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5927	key.security_type = type;
5928	key.station_index = 0;	/* always 0 for BSS */
5929	key.flags = 0;
5930	/* 0 for new key; previous value of counter (after fatal error) */
5931	key.tx_counter[0] = cpu_to_le32(0);
5932	key.tx_counter[1] = cpu_to_le32(0);
5933
5934	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5935}
5936
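/*
 * Download the configured WEP key slots (0-3) to the firmware via the
 * WEP_KEY host command; slots without a configured key are skipped.
 */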
5937static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5938{
5939	struct ipw_wep_key key;
5940	int i;
5941
5942	key.cmd_id = DINO_CMD_WEP_KEY;
5943	key.seq_num = 0;
5944
5945	/* Note: AES keys cannot be set multiple times.
5946	 * Only set them the first time. */
5947	for (i = 0; i < 4; i++) {
5948		key.key_index = i | type;
5949		if (!(priv->ieee->sec.flags & (1 << i))) {
5950			key.key_size = 0;
5951			continue;
5952		}
5953
5954		key.key_size = priv->ieee->sec.key_sizes[i];
5955		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5956
5957		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5958	}
5959}
5960
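/*
 * Select host vs. firmware decryption of unicast frames for the given
 * security level (no-op when host encryption is in use).
 */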
5961static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5962{
5963	if (priv->ieee->host_encrypt)
5964		return;
5965
5966	switch (level) {
5967	case SEC_LEVEL_3:
5968		priv->sys_config.disable_unicast_decryption = 0;
5969		priv->ieee->host_decrypt = 0;
5970		break;
5971	case SEC_LEVEL_2:
5972		priv->sys_config.disable_unicast_decryption = 1;
5973		priv->ieee->host_decrypt = 1;
5974		break;
5975	case SEC_LEVEL_1:
5976		priv->sys_config.disable_unicast_decryption = 0;
5977		priv->ieee->host_decrypt = 0;
5978		break;
5979	case SEC_LEVEL_0:
5980		priv->sys_config.disable_unicast_decryption = 1;
5981		break;
5982	default:
5983		break;
5984	}
5985}
5986
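/*
 * Select host vs. firmware decryption of multicast frames for the given
 * security level (no-op when host encryption is in use).
 */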
5987static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5988{
5989	if (priv->ieee->host_encrypt)
5990		return;
5991
5992	switch (level) {
5993	case SEC_LEVEL_3:
5994		priv->sys_config.disable_multicast_decryption = 0;
5995		break;
5996	case SEC_LEVEL_2:
5997		priv->sys_config.disable_multicast_decryption = 1;
5998		break;
5999	case SEC_LEVEL_1:
6000		priv->sys_config.disable_multicast_decryption = 0;
6001		break;
6002	case SEC_LEVEL_0:
6003		priv->sys_config.disable_multicast_decryption = 1;
6004		break;
6005	default:
6006		break;
6007	}
6008}
6009
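/*
 * Push the currently configured keys to the firmware according to the
 * active security level (CCMP, TKIP or WEP).
 */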
6010static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6011{
6012	switch (priv->ieee->sec.level) {
6013	case SEC_LEVEL_3:
6014		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6015			ipw_send_tgi_tx_key(priv,
6016					    DCT_FLAG_EXT_SECURITY_CCM,
6017					    priv->ieee->sec.active_key);
6018
6019		if (!priv->ieee->host_mc_decrypt)
6020			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6021		break;
6022	case SEC_LEVEL_2:
6023		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6024			ipw_send_tgi_tx_key(priv,
6025					    DCT_FLAG_EXT_SECURITY_TKIP,
6026					    priv->ieee->sec.active_key);
6027		break;
6028	case SEC_LEVEL_1:
6029		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6030		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6031		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6032		break;
6033	case SEC_LEVEL_0:
6034	default:
6035		break;
6036	}
6037}
6038
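/*
 * Ad-hoc beacon watchdog: disassociate once too many beacons have been
 * missed (unless ad-hoc persistence is enabled), otherwise reschedule
 * itself based on the beacon interval.
 */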
6039static void ipw_adhoc_check(void *data)
6040{
6041	struct ipw_priv *priv = data;
6042
6043	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6044	    !(priv->config & CFG_ADHOC_PERSIST)) {
6045		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6046			  IPW_DL_STATE | IPW_DL_ASSOC,
6047			  "Missed beacon: %d - disassociate\n",
6048			  priv->missed_adhoc_beacons);
6049		ipw_remove_current_network(priv);
6050		ipw_disassociate(priv);
6051		return;
6052	}
6053
6054	schedule_delayed_work(&priv->adhoc_check,
6055			      le16_to_cpu(priv->assoc_request.beacon_interval));
6056}
6057
6058static void ipw_bg_adhoc_check(struct work_struct *work)
6059{
6060	struct ipw_priv *priv =
6061		container_of(work, struct ipw_priv, adhoc_check.work);
6062	mutex_lock(&priv->mutex);
6063	ipw_adhoc_check(priv);
6064	mutex_unlock(&priv->mutex);
6065}
6066
6067static void ipw_debug_config(struct ipw_priv *priv)
6068{
6069	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6070		       "[CFG 0x%08X]\n", priv->config);
6071	if (priv->config & CFG_STATIC_CHANNEL)
6072		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6073	else
6074		IPW_DEBUG_INFO("Channel unlocked.\n");
6075	if (priv->config & CFG_STATIC_ESSID)
6076		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6077			       priv->essid_len, priv->essid);
6078	else
6079		IPW_DEBUG_INFO("ESSID unlocked.\n");
6080	if (priv->config & CFG_STATIC_BSSID)
6081		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6082	else
6083		IPW_DEBUG_INFO("BSSID unlocked.\n");
6084	if (priv->capability & CAP_PRIVACY_ON)
6085		IPW_DEBUG_INFO("PRIVACY on\n");
6086	else
6087		IPW_DEBUG_INFO("PRIVACY off\n");
6088	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6089}
6090
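/*
 * Translate the user-configured fixed rate mask into the firmware's
 * per-band TX rate format and write it through the fixed rate override
 * pointer.
 */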
6091static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6092{
6093	/* TODO: Verify that this works... */
6094	struct ipw_fixed_rate fr;
6095	u32 reg;
6096	u16 mask = 0;
6097	u16 new_tx_rates = priv->rates_mask;
6098
6099	/* Identify 'current FW band' and match it with the fixed
6100	 * Tx rates */
6101
6102	switch (priv->ieee->freq_band) {
6103	case LIBIPW_52GHZ_BAND:	/* A only */
6104		/* IEEE_A */
6105		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6106			/* Invalid fixed rate mask */
6107			IPW_DEBUG_WX
6108			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6109			new_tx_rates = 0;
6110			break;
6111		}
6112
6113		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6114		break;
6115
6116	default:		/* 2.4Ghz or Mixed */
6117		/* IEEE_B */
6118		if (mode == IEEE_B) {
6119			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6120				/* Invalid fixed rate mask */
6121				IPW_DEBUG_WX
6122				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6123				new_tx_rates = 0;
6124			}
6125			break;
6126		}
6127
6128		/* IEEE_G */
6129		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6130				    LIBIPW_OFDM_RATES_MASK)) {
6131			/* Invalid fixed rate mask */
6132			IPW_DEBUG_WX
6133			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6134			new_tx_rates = 0;
6135			break;
6136		}
6137
6138		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6139			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6140			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6141		}
6142
6143		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6144			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6145			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6146		}
6147
6148		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6149			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6150			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6151		}
6152
6153		new_tx_rates |= mask;
6154		break;
6155	}
6156
6157	fr.tx_rates = cpu_to_le16(new_tx_rates);
6158
6159	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6160	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6161}
6162
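/*
 * Ask the firmware to abort an in-progress scan; concurrent abort
 * requests are ignored.
 */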
6163static void ipw_abort_scan(struct ipw_priv *priv)
6164{
6165	int err;
6166
6167	if (priv->status & STATUS_SCAN_ABORTING) {
6168		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6169		return;
6170	}
6171	priv->status |= STATUS_SCAN_ABORTING;
6172
6173	err = ipw_send_scan_abort(priv);
6174	if (err)
6175		IPW_DEBUG_HC("Request to abort scan failed.\n");
6176}
6177
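/*
 * Fill in the channel list and per-channel scan type of a scan request
 * for the bands supported by the current geography, honouring speed-scan
 * and passive-only restrictions and skipping the channel we are currently
 * associated on.
 */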
6178static void ipw_add_scan_channels(struct ipw_priv *priv,
6179				  struct ipw_scan_request_ext *scan,
6180				  int scan_type)
6181{
6182	int channel_index = 0;
6183	const struct libipw_geo *geo;
6184	int i;
6185
6186	geo = libipw_get_geo(priv->ieee);
6187
6188	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6189		int start = channel_index;
6190		for (i = 0; i < geo->a_channels; i++) {
6191			if ((priv->status & STATUS_ASSOCIATED) &&
6192			    geo->a[i].channel == priv->channel)
6193				continue;
6194			channel_index++;
6195			scan->channels_list[channel_index] = geo->a[i].channel;
6196			ipw_set_scan_type(scan, channel_index,
6197					  geo->a[i].
6198					  flags & LIBIPW_CH_PASSIVE_ONLY ?
6199					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6200					  scan_type);
6201		}
6202
6203		if (start != channel_index) {
6204			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6205			    (channel_index - start);
6206			channel_index++;
6207		}
6208	}
6209
6210	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6211		int start = channel_index;
6212		if (priv->config & CFG_SPEED_SCAN) {
6213			int index;
6214			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6215				/* nop out the list */
6216				[0] = 0
6217			};
6218
6219			u8 channel;
6220			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6221				channel =
6222				    priv->speed_scan[priv->speed_scan_pos];
6223				if (channel == 0) {
6224					priv->speed_scan_pos = 0;
6225					channel = priv->speed_scan[0];
6226				}
6227				if ((priv->status & STATUS_ASSOCIATED) &&
6228				    channel == priv->channel) {
6229					priv->speed_scan_pos++;
6230					continue;
6231				}
6232
6233				/* If this channel has already been
6234				 * added in scan, break from loop
6235				 * and this will be the first channel
6236				 * in the next scan.
6237				 */
6238				if (channels[channel - 1] != 0)
6239					break;
6240
6241				channels[channel - 1] = 1;
6242				priv->speed_scan_pos++;
6243				channel_index++;
6244				scan->channels_list[channel_index] = channel;
6245				index =
6246				    libipw_channel_to_index(priv->ieee, channel);
6247				ipw_set_scan_type(scan, channel_index,
6248						  geo->bg[index].
6249						  flags &
6250						  LIBIPW_CH_PASSIVE_ONLY ?
6251						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6252						  : scan_type);
6253			}
6254		} else {
6255			for (i = 0; i < geo->bg_channels; i++) {
6256				if ((priv->status & STATUS_ASSOCIATED) &&
6257				    geo->bg[i].channel == priv->channel)
6258					continue;
6259				channel_index++;
6260				scan->channels_list[channel_index] =
6261				    geo->bg[i].channel;
6262				ipw_set_scan_type(scan, channel_index,
6263						  geo->bg[i].
6264						  flags &
6265						  LIBIPW_CH_PASSIVE_ONLY ?
6266						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6267						  : scan_type);
6268			}
6269		}
6270
6271		if (start != channel_index) {
6272			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6273			    (channel_index - start);
6274		}
6275	}
6276}
6277
6278static int ipw_passive_dwell_time(struct ipw_priv *priv)
6279{
6280	/* staying on passive channels longer than the DTIM interval during a
6281	 * scan, while associated, causes the firmware to cancel the scan
6282	 * without notification. Hence, don't stay on passive channels longer
6283	 * than the beacon interval.
6284	 */
6285	if (priv->status & STATUS_ASSOCIATED
6286	    && priv->assoc_network->beacon_interval > 10)
6287		return priv->assoc_network->beacon_interval - 10;
6288	else
6289		return 120;
6290}
6291
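/*
 * Build and send a scan request (active, passive or directed).  If a scan
 * is already running, an abort is pending or RF kill is asserted, the
 * request is queued via the status flags and issued later.
 */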
6292static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6293{
6294	struct ipw_scan_request_ext scan;
6295	int err = 0, scan_type;
6296
6297	if (!(priv->status & STATUS_INIT) ||
6298	    (priv->status & STATUS_EXIT_PENDING))
6299		return 0;
6300
6301	mutex_lock(&priv->mutex);
6302
6303	if (direct && (priv->direct_scan_ssid_len == 0)) {
6304		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6305		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6306		goto done;
6307	}
6308
6309	if (priv->status & STATUS_SCANNING) {
6310		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6311		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6312					STATUS_SCAN_PENDING;
6313		goto done;
6314	}
6315
6316	if (!(priv->status & STATUS_SCAN_FORCED) &&
6317	    priv->status & STATUS_SCAN_ABORTING) {
6318		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6319		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6320					STATUS_SCAN_PENDING;
6321		goto done;
6322	}
6323
6324	if (priv->status & STATUS_RF_KILL_MASK) {
6325		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6326		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6327					STATUS_SCAN_PENDING;
6328		goto done;
6329	}
6330
6331	memset(&scan, 0, sizeof(scan));
6332	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6333
6334	if (type == IW_SCAN_TYPE_PASSIVE) {
6335		IPW_DEBUG_WX("use passive scanning\n");
6336		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6337		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6338			cpu_to_le16(ipw_passive_dwell_time(priv));
6339		ipw_add_scan_channels(priv, &scan, scan_type);
6340		goto send_request;
6341	}
6342
6343	/* Use active scan by default. */
6344	if (priv->config & CFG_SPEED_SCAN)
6345		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6346			cpu_to_le16(30);
6347	else
6348		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6349			cpu_to_le16(20);
6350
6351	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6352		cpu_to_le16(20);
6353
6354	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6355		cpu_to_le16(ipw_passive_dwell_time(priv));
6356	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6357
6358#ifdef CONFIG_IPW2200_MONITOR
6359	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6360		u8 channel;
6361		u8 band = 0;
6362
6363		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6364		case LIBIPW_52GHZ_BAND:
6365			band = (u8) (IPW_A_MODE << 6) | 1;
6366			channel = priv->channel;
6367			break;
6368
6369		case LIBIPW_24GHZ_BAND:
6370			band = (u8) (IPW_B_MODE << 6) | 1;
6371			channel = priv->channel;
6372			break;
6373
6374		default:
6375			band = (u8) (IPW_B_MODE << 6) | 1;
6376			channel = 9;
6377			break;
6378		}
6379
6380		scan.channels_list[0] = band;
6381		scan.channels_list[1] = channel;
6382		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6383
6384		/* NOTE:  The card will sit on this channel for this time
6385		 * period.  Scan aborts are timing sensitive and frequently
6386		 * result in firmware restarts.  As such, it is best to
6387		 * set a small dwell_time here and just keep re-issuing
6388		 * scans.  Otherwise fast channel hopping will not actually
6389		 * hop channels.
6390		 *
6391		 * TODO: Move SPEED SCAN support to all modes and bands */
6392		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6393			cpu_to_le16(2000);
6394	} else {
6395#endif				/* CONFIG_IPW2200_MONITOR */
6396		/* Honor direct scans first, otherwise if we are roaming make
6397		 * this a direct scan for the current network.  Finally,
6398		 * ensure that every other scan is a fast channel hop scan */
6399		if (direct) {
6400			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6401			                    priv->direct_scan_ssid_len);
6402			if (err) {
6403				IPW_DEBUG_HC("Attempt to send SSID command "
6404					     "failed.\n");
6405				goto done;
6406			}
6407
6408			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6409		} else if ((priv->status & STATUS_ROAMING)
6410			   || (!(priv->status & STATUS_ASSOCIATED)
6411			       && (priv->config & CFG_STATIC_ESSID)
6412			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6413			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6414			if (err) {
6415				IPW_DEBUG_HC("Attempt to send SSID command "
6416					     "failed.\n");
6417				goto done;
6418			}
6419
6420			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6421		} else
6422			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6423
6424		ipw_add_scan_channels(priv, &scan, scan_type);
6425#ifdef CONFIG_IPW2200_MONITOR
6426	}
6427#endif
6428
6429send_request:
6430	err = ipw_send_scan_request_ext(priv, &scan);
6431	if (err) {
6432		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6433		goto done;
6434	}
6435
6436	priv->status |= STATUS_SCANNING;
6437	if (direct) {
6438		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6439		priv->direct_scan_ssid_len = 0;
6440	} else
6441		priv->status &= ~STATUS_SCAN_PENDING;
6442
6443	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6444done:
6445	mutex_unlock(&priv->mutex);
6446	return err;
6447}
6448
6449static void ipw_request_passive_scan(struct work_struct *work)
6450{
6451	struct ipw_priv *priv =
6452		container_of(work, struct ipw_priv, request_passive_scan.work);
6453	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6454}
6455
6456static void ipw_request_scan(struct work_struct *work)
6457{
6458	struct ipw_priv *priv =
6459		container_of(work, struct ipw_priv, request_scan.work);
6460	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6461}
6462
6463static void ipw_request_direct_scan(struct work_struct *work)
6464{
6465	struct ipw_priv *priv =
6466		container_of(work, struct ipw_priv, request_direct_scan.work);
6467	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6468}
6469
6470static void ipw_bg_abort_scan(struct work_struct *work)
6471{
6472	struct ipw_priv *priv =
6473		container_of(work, struct ipw_priv, abort_scan);
6474	mutex_lock(&priv->mutex);
6475	ipw_abort_scan(priv);
6476	mutex_unlock(&priv->mutex);
6477}
6478
6479static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6480{
6481	/* This is called when wpa_supplicant loads and closes the driver
6482	 * interface. */
6483	priv->ieee->wpa_enabled = value;
6484	return 0;
6485}
6486
6487static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6488{
6489	struct libipw_device *ieee = priv->ieee;
6490	struct libipw_security sec = {
6491		.flags = SEC_AUTH_MODE,
6492	};
6493	int ret = 0;
6494
6495	if (value & IW_AUTH_ALG_SHARED_KEY) {
6496		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6497		ieee->open_wep = 0;
6498	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6499		sec.auth_mode = WLAN_AUTH_OPEN;
6500		ieee->open_wep = 1;
6501	} else if (value & IW_AUTH_ALG_LEAP) {
6502		sec.auth_mode = WLAN_AUTH_LEAP;
6503		ieee->open_wep = 1;
6504	} else
6505		return -EINVAL;
6506
6507	if (ieee->set_security)
6508		ieee->set_security(ieee->dev, &sec);
6509	else
6510		ret = -EOPNOTSUPP;
6511
6512	return ret;
6513}
6514
6515static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6516				int wpa_ie_len)
6517{
6518	/* make sure WPA is enabled */
6519	ipw_wpa_enable(priv, 1);
6520}
6521
6522static int ipw_set_rsn_capa(struct ipw_priv *priv,
6523			    char *capabilities, int length)
6524{
6525	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6526
6527	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6528				capabilities);
6529}
6530
6531/*
6532 * WE-18 support
6533 */
6534
6535/* SIOCSIWGENIE */
6536static int ipw_wx_set_genie(struct net_device *dev,
6537			    struct iw_request_info *info,
6538			    union iwreq_data *wrqu, char *extra)
6539{
6540	struct ipw_priv *priv = libipw_priv(dev);
6541	struct libipw_device *ieee = priv->ieee;
6542	u8 *buf;
6543	int err = 0;
6544
6545	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6546	    (wrqu->data.length && extra == NULL))
6547		return -EINVAL;
6548
6549	if (wrqu->data.length) {
6550		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6551		if (buf == NULL) {
6552			err = -ENOMEM;
6553			goto out;
6554		}
6555
6556		kfree(ieee->wpa_ie);
6557		ieee->wpa_ie = buf;
6558		ieee->wpa_ie_len = wrqu->data.length;
6559	} else {
6560		kfree(ieee->wpa_ie);
6561		ieee->wpa_ie = NULL;
6562		ieee->wpa_ie_len = 0;
6563	}
6564
6565	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6566      out:
6567	return err;
6568}
6569
6570/* SIOCGIWGENIE */
6571static int ipw_wx_get_genie(struct net_device *dev,
6572			    struct iw_request_info *info,
6573			    union iwreq_data *wrqu, char *extra)
6574{
6575	struct ipw_priv *priv = libipw_priv(dev);
6576	struct libipw_device *ieee = priv->ieee;
6577	int err = 0;
6578
6579	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6580		wrqu->data.length = 0;
6581		goto out;
6582	}
6583
6584	if (wrqu->data.length < ieee->wpa_ie_len) {
6585		err = -E2BIG;
6586		goto out;
6587	}
6588
6589	wrqu->data.length = ieee->wpa_ie_len;
6590	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6591
6592      out:
6593	return err;
6594}
6595
6596static int wext_cipher2level(int cipher)
6597{
6598	switch (cipher) {
6599	case IW_AUTH_CIPHER_NONE:
6600		return SEC_LEVEL_0;
6601	case IW_AUTH_CIPHER_WEP40:
6602	case IW_AUTH_CIPHER_WEP104:
6603		return SEC_LEVEL_1;
6604	case IW_AUTH_CIPHER_TKIP:
6605		return SEC_LEVEL_2;
6606	case IW_AUTH_CIPHER_CCMP:
6607		return SEC_LEVEL_3;
6608	default:
6609		return -1;
6610	}
6611}
6612
6613/* SIOCSIWAUTH */
6614static int ipw_wx_set_auth(struct net_device *dev,
6615			   struct iw_request_info *info,
6616			   union iwreq_data *wrqu, char *extra)
6617{
6618	struct ipw_priv *priv = libipw_priv(dev);
6619	struct libipw_device *ieee = priv->ieee;
6620	struct iw_param *param = &wrqu->param;
6621	struct lib80211_crypt_data *crypt;
6622	unsigned long flags;
6623	int ret = 0;
6624
6625	switch (param->flags & IW_AUTH_INDEX) {
6626	case IW_AUTH_WPA_VERSION:
6627		break;
6628	case IW_AUTH_CIPHER_PAIRWISE:
6629		ipw_set_hw_decrypt_unicast(priv,
6630					   wext_cipher2level(param->value));
6631		break;
6632	case IW_AUTH_CIPHER_GROUP:
6633		ipw_set_hw_decrypt_multicast(priv,
6634					     wext_cipher2level(param->value));
6635		break;
6636	case IW_AUTH_KEY_MGMT:
6637		/*
6638		 * ipw2200 does not use these parameters
6639		 */
6640		break;
6641
6642	case IW_AUTH_TKIP_COUNTERMEASURES:
6643		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6644		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6645			break;
6646
6647		flags = crypt->ops->get_flags(crypt->priv);
6648
6649		if (param->value)
6650			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6651		else
6652			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6653
6654		crypt->ops->set_flags(flags, crypt->priv);
6655
6656		break;
6657
6658	case IW_AUTH_DROP_UNENCRYPTED:{
6659			/* HACK:
6660			 *
6661			 * wpa_supplicant calls set_wpa_enabled when the driver
6662			 * is loaded and unloaded, regardless of whether WPA is
6663			 * being used.  No other calls are made which can be used
6664			 * to determine if encryption will be used or not before
6665			 * association is expected.  If encryption is not being
6666			 * used, drop_unencrypted is set to false, else true -- we
6667			 * can use this to determine if the CAP_PRIVACY_ON bit should
6668			 * be set.
6669			 */
6670			struct libipw_security sec = {
6671				.flags = SEC_ENABLED,
6672				.enabled = param->value,
6673			};
6674			priv->ieee->drop_unencrypted = param->value;
6675			/* We only change SEC_LEVEL for open mode. Others
6676			 * are set by ipw_wpa_set_encryption.
6677			 */
6678			if (!param->value) {
6679				sec.flags |= SEC_LEVEL;
6680				sec.level = SEC_LEVEL_0;
6681			} else {
6682				sec.flags |= SEC_LEVEL;
6683				sec.level = SEC_LEVEL_1;
6684			}
6685			if (priv->ieee->set_security)
6686				priv->ieee->set_security(priv->ieee->dev, &sec);
6687			break;
6688		}
6689
6690	case IW_AUTH_80211_AUTH_ALG:
6691		ret = ipw_wpa_set_auth_algs(priv, param->value);
6692		break;
6693
6694	case IW_AUTH_WPA_ENABLED:
6695		ret = ipw_wpa_enable(priv, param->value);
6696		ipw_disassociate(priv);
6697		break;
6698
6699	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6700		ieee->ieee802_1x = param->value;
6701		break;
6702
6703	case IW_AUTH_PRIVACY_INVOKED:
6704		ieee->privacy_invoked = param->value;
6705		break;
6706
6707	default:
6708		return -EOPNOTSUPP;
6709	}
6710	return ret;
6711}
6712
6713/* SIOCGIWAUTH */
6714static int ipw_wx_get_auth(struct net_device *dev,
6715			   struct iw_request_info *info,
6716			   union iwreq_data *wrqu, char *extra)
6717{
6718	struct ipw_priv *priv = libipw_priv(dev);
6719	struct libipw_device *ieee = priv->ieee;
6720	struct lib80211_crypt_data *crypt;
6721	struct iw_param *param = &wrqu->param;
6722
6723	switch (param->flags & IW_AUTH_INDEX) {
6724	case IW_AUTH_WPA_VERSION:
6725	case IW_AUTH_CIPHER_PAIRWISE:
6726	case IW_AUTH_CIPHER_GROUP:
6727	case IW_AUTH_KEY_MGMT:
6728		/*
6729		 * wpa_supplicant will control these internally
6730		 */
6731		return -EOPNOTSUPP;
6732
6733	case IW_AUTH_TKIP_COUNTERMEASURES:
6734		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6735		if (!crypt || !crypt->ops->get_flags)
6736			break;
6737
6738		param->value = (crypt->ops->get_flags(crypt->priv) &
6739				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6740
6741		break;
6742
6743	case IW_AUTH_DROP_UNENCRYPTED:
6744		param->value = ieee->drop_unencrypted;
6745		break;
6746
6747	case IW_AUTH_80211_AUTH_ALG:
6748		param->value = ieee->sec.auth_mode;
6749		break;
6750
6751	case IW_AUTH_WPA_ENABLED:
6752		param->value = ieee->wpa_enabled;
6753		break;
6754
6755	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6756		param->value = ieee->ieee802_1x;
6757		break;
6758
6759	case IW_AUTH_ROAMING_CONTROL:
6760	case IW_AUTH_PRIVACY_INVOKED:
6761		param->value = ieee->privacy_invoked;
6762		break;
6763
6764	default:
6765		return -EOPNOTSUPP;
6766	}
6767	return 0;
6768}
6769
6770/* SIOCSIWENCODEEXT */
6771static int ipw_wx_set_encodeext(struct net_device *dev,
6772				struct iw_request_info *info,
6773				union iwreq_data *wrqu, char *extra)
6774{
6775	struct ipw_priv *priv = libipw_priv(dev);
6776	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6777
6778	if (hwcrypto) {
6779		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6780			/* IPW HW can't build TKIP MIC,
6781			   host decryption still needed */
6782			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6783				priv->ieee->host_mc_decrypt = 1;
6784			else {
6785				priv->ieee->host_encrypt = 0;
6786				priv->ieee->host_encrypt_msdu = 1;
6787				priv->ieee->host_decrypt = 1;
6788			}
6789		} else {
6790			priv->ieee->host_encrypt = 0;
6791			priv->ieee->host_encrypt_msdu = 0;
6792			priv->ieee->host_decrypt = 0;
6793			priv->ieee->host_mc_decrypt = 0;
6794		}
6795	}
6796
6797	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6798}
6799
6800/* SIOCGIWENCODEEXT */
6801static int ipw_wx_get_encodeext(struct net_device *dev,
6802				struct iw_request_info *info,
6803				union iwreq_data *wrqu, char *extra)
6804{
6805	struct ipw_priv *priv = libipw_priv(dev);
6806	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6807}
6808
6809/* SIOCSIWMLME */
6810static int ipw_wx_set_mlme(struct net_device *dev,
6811			   struct iw_request_info *info,
6812			   union iwreq_data *wrqu, char *extra)
6813{
6814	struct ipw_priv *priv = libipw_priv(dev);
6815	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6816	__le16 reason;
6817
6818	reason = cpu_to_le16(mlme->reason_code);
6819
6820	switch (mlme->cmd) {
6821	case IW_MLME_DEAUTH:
6822		/* silently ignore */
6823		break;
6824
6825	case IW_MLME_DISASSOC:
6826		ipw_disassociate(priv);
6827		break;
6828
6829	default:
6830		return -EOPNOTSUPP;
6831	}
6832	return 0;
6833}
6834
6835#ifdef CONFIG_IPW2200_QOS
6836
6837/* QoS */
6838/*
6839* get the modulation type of the current network or
6840* the card's current mode
6841*/
6842static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6843{
6844	u8 mode = 0;
6845
6846	if (priv->status & STATUS_ASSOCIATED) {
6847		unsigned long flags;
6848
6849		spin_lock_irqsave(&priv->ieee->lock, flags);
6850		mode = priv->assoc_network->mode;
6851		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6852	} else {
6853		mode = priv->ieee->mode;
6854	}
6855	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6856	return mode;
6857}
6858
6859/*
6860* Handle beacon and probe response management frames
6861*/
6862static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6863					 int active_network,
6864					 struct libipw_network *network)
6865{
6866	u32 size = sizeof(struct libipw_qos_parameters);
6867
6868	if (network->capability & WLAN_CAPABILITY_IBSS)
6869		network->qos_data.active = network->qos_data.supported;
6870
6871	if (network->flags & NETWORK_HAS_QOS_MASK) {
6872		if (active_network &&
6873		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6874			network->qos_data.active = network->qos_data.supported;
6875
6876		if ((network->qos_data.active == 1) && (active_network == 1) &&
6877		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6878		    (network->qos_data.old_param_count !=
6879		     network->qos_data.param_count)) {
6880			network->qos_data.old_param_count =
6881			    network->qos_data.param_count;
6882			schedule_work(&priv->qos_activate);
6883			IPW_DEBUG_QOS("QoS parameters change call "
6884				      "qos_activate\n");
6885		}
6886	} else {
6887		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6888			memcpy(&network->qos_data.parameters,
6889			       &def_parameters_CCK, size);
6890		else
6891			memcpy(&network->qos_data.parameters,
6892			       &def_parameters_OFDM, size);
6893
6894		if ((network->qos_data.active == 1) && (active_network == 1)) {
6895			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6896			schedule_work(&priv->qos_activate);
6897		}
6898
6899		network->qos_data.active = 0;
6900		network->qos_data.supported = 0;
6901	}
6902	if ((priv->status & STATUS_ASSOCIATED) &&
6903	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6904		if (!ether_addr_equal(network->bssid, priv->bssid))
6905			if (network->capability & WLAN_CAPABILITY_IBSS)
6906				if ((network->ssid_len ==
6907				     priv->assoc_network->ssid_len) &&
6908				    !memcmp(network->ssid,
6909					    priv->assoc_network->ssid,
6910					    network->ssid_len)) {
6911					schedule_work(&priv->merge_networks);
6912				}
6913	}
6914
6915	return 0;
6916}
6917
6918/*
6919* This function sets up the firmware to support QoS. It sends
6920* IPW_CMD_QOS_PARAMETERS (IPW_CMD_WME_INFO is sent by ipw_qos_set_info_element)
6921*/
6922static int ipw_qos_activate(struct ipw_priv *priv,
6923			    struct libipw_qos_data *qos_network_data)
6924{
6925	int err;
6926	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6927	struct libipw_qos_parameters *active_one = NULL;
6928	u32 size = sizeof(struct libipw_qos_parameters);
6929	u32 burst_duration;
6930	int i;
6931	u8 type;
6932
6933	type = ipw_qos_current_mode(priv);
6934
6935	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6936	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6937	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6938	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6939
6940	if (qos_network_data == NULL) {
6941		if (type == IEEE_B) {
6942			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6943			active_one = &def_parameters_CCK;
6944		} else
6945			active_one = &def_parameters_OFDM;
6946
6947		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6948		burst_duration = ipw_qos_get_burst_duration(priv);
6949		for (i = 0; i < QOS_QUEUE_NUM; i++)
6950			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6951			    cpu_to_le16(burst_duration);
6952	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6953		if (type == IEEE_B) {
6954			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6955				      type);
6956			if (priv->qos_data.qos_enable == 0)
6957				active_one = &def_parameters_CCK;
6958			else
6959				active_one = priv->qos_data.def_qos_parm_CCK;
6960		} else {
6961			if (priv->qos_data.qos_enable == 0)
6962				active_one = &def_parameters_OFDM;
6963			else
6964				active_one = priv->qos_data.def_qos_parm_OFDM;
6965		}
6966		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6967	} else {
6968		unsigned long flags;
6969		int active;
6970
6971		spin_lock_irqsave(&priv->ieee->lock, flags);
6972		active_one = &(qos_network_data->parameters);
6973		qos_network_data->old_param_count =
6974		    qos_network_data->param_count;
6975		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6976		active = qos_network_data->supported;
6977		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6978
6979		if (active == 0) {
6980			burst_duration = ipw_qos_get_burst_duration(priv);
6981			for (i = 0; i < QOS_QUEUE_NUM; i++)
6982				qos_parameters[QOS_PARAM_SET_ACTIVE].
6983				    tx_op_limit[i] = cpu_to_le16(burst_duration);
6984		}
6985	}
6986
6987	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6988	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6989	if (err)
6990		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6991
6992	return err;
6993}
6994
6995/*
6996* send IPW_CMD_WME_INFO to the firmware
6997*/
6998static int ipw_qos_set_info_element(struct ipw_priv *priv)
6999{
7000	int ret = 0;
7001	struct libipw_qos_information_element qos_info;
7002
7003	if (priv == NULL)
7004		return -1;
7005
7006	qos_info.elementID = QOS_ELEMENT_ID;
7007	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7008
7009	qos_info.version = QOS_VERSION_1;
7010	qos_info.ac_info = 0;
7011
7012	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7013	qos_info.qui_type = QOS_OUI_TYPE;
7014	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7015
7016	ret = ipw_send_qos_info_command(priv, &qos_info);
7017	if (ret != 0) {
7018		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7019	}
7020	return ret;
7021}
7022
7023/*
7024* Set up the QoS parameters for the association request
7025*/
7026static int ipw_qos_association(struct ipw_priv *priv,
7027			       struct libipw_network *network)
7028{
7029	int err = 0;
7030	struct libipw_qos_data *qos_data = NULL;
7031	struct libipw_qos_data ibss_data = {
7032		.supported = 1,
7033		.active = 1,
7034	};
7035
7036	switch (priv->ieee->iw_mode) {
7037	case IW_MODE_ADHOC:
7038		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7039
7040		qos_data = &ibss_data;
7041		break;
7042
7043	case IW_MODE_INFRA:
7044		qos_data = &network->qos_data;
7045		break;
7046
7047	default:
7048		BUG();
7049		break;
7050	}
7051
7052	err = ipw_qos_activate(priv, qos_data);
7053	if (err) {
7054		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7055		return err;
7056	}
7057
7058	if (priv->qos_data.qos_enable && qos_data->supported) {
7059		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7060		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7061		return ipw_qos_set_info_element(priv);
7062	}
7063
7064	return 0;
7065}
7066
7067/*
7068* Handle the association response: if the QoS settings we get from
7069* the network differ from the current association settings, adjust
7070* the QoS settings
7071*/
7072static int ipw_qos_association_resp(struct ipw_priv *priv,
7073				    struct libipw_network *network)
7074{
7075	int ret = 0;
7076	unsigned long flags;
7077	u32 size = sizeof(struct libipw_qos_parameters);
7078	int set_qos_param = 0;
7079
7080	if ((priv == NULL) || (network == NULL) ||
7081	    (priv->assoc_network == NULL))
7082		return ret;
7083
7084	if (!(priv->status & STATUS_ASSOCIATED))
7085		return ret;
7086
7087	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7088		return ret;
7089
7090	spin_lock_irqsave(&priv->ieee->lock, flags);
7091	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7092		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7093		       sizeof(struct libipw_qos_data));
7094		priv->assoc_network->qos_data.active = 1;
7095		if ((network->qos_data.old_param_count !=
7096		     network->qos_data.param_count)) {
7097			set_qos_param = 1;
7098			network->qos_data.old_param_count =
7099			    network->qos_data.param_count;
7100		}
7101
7102	} else {
7103		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7104			memcpy(&priv->assoc_network->qos_data.parameters,
7105			       &def_parameters_CCK, size);
7106		else
7107			memcpy(&priv->assoc_network->qos_data.parameters,
7108			       &def_parameters_OFDM, size);
7109		priv->assoc_network->qos_data.active = 0;
7110		priv->assoc_network->qos_data.supported = 0;
7111		set_qos_param = 1;
7112	}
7113
7114	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7115
7116	if (set_qos_param == 1)
7117		schedule_work(&priv->qos_activate);
7118
7119	return ret;
7120}
7121
7122static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7123{
7124	u32 ret = 0;
7125
7126	if (priv == NULL)
7127		return 0;
7128
7129	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7130		ret = priv->qos_data.burst_duration_CCK;
7131	else
7132		ret = priv->qos_data.burst_duration_OFDM;
7133
7134	return ret;
7135}
7136
7137/*
7138* Initialize the global QoS settings
7139*/
7140static void ipw_qos_init(struct ipw_priv *priv, int enable,
7141			 int burst_enable, u32 burst_duration_CCK,
7142			 u32 burst_duration_OFDM)
7143{
7144	priv->qos_data.qos_enable = enable;
7145
7146	if (priv->qos_data.qos_enable) {
7147		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7148		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7149		IPW_DEBUG_QOS("QoS is enabled\n");
7150	} else {
7151		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7152		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7153		IPW_DEBUG_QOS("QoS is not enabled\n");
7154	}
7155
7156	priv->qos_data.burst_enable = burst_enable;
7157
7158	if (burst_enable) {
7159		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7160		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7161	} else {
7162		priv->qos_data.burst_duration_CCK = 0;
7163		priv->qos_data.burst_duration_OFDM = 0;
7164	}
7165}
7166
7167/*
7168* map the packet priority to the right TX Queue
7169*/
7170static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7171{
7172	if (priority > 7 || !priv->qos_data.qos_enable)
7173		priority = 0;
7174
7175	return from_priority_to_tx_queue[priority] - 1;
7176}
7177
7178static int ipw_is_qos_active(struct net_device *dev,
7179			     struct sk_buff *skb)
7180{
7181	struct ipw_priv *priv = libipw_priv(dev);
7182	struct libipw_qos_data *qos_data = NULL;
7183	int active, supported;
7184	u8 *daddr = skb->data + ETH_ALEN;
7185	int unicast = !is_multicast_ether_addr(daddr);
7186
7187	if (!(priv->status & STATUS_ASSOCIATED))
7188		return 0;
7189
7190	qos_data = &priv->assoc_network->qos_data;
7191
7192	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7193		if (unicast == 0)
7194			qos_data->active = 0;
7195		else
7196			qos_data->active = qos_data->supported;
7197	}
7198	active = qos_data->active;
7199	supported = qos_data->supported;
7200	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7201		      "unicast %d\n",
7202		      priv->qos_data.qos_enable, active, supported, unicast);
7203	if (active && priv->qos_data.qos_enable)
7204		return 1;
7205
7206	return 0;
7207
7208}
7209/*
7210* add the QoS parameters to the TX command
7211*/
7212static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7213					u16 priority,
7214					struct tfd_data *tfd)
7215{
7216	int tx_queue_id = 0;
7217
7218
7219	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7220	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7221
7222	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7223		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7224		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7225	}
7226	return 0;
7227}
7228
7229/*
7230* background work handler to run the QoS activate functionality
7231*/
7232static void ipw_bg_qos_activate(struct work_struct *work)
7233{
7234	struct ipw_priv *priv =
7235		container_of(work, struct ipw_priv, qos_activate);
7236
7237	mutex_lock(&priv->mutex);
7238
7239	if (priv->status & STATUS_ASSOCIATED)
7240		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7241
7242	mutex_unlock(&priv->mutex);
7243}
7244
7245static int ipw_handle_probe_response(struct net_device *dev,
7246				     struct libipw_probe_response *resp,
7247				     struct libipw_network *network)
7248{
7249	struct ipw_priv *priv = libipw_priv(dev);
7250	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7251			      (network == priv->assoc_network));
7252
7253	ipw_qos_handle_probe_response(priv, active_network, network);
7254
7255	return 0;
7256}
7257
7258static int ipw_handle_beacon(struct net_device *dev,
7259			     struct libipw_beacon *resp,
7260			     struct libipw_network *network)
7261{
7262	struct ipw_priv *priv = libipw_priv(dev);
7263	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7264			      (network == priv->assoc_network));
7265
7266	ipw_qos_handle_probe_response(priv, active_network, network);
7267
7268	return 0;
7269}
7270
7271static int ipw_handle_assoc_response(struct net_device *dev,
7272				     struct libipw_assoc_response *resp,
7273				     struct libipw_network *network)
7274{
7275	struct ipw_priv *priv = libipw_priv(dev);
7276	ipw_qos_association_resp(priv, network);
7277	return 0;
7278}
7279
7280static int ipw_send_qos_params_command(struct ipw_priv *priv,
7281				       struct libipw_qos_parameters *qos_param)
7282{
7283	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7284				sizeof(*qos_param) * 3, qos_param);
7285}
7286
7287static int ipw_send_qos_info_command(struct ipw_priv *priv,
7288				     struct libipw_qos_information_element *qos_param)
7289{
7290	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7291				qos_param);
7292}
7293
7294#endif				/* CONFIG_IPW2200_QOS */
7295
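/*
 * Build the association request (association, reassociation or IBSS
 * start) from the selected network and rate set, then send the SSID and
 * supported rates to the firmware.
 */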
7296static int ipw_associate_network(struct ipw_priv *priv,
7297				 struct libipw_network *network,
7298				 struct ipw_supported_rates *rates, int roaming)
7299{
7300	int err;
7301
7302	if (priv->config & CFG_FIXED_RATE)
7303		ipw_set_fixed_rate(priv, network->mode);
7304
7305	if (!(priv->config & CFG_STATIC_ESSID)) {
7306		priv->essid_len = min(network->ssid_len,
7307				      (u8) IW_ESSID_MAX_SIZE);
7308		memcpy(priv->essid, network->ssid, priv->essid_len);
7309	}
7310
7311	network->last_associate = jiffies;
7312
7313	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7314	priv->assoc_request.channel = network->channel;
7315	priv->assoc_request.auth_key = 0;
7316
7317	if ((priv->capability & CAP_PRIVACY_ON) &&
7318	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7319		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7320		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7321
7322		if (priv->ieee->sec.level == SEC_LEVEL_1)
7323			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7324
7325	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7326		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7327		priv->assoc_request.auth_type = AUTH_LEAP;
7328	else
7329		priv->assoc_request.auth_type = AUTH_OPEN;
7330
7331	if (priv->ieee->wpa_ie_len) {
7332		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7333		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7334				 priv->ieee->wpa_ie_len);
7335	}
7336
7337	/*
7338	 * It is valid for our ieee device to support multiple modes, but
7339	 * when it comes to associating to a given network we have to choose
7340	 * just one mode.
7341	 */
7342	if (network->mode & priv->ieee->mode & IEEE_A)
7343		priv->assoc_request.ieee_mode = IPW_A_MODE;
7344	else if (network->mode & priv->ieee->mode & IEEE_G)
7345		priv->assoc_request.ieee_mode = IPW_G_MODE;
7346	else if (network->mode & priv->ieee->mode & IEEE_B)
7347		priv->assoc_request.ieee_mode = IPW_B_MODE;
7348
7349	priv->assoc_request.capability = cpu_to_le16(network->capability);
7350	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7351	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7352		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7353	} else {
7354		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7355
7356		/* Clear the short preamble if we won't be supporting it */
7357		priv->assoc_request.capability &=
7358		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7359	}
7360
7361	/* Clear capability bits that aren't used in Ad Hoc */
7362	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7363		priv->assoc_request.capability &=
7364		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7365
7366	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7367			roaming ? "Rea" : "A",
7368			priv->essid_len, priv->essid,
7369			network->channel,
7370			ipw_modes[priv->assoc_request.ieee_mode],
7371			rates->num_rates,
7372			(priv->assoc_request.preamble_length ==
7373			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7374			network->capability &
7375			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7376			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7377			priv->capability & CAP_PRIVACY_ON ?
7378			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7379			 "(open)") : "",
7380			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7381			priv->capability & CAP_PRIVACY_ON ?
7382			'1' + priv->ieee->sec.active_key : '.',
7383			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7384
7385	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7386	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7387	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7388		priv->assoc_request.assoc_type = HC_IBSS_START;
7389		priv->assoc_request.assoc_tsf_msw = 0;
7390		priv->assoc_request.assoc_tsf_lsw = 0;
7391	} else {
7392		if (unlikely(roaming))
7393			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7394		else
7395			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7396		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7397		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7398	}
7399
7400	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7401
7402	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7403		eth_broadcast_addr(priv->assoc_request.dest);
7404		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7405	} else {
7406		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7407		priv->assoc_request.atim_window = 0;
7408	}
7409
7410	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7411
7412	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7413	if (err) {
7414		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7415		return err;
7416	}
7417
7418	rates->ieee_mode = priv->assoc_request.ieee_mode;
7419	rates->purpose = IPW_RATE_CONNECT;
7420	ipw_send_supported_rates(priv, rates);
7421
7422	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7423		priv->sys_config.dot11g_auto_detection = 1;
7424	else
7425		priv->sys_config.dot11g_auto_detection = 0;
7426
7427	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7428		priv->sys_config.answer_broadcast_ssid_probe = 1;
7429	else
7430		priv->sys_config.answer_broadcast_ssid_probe = 0;
7431
7432	err = ipw_send_system_config(priv);
7433	if (err) {
7434		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7435		return err;
7436	}
7437
7438	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7439	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7440	if (err) {
		IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
7442		return err;
7443	}
7444
	/*
	 * If preemption is enabled, it is possible for the association
	 * to complete before we return from ipw_send_associate.  Therefore
	 * we have to be sure to update our private data first.
	 */
7450	priv->channel = network->channel;
7451	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7452	priv->status |= STATUS_ASSOCIATING;
7453	priv->status &= ~STATUS_SECURITY_UPDATED;
7454
7455	priv->assoc_network = network;
7456
7457#ifdef CONFIG_IPW2200_QOS
7458	ipw_qos_association(priv, network);
7459#endif
7460
7461	err = ipw_send_associate(priv, &priv->assoc_request);
7462	if (err) {
7463		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7464		return err;
7465	}
7466
7467	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7468		  priv->essid_len, priv->essid, priv->bssid);
7469
7470	return 0;
7471}
7472
7473static void ipw_roam(void *data)
7474{
7475	struct ipw_priv *priv = data;
7476	struct libipw_network *network = NULL;
7477	struct ipw_network_match match = {
7478		.network = priv->assoc_network
7479	};
7480
7481	/* The roaming process is as follows:
7482	 *
7483	 * 1.  Missed beacon threshold triggers the roaming process by
7484	 *     setting the status ROAM bit and requesting a scan.
7485	 * 2.  When the scan completes, it schedules the ROAM work
7486	 * 3.  The ROAM work looks at all of the known networks for one that
7487	 *     is a better network than the currently associated.  If none
7488	 *     found, the ROAM process is over (ROAM bit cleared)
7489	 * 4.  If a better network is found, a disassociation request is
7490	 *     sent.
7491	 * 5.  When the disassociation completes, the roam work is again
7492	 *     scheduled.  The second time through, the driver is no longer
7493	 *     associated, and the newly selected network is sent an
7494	 *     association request.
	 * 6.  At this point, the roaming process is complete and the ROAM
7496	 *     status bit is cleared.
7497	 */
7498
7499	/* If we are no longer associated, and the roaming bit is no longer
7500	 * set, then we are not actively roaming, so just return */
7501	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7502		return;
7503
7504	if (priv->status & STATUS_ASSOCIATED) {
7505		/* First pass through ROAM process -- look for a better
7506		 * network */
7507		unsigned long flags;
7508		u8 rssi = priv->assoc_network->stats.rssi;
7509		priv->assoc_network->stats.rssi = -128;
7510		spin_lock_irqsave(&priv->ieee->lock, flags);
7511		list_for_each_entry(network, &priv->ieee->network_list, list) {
7512			if (network != priv->assoc_network)
7513				ipw_best_network(priv, &match, network, 1);
7514		}
7515		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7516		priv->assoc_network->stats.rssi = rssi;
7517
7518		if (match.network == priv->assoc_network) {
7519			IPW_DEBUG_ASSOC("No better APs in this network to "
7520					"roam to.\n");
7521			priv->status &= ~STATUS_ROAMING;
7522			ipw_debug_config(priv);
7523			return;
7524		}
7525
7526		ipw_send_disassociate(priv, 1);
7527		priv->assoc_network = match.network;
7528
7529		return;
7530	}
7531
7532	/* Second pass through ROAM process -- request association */
7533	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7534	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7535	priv->status &= ~STATUS_ROAMING;
7536}
7537
7538static void ipw_bg_roam(struct work_struct *work)
7539{
7540	struct ipw_priv *priv =
7541		container_of(work, struct ipw_priv, roam);
7542	mutex_lock(&priv->mutex);
7543	ipw_roam(priv);
7544	mutex_unlock(&priv->mutex);
7545}
7546
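/*
 * Pick the best matching network from the scan results (optionally creating
 * an Ad-Hoc network when none is found and CFG_ADHOC_CREATE is set) and
 * kick off an association attempt.  Returns 1 if an attempt was started,
 * 0 otherwise.
 */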
7547static int ipw_associate(void *data)
7548{
7549	struct ipw_priv *priv = data;
7550
7551	struct libipw_network *network = NULL;
7552	struct ipw_network_match match = {
7553		.network = NULL
7554	};
7555	struct ipw_supported_rates *rates;
7556	struct list_head *element;
7557	unsigned long flags;
7558
7559	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7560		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7561		return 0;
7562	}
7563
7564	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7565		IPW_DEBUG_ASSOC("Not attempting association (already in "
7566				"progress)\n");
7567		return 0;
7568	}
7569
7570	if (priv->status & STATUS_DISASSOCIATING) {
7571		IPW_DEBUG_ASSOC("Not attempting association (in "
				"disassociating)\n");
7573		schedule_work(&priv->associate);
7574		return 0;
7575	}
7576
7577	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7578		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7579				"initialized)\n");
7580		return 0;
7581	}
7582
7583	if (!(priv->config & CFG_ASSOCIATE) &&
7584	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7585		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7586		return 0;
7587	}
7588
7589	/* Protect our use of the network_list */
7590	spin_lock_irqsave(&priv->ieee->lock, flags);
7591	list_for_each_entry(network, &priv->ieee->network_list, list)
7592	    ipw_best_network(priv, &match, network, 0);
7593
7594	network = match.network;
7595	rates = &match.rates;
7596
7597	if (network == NULL &&
7598	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7599	    priv->config & CFG_ADHOC_CREATE &&
7600	    priv->config & CFG_STATIC_ESSID &&
7601	    priv->config & CFG_STATIC_CHANNEL) {
7602		/* Use oldest network if the free list is empty */
7603		if (list_empty(&priv->ieee->network_free_list)) {
7604			struct libipw_network *oldest = NULL;
7605			struct libipw_network *target;
7606
7607			list_for_each_entry(target, &priv->ieee->network_list, list) {
7608				if ((oldest == NULL) ||
7609				    (target->last_scanned < oldest->last_scanned))
7610					oldest = target;
7611			}
7612
7613			/* If there are no more slots, expire the oldest */
7614			list_del(&oldest->list);
7615			target = oldest;
7616			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7617					target->ssid_len, target->ssid,
7618					target->bssid);
7619			list_add_tail(&target->list,
7620				      &priv->ieee->network_free_list);
7621		}
7622
7623		element = priv->ieee->network_free_list.next;
7624		network = list_entry(element, struct libipw_network, list);
7625		ipw_adhoc_create(priv, network);
7626		rates = &priv->rates;
7627		list_del(element);
7628		list_add_tail(&network->list, &priv->ieee->network_list);
7629	}
7630	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7631
7632	/* If we reached the end of the list, then we don't have any valid
7633	 * matching APs */
7634	if (!network) {
7635		ipw_debug_config(priv);
7636
7637		if (!(priv->status & STATUS_SCANNING)) {
7638			if (!(priv->config & CFG_SPEED_SCAN))
7639				schedule_delayed_work(&priv->request_scan,
7640						      SCAN_INTERVAL);
7641			else
7642				schedule_delayed_work(&priv->request_scan, 0);
7643		}
7644
7645		return 0;
7646	}
7647
7648	ipw_associate_network(priv, network, rates, 0);
7649
7650	return 1;
7651}
7652
7653static void ipw_bg_associate(struct work_struct *work)
7654{
7655	struct ipw_priv *priv =
7656		container_of(work, struct ipw_priv, associate);
7657	mutex_lock(&priv->mutex);
7658	ipw_associate(priv);
7659	mutex_unlock(&priv->mutex);
7660}
7661
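/*
 * When the firmware has decrypted a frame for us it leaves the PROTECTED
 * bit set and the security header/trailer (WEP IV/ICV or CCMP header/MIC)
 * in place.  Strip those and clear the bit so the frame looks like a
 * plaintext frame to the libipw receive path.
 */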
7662static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7663				      struct sk_buff *skb)
7664{
7665	struct ieee80211_hdr *hdr;
7666	u16 fc;
7667
7668	hdr = (struct ieee80211_hdr *)skb->data;
7669	fc = le16_to_cpu(hdr->frame_control);
7670	if (!(fc & IEEE80211_FCTL_PROTECTED))
7671		return;
7672
7673	fc &= ~IEEE80211_FCTL_PROTECTED;
7674	hdr->frame_control = cpu_to_le16(fc);
7675	switch (priv->ieee->sec.level) {
7676	case SEC_LEVEL_3:
7677		/* Remove CCMP HDR */
7678		memmove(skb->data + LIBIPW_3ADDR_LEN,
7679			skb->data + LIBIPW_3ADDR_LEN + 8,
7680			skb->len - LIBIPW_3ADDR_LEN - 8);
7681		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7682		break;
7683	case SEC_LEVEL_2:
7684		break;
7685	case SEC_LEVEL_1:
7686		/* Remove IV */
7687		memmove(skb->data + LIBIPW_3ADDR_LEN,
7688			skb->data + LIBIPW_3ADDR_LEN + 4,
7689			skb->len - LIBIPW_3ADDR_LEN - 4);
7690		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7691		break;
7692	case SEC_LEVEL_0:
7693		break;
7694	default:
7695		printk(KERN_ERR "Unknown security level %d\n",
7696		       priv->ieee->sec.level);
7697		break;
7698	}
7699}
7700
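/*
 * Hand a received 802.11 data frame to the libipw stack: trim the ipw Rx
 * packet header, undo any leftovers from hardware decryption and pass the
 * skb to libipw_rx(), which takes ownership of it on success.
 */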
7701static void ipw_handle_data_packet(struct ipw_priv *priv,
7702				   struct ipw_rx_mem_buffer *rxb,
7703				   struct libipw_rx_stats *stats)
7704{
7705	struct net_device *dev = priv->net_dev;
7706	struct libipw_hdr_4addr *hdr;
7707	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7708
7709	/* We received data from the HW, so stop the watchdog */
7710	dev->trans_start = jiffies;
7711
7712	/* We only process data packets if the
7713	 * interface is open */
7714	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7715		     skb_tailroom(rxb->skb))) {
7716		dev->stats.rx_errors++;
7717		priv->wstats.discard.misc++;
7718		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7719		return;
7720	} else if (unlikely(!netif_running(priv->net_dev))) {
7721		dev->stats.rx_dropped++;
7722		priv->wstats.discard.misc++;
7723		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7724		return;
7725	}
7726
7727	/* Advance skb->data to the start of the actual payload */
7728	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7729
7730	/* Set the size of the skb to the size of the frame */
7731	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7732
7733	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7734
7735	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7736	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7737	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7738	    (is_multicast_ether_addr(hdr->addr1) ?
7739	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7740		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7741
7742	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7743		dev->stats.rx_errors++;
7744	else {			/* libipw_rx succeeded, so it now owns the SKB */
7745		rxb->skb = NULL;
7746		__ipw_led_activity_on(priv);
7747	}
7748}
7749
7750#ifdef CONFIG_IPW2200_RADIOTAP
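/*
 * Monitor (rfmon) receive path: prepend a fixed-size radiotap header
 * (struct ipw_rt_hdr) built from the firmware's ipw_rx_frame fields and
 * deliver the frame to libipw_rx().  The it_present bits set below must
 * match the field order of struct ipw_rt_hdr exactly.
 */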
7751static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7752					   struct ipw_rx_mem_buffer *rxb,
7753					   struct libipw_rx_stats *stats)
7754{
7755	struct net_device *dev = priv->net_dev;
7756	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7757	struct ipw_rx_frame *frame = &pkt->u.frame;
7758
7759	/* initial pull of some data */
7760	u16 received_channel = frame->received_channel;
7761	u8 antennaAndPhy = frame->antennaAndPhy;
7762	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7763	u16 pktrate = frame->rate;
7764
	/* Magic struct that slots into the radiotap header -- no reason
	 * to build this manually element by element; we can write it much
	 * more efficiently than we can parse it.  ORDER MATTERS HERE */
7768	struct ipw_rt_hdr *ipw_rt;
7769
7770	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7771
7772	/* We received data from the HW, so stop the watchdog */
7773	dev->trans_start = jiffies;
7774
7775	/* We only process data packets if the
7776	 * interface is open */
7777	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7778		     skb_tailroom(rxb->skb))) {
7779		dev->stats.rx_errors++;
7780		priv->wstats.discard.misc++;
7781		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7782		return;
7783	} else if (unlikely(!netif_running(priv->net_dev))) {
7784		dev->stats.rx_dropped++;
7785		priv->wstats.discard.misc++;
7786		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7787		return;
7788	}
7789
7790	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7791	 * that now */
7792	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7793		/* FIXME: Should alloc bigger skb instead */
7794		dev->stats.rx_dropped++;
7795		priv->wstats.discard.misc++;
7796		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7797		return;
7798	}
7799
7800	/* copy the frame itself */
7801	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7802		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7803
7804	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7805
7806	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7807	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7808	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7809
7810	/* Big bitfield of all the fields we provide in radiotap */
7811	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7812	     (1 << IEEE80211_RADIOTAP_TSFT) |
7813	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7814	     (1 << IEEE80211_RADIOTAP_RATE) |
7815	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7816	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7817	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7818	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7819
7820	/* Zero the flags, we'll add to them as we go */
7821	ipw_rt->rt_flags = 0;
7822	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7823			       frame->parent_tsf[2] << 16 |
7824			       frame->parent_tsf[1] << 8  |
7825			       frame->parent_tsf[0]);
7826
7827	/* Convert signal to DBM */
7828	ipw_rt->rt_dbmsignal = antsignal;
7829	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7830
7831	/* Convert the channel data and set the flags */
7832	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7833	if (received_channel > 14) {	/* 802.11a */
7834		ipw_rt->rt_chbitmask =
7835		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7836	} else if (antennaAndPhy & 32) {	/* 802.11b */
7837		ipw_rt->rt_chbitmask =
7838		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7839	} else {		/* 802.11g */
7840		ipw_rt->rt_chbitmask =
7841		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7842	}
7843
	/* set the radiotap rate field, in units of 500 kb/s */
7845	switch (pktrate) {
7846	case IPW_TX_RATE_1MB:
7847		ipw_rt->rt_rate = 2;
7848		break;
7849	case IPW_TX_RATE_2MB:
7850		ipw_rt->rt_rate = 4;
7851		break;
7852	case IPW_TX_RATE_5MB:
7853		ipw_rt->rt_rate = 10;
7854		break;
7855	case IPW_TX_RATE_6MB:
7856		ipw_rt->rt_rate = 12;
7857		break;
7858	case IPW_TX_RATE_9MB:
7859		ipw_rt->rt_rate = 18;
7860		break;
7861	case IPW_TX_RATE_11MB:
7862		ipw_rt->rt_rate = 22;
7863		break;
7864	case IPW_TX_RATE_12MB:
7865		ipw_rt->rt_rate = 24;
7866		break;
7867	case IPW_TX_RATE_18MB:
7868		ipw_rt->rt_rate = 36;
7869		break;
7870	case IPW_TX_RATE_24MB:
7871		ipw_rt->rt_rate = 48;
7872		break;
7873	case IPW_TX_RATE_36MB:
7874		ipw_rt->rt_rate = 72;
7875		break;
7876	case IPW_TX_RATE_48MB:
7877		ipw_rt->rt_rate = 96;
7878		break;
7879	case IPW_TX_RATE_54MB:
7880		ipw_rt->rt_rate = 108;
7881		break;
7882	default:
7883		ipw_rt->rt_rate = 0;
7884		break;
7885	}
7886
7887	/* antenna number */
7888	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7889
7890	/* set the preamble flag if we have it */
7891	if ((antennaAndPhy & 64))
7892		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7893
7894	/* Set the size of the skb to the size of the frame */
7895	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7896
7897	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7898
7899	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7900		dev->stats.rx_errors++;
7901	else {			/* libipw_rx succeeded, so it now owns the SKB */
7902		rxb->skb = NULL;
7903		/* no LED during capture */
7904	}
7905}
7906#endif
7907
7908#ifdef CONFIG_IPW2200_PROMISCUOUS
#define libipw_is_probe_response(fc) \
   (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
    ((fc) & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)

#define libipw_is_management(fc) \
   (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)

#define libipw_is_control(fc) \
   (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)

#define libipw_is_data(fc) \
   (((fc) & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)

#define libipw_is_assoc_request(fc) \
   (((fc) & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)

#define libipw_is_reassoc_request(fc) \
   (((fc) & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7927
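/*
 * Promiscuous (rtap) receive path: duplicate the frame onto the rtap
 * interface with a radiotap header prepended.  The prom_priv->filter
 * bitmask selects which frame classes (management/control/data) are
 * delivered and whether only the 802.11 header is kept.
 */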
7928static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7929				      struct ipw_rx_mem_buffer *rxb,
7930				      struct libipw_rx_stats *stats)
7931{
7932	struct net_device *dev = priv->prom_net_dev;
7933	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7934	struct ipw_rx_frame *frame = &pkt->u.frame;
7935	struct ipw_rt_hdr *ipw_rt;
7936
7937	/* First cache any information we need before we overwrite
7938	 * the information provided in the skb from the hardware */
7939	struct ieee80211_hdr *hdr;
7940	u16 channel = frame->received_channel;
7941	u8 phy_flags = frame->antennaAndPhy;
7942	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7943	s8 noise = (s8) le16_to_cpu(frame->noise);
7944	u8 rate = frame->rate;
7945	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7946	struct sk_buff *skb;
7947	int hdr_only = 0;
7948	u16 filter = priv->prom_priv->filter;
7949
7950	/* If the filter is set to not include Rx frames then return */
7951	if (filter & IPW_PROM_NO_RX)
7952		return;
7953
7954	/* We received data from the HW, so stop the watchdog */
7955	dev->trans_start = jiffies;
7956
7957	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7958		dev->stats.rx_errors++;
7959		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7960		return;
7961	}
7962
7963	/* We only process data packets if the interface is open */
7964	if (unlikely(!netif_running(dev))) {
7965		dev->stats.rx_dropped++;
7966		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7967		return;
7968	}
7969
7970	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7971	 * that now */
7972	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7973		/* FIXME: Should alloc bigger skb instead */
7974		dev->stats.rx_dropped++;
7975		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7976		return;
7977	}
7978
7979	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7980	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7981		if (filter & IPW_PROM_NO_MGMT)
7982			return;
7983		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7984			hdr_only = 1;
7985	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7986		if (filter & IPW_PROM_NO_CTL)
7987			return;
7988		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7989			hdr_only = 1;
7990	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7991		if (filter & IPW_PROM_NO_DATA)
7992			return;
7993		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7994			hdr_only = 1;
7995	}
7996
7997	/* Copy the SKB since this is for the promiscuous side */
7998	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7999	if (skb == NULL) {
		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8001		return;
8002	}
8003
	/* the radiotap header goes at the front of the skb; the frame data
	 * is copied in right after it below */
8005	ipw_rt = (void *)skb->data;
8006
8007	if (hdr_only)
8008		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8009
8010	memcpy(ipw_rt->payload, hdr, len);
8011
8012	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8013	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8014	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
8015
8016	/* Set the size of the skb to the size of the frame */
8017	skb_put(skb, sizeof(*ipw_rt) + len);
8018
8019	/* Big bitfield of all the fields we provide in radiotap */
8020	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8021	     (1 << IEEE80211_RADIOTAP_TSFT) |
8022	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8023	     (1 << IEEE80211_RADIOTAP_RATE) |
8024	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8025	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8026	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8027	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8028
8029	/* Zero the flags, we'll add to them as we go */
8030	ipw_rt->rt_flags = 0;
8031	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8032			       frame->parent_tsf[2] << 16 |
8033			       frame->parent_tsf[1] << 8  |
8034			       frame->parent_tsf[0]);
8035
8036	/* Convert to DBM */
8037	ipw_rt->rt_dbmsignal = signal;
8038	ipw_rt->rt_dbmnoise = noise;
8039
8040	/* Convert the channel data and set the flags */
8041	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8042	if (channel > 14) {	/* 802.11a */
8043		ipw_rt->rt_chbitmask =
8044		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8045	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8046		ipw_rt->rt_chbitmask =
8047		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8048	} else {		/* 802.11g */
8049		ipw_rt->rt_chbitmask =
8050		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8051	}
8052
	/* set the radiotap rate field, in units of 500 kb/s */
8054	switch (rate) {
8055	case IPW_TX_RATE_1MB:
8056		ipw_rt->rt_rate = 2;
8057		break;
8058	case IPW_TX_RATE_2MB:
8059		ipw_rt->rt_rate = 4;
8060		break;
8061	case IPW_TX_RATE_5MB:
8062		ipw_rt->rt_rate = 10;
8063		break;
8064	case IPW_TX_RATE_6MB:
8065		ipw_rt->rt_rate = 12;
8066		break;
8067	case IPW_TX_RATE_9MB:
8068		ipw_rt->rt_rate = 18;
8069		break;
8070	case IPW_TX_RATE_11MB:
8071		ipw_rt->rt_rate = 22;
8072		break;
8073	case IPW_TX_RATE_12MB:
8074		ipw_rt->rt_rate = 24;
8075		break;
8076	case IPW_TX_RATE_18MB:
8077		ipw_rt->rt_rate = 36;
8078		break;
8079	case IPW_TX_RATE_24MB:
8080		ipw_rt->rt_rate = 48;
8081		break;
8082	case IPW_TX_RATE_36MB:
8083		ipw_rt->rt_rate = 72;
8084		break;
8085	case IPW_TX_RATE_48MB:
8086		ipw_rt->rt_rate = 96;
8087		break;
8088	case IPW_TX_RATE_54MB:
8089		ipw_rt->rt_rate = 108;
8090		break;
8091	default:
8092		ipw_rt->rt_rate = 0;
8093		break;
8094	}
8095
8096	/* antenna number */
8097	ipw_rt->rt_antenna = (phy_flags & 3);
8098
8099	/* set the preamble flag if we have it */
8100	if (phy_flags & (1 << 6))
8101		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8102
8103	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8104
8105	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8106		dev->stats.rx_errors++;
8107		dev_kfree_skb_any(skb);
8108	}
8109}
8110#endif
8111
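/*
 * Returns non-zero if the frame is destined for this station or for the
 * BSS/IBSS we are part of, and zero for our own echoed transmissions or
 * traffic belonging to other networks.
 */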
8112static int is_network_packet(struct ipw_priv *priv,
8113				    struct libipw_hdr_4addr *header)
8114{
8115	/* Filter incoming packets to determine if they are targeted toward
8116	 * this network, discarding packets coming from ourselves */
8117	switch (priv->ieee->iw_mode) {
8118	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8119		/* packets from our adapter are dropped (echo) */
8120		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8121			return 0;
8122
8123		/* {broad,multi}cast packets to our BSSID go through */
8124		if (is_multicast_ether_addr(header->addr1))
8125			return ether_addr_equal(header->addr3, priv->bssid);
8126
8127		/* packets to our adapter go through */
8128		return ether_addr_equal(header->addr1,
8129					priv->net_dev->dev_addr);
8130
8131	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8132		/* packets from our adapter are dropped (echo) */
8133		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8134			return 0;
8135
8136		/* {broad,multi}cast packets to our BSS go through */
8137		if (is_multicast_ether_addr(header->addr1))
8138			return ether_addr_equal(header->addr2, priv->bssid);
8139
8140		/* packets to our adapter go through */
8141		return ether_addr_equal(header->addr1,
8142					priv->net_dev->dev_addr);
8143	}
8144
8145	return 1;
8146}
8147
8148#define IPW_PACKET_RETRY_TIME HZ
8149
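/*
 * Duplicate detection based on the sequence control field.  In IBSS mode a
 * per-peer sequence/fragment record is kept in a small hash table indexed
 * by the last octet of the sender's MAC address; in infrastructure mode a
 * single record suffices since all frames arrive from the AP.
 */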
8150static  int is_duplicate_packet(struct ipw_priv *priv,
8151				      struct libipw_hdr_4addr *header)
8152{
8153	u16 sc = le16_to_cpu(header->seq_ctl);
8154	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8155	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8156	u16 *last_seq, *last_frag;
8157	unsigned long *last_time;
8158
8159	switch (priv->ieee->iw_mode) {
8160	case IW_MODE_ADHOC:
8161		{
8162			struct list_head *p;
8163			struct ipw_ibss_seq *entry = NULL;
8164			u8 *mac = header->addr2;
8165			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8166
8167			list_for_each(p, &priv->ibss_mac_hash[index]) {
8168				entry =
8169				    list_entry(p, struct ipw_ibss_seq, list);
8170				if (ether_addr_equal(entry->mac, mac))
8171					break;
8172			}
8173			if (p == &priv->ibss_mac_hash[index]) {
8174				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8175				if (!entry) {
8176					IPW_ERROR
8177					    ("Cannot malloc new mac entry\n");
8178					return 0;
8179				}
8180				memcpy(entry->mac, mac, ETH_ALEN);
8181				entry->seq_num = seq;
8182				entry->frag_num = frag;
8183				entry->packet_time = jiffies;
8184				list_add(&entry->list,
8185					 &priv->ibss_mac_hash[index]);
8186				return 0;
8187			}
8188			last_seq = &entry->seq_num;
8189			last_frag = &entry->frag_num;
8190			last_time = &entry->packet_time;
8191			break;
8192		}
8193	case IW_MODE_INFRA:
8194		last_seq = &priv->last_seq_num;
8195		last_frag = &priv->last_frag_num;
8196		last_time = &priv->last_packet_time;
8197		break;
8198	default:
8199		return 0;
8200	}
8201	if ((*last_seq == seq) &&
8202	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8203		if (*last_frag == frag)
8204			goto drop;
8205		if (*last_frag + 1 != frag)
8206			/* out-of-order fragment */
8207			goto drop;
8208	} else
8209		*last_seq = seq;
8210
8211	*last_frag = frag;
8212	*last_time = jiffies;
8213	return 0;
8214
8215      drop:
	/* This BUG_ON is commented out because the card has been observed
	 * receiving duplicate packets without the FCTL_RETRY bit set in
	 * IBSS mode with fragmentation enabled.
	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8220	return 1;
8221}
8222
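/*
 * Management frames are passed to libipw_rx_mgt().  In Ad-Hoc mode, probe
 * responses and beacons from our own IBSS add the sender to the firmware's
 * station table.  With CFG_NET_STATS set, the raw frame (prefixed with the
 * libipw_rx_stats) is also pushed up the stack as an ETH_P_80211_STATS
 * packet.
 */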
8223static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8224				   struct ipw_rx_mem_buffer *rxb,
8225				   struct libipw_rx_stats *stats)
8226{
8227	struct sk_buff *skb = rxb->skb;
8228	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8229	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8230	    (skb->data + IPW_RX_FRAME_SIZE);
8231
8232	libipw_rx_mgt(priv->ieee, header, stats);
8233
8234	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8235	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8236	      IEEE80211_STYPE_PROBE_RESP) ||
8237	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8238	      IEEE80211_STYPE_BEACON))) {
8239		if (ether_addr_equal(header->addr3, priv->bssid))
8240			ipw_add_station(priv, header->addr2);
8241	}
8242
8243	if (priv->config & CFG_NET_STATS) {
8244		IPW_DEBUG_HC("sending stat packet\n");
8245
8246		/* Set the size of the skb to the size of the full
8247		 * ipw header and 802.11 frame */
8248		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8249			IPW_RX_FRAME_SIZE);
8250
8251		/* Advance past the ipw packet header to the 802.11 frame */
8252		skb_pull(skb, IPW_RX_FRAME_SIZE);
8253
8254		/* Push the libipw_rx_stats before the 802.11 frame */
8255		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8256
8257		skb->dev = priv->ieee->dev;
8258
8259		/* Point raw at the libipw_stats */
8260		skb_reset_mac_header(skb);
8261
8262		skb->pkt_type = PACKET_OTHERHOST;
8263		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8264		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8265		netif_rx(skb);
8266		rxb->skb = NULL;
8267	}
8268}
8269
8270/*
8271 * Main entry function for receiving a packet with 80211 headers.  This
 * should be called whenever the firmware has notified us that there is a new
8273 * skb in the receive queue.
8274 */
8275static void ipw_rx(struct ipw_priv *priv)
8276{
8277	struct ipw_rx_mem_buffer *rxb;
8278	struct ipw_rx_packet *pkt;
8279	struct libipw_hdr_4addr *header;
8280	u32 r, w, i;
8281	u8 network_packet;
8282	u8 fill_rx = 0;
8283
8284	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8285	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8286	i = priv->rxq->read;
8287
	if (ipw_rx_queue_space(priv->rxq) > (RX_QUEUE_SIZE / 2))
8289		fill_rx = 1;
8290
8291	while (i != r) {
8292		rxb = priv->rxq->queue[i];
8293		if (unlikely(rxb == NULL)) {
8294			printk(KERN_CRIT "Queue not allocated!\n");
8295			break;
8296		}
8297		priv->rxq->queue[i] = NULL;
8298
8299		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8300					    IPW_RX_BUF_SIZE,
8301					    PCI_DMA_FROMDEVICE);
8302
8303		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8304		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8305			     pkt->header.message_type,
8306			     pkt->header.rx_seq_num, pkt->header.control_bits);
8307
8308		switch (pkt->header.message_type) {
8309		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8310				struct libipw_rx_stats stats = {
8311					.rssi = pkt->u.frame.rssi_dbm -
8312					    IPW_RSSI_TO_DBM,
8313					.signal =
8314					    pkt->u.frame.rssi_dbm -
8315					    IPW_RSSI_TO_DBM + 0x100,
8316					.noise =
8317					    le16_to_cpu(pkt->u.frame.noise),
8318					.rate = pkt->u.frame.rate,
8319					.mac_time = jiffies,
8320					.received_channel =
8321					    pkt->u.frame.received_channel,
8322					.freq =
8323					    (pkt->u.frame.
8324					     control & (1 << 0)) ?
8325					    LIBIPW_24GHZ_BAND :
8326					    LIBIPW_52GHZ_BAND,
8327					.len = le16_to_cpu(pkt->u.frame.length),
8328				};
8329
8330				if (stats.rssi != 0)
8331					stats.mask |= LIBIPW_STATMASK_RSSI;
8332				if (stats.signal != 0)
8333					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8334				if (stats.noise != 0)
8335					stats.mask |= LIBIPW_STATMASK_NOISE;
8336				if (stats.rate != 0)
8337					stats.mask |= LIBIPW_STATMASK_RATE;
8338
8339				priv->rx_packets++;
8340
#ifdef CONFIG_IPW2200_PROMISCUOUS
				if (priv->prom_net_dev &&
				    netif_running(priv->prom_net_dev))
					ipw_handle_promiscuous_rx(priv, rxb,
								  &stats);
#endif

#ifdef CONFIG_IPW2200_MONITOR
				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
#ifdef CONFIG_IPW2200_RADIOTAP
					ipw_handle_data_packet_monitor(priv,
								       rxb,
								       &stats);
#else
					ipw_handle_data_packet(priv, rxb,
							       &stats);
#endif
8357					break;
8358				}
8359#endif
8360
8361				header =
8362				    (struct libipw_hdr_4addr *)(rxb->skb->
8363								   data +
8364								   IPW_RX_FRAME_SIZE);
8365				/* TODO: Check Ad-Hoc dest/source and make sure
8366				 * that we are actually parsing these packets
8367				 * correctly -- we should probably use the
8368				 * frame control of the packet and disregard
8369				 * the current iw_mode */
8370
8371				network_packet =
8372				    is_network_packet(priv, header);
8373				if (network_packet && priv->assoc_network) {
8374					priv->assoc_network->stats.rssi =
8375					    stats.rssi;
8376					priv->exp_avg_rssi =
8377					    exponential_average(priv->exp_avg_rssi,
8378					    stats.rssi, DEPTH_RSSI);
8379				}
8380
8381				IPW_DEBUG_RX("Frame: len=%u\n",
8382					     le16_to_cpu(pkt->u.frame.length));
8383
8384				if (le16_to_cpu(pkt->u.frame.length) <
8385				    libipw_get_hdrlen(le16_to_cpu(
8386						    header->frame_ctl))) {
8387					IPW_DEBUG_DROP
8388					    ("Received packet is too small. "
8389					     "Dropping.\n");
8390					priv->net_dev->stats.rx_errors++;
8391					priv->wstats.discard.misc++;
8392					break;
8393				}
8394
8395				switch (WLAN_FC_GET_TYPE
8396					(le16_to_cpu(header->frame_ctl))) {
8397
8398				case IEEE80211_FTYPE_MGMT:
8399					ipw_handle_mgmt_packet(priv, rxb,
8400							       &stats);
8401					break;
8402
8403				case IEEE80211_FTYPE_CTL:
8404					break;
8405
8406				case IEEE80211_FTYPE_DATA:
8407					if (unlikely(!network_packet ||
8408						     is_duplicate_packet(priv,
8409									 header)))
8410					{
8411						IPW_DEBUG_DROP("Dropping: "
8412							       "%pM, "
8413							       "%pM, "
8414							       "%pM\n",
8415							       header->addr1,
8416							       header->addr2,
8417							       header->addr3);
8418						break;
8419					}
8420
8421					ipw_handle_data_packet(priv, rxb,
8422							       &stats);
8423
8424					break;
8425				}
8426				break;
8427			}
8428
8429		case RX_HOST_NOTIFICATION_TYPE:{
8430				IPW_DEBUG_RX
8431				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8432				     pkt->u.notification.subtype,
8433				     pkt->u.notification.flags,
8434				     le16_to_cpu(pkt->u.notification.size));
8435				ipw_rx_notification(priv, &pkt->u.notification);
8436				break;
8437			}
8438
8439		default:
8440			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8441				     pkt->header.message_type);
8442			break;
8443		}
8444
8445		/* For now we just don't re-use anything.  We can tweak this
8446		 * later to try and re-use notification packets and SKBs that
8447		 * fail to Rx correctly */
8448		if (rxb->skb != NULL) {
8449			dev_kfree_skb_any(rxb->skb);
8450			rxb->skb = NULL;
8451		}
8452
8453		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8454				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8455		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8456
8457		i = (i + 1) % RX_QUEUE_SIZE;
8458
		/* If there are a lot of unused frames, restock the Rx queue
8460		 * so the ucode won't assert */
8461		if (fill_rx) {
8462			priv->rxq->read = i;
8463			ipw_rx_queue_replenish(priv);
8464		}
8465	}
8466
	/* Record where we stopped so the next pass resumes here */
8468	priv->rxq->read = i;
8469	ipw_rx_queue_restock(priv);
8470}
8471
8472#define DEFAULT_RTS_THRESHOLD     2304U
8473#define MIN_RTS_THRESHOLD         1U
8474#define MAX_RTS_THRESHOLD         2304U
8475#define DEFAULT_BEACON_INTERVAL   100U
8476#define	DEFAULT_SHORT_RETRY_LIMIT 7U
8477#define	DEFAULT_LONG_RETRY_LIMIT  4U
8478
/**
 * ipw_sw_reset - reset the driver's software configuration state
 * @priv: pointer to the ipw private data
 * @option: options to control different reset behaviour
 * 	    0 = reset everything except the 'disable' module_param
 * 	    1 = reset everything and print out driver info (for probe only)
 * 	    2 = reset everything
 */
8486static int ipw_sw_reset(struct ipw_priv *priv, int option)
8487{
8488	int band, modulation;
8489	int old_mode = priv->ieee->iw_mode;
8490
8491	/* Initialize module parameter values here */
8492	priv->config = 0;
8493
8494	/* We default to disabling the LED code as right now it causes
8495	 * too many systems to lock up... */
8496	if (!led_support)
8497		priv->config |= CFG_NO_LED;
8498
8499	if (associate)
8500		priv->config |= CFG_ASSOCIATE;
8501	else
8502		IPW_DEBUG_INFO("Auto associate disabled.\n");
8503
8504	if (auto_create)
8505		priv->config |= CFG_ADHOC_CREATE;
8506	else
8507		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8508
8509	priv->config &= ~CFG_STATIC_ESSID;
8510	priv->essid_len = 0;
8511	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8512
8513	if (disable && option) {
8514		priv->status |= STATUS_RF_KILL_SW;
8515		IPW_DEBUG_INFO("Radio disabled.\n");
8516	}
8517
8518	if (default_channel != 0) {
8519		priv->config |= CFG_STATIC_CHANNEL;
8520		priv->channel = default_channel;
8521		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8522		/* TODO: Validate that provided channel is in range */
8523	}
8524#ifdef CONFIG_IPW2200_QOS
8525	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8526		     burst_duration_CCK, burst_duration_OFDM);
8527#endif				/* CONFIG_IPW2200_QOS */
8528
8529	switch (network_mode) {
8530	case 1:
8531		priv->ieee->iw_mode = IW_MODE_ADHOC;
8532		priv->net_dev->type = ARPHRD_ETHER;
8533
8534		break;
8535#ifdef CONFIG_IPW2200_MONITOR
8536	case 2:
8537		priv->ieee->iw_mode = IW_MODE_MONITOR;
8538#ifdef CONFIG_IPW2200_RADIOTAP
8539		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8540#else
8541		priv->net_dev->type = ARPHRD_IEEE80211;
8542#endif
8543		break;
8544#endif
8545	default:
8546	case 0:
8547		priv->net_dev->type = ARPHRD_ETHER;
8548		priv->ieee->iw_mode = IW_MODE_INFRA;
8549		break;
8550	}
8551
8552	if (hwcrypto) {
8553		priv->ieee->host_encrypt = 0;
8554		priv->ieee->host_encrypt_msdu = 0;
8555		priv->ieee->host_decrypt = 0;
8556		priv->ieee->host_mc_decrypt = 0;
8557	}
8558	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8559
	/* The IPW2200/2915 is able to do hardware fragmentation. */
8561	priv->ieee->host_open_frag = 0;
8562
8563	if ((priv->pci_dev->device == 0x4223) ||
8564	    (priv->pci_dev->device == 0x4224)) {
8565		if (option == 1)
8566			printk(KERN_INFO DRV_NAME
8567			       ": Detected Intel PRO/Wireless 2915ABG Network "
8568			       "Connection\n");
8569		priv->ieee->abg_true = 1;
8570		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8571		modulation = LIBIPW_OFDM_MODULATION |
8572		    LIBIPW_CCK_MODULATION;
8573		priv->adapter = IPW_2915ABG;
8574		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8575	} else {
8576		if (option == 1)
8577			printk(KERN_INFO DRV_NAME
8578			       ": Detected Intel PRO/Wireless 2200BG Network "
8579			       "Connection\n");
8580
8581		priv->ieee->abg_true = 0;
8582		band = LIBIPW_24GHZ_BAND;
8583		modulation = LIBIPW_OFDM_MODULATION |
8584		    LIBIPW_CCK_MODULATION;
8585		priv->adapter = IPW_2200BG;
8586		priv->ieee->mode = IEEE_G | IEEE_B;
8587	}
8588
8589	priv->ieee->freq_band = band;
8590	priv->ieee->modulation = modulation;
8591
8592	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8593
8594	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8595	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8596
8597	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8598	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8599	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8600
8601	/* If power management is turned on, default to AC mode */
8602	priv->power_mode = IPW_POWER_AC;
8603	priv->tx_power = IPW_TX_POWER_DEFAULT;
8604
8605	return old_mode == priv->ieee->iw_mode;
8606}
8607
/*
 * The following functions implement the Wireless Extension handlers.  They
 * do not manipulate the hardware directly and rely on the functions defined
 * in ipw_main to provide the HW interaction.
 *
 * The exception to this is the use of the ipw_get_ordinal() function,
 * which polls the hardware rather than making unnecessary calls.
 */
8617
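/*
 * Set (or, with channel == 0, clear) a statically configured channel and
 * force a [re]association so the new setting takes effect.  In monitor
 * mode no association exists, so only an in-progress scan is aborted.
 */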
8618static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8619{
8620	if (channel == 0) {
8621		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8622		priv->config &= ~CFG_STATIC_CHANNEL;
8623		IPW_DEBUG_ASSOC("Attempting to associate with new "
8624				"parameters.\n");
8625		ipw_associate(priv);
8626		return 0;
8627	}
8628
8629	priv->config |= CFG_STATIC_CHANNEL;
8630
8631	if (priv->channel == channel) {
8632		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8633			       channel);
8634		return 0;
8635	}
8636
8637	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8638	priv->channel = channel;
8639
8640#ifdef CONFIG_IPW2200_MONITOR
8641	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8642		int i;
8643		if (priv->status & STATUS_SCANNING) {
8644			IPW_DEBUG_SCAN("Scan abort triggered due to "
8645				       "channel change.\n");
8646			ipw_abort_scan(priv);
8647		}
8648
8649		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8650			udelay(10);
8651
8652		if (priv->status & STATUS_SCANNING)
8653			IPW_DEBUG_SCAN("Still scanning...\n");
8654		else
8655			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8656				       1000 - i);
8657
8658		return 0;
8659	}
8660#endif				/* CONFIG_IPW2200_MONITOR */
8661
8662	/* Network configuration changed -- force [re]association */
8663	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8664	if (!ipw_disassociate(priv))
8665		ipw_associate(priv);
8666
8667	return 0;
8668}
8669
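/*
 * SIOCSIWFREQ handler.  The request may carry either a channel number
 * (e == 0) or a frequency in mantissa/exponent form (e == 1); frequencies
 * are converted to a channel via libipw_freq_to_channel().  Passive-only
 * channels are rejected for Ad-Hoc mode since we cannot beacon on them.
 */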
8670static int ipw_wx_set_freq(struct net_device *dev,
8671			   struct iw_request_info *info,
8672			   union iwreq_data *wrqu, char *extra)
8673{
8674	struct ipw_priv *priv = libipw_priv(dev);
8675	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8676	struct iw_freq *fwrq = &wrqu->freq;
8677	int ret = 0, i;
8678	u8 channel, flags;
8679	int band;
8680
8681	if (fwrq->m == 0) {
8682		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8683		mutex_lock(&priv->mutex);
8684		ret = ipw_set_channel(priv, 0);
8685		mutex_unlock(&priv->mutex);
8686		return ret;
8687	}
8688	/* if setting by freq convert to channel */
8689	if (fwrq->e == 1) {
8690		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8691		if (channel == 0)
8692			return -EINVAL;
8693	} else
8694		channel = fwrq->m;
8695
8696	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8697		return -EINVAL;
8698
8699	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8700		i = libipw_channel_to_index(priv->ieee, channel);
8701		if (i == -1)
8702			return -EINVAL;
8703
8704		flags = (band == LIBIPW_24GHZ_BAND) ?
8705		    geo->bg[i].flags : geo->a[i].flags;
8706		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
			IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive-only)\n");
8708			return -EINVAL;
8709		}
8710	}
8711
8712	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8713	mutex_lock(&priv->mutex);
8714	ret = ipw_set_channel(priv, channel);
8715	mutex_unlock(&priv->mutex);
8716	return ret;
8717}
8718
8719static int ipw_wx_get_freq(struct net_device *dev,
8720			   struct iw_request_info *info,
8721			   union iwreq_data *wrqu, char *extra)
8722{
8723	struct ipw_priv *priv = libipw_priv(dev);
8724
8725	wrqu->freq.e = 0;
8726
8727	/* If we are associated, trying to associate, or have a statically
8728	 * configured CHANNEL then return that; otherwise return ANY */
8729	mutex_lock(&priv->mutex);
8730	if (priv->config & CFG_STATIC_CHANNEL ||
8731	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8732		int i;
8733
8734		i = libipw_channel_to_index(priv->ieee, priv->channel);
8735		BUG_ON(i == -1);
8736		wrqu->freq.e = 1;
8737
8738		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8739		case LIBIPW_52GHZ_BAND:
8740			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8741			break;
8742
8743		case LIBIPW_24GHZ_BAND:
8744			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8745			break;
8746
8747		default:
8748			BUG();
8749		}
8750	} else
8751		wrqu->freq.m = 0;
8752
8753	mutex_unlock(&priv->mutex);
8754	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8755	return 0;
8756}
8757
8758static int ipw_wx_set_mode(struct net_device *dev,
8759			   struct iw_request_info *info,
8760			   union iwreq_data *wrqu, char *extra)
8761{
8762	struct ipw_priv *priv = libipw_priv(dev);
8763	int err = 0;
8764
8765	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8766
8767	switch (wrqu->mode) {
8768#ifdef CONFIG_IPW2200_MONITOR
8769	case IW_MODE_MONITOR:
8770#endif
8771	case IW_MODE_ADHOC:
8772	case IW_MODE_INFRA:
8773		break;
8774	case IW_MODE_AUTO:
8775		wrqu->mode = IW_MODE_INFRA;
8776		break;
8777	default:
8778		return -EINVAL;
8779	}
8780	if (wrqu->mode == priv->ieee->iw_mode)
8781		return 0;
8782
8783	mutex_lock(&priv->mutex);
8784
8785	ipw_sw_reset(priv, 0);
8786
8787#ifdef CONFIG_IPW2200_MONITOR
8788	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8789		priv->net_dev->type = ARPHRD_ETHER;
8790
8791	if (wrqu->mode == IW_MODE_MONITOR)
8792#ifdef CONFIG_IPW2200_RADIOTAP
8793		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8794#else
8795		priv->net_dev->type = ARPHRD_IEEE80211;
8796#endif
8797#endif				/* CONFIG_IPW2200_MONITOR */
8798
8799	/* Free the existing firmware and reset the fw_loaded
8800	 * flag so ipw_load() will bring in the new firmware */
8801	free_firmware();
8802
8803	priv->ieee->iw_mode = wrqu->mode;
8804
8805	schedule_work(&priv->adapter_restart);
8806	mutex_unlock(&priv->mutex);
8807	return err;
8808}
8809
8810static int ipw_wx_get_mode(struct net_device *dev,
8811			   struct iw_request_info *info,
8812			   union iwreq_data *wrqu, char *extra)
8813{
8814	struct ipw_priv *priv = libipw_priv(dev);
8815	mutex_lock(&priv->mutex);
8816	wrqu->mode = priv->ieee->iw_mode;
8817	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8818	mutex_unlock(&priv->mutex);
8819	return 0;
8820}
8821
/* Values are in microseconds */
8823static const s32 timeout_duration[] = {
8824	350000,
8825	250000,
8826	75000,
8827	37000,
8828	25000,
8829};
8830
8831static const s32 period_duration[] = {
8832	400000,
8833	700000,
8834	1000000,
8835	1000000,
8836	1000000
8837};
8838
8839static int ipw_wx_get_range(struct net_device *dev,
8840			    struct iw_request_info *info,
8841			    union iwreq_data *wrqu, char *extra)
8842{
8843	struct ipw_priv *priv = libipw_priv(dev);
8844	struct iw_range *range = (struct iw_range *)extra;
8845	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8846	int i = 0, j;
8847
8848	wrqu->data.length = sizeof(*range);
8849	memset(range, 0, sizeof(*range));
8850
	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8852	range->throughput = 27 * 1000 * 1000;
8853
8854	range->max_qual.qual = 100;
8855	/* TODO: Find real max RSSI and stick here */
8856	range->max_qual.level = 0;
8857	range->max_qual.noise = 0;
8858	range->max_qual.updated = 7;	/* Updated all three */
8859
8860	range->avg_qual.qual = 70;
8861	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8862	range->avg_qual.level = 0;	/* FIXME to real average level */
8863	range->avg_qual.noise = 0;
8864	range->avg_qual.updated = 7;	/* Updated all three */
8865	mutex_lock(&priv->mutex);
8866	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8867
8868	for (i = 0; i < range->num_bitrates; i++)
8869		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8870		    500000;
8871
8872	range->max_rts = DEFAULT_RTS_THRESHOLD;
8873	range->min_frag = MIN_FRAG_THRESHOLD;
8874	range->max_frag = MAX_FRAG_THRESHOLD;
8875
8876	range->encoding_size[0] = 5;
8877	range->encoding_size[1] = 13;
8878	range->num_encoding_sizes = 2;
8879	range->max_encoding_tokens = WEP_KEYS;
8880
8881	/* Set the Wireless Extension versions */
8882	range->we_version_compiled = WIRELESS_EXT;
8883	range->we_version_source = 18;
8884
8885	i = 0;
8886	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8887		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8888			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8889			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8890				continue;
8891
8892			range->freq[i].i = geo->bg[j].channel;
8893			range->freq[i].m = geo->bg[j].freq * 100000;
8894			range->freq[i].e = 1;
8895			i++;
8896		}
8897	}
8898
8899	if (priv->ieee->mode & IEEE_A) {
8900		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8901			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8902			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8903				continue;
8904
8905			range->freq[i].i = geo->a[j].channel;
8906			range->freq[i].m = geo->a[j].freq * 100000;
8907			range->freq[i].e = 1;
8908			i++;
8909		}
8910	}
8911
8912	range->num_channels = i;
8913	range->num_frequency = i;
8914
8915	mutex_unlock(&priv->mutex);
8916
8917	/* Event capability (kernel + driver) */
8918	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8919				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8920				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8921				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8922	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8923
8924	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8925		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8926
8927	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8928
8929	IPW_DEBUG_WX("GET Range\n");
8930	return 0;
8931}
8932
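/*
 * SIOCSIWAP handler.  A broadcast or all-zero address clears the static
 * BSSID requirement; anything else pins the association to that BSSID and
 * triggers a [re]association.
 */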
8933static int ipw_wx_set_wap(struct net_device *dev,
8934			  struct iw_request_info *info,
8935			  union iwreq_data *wrqu, char *extra)
8936{
8937	struct ipw_priv *priv = libipw_priv(dev);
8938
8939	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8940		return -EINVAL;
8941	mutex_lock(&priv->mutex);
8942	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8943	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8944		/* we disable mandatory BSSID association */
8945		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8946		priv->config &= ~CFG_STATIC_BSSID;
8947		IPW_DEBUG_ASSOC("Attempting to associate with new "
8948				"parameters.\n");
8949		ipw_associate(priv);
8950		mutex_unlock(&priv->mutex);
8951		return 0;
8952	}
8953
8954	priv->config |= CFG_STATIC_BSSID;
8955	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8956		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8957		mutex_unlock(&priv->mutex);
8958		return 0;
8959	}
8960
8961	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8962		     wrqu->ap_addr.sa_data);
8963
8964	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8965
8966	/* Network configuration changed -- force [re]association */
8967	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8968	if (!ipw_disassociate(priv))
8969		ipw_associate(priv);
8970
8971	mutex_unlock(&priv->mutex);
8972	return 0;
8973}
8974
8975static int ipw_wx_get_wap(struct net_device *dev,
8976			  struct iw_request_info *info,
8977			  union iwreq_data *wrqu, char *extra)
8978{
8979	struct ipw_priv *priv = libipw_priv(dev);
8980
8981	/* If we are associated, trying to associate, or have a statically
8982	 * configured BSSID then return that; otherwise return ANY */
8983	mutex_lock(&priv->mutex);
8984	if (priv->config & CFG_STATIC_BSSID ||
8985	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8986		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8987		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8988	} else
8989		eth_zero_addr(wrqu->ap_addr.sa_data);
8990
8991	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8992		     wrqu->ap_addr.sa_data);
8993	mutex_unlock(&priv->mutex);
8994	return 0;
8995}
8996
8997static int ipw_wx_set_essid(struct net_device *dev,
8998			    struct iw_request_info *info,
8999			    union iwreq_data *wrqu, char *extra)
9000{
9001	struct ipw_priv *priv = libipw_priv(dev);
	int length;

	mutex_lock(&priv->mutex);

	if (!wrqu->essid.flags) {
		IPW_DEBUG_WX("Setting ESSID to ANY\n");
		ipw_disassociate(priv);
		priv->config &= ~CFG_STATIC_ESSID;
		ipw_associate(priv);
		mutex_unlock(&priv->mutex);
		return 0;
	}
9015
9016	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9017
9018	priv->config |= CFG_STATIC_ESSID;
9019
9020	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9021	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9022		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9023		mutex_unlock(&priv->mutex);
9024		return 0;
9025	}
9026
9027	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
9028
9029	priv->essid_len = length;
9030	memcpy(priv->essid, extra, priv->essid_len);
9031
9032	/* Network configuration changed -- force [re]association */
9033	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9034	if (!ipw_disassociate(priv))
9035		ipw_associate(priv);
9036
9037	mutex_unlock(&priv->mutex);
9038	return 0;
9039}
9040
9041static int ipw_wx_get_essid(struct net_device *dev,
9042			    struct iw_request_info *info,
9043			    union iwreq_data *wrqu, char *extra)
9044{
9045	struct ipw_priv *priv = libipw_priv(dev);
9046
9047	/* If we are associated, trying to associate, or have a statically
9048	 * configured ESSID then return that; otherwise return ANY */
9049	mutex_lock(&priv->mutex);
9050	if (priv->config & CFG_STATIC_ESSID ||
9051	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9052		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9053			     priv->essid_len, priv->essid);
9054		memcpy(extra, priv->essid, priv->essid_len);
9055		wrqu->essid.length = priv->essid_len;
9056		wrqu->essid.flags = 1;	/* active */
9057	} else {
9058		IPW_DEBUG_WX("Getting essid: ANY\n");
9059		wrqu->essid.length = 0;
9060		wrqu->essid.flags = 0;	/* active */
9061	}
9062	mutex_unlock(&priv->mutex);
9063	return 0;
9064}
9065
9066static int ipw_wx_set_nick(struct net_device *dev,
9067			   struct iw_request_info *info,
9068			   union iwreq_data *wrqu, char *extra)
9069{
9070	struct ipw_priv *priv = libipw_priv(dev);
9071
9072	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9073	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9074		return -E2BIG;
9075	mutex_lock(&priv->mutex);
9076	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9077	memset(priv->nick, 0, sizeof(priv->nick));
9078	memcpy(priv->nick, extra, wrqu->data.length);
9079	IPW_DEBUG_TRACE("<<\n");
9080	mutex_unlock(&priv->mutex);
9081	return 0;
9082
9083}
9084
9085static int ipw_wx_get_nick(struct net_device *dev,
9086			   struct iw_request_info *info,
9087			   union iwreq_data *wrqu, char *extra)
9088{
9089	struct ipw_priv *priv = libipw_priv(dev);
9090	IPW_DEBUG_WX("Getting nick\n");
9091	mutex_lock(&priv->mutex);
9092	wrqu->data.length = strlen(priv->nick);
9093	memcpy(extra, priv->nick, wrqu->data.length);
9094	wrqu->data.flags = 1;	/* active */
9095	mutex_unlock(&priv->mutex);
9096	return 0;
9097}
9098
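/* The WE "sensitivity" value is mapped onto the roaming threshold; the
 * disassociate threshold is derived as three times that value.  A request
 * with sens.fixed == 0 restores both thresholds to their defaults. */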
9099static int ipw_wx_set_sens(struct net_device *dev,
9100			    struct iw_request_info *info,
9101			    union iwreq_data *wrqu, char *extra)
9102{
9103	struct ipw_priv *priv = libipw_priv(dev);
9104	int err = 0;
9105
9106	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
	IPW_DEBUG_WX("Setting disassociate threshold to %d\n",
		     3 * wrqu->sens.value);
9108	mutex_lock(&priv->mutex);
9109
	if (wrqu->sens.fixed == 0) {
9112		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9113		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9114		goto out;
9115	}
9116	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9117	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9118		err = -EINVAL;
9119		goto out;
9120	}
9121
9122	priv->roaming_threshold = wrqu->sens.value;
	priv->disassociate_threshold = 3 * wrqu->sens.value;
9124      out:
9125	mutex_unlock(&priv->mutex);
9126	return err;
9127}
9128
9129static int ipw_wx_get_sens(struct net_device *dev,
9130			    struct iw_request_info *info,
9131			    union iwreq_data *wrqu, char *extra)
9132{
9133	struct ipw_priv *priv = libipw_priv(dev);
9134	mutex_lock(&priv->mutex);
9135	wrqu->sens.fixed = 1;
9136	wrqu->sens.value = priv->roaming_threshold;
9137	mutex_unlock(&priv->mutex);
9138
	IPW_DEBUG_WX("GET roaming threshold -> %d\n", wrqu->sens.value);
9141
9142	return 0;
9143}
9144
9145static int ipw_wx_set_rate(struct net_device *dev,
9146			   struct iw_request_info *info,
9147			   union iwreq_data *wrqu, char *extra)
9148{
9149	/* TODO: We should use semaphores or locks for access to priv */
9150	struct ipw_priv *priv = libipw_priv(dev);
9151	u32 target_rate = wrqu->bitrate.value;
9152	u32 fixed, mask;
9153
	/* value = -1, fixed = 0 means auto only, so we should use all rates
	 * offered by the AP;
	 * value = X, fixed = 1 means only rate X;
	 * value = X, fixed = 0 means all rates lower than or equal to X. */
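	/* For example, value = 5500000 with fixed = 0 yields a mask of the
	 * 1, 2 and 5.5 Mbps CCK rates, while fixed = 1 yields 5.5 Mbps only. */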
9157
9158	if (target_rate == -1) {
9159		fixed = 0;
9160		mask = LIBIPW_DEFAULT_RATES_MASK;
9161		/* Now we should reassociate */
9162		goto apply;
9163	}
9164
9165	mask = 0;
9166	fixed = wrqu->bitrate.fixed;
9167
9168	if (target_rate == 1000000 || !fixed)
9169		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9170	if (target_rate == 1000000)
9171		goto apply;
9172
9173	if (target_rate == 2000000 || !fixed)
9174		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9175	if (target_rate == 2000000)
9176		goto apply;
9177
9178	if (target_rate == 5500000 || !fixed)
9179		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9180	if (target_rate == 5500000)
9181		goto apply;
9182
9183	if (target_rate == 6000000 || !fixed)
9184		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9185	if (target_rate == 6000000)
9186		goto apply;
9187
9188	if (target_rate == 9000000 || !fixed)
9189		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9190	if (target_rate == 9000000)
9191		goto apply;
9192
9193	if (target_rate == 11000000 || !fixed)
9194		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9195	if (target_rate == 11000000)
9196		goto apply;
9197
9198	if (target_rate == 12000000 || !fixed)
9199		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9200	if (target_rate == 12000000)
9201		goto apply;
9202
9203	if (target_rate == 18000000 || !fixed)
9204		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9205	if (target_rate == 18000000)
9206		goto apply;
9207
9208	if (target_rate == 24000000 || !fixed)
9209		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9210	if (target_rate == 24000000)
9211		goto apply;
9212
9213	if (target_rate == 36000000 || !fixed)
9214		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9215	if (target_rate == 36000000)
9216		goto apply;
9217
9218	if (target_rate == 48000000 || !fixed)
9219		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9220	if (target_rate == 48000000)
9221		goto apply;
9222
9223	if (target_rate == 54000000 || !fixed)
9224		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9225	if (target_rate == 54000000)
9226		goto apply;
9227
9228	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9229	return -EINVAL;
9230
9231      apply:
9232	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9233		     mask, fixed ? "fixed" : "sub-rates");
9234	mutex_lock(&priv->mutex);
9235	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9236		priv->config &= ~CFG_FIXED_RATE;
9237		ipw_set_fixed_rate(priv, priv->ieee->mode);
9238	} else
9239		priv->config |= CFG_FIXED_RATE;
9240
9241	if (priv->rates_mask == mask) {
9242		IPW_DEBUG_WX("Mask set to current mask.\n");
9243		mutex_unlock(&priv->mutex);
9244		return 0;
9245	}
9246
9247	priv->rates_mask = mask;
9248
9249	/* Network configuration changed -- force [re]association */
9250	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9251	if (!ipw_disassociate(priv))
9252		ipw_associate(priv);
9253
9254	mutex_unlock(&priv->mutex);
9255	return 0;
9256}
9257
9258static int ipw_wx_get_rate(struct net_device *dev,
9259			   struct iw_request_info *info,
9260			   union iwreq_data *wrqu, char *extra)
9261{
9262	struct ipw_priv *priv = libipw_priv(dev);
9263	mutex_lock(&priv->mutex);
9264	wrqu->bitrate.value = priv->last_rate;
9265	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9266	mutex_unlock(&priv->mutex);
9267	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9268	return 0;
9269}
9270
9271static int ipw_wx_set_rts(struct net_device *dev,
9272			  struct iw_request_info *info,
9273			  union iwreq_data *wrqu, char *extra)
9274{
9275	struct ipw_priv *priv = libipw_priv(dev);
9276	mutex_lock(&priv->mutex);
9277	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9278		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9279	else {
9280		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9281		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9282			mutex_unlock(&priv->mutex);
9283			return -EINVAL;
9284		}
9285		priv->rts_threshold = wrqu->rts.value;
9286	}
9287
9288	ipw_send_rts_threshold(priv, priv->rts_threshold);
9289	mutex_unlock(&priv->mutex);
9290	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9291	return 0;
9292}
9293
9294static int ipw_wx_get_rts(struct net_device *dev,
9295			  struct iw_request_info *info,
9296			  union iwreq_data *wrqu, char *extra)
9297{
9298	struct ipw_priv *priv = libipw_priv(dev);
9299	mutex_lock(&priv->mutex);
9300	wrqu->rts.value = priv->rts_threshold;
9301	wrqu->rts.fixed = 0;	/* no auto select */
9302	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9303	mutex_unlock(&priv->mutex);
9304	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9305	return 0;
9306}
9307
9308static int ipw_wx_set_txpow(struct net_device *dev,
9309			    struct iw_request_info *info,
9310			    union iwreq_data *wrqu, char *extra)
9311{
9312	struct ipw_priv *priv = libipw_priv(dev);
9313	int err = 0;
9314
9315	mutex_lock(&priv->mutex);
9316	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9317		err = -EINPROGRESS;
9318		goto out;
9319	}
9320
9321	if (!wrqu->power.fixed)
9322		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9323
9324	if (wrqu->power.flags != IW_TXPOW_DBM) {
9325		err = -EINVAL;
9326		goto out;
9327	}
9328
9329	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9330	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9331		err = -EINVAL;
9332		goto out;
9333	}
9334
9335	priv->tx_power = wrqu->power.value;
9336	err = ipw_set_tx_power(priv);
9337      out:
9338	mutex_unlock(&priv->mutex);
9339	return err;
9340}
9341
9342static int ipw_wx_get_txpow(struct net_device *dev,
9343			    struct iw_request_info *info,
9344			    union iwreq_data *wrqu, char *extra)
9345{
9346	struct ipw_priv *priv = libipw_priv(dev);
9347	mutex_lock(&priv->mutex);
9348	wrqu->power.value = priv->tx_power;
9349	wrqu->power.fixed = 1;
9350	wrqu->power.flags = IW_TXPOW_DBM;
9351	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9352	mutex_unlock(&priv->mutex);
9353
9354	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9355		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9356
9357	return 0;
9358}
9359
9360static int ipw_wx_set_frag(struct net_device *dev,
9361			   struct iw_request_info *info,
9362			   union iwreq_data *wrqu, char *extra)
9363{
9364	struct ipw_priv *priv = libipw_priv(dev);
9365	mutex_lock(&priv->mutex);
9366	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9367		priv->ieee->fts = DEFAULT_FTS;
9368	else {
9369		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9370		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9371			mutex_unlock(&priv->mutex);
9372			return -EINVAL;
9373		}
9374
9375		priv->ieee->fts = wrqu->frag.value & ~0x1;
9376	}
9377
	ipw_send_frag_threshold(priv, priv->ieee->fts);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
9381	return 0;
9382}
9383
9384static int ipw_wx_get_frag(struct net_device *dev,
9385			   struct iw_request_info *info,
9386			   union iwreq_data *wrqu, char *extra)
9387{
9388	struct ipw_priv *priv = libipw_priv(dev);
9389	mutex_lock(&priv->mutex);
9390	wrqu->frag.value = priv->ieee->fts;
9391	wrqu->frag.fixed = 0;	/* no auto select */
9392	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9393	mutex_unlock(&priv->mutex);
9394	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9395
9396	return 0;
9397}
9398
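/* Only retry limits are supported (retry lifetimes are rejected).  The
 * IW_RETRY_SHORT/IW_RETRY_LONG flags select which limit to change; with
 * neither flag set, both the short and the long limit are updated. */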
9399static int ipw_wx_set_retry(struct net_device *dev,
9400			    struct iw_request_info *info,
9401			    union iwreq_data *wrqu, char *extra)
9402{
9403	struct ipw_priv *priv = libipw_priv(dev);
9404
9405	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9406		return -EINVAL;
9407
9408	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9409		return 0;
9410
9411	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9412		return -EINVAL;
9413
9414	mutex_lock(&priv->mutex);
9415	if (wrqu->retry.flags & IW_RETRY_SHORT)
9416		priv->short_retry_limit = (u8) wrqu->retry.value;
9417	else if (wrqu->retry.flags & IW_RETRY_LONG)
9418		priv->long_retry_limit = (u8) wrqu->retry.value;
9419	else {
9420		priv->short_retry_limit = (u8) wrqu->retry.value;
9421		priv->long_retry_limit = (u8) wrqu->retry.value;
9422	}
9423
9424	ipw_send_retry_limit(priv, priv->short_retry_limit,
9425			     priv->long_retry_limit);
9426	mutex_unlock(&priv->mutex);
9427	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9428		     priv->short_retry_limit, priv->long_retry_limit);
9429	return 0;
9430}
9431
9432static int ipw_wx_get_retry(struct net_device *dev,
9433			    struct iw_request_info *info,
9434			    union iwreq_data *wrqu, char *extra)
9435{
9436	struct ipw_priv *priv = libipw_priv(dev);
9437
9438	mutex_lock(&priv->mutex);
9439	wrqu->retry.disabled = 0;
9440
9441	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9442		mutex_unlock(&priv->mutex);
9443		return -EINVAL;
9444	}
9445
9446	if (wrqu->retry.flags & IW_RETRY_LONG) {
9447		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9448		wrqu->retry.value = priv->long_retry_limit;
9449	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9450		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9451		wrqu->retry.value = priv->short_retry_limit;
9452	} else {
9453		wrqu->retry.flags = IW_RETRY_LIMIT;
9454		wrqu->retry.value = priv->short_retry_limit;
9455	}
9456	mutex_unlock(&priv->mutex);
9457
9458	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9459
9460	return 0;
9461}
9462
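/* A scan request selects one of three deferred works: a direct scan for a
 * specific ESSID (IW_SCAN_THIS_ESSID), a passive scan, or the normal
 * active broadcast scan; the selected work is then scheduled to run
 * immediately. */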
9463static int ipw_wx_set_scan(struct net_device *dev,
9464			   struct iw_request_info *info,
9465			   union iwreq_data *wrqu, char *extra)
9466{
9467	struct ipw_priv *priv = libipw_priv(dev);
9468	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9469	struct delayed_work *work = NULL;
9470
9471	mutex_lock(&priv->mutex);
9472
9473	priv->user_requested_scan = 1;
9474
9475	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9476		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9477			int len = min((int)req->essid_len,
9478			              (int)sizeof(priv->direct_scan_ssid));
9479			memcpy(priv->direct_scan_ssid, req->essid, len);
9480			priv->direct_scan_ssid_len = len;
9481			work = &priv->request_direct_scan;
9482		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9483			work = &priv->request_passive_scan;
9484		}
9485	} else {
9486		/* Normal active broadcast scan */
9487		work = &priv->request_scan;
9488	}
9489
9490	mutex_unlock(&priv->mutex);
9491
9492	IPW_DEBUG_WX("Start scan\n");
9493
9494	schedule_delayed_work(work, 0);
9495
9496	return 0;
9497}
9498
9499static int ipw_wx_get_scan(struct net_device *dev,
9500			   struct iw_request_info *info,
9501			   union iwreq_data *wrqu, char *extra)
9502{
9503	struct ipw_priv *priv = libipw_priv(dev);
9504	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9505}
9506
9507static int ipw_wx_set_encode(struct net_device *dev,
9508			     struct iw_request_info *info,
9509			     union iwreq_data *wrqu, char *key)
9510{
9511	struct ipw_priv *priv = libipw_priv(dev);
9512	int ret;
9513	u32 cap = priv->capability;
9514
9515	mutex_lock(&priv->mutex);
9516	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9517
9518	/* In IBSS mode, we need to notify the firmware to update
9519	 * the beacon info after we changed the capability. */
9520	if (cap != priv->capability &&
9521	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9522	    priv->status & STATUS_ASSOCIATED)
9523		ipw_disassociate(priv);
9524
9525	mutex_unlock(&priv->mutex);
9526	return ret;
9527}
9528
9529static int ipw_wx_get_encode(struct net_device *dev,
9530			     struct iw_request_info *info,
9531			     union iwreq_data *wrqu, char *key)
9532{
9533	struct ipw_priv *priv = libipw_priv(dev);
9534	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9535}
9536
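/* power.disabled switches the firmware to CAM mode (power saving off);
 * otherwise the requested mode is validated and, if no level has been
 * chosen yet (i.e. the level is still IPW_POWER_AC), the BATTERY level
 * is used. */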
9537static int ipw_wx_set_power(struct net_device *dev,
9538			    struct iw_request_info *info,
9539			    union iwreq_data *wrqu, char *extra)
9540{
9541	struct ipw_priv *priv = libipw_priv(dev);
9542	int err;
9543	mutex_lock(&priv->mutex);
9544	if (wrqu->power.disabled) {
9545		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9546		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9547		if (err) {
9548			IPW_DEBUG_WX("failed setting power mode.\n");
9549			mutex_unlock(&priv->mutex);
9550			return err;
9551		}
9552		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9553		mutex_unlock(&priv->mutex);
9554		return 0;
9555	}
9556
9557	switch (wrqu->power.flags & IW_POWER_MODE) {
	case IW_POWER_ON:	/* If no mode is specified */
	case IW_POWER_MODE:	/* If the full mode mask is set */
	case IW_POWER_ALL_R:	/* If all modes are explicitly requested */
9561		break;
9562	default:		/* Otherwise we don't support it */
9563		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9564			     wrqu->power.flags);
9565		mutex_unlock(&priv->mutex);
9566		return -EOPNOTSUPP;
9567	}
9568
9569	/* If the user hasn't specified a power management mode yet, default
9570	 * to BATTERY */
9571	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9572		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9573	else
9574		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9575
9576	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9577	if (err) {
9578		IPW_DEBUG_WX("failed setting power mode.\n");
9579		mutex_unlock(&priv->mutex);
9580		return err;
9581	}
9582
9583	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9584	mutex_unlock(&priv->mutex);
9585	return 0;
9586}
9587
9588static int ipw_wx_get_power(struct net_device *dev,
9589			    struct iw_request_info *info,
9590			    union iwreq_data *wrqu, char *extra)
9591{
9592	struct ipw_priv *priv = libipw_priv(dev);
9593	mutex_lock(&priv->mutex);
9594	if (!(priv->power_mode & IPW_POWER_ENABLED))
9595		wrqu->power.disabled = 1;
9596	else
9597		wrqu->power.disabled = 0;
9598
9599	mutex_unlock(&priv->mutex);
9600	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9601
9602	return 0;
9603}
9604
9605static int ipw_wx_set_powermode(struct net_device *dev,
9606				struct iw_request_info *info,
9607				union iwreq_data *wrqu, char *extra)
9608{
9609	struct ipw_priv *priv = libipw_priv(dev);
9610	int mode = *(int *)extra;
9611	int err;
9612
9613	mutex_lock(&priv->mutex);
9614	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9615		mode = IPW_POWER_AC;
9616
9617	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9618		err = ipw_send_power_mode(priv, mode);
9619		if (err) {
9620			IPW_DEBUG_WX("failed setting power mode.\n");
9621			mutex_unlock(&priv->mutex);
9622			return err;
9623		}
9624		priv->power_mode = IPW_POWER_ENABLED | mode;
9625	}
9626	mutex_unlock(&priv->mutex);
9627	return 0;
9628}
9629
9630#define MAX_WX_STRING 80
9631static int ipw_wx_get_powermode(struct net_device *dev,
9632				struct iw_request_info *info,
9633				union iwreq_data *wrqu, char *extra)
9634{
9635	struct ipw_priv *priv = libipw_priv(dev);
9636	int level = IPW_POWER_LEVEL(priv->power_mode);
9637	char *p = extra;
9638
9639	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9640
9641	switch (level) {
9642	case IPW_POWER_AC:
9643		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9644		break;
9645	case IPW_POWER_BATTERY:
9646		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9647		break;
9648	default:
9649		p += snprintf(p, MAX_WX_STRING - (p - extra),
9650			      "(Timeout %dms, Period %dms)",
9651			      timeout_duration[level - 1] / 1000,
9652			      period_duration[level - 1] / 1000);
9653	}
9654
9655	if (!(priv->power_mode & IPW_POWER_ENABLED))
9656		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9657
9658	wrqu->data.length = p - extra + 1;
9659
9660	return 0;
9661}
9662
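/* Map the requested IEEE_A/IEEE_B/IEEE_G bits onto frequency bands and
 * modulations (A -> 5.2 GHz OFDM, B -> 2.4 GHz CCK, G -> 2.4 GHz OFDM),
 * rebuild the supported rates and force a [re]association.  802.11a is
 * only accepted on 2915ABG hardware. */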
9663static int ipw_wx_set_wireless_mode(struct net_device *dev,
9664				    struct iw_request_info *info,
9665				    union iwreq_data *wrqu, char *extra)
9666{
9667	struct ipw_priv *priv = libipw_priv(dev);
9668	int mode = *(int *)extra;
9669	u8 band = 0, modulation = 0;
9670
9671	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9672		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9673		return -EINVAL;
9674	}
9675	mutex_lock(&priv->mutex);
9676	if (priv->adapter == IPW_2915ABG) {
9677		priv->ieee->abg_true = 1;
9678		if (mode & IEEE_A) {
9679			band |= LIBIPW_52GHZ_BAND;
9680			modulation |= LIBIPW_OFDM_MODULATION;
9681		} else
9682			priv->ieee->abg_true = 0;
9683	} else {
9684		if (mode & IEEE_A) {
9685			IPW_WARNING("Attempt to set 2200BG into "
9686				    "802.11a mode\n");
9687			mutex_unlock(&priv->mutex);
9688			return -EINVAL;
9689		}
9690
9691		priv->ieee->abg_true = 0;
9692	}
9693
9694	if (mode & IEEE_B) {
9695		band |= LIBIPW_24GHZ_BAND;
9696		modulation |= LIBIPW_CCK_MODULATION;
9697	} else
9698		priv->ieee->abg_true = 0;
9699
9700	if (mode & IEEE_G) {
9701		band |= LIBIPW_24GHZ_BAND;
9702		modulation |= LIBIPW_OFDM_MODULATION;
9703	} else
9704		priv->ieee->abg_true = 0;
9705
9706	priv->ieee->mode = mode;
9707	priv->ieee->freq_band = band;
9708	priv->ieee->modulation = modulation;
9709	init_supported_rates(priv, &priv->rates);
9710
9711	/* Network configuration changed -- force [re]association */
9712	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9713	if (!ipw_disassociate(priv)) {
9714		ipw_send_supported_rates(priv, &priv->rates);
9715		ipw_associate(priv);
9716	}
9717
9718	/* Update the band LEDs */
9719	ipw_led_band_on(priv);
9720
9721	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9722		     mode & IEEE_A ? 'a' : '.',
9723		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9724	mutex_unlock(&priv->mutex);
9725	return 0;
9726}
9727
9728static int ipw_wx_get_wireless_mode(struct net_device *dev,
9729				    struct iw_request_info *info,
9730				    union iwreq_data *wrqu, char *extra)
9731{
9732	struct ipw_priv *priv = libipw_priv(dev);
9733	mutex_lock(&priv->mutex);
9734	switch (priv->ieee->mode) {
9735	case IEEE_A:
9736		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9737		break;
9738	case IEEE_B:
9739		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9740		break;
9741	case IEEE_A | IEEE_B:
9742		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9743		break;
9744	case IEEE_G:
9745		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9746		break;
9747	case IEEE_A | IEEE_G:
9748		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9749		break;
9750	case IEEE_B | IEEE_G:
9751		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9752		break;
9753	case IEEE_A | IEEE_B | IEEE_G:
9754		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9755		break;
9756	default:
9757		strncpy(extra, "unknown", MAX_WX_STRING);
9758		break;
9759	}
9760	extra[MAX_WX_STRING - 1] = '\0';
9761
9762	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9763
9764	wrqu->data.length = strlen(extra) + 1;
9765	mutex_unlock(&priv->mutex);
9766
9767	return 0;
9768}
9769
9770static int ipw_wx_set_preamble(struct net_device *dev,
9771			       struct iw_request_info *info,
9772			       union iwreq_data *wrqu, char *extra)
9773{
9774	struct ipw_priv *priv = libipw_priv(dev);
9775	int mode = *(int *)extra;
9776	mutex_lock(&priv->mutex);
9777	/* Switching from SHORT -> LONG requires a disassociation */
9778	if (mode == 1) {
9779		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9780			priv->config |= CFG_PREAMBLE_LONG;
9781
9782			/* Network configuration changed -- force [re]association */
9783			IPW_DEBUG_ASSOC
9784			    ("[re]association triggered due to preamble change.\n");
9785			if (!ipw_disassociate(priv))
9786				ipw_associate(priv);
9787		}
9788		goto done;
9789	}
9790
9791	if (mode == 0) {
9792		priv->config &= ~CFG_PREAMBLE_LONG;
9793		goto done;
9794	}
9795	mutex_unlock(&priv->mutex);
9796	return -EINVAL;
9797
9798      done:
9799	mutex_unlock(&priv->mutex);
9800	return 0;
9801}
9802
9803static int ipw_wx_get_preamble(struct net_device *dev,
9804			       struct iw_request_info *info,
9805			       union iwreq_data *wrqu, char *extra)
9806{
9807	struct ipw_priv *priv = libipw_priv(dev);
9808	mutex_lock(&priv->mutex);
9809	if (priv->config & CFG_PREAMBLE_LONG)
9810		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9811	else
9812		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9813	mutex_unlock(&priv->mutex);
9814	return 0;
9815}
9816
9817#ifdef CONFIG_IPW2200_MONITOR
9818static int ipw_wx_set_monitor(struct net_device *dev,
9819			      struct iw_request_info *info,
9820			      union iwreq_data *wrqu, char *extra)
9821{
9822	struct ipw_priv *priv = libipw_priv(dev);
9823	int *parms = (int *)extra;
9824	int enable = (parms[0] > 0);
9825	mutex_lock(&priv->mutex);
9826	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9827	if (enable) {
9828		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9829#ifdef CONFIG_IPW2200_RADIOTAP
9830			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9831#else
9832			priv->net_dev->type = ARPHRD_IEEE80211;
9833#endif
9834			schedule_work(&priv->adapter_restart);
9835		}
9836
9837		ipw_set_channel(priv, parms[1]);
9838	} else {
9839		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9840			mutex_unlock(&priv->mutex);
9841			return 0;
9842		}
9843		priv->net_dev->type = ARPHRD_ETHER;
9844		schedule_work(&priv->adapter_restart);
9845	}
9846	mutex_unlock(&priv->mutex);
9847	return 0;
9848}
9849
9850#endif				/* CONFIG_IPW2200_MONITOR */
9851
9852static int ipw_wx_reset(struct net_device *dev,
9853			struct iw_request_info *info,
9854			union iwreq_data *wrqu, char *extra)
9855{
9856	struct ipw_priv *priv = libipw_priv(dev);
9857	IPW_DEBUG_WX("RESET\n");
9858	schedule_work(&priv->adapter_restart);
9859	return 0;
9860}
9861
9862static int ipw_wx_sw_reset(struct net_device *dev,
9863			   struct iw_request_info *info,
9864			   union iwreq_data *wrqu, char *extra)
9865{
9866	struct ipw_priv *priv = libipw_priv(dev);
9867	union iwreq_data wrqu_sec = {
9868		.encoding = {
9869			     .flags = IW_ENCODE_DISABLED,
9870			     },
9871	};
9872	int ret;
9873
9874	IPW_DEBUG_WX("SW_RESET\n");
9875
9876	mutex_lock(&priv->mutex);
9877
9878	ret = ipw_sw_reset(priv, 2);
9879	if (!ret) {
9880		free_firmware();
9881		ipw_adapter_restart(priv);
9882	}
9883
9884	/* The SW reset bit might have been toggled on by the 'disable'
9885	 * module parameter, so take appropriate action */
9886	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9887
9888	mutex_unlock(&priv->mutex);
9889	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9890	mutex_lock(&priv->mutex);
9891
9892	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9893		/* Configuration likely changed -- force [re]association */
9894		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9895				"reset.\n");
9896		if (!ipw_disassociate(priv))
9897			ipw_associate(priv);
9898	}
9899
9900	mutex_unlock(&priv->mutex);
9901
9902	return 0;
9903}
9904
9905/* Rebase the WE IOCTLs to zero for the handler array */
9906static iw_handler ipw_wx_handlers[] = {
9907	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9908	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9909	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9910	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9911	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9912	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9913	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9914	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9915	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9916	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9917	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9918	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9919	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9920	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9921	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9922	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9923	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9924	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9925	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9926	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9927	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9928	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9929	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9930	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9931	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9932	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9933	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9934	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9935	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9936	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9937	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9938	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9939	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9940	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9941	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9942	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9943	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9944	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9945	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9946	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9947	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9948};
9949
9950enum {
9951	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9952	IPW_PRIV_GET_POWER,
9953	IPW_PRIV_SET_MODE,
9954	IPW_PRIV_GET_MODE,
9955	IPW_PRIV_SET_PREAMBLE,
9956	IPW_PRIV_GET_PREAMBLE,
9957	IPW_PRIV_RESET,
9958	IPW_PRIV_SW_RESET,
9959#ifdef CONFIG_IPW2200_MONITOR
9960	IPW_PRIV_SET_MONITOR,
9961#endif
9962};
9963
9964static struct iw_priv_args ipw_priv_args[] = {
9965	{
9966	 .cmd = IPW_PRIV_SET_POWER,
9967	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9968	 .name = "set_power"},
9969	{
9970	 .cmd = IPW_PRIV_GET_POWER,
9971	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9972	 .name = "get_power"},
9973	{
9974	 .cmd = IPW_PRIV_SET_MODE,
9975	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9976	 .name = "set_mode"},
9977	{
9978	 .cmd = IPW_PRIV_GET_MODE,
9979	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9980	 .name = "get_mode"},
9981	{
9982	 .cmd = IPW_PRIV_SET_PREAMBLE,
9983	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9984	 .name = "set_preamble"},
9985	{
9986	 .cmd = IPW_PRIV_GET_PREAMBLE,
9987	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9988	 .name = "get_preamble"},
9989	{
9990	 IPW_PRIV_RESET,
9991	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9992	{
9993	 IPW_PRIV_SW_RESET,
9994	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9995#ifdef CONFIG_IPW2200_MONITOR
9996	{
9997	 IPW_PRIV_SET_MONITOR,
9998	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9999#endif				/* CONFIG_IPW2200_MONITOR */
10000};
10001
10002static iw_handler ipw_priv_handler[] = {
10003	ipw_wx_set_powermode,
10004	ipw_wx_get_powermode,
10005	ipw_wx_set_wireless_mode,
10006	ipw_wx_get_wireless_mode,
10007	ipw_wx_set_preamble,
10008	ipw_wx_get_preamble,
10009	ipw_wx_reset,
10010	ipw_wx_sw_reset,
10011#ifdef CONFIG_IPW2200_MONITOR
10012	ipw_wx_set_monitor,
10013#endif
10014};
10015
10016static struct iw_handler_def ipw_wx_handler_def = {
10017	.standard = ipw_wx_handlers,
10018	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10019	.num_private = ARRAY_SIZE(ipw_priv_handler),
10020	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10021	.private = ipw_priv_handler,
10022	.private_args = ipw_priv_args,
10023	.get_wireless_stats = ipw_get_wireless_stats,
10024};
10025
10026/*
10027 * Get wireless statistics.
10028 * Called by /proc/net/wireless
10029 * Also called by SIOCGIWSTATS
10030 */
10031static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10032{
10033	struct ipw_priv *priv = libipw_priv(dev);
10034	struct iw_statistics *wstats;
10035
10036	wstats = &priv->wstats;
10037
	/* if hw is disabled, then ipw_get_ordinal() can't be called.
	 * netdev->get_wireless_stats seems to be called before fw is
	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
	 * and associated; if not associated, the values are all meaningless
	 * anyway, so zero them all and mark them INVALID */
10043	if (!(priv->status & STATUS_ASSOCIATED)) {
10044		wstats->miss.beacon = 0;
10045		wstats->discard.retries = 0;
10046		wstats->qual.qual = 0;
10047		wstats->qual.level = 0;
10048		wstats->qual.noise = 0;
10049		wstats->qual.updated = 7;
10050		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10051		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10052		return wstats;
10053	}
10054
10055	wstats->qual.qual = priv->quality;
10056	wstats->qual.level = priv->exp_avg_rssi;
10057	wstats->qual.noise = priv->exp_avg_noise;
10058	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10059	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10060
10061	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10062	wstats->discard.retries = priv->last_tx_failures;
10063	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10064
10065/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10066	goto fail_get_ordinal;
10067	wstats->discard.retries += tx_retry; */
10068
10069	return wstats;
10070}
10071
10072/* net device stuff */
10073
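/* Fill in the default system configuration sent to the firmware: accept
 * non-directed frames, disable in-firmware unicast/multicast decryption,
 * clamp the antenna module parameter to a valid diversity setting, and
 * pass noise statistics up to the host. */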
10074static  void init_sys_config(struct ipw_sys_config *sys_config)
10075{
10076	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10077	sys_config->bt_coexistence = 0;
10078	sys_config->answer_broadcast_ssid_probe = 0;
10079	sys_config->accept_all_data_frames = 0;
10080	sys_config->accept_non_directed_frames = 1;
10081	sys_config->exclude_unicast_unencrypted = 0;
10082	sys_config->disable_unicast_decryption = 1;
10083	sys_config->exclude_multicast_unencrypted = 0;
10084	sys_config->disable_multicast_decryption = 1;
10085	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10086		antenna = CFG_SYS_ANTENNA_BOTH;
10087	sys_config->antenna_diversity = antenna;
10088	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10089	sys_config->dot11g_auto_detection = 0;
10090	sys_config->enable_cts_to_self = 0;
10091	sys_config->bt_coexist_collision_thr = 0;
10092	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10093	sys_config->silence_threshold = 0x1e;
10094}
10095
10096static int ipw_net_open(struct net_device *dev)
10097{
10098	IPW_DEBUG_INFO("dev->open\n");
10099	netif_start_queue(dev);
10100	return 0;
10101}
10102
10103static int ipw_net_stop(struct net_device *dev)
10104{
10105	IPW_DEBUG_INFO("dev->close\n");
10106	netif_stop_queue(dev);
10107	return 0;
10108}
10109
/*
 * TODO: modify to send one TFD per fragment instead of using chunking;
 * otherwise we need to heavily modify libipw_skb_to_txb().
 */
10116
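/* Build a TX frame descriptor (TFD) for a libipw tx buffer: look up (or
 * add) the destination station in IBSS mode, copy the 802.11 header into
 * the TFD, set preamble/modulation and hardware-crypto flags, and DMA-map
 * each payload fragment as a chunk.  Fragments beyond the chunk limit are
 * coalesced into a single freshly allocated skb. */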
10117static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10118			     int pri)
10119{
10120	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10121	    txb->fragments[0]->data;
10122	int i = 0;
10123	struct tfd_frame *tfd;
10124#ifdef CONFIG_IPW2200_QOS
10125	int tx_id = ipw_get_tx_queue_number(priv, pri);
10126	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10127#else
10128	struct clx2_tx_queue *txq = &priv->txq[0];
10129#endif
10130	struct clx2_queue *q = &txq->q;
10131	u8 id, hdr_len, unicast;
10132	int fc;
10133
10134	if (!(priv->status & STATUS_ASSOCIATED))
10135		goto drop;
10136
10137	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10138	switch (priv->ieee->iw_mode) {
10139	case IW_MODE_ADHOC:
10140		unicast = !is_multicast_ether_addr(hdr->addr1);
10141		id = ipw_find_station(priv, hdr->addr1);
10142		if (id == IPW_INVALID_STATION) {
10143			id = ipw_add_station(priv, hdr->addr1);
10144			if (id == IPW_INVALID_STATION) {
10145				IPW_WARNING("Attempt to send data to "
10146					    "invalid cell: %pM\n",
10147					    hdr->addr1);
10148				goto drop;
10149			}
10150		}
10151		break;
10152
10153	case IW_MODE_INFRA:
10154	default:
10155		unicast = !is_multicast_ether_addr(hdr->addr3);
10156		id = 0;
10157		break;
10158	}
10159
10160	tfd = &txq->bd[q->first_empty];
10161	txq->txb[q->first_empty] = txb;
10162	memset(tfd, 0, sizeof(*tfd));
10163	tfd->u.data.station_number = id;
10164
10165	tfd->control_flags.message_type = TX_FRAME_TYPE;
10166	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10167
10168	tfd->u.data.cmd_id = DINO_CMD_TX;
10169	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10170
10171	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10172		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10173	else
10174		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10175
10176	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10177		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10178
10179	fc = le16_to_cpu(hdr->frame_ctl);
10180	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10181
10182	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10183
10184	if (likely(unicast))
10185		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10186
10187	if (txb->encrypted && !priv->ieee->host_encrypt) {
10188		switch (priv->ieee->sec.level) {
10189		case SEC_LEVEL_3:
10190			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10191			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10192			/* XXX: ACK flag must be set for CCMP even if it
10193			 * is a multicast/broadcast packet, because CCMP
10194			 * group communication encrypted by GTK is
10195			 * actually done by the AP. */
10196			if (!unicast)
10197				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10198
10199			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10200			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10201			tfd->u.data.key_index = 0;
10202			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10203			break;
10204		case SEC_LEVEL_2:
10205			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10206			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10207			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10208			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10209			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10210			break;
10211		case SEC_LEVEL_1:
10212			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10213			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10214			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10215			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10216			    40)
10217				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10218			else
10219				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10220			break;
10221		case SEC_LEVEL_0:
10222			break;
10223		default:
10224			printk(KERN_ERR "Unknown security level %d\n",
10225			       priv->ieee->sec.level);
10226			break;
10227		}
10228	} else
10229		/* No hardware encryption */
10230		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10231
10232#ifdef CONFIG_IPW2200_QOS
10233	if (fc & IEEE80211_STYPE_QOS_DATA)
10234		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10235#endif				/* CONFIG_IPW2200_QOS */
10236
10237	/* payload */
10238	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10239						 txb->nr_frags));
10240	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10241		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10242	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10243		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10244			       i, le32_to_cpu(tfd->u.data.num_chunks),
10245			       txb->fragments[i]->len - hdr_len);
10246		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10247			     i, tfd->u.data.num_chunks,
10248			     txb->fragments[i]->len - hdr_len);
10249		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10250			   txb->fragments[i]->len - hdr_len);
10251
10252		tfd->u.data.chunk_ptr[i] =
10253		    cpu_to_le32(pci_map_single
10254				(priv->pci_dev,
10255				 txb->fragments[i]->data + hdr_len,
10256				 txb->fragments[i]->len - hdr_len,
10257				 PCI_DMA_TODEVICE));
10258		tfd->u.data.chunk_len[i] =
10259		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10260	}
10261
10262	if (i != txb->nr_frags) {
10263		struct sk_buff *skb;
10264		u16 remaining_bytes = 0;
10265		int j;
10266
10267		for (j = i; j < txb->nr_frags; j++)
10268			remaining_bytes += txb->fragments[j]->len - hdr_len;
10269
10270		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10271		       remaining_bytes);
10272		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10273		if (skb != NULL) {
10274			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10275			for (j = i; j < txb->nr_frags; j++) {
10276				int size = txb->fragments[j]->len - hdr_len;
10277
10278				printk(KERN_INFO "Adding frag %d %d...\n",
10279				       j, size);
10280				memcpy(skb_put(skb, size),
10281				       txb->fragments[j]->data + hdr_len, size);
10282			}
10283			dev_kfree_skb_any(txb->fragments[i]);
10284			txb->fragments[i] = skb;
10285			tfd->u.data.chunk_ptr[i] =
10286			    cpu_to_le32(pci_map_single
10287					(priv->pci_dev, skb->data,
10288					 remaining_bytes,
10289					 PCI_DMA_TODEVICE));
10290
10291			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10292		}
10293	}
10294
10295	/* kick DMA */
10296	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10297	ipw_write32(priv, q->reg_w, q->first_empty);
10298
10299	if (ipw_tx_queue_space(q) < q->high_mark)
10300		netif_stop_queue(priv->net_dev);
10301
10302	return NETDEV_TX_OK;
10303
10304      drop:
10305	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10306	libipw_txb_free(txb);
10307	return NETDEV_TX_OK;
10308}
10309
10310static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10311{
10312	struct ipw_priv *priv = libipw_priv(dev);
10313#ifdef CONFIG_IPW2200_QOS
10314	int tx_id = ipw_get_tx_queue_number(priv, pri);
10315	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10316#else
10317	struct clx2_tx_queue *txq = &priv->txq[0];
10318#endif				/* CONFIG_IPW2200_QOS */
10319
10320	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10321		return 1;
10322
10323	return 0;
10324}
10325
10326#ifdef CONFIG_IPW2200_PROMISCUOUS
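/* Mirror transmitted frames to the promiscuous (rtap) interface: apply the
 * configured filter, optionally keep only the 802.11 header, and prepend a
 * minimal radiotap header carrying the channel information before handing
 * each fragment to libipw_rx() on the prom interface. */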
10327static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10328				      struct libipw_txb *txb)
10329{
10330	struct libipw_rx_stats dummystats;
10331	struct ieee80211_hdr *hdr;
10332	u8 n;
10333	u16 filter = priv->prom_priv->filter;
10334	int hdr_only = 0;
10335
10336	if (filter & IPW_PROM_NO_TX)
10337		return;
10338
10339	memset(&dummystats, 0, sizeof(dummystats));
10340
10341	/* Filtering of fragment chains is done against the first fragment */
10342	hdr = (void *)txb->fragments[0]->data;
10343	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10344		if (filter & IPW_PROM_NO_MGMT)
10345			return;
10346		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10347			hdr_only = 1;
10348	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10349		if (filter & IPW_PROM_NO_CTL)
10350			return;
10351		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10352			hdr_only = 1;
10353	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10354		if (filter & IPW_PROM_NO_DATA)
10355			return;
10356		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10357			hdr_only = 1;
10358	}
10359
	for (n = 0; n < txb->nr_frags; ++n) {
10361		struct sk_buff *src = txb->fragments[n];
10362		struct sk_buff *dst;
10363		struct ieee80211_radiotap_header *rt_hdr;
10364		int len;
10365
10366		if (hdr_only) {
10367			hdr = (void *)src->data;
10368			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10369		} else
10370			len = src->len;
10371
10372		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10373		if (!dst)
10374			continue;
10375
10376		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10377
10378		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10379		rt_hdr->it_pad = 0;
		rt_hdr->it_present = 0; /* after all, it's just an idea */
		rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);

		*(__le16 *)skb_put(dst, sizeof(u16)) =
			cpu_to_le16(ieee80211chan2mhz(priv->channel));
		if (priv->channel > 14)		/* 802.11a */
			*(__le16 *)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					    IEEE80211_CHAN_5GHZ);
		else if (priv->ieee->mode == IEEE_B)	/* 802.11b */
			*(__le16 *)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_CCK |
					    IEEE80211_CHAN_2GHZ);
		else					/* 802.11g */
			*(__le16 *)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					    IEEE80211_CHAN_2GHZ);
10397
10398		rt_hdr->it_len = cpu_to_le16(dst->len);
10399
10400		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10401
10402		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10403			dev_kfree_skb_any(dst);
10404	}
10405}
10406#endif
10407
10408static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10409					   struct net_device *dev, int pri)
10410{
10411	struct ipw_priv *priv = libipw_priv(dev);
10412	unsigned long flags;
10413	netdev_tx_t ret;
10414
10415	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10416	spin_lock_irqsave(&priv->lock, flags);
10417
10418#ifdef CONFIG_IPW2200_PROMISCUOUS
10419	if (rtap_iface && netif_running(priv->prom_net_dev))
10420		ipw_handle_promiscuous_tx(priv, txb);
10421#endif
10422
10423	ret = ipw_tx_skb(priv, txb, pri);
10424	if (ret == NETDEV_TX_OK)
10425		__ipw_led_activity_on(priv);
10426	spin_unlock_irqrestore(&priv->lock, flags);
10427
10428	return ret;
10429}
10430
static void ipw_net_set_multicast_list(struct net_device *dev)
{
}
10435
10436static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10437{
10438	struct ipw_priv *priv = libipw_priv(dev);
10439	struct sockaddr *addr = p;
10440
10441	if (!is_valid_ether_addr(addr->sa_data))
10442		return -EADDRNOTAVAIL;
10443	mutex_lock(&priv->mutex);
10444	priv->config |= CFG_CUSTOM_MAC;
10445	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10446	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10447	       priv->net_dev->name, priv->mac_addr);
10448	schedule_work(&priv->adapter_restart);
10449	mutex_unlock(&priv->mutex);
10450	return 0;
10451}
10452
10453static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10454				    struct ethtool_drvinfo *info)
10455{
10456	struct ipw_priv *p = libipw_priv(dev);
10457	char vers[64];
10458	char date[32];
10459	u32 len;
10460
10461	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10462	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10463
10464	len = sizeof(vers);
10465	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10466	len = sizeof(date);
10467	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10468
10469	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10470		 vers, date);
10471	strlcpy(info->bus_info, pci_name(p->pci_dev),
10472		sizeof(info->bus_info));
10473	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10474}
10475
10476static u32 ipw_ethtool_get_link(struct net_device *dev)
10477{
10478	struct ipw_priv *priv = libipw_priv(dev);
10479	return (priv->status & STATUS_ASSOCIATED) != 0;
10480}
10481
10482static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10483{
10484	return IPW_EEPROM_IMAGE_SIZE;
10485}
10486
10487static int ipw_ethtool_get_eeprom(struct net_device *dev,
10488				  struct ethtool_eeprom *eeprom, u8 * bytes)
10489{
10490	struct ipw_priv *p = libipw_priv(dev);
10491
10492	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10493		return -EINVAL;
10494	mutex_lock(&p->mutex);
10495	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10496	mutex_unlock(&p->mutex);
10497	return 0;
10498}
10499
10500static int ipw_ethtool_set_eeprom(struct net_device *dev,
10501				  struct ethtool_eeprom *eeprom, u8 * bytes)
10502{
10503	struct ipw_priv *p = libipw_priv(dev);
10504	int i;
10505
10506	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10507		return -EINVAL;
10508	mutex_lock(&p->mutex);
10509	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10510	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10511		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10512	mutex_unlock(&p->mutex);
10513	return 0;
10514}
10515
10516static const struct ethtool_ops ipw_ethtool_ops = {
10517	.get_link = ipw_ethtool_get_link,
10518	.get_drvinfo = ipw_ethtool_get_drvinfo,
10519	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10520	.get_eeprom = ipw_ethtool_get_eeprom,
10521	.set_eeprom = ipw_ethtool_set_eeprom,
10522};
10523
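/* Hard interrupt handler: read and acknowledge the pending INTA bits,
 * disable further interrupts, cache the status for the tasklet and defer
 * all real processing to the IRQ tasklet. */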
10524static irqreturn_t ipw_isr(int irq, void *data)
10525{
10526	struct ipw_priv *priv = data;
10527	u32 inta, inta_mask;
10528
10529	if (!priv)
10530		return IRQ_NONE;
10531
10532	spin_lock(&priv->irq_lock);
10533
10534	if (!(priv->status & STATUS_INT_ENABLED)) {
10535		/* IRQ is disabled */
10536		goto none;
10537	}
10538
10539	inta = ipw_read32(priv, IPW_INTA_RW);
10540	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10541
10542	if (inta == 0xFFFFFFFF) {
10543		/* Hardware disappeared */
10544		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10545		goto none;
10546	}
10547
10548	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10549		/* Shared interrupt */
10550		goto none;
10551	}
10552
10553	/* tell the device to stop sending interrupts */
10554	__ipw_disable_interrupts(priv);
10555
10556	/* ack current interrupts */
10557	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10558	ipw_write32(priv, IPW_INTA_RW, inta);
10559
10560	/* Cache INTA value for our tasklet */
10561	priv->isr_inta = inta;
10562
10563	tasklet_schedule(&priv->irq_tasklet);
10564
10565	spin_unlock(&priv->irq_lock);
10566
10567	return IRQ_HANDLED;
10568      none:
10569	spin_unlock(&priv->irq_lock);
10570	return IRQ_NONE;
10571}
10572
10573static void ipw_rf_kill(void *adapter)
10574{
10575	struct ipw_priv *priv = adapter;
10576	unsigned long flags;
10577
10578	spin_lock_irqsave(&priv->lock, flags);
10579
10580	if (rf_kill_active(priv)) {
10581		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10582		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10583		goto exit_unlock;
10584	}
10585
10586	/* RF Kill is now disabled, so bring the device back up */
10587
10588	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10589		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10590				  "device\n");
10591
		/* we cannot do an adapter restart while inside an irq lock */
10593		schedule_work(&priv->adapter_restart);
10594	} else
10595		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10596				  "enabled\n");
10597
10598      exit_unlock:
10599	spin_unlock_irqrestore(&priv->lock, flags);
10600}
10601
10602static void ipw_bg_rf_kill(struct work_struct *work)
10603{
10604	struct ipw_priv *priv =
10605		container_of(work, struct ipw_priv, rf_kill.work);
10606	mutex_lock(&priv->mutex);
10607	ipw_rf_kill(priv);
10608	mutex_unlock(&priv->mutex);
10609}
10610
10611static void ipw_link_up(struct ipw_priv *priv)
10612{
10613	priv->last_seq_num = -1;
10614	priv->last_frag_num = -1;
10615	priv->last_packet_time = 0;
10616
10617	netif_carrier_on(priv->net_dev);
10618
10619	cancel_delayed_work(&priv->request_scan);
10620	cancel_delayed_work(&priv->request_direct_scan);
10621	cancel_delayed_work(&priv->request_passive_scan);
10622	cancel_delayed_work(&priv->scan_event);
10623	ipw_reset_stats(priv);
10624	/* Ensure the rate is updated immediately */
10625	priv->last_rate = ipw_get_current_rate(priv);
10626	ipw_gather_stats(priv);
10627	ipw_led_link_up(priv);
10628	notify_wx_assoc_event(priv);
10629
10630	if (priv->config & CFG_BACKGROUND_SCAN)
10631		schedule_delayed_work(&priv->request_scan, HZ);
10632}
10633
10634static void ipw_bg_link_up(struct work_struct *work)
10635{
10636	struct ipw_priv *priv =
10637		container_of(work, struct ipw_priv, link_up);
10638	mutex_lock(&priv->mutex);
10639	ipw_link_up(priv);
10640	mutex_unlock(&priv->mutex);
10641}
10642
10643static void ipw_link_down(struct ipw_priv *priv)
10644{
10645	ipw_led_link_down(priv);
10646	netif_carrier_off(priv->net_dev);
10647	notify_wx_assoc_event(priv);
10648
10649	/* Cancel any queued work ... */
10650	cancel_delayed_work(&priv->request_scan);
10651	cancel_delayed_work(&priv->request_direct_scan);
10652	cancel_delayed_work(&priv->request_passive_scan);
10653	cancel_delayed_work(&priv->adhoc_check);
10654	cancel_delayed_work(&priv->gather_stats);
10655
10656	ipw_reset_stats(priv);
10657
10658	if (!(priv->status & STATUS_EXIT_PENDING)) {
10659		/* Queue up another scan... */
10660		schedule_delayed_work(&priv->request_scan, 0);
10661	} else
10662		cancel_delayed_work(&priv->scan_event);
10663}
10664
10665static void ipw_bg_link_down(struct work_struct *work)
10666{
10667	struct ipw_priv *priv =
10668		container_of(work, struct ipw_priv, link_down);
10669	mutex_lock(&priv->mutex);
10670	ipw_link_down(priv);
10671	mutex_unlock(&priv->mutex);
10672}
10673
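/* Initialize all of the driver's work items, delayed works, wait queues
 * and the interrupt tasklet; the individual work functions are scheduled
 * from the interrupt path and the driver state machine as needed. */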
10674static int ipw_setup_deferred_work(struct ipw_priv *priv)
10675{
10676	int ret = 0;
10677
10678	init_waitqueue_head(&priv->wait_command_queue);
10679	init_waitqueue_head(&priv->wait_state);
10680
10681	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10682	INIT_WORK(&priv->associate, ipw_bg_associate);
10683	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10684	INIT_WORK(&priv->system_config, ipw_system_config);
10685	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10686	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10687	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10688	INIT_WORK(&priv->up, ipw_bg_up);
10689	INIT_WORK(&priv->down, ipw_bg_down);
10690	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10691	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10692	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10693	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10694	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10695	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10696	INIT_WORK(&priv->roam, ipw_bg_roam);
10697	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10698	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10699	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10700	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10701	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10702	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10703	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10704
10705#ifdef CONFIG_IPW2200_QOS
10706	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10707#endif				/* CONFIG_IPW2200_QOS */
10708
10709	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10710		     ipw_irq_tasklet, (unsigned long)priv);
10711
10712	return ret;
10713}
10714
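/* Callback used by libipw to push security settings into the driver: copy
 * key material, the active key, authentication mode, privacy and security
 * level flags into priv->ieee->sec and set STATUS_SECURITY_UPDATED so the
 * new configuration is applied. */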
10715static void shim__set_security(struct net_device *dev,
10716			       struct libipw_security *sec)
10717{
10718	struct ipw_priv *priv = libipw_priv(dev);
10719	int i;
10720	for (i = 0; i < 4; i++) {
10721		if (sec->flags & (1 << i)) {
10722			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10723			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10724			if (sec->key_sizes[i] == 0)
10725				priv->ieee->sec.flags &= ~(1 << i);
10726			else {
10727				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10728				       sec->key_sizes[i]);
10729				priv->ieee->sec.flags |= (1 << i);
10730			}
10731			priv->status |= STATUS_SECURITY_UPDATED;
10732		} else if (sec->level != SEC_LEVEL_1)
10733			priv->ieee->sec.flags &= ~(1 << i);
10734	}
10735
10736	if (sec->flags & SEC_ACTIVE_KEY) {
10737		if (sec->active_key <= 3) {
10738			priv->ieee->sec.active_key = sec->active_key;
10739			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10740		} else
10741			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10742		priv->status |= STATUS_SECURITY_UPDATED;
10743	} else
10744		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10745
10746	if ((sec->flags & SEC_AUTH_MODE) &&
10747	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10748		priv->ieee->sec.auth_mode = sec->auth_mode;
10749		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10750		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10751			priv->capability |= CAP_SHARED_KEY;
10752		else
10753			priv->capability &= ~CAP_SHARED_KEY;
10754		priv->status |= STATUS_SECURITY_UPDATED;
10755	}
10756
10757	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10758		priv->ieee->sec.flags |= SEC_ENABLED;
10759		priv->ieee->sec.enabled = sec->enabled;
10760		priv->status |= STATUS_SECURITY_UPDATED;
10761		if (sec->enabled)
10762			priv->capability |= CAP_PRIVACY_ON;
10763		else
10764			priv->capability &= ~CAP_PRIVACY_ON;
10765	}
10766
10767	if (sec->flags & SEC_ENCRYPT)
10768		priv->ieee->sec.encrypt = sec->encrypt;
10769
10770	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10771		priv->ieee->sec.level = sec->level;
10772		priv->ieee->sec.flags |= SEC_LEVEL;
10773		priv->status |= STATUS_SECURITY_UPDATED;
10774	}
10775
10776	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10777		ipw_set_hwcrypto_keys(priv);
10778
	/* To match current functionality of ipw2100 (which works well with
	 * various supplicants), we don't force a disassociate if the
	 * privacy capability changes ... */
10782#if 0
10783	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10784	    (((priv->assoc_request.capability &
10785	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10786	     (!(priv->assoc_request.capability &
10787		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10788		IPW_DEBUG_ASSOC("Disassociating due to capability "
10789				"change.\n");
10790		ipw_disassociate(priv);
10791	}
10792#endif
10793}
10794
10795static int init_supported_rates(struct ipw_priv *priv,
10796				struct ipw_supported_rates *rates)
10797{
10798	/* TODO: Mask out rates based on priv->rates_mask */
10799
10800	memset(rates, 0, sizeof(*rates));
10801	/* configure supported rates */
10802	switch (priv->ieee->freq_band) {
10803	case LIBIPW_52GHZ_BAND:
10804		rates->ieee_mode = IPW_A_MODE;
10805		rates->purpose = IPW_RATE_CAPABILITIES;
10806		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10807					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10808		break;
10809
	default:		/* Mixed or 2.4 GHz */
10811		rates->ieee_mode = IPW_G_MODE;
10812		rates->purpose = IPW_RATE_CAPABILITIES;
10813		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10814				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10815		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10816			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10817						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10818		}
10819		break;
10820	}
10821
10822	return 0;
10823}
10824
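/* Push the basic configuration to the firmware after it has been loaded:
 * TX power, adapter MAC address, system config (including optional BT
 * coexistence and promiscuous settings), supported rates, RTS threshold
 * and random seed, then send host-complete to enter the RUN state. */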
10825static int ipw_config(struct ipw_priv *priv)
10826{
	/* This is only called from ipw_up, which resets/reloads the firmware,
	 * so we don't need to first disable the card before we configure
	 * it */
10830	if (ipw_set_tx_power(priv))
10831		goto error;
10832
10833	/* initialize adapter address */
10834	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10835		goto error;
10836
10837	/* set basic system config settings */
10838	init_sys_config(&priv->sys_config);
10839
	/* Support Bluetooth if we have BT h/w on board and the user wants us
	 * to.  BT priority is not supported yet (we don't abort or defer our
	 * Tx). */
10842	if (bt_coexist) {
10843		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10844
10845		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10846			priv->sys_config.bt_coexistence
10847			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10848		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10849			priv->sys_config.bt_coexistence
10850			    |= CFG_BT_COEXISTENCE_OOB;
10851	}
10852
10853#ifdef CONFIG_IPW2200_PROMISCUOUS
10854	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10855		priv->sys_config.accept_all_data_frames = 1;
10856		priv->sys_config.accept_non_directed_frames = 1;
10857		priv->sys_config.accept_all_mgmt_bcpr = 1;
10858		priv->sys_config.accept_all_mgmt_frames = 1;
10859	}
10860#endif
10861
10862	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10863		priv->sys_config.answer_broadcast_ssid_probe = 1;
10864	else
10865		priv->sys_config.answer_broadcast_ssid_probe = 0;
10866
10867	if (ipw_send_system_config(priv))
10868		goto error;
10869
10870	init_supported_rates(priv, &priv->rates);
10871	if (ipw_send_supported_rates(priv, &priv->rates))
10872		goto error;
10873
10874	/* Set request-to-send threshold */
10875	if (priv->rts_threshold) {
10876		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10877			goto error;
10878	}
10879#ifdef CONFIG_IPW2200_QOS
10880	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10881	ipw_qos_activate(priv, NULL);
10882#endif				/* CONFIG_IPW2200_QOS */
10883
10884	if (ipw_set_random_seed(priv))
10885		goto error;
10886
10887	/* final state transition to the RUN state */
10888	if (ipw_send_host_complete(priv))
10889		goto error;
10890
10891	priv->status |= STATUS_INIT;
10892
10893	ipw_led_init(priv);
10894	ipw_led_radio_on(priv);
10895	priv->notif_missed_beacons = 0;
10896
10897	/* Set hardware WEP key if it is configured. */
10898	if ((priv->capability & CAP_PRIVACY_ON) &&
10899	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10900	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10901		ipw_set_hwcrypto_keys(priv);
10902
10903	return 0;
10904
10905      error:
10906	return -EIO;
10907}
10908
10909/*
10910 * NOTE:
10911 *
10912 * These tables have been tested in conjunction with the
10913 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10914 *
 * Altering these values, using them on other hardware, or using them in
 * geographies not intended for resale of the above-mentioned Intel
 * adapters has not been tested.
10918 *
10919 * Remember to update the table in README.ipw2200 when changing this
10920 * table.
10921 *
10922 */
10923static const struct libipw_geo ipw_geos[] = {
10924	{			/* Restricted */
10925	 "---",
10926	 .bg_channels = 11,
10927	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10928		{2427, 4}, {2432, 5}, {2437, 6},
10929		{2442, 7}, {2447, 8}, {2452, 9},
10930		{2457, 10}, {2462, 11}},
10931	 },
10932
10933	{			/* Custom US/Canada */
10934	 "ZZF",
10935	 .bg_channels = 11,
10936	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10937		{2427, 4}, {2432, 5}, {2437, 6},
10938		{2442, 7}, {2447, 8}, {2452, 9},
10939		{2457, 10}, {2462, 11}},
10940	 .a_channels = 8,
10941	 .a = {{5180, 36},
10942	       {5200, 40},
10943	       {5220, 44},
10944	       {5240, 48},
10945	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10946	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10947	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10948	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10949	 },
10950
10951	{			/* Rest of World */
10952	 "ZZD",
10953	 .bg_channels = 13,
10954	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10955		{2427, 4}, {2432, 5}, {2437, 6},
10956		{2442, 7}, {2447, 8}, {2452, 9},
10957		{2457, 10}, {2462, 11}, {2467, 12},
10958		{2472, 13}},
10959	 },
10960
10961	{			/* Custom USA & Europe & High */
10962	 "ZZA",
10963	 .bg_channels = 11,
10964	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10965		{2427, 4}, {2432, 5}, {2437, 6},
10966		{2442, 7}, {2447, 8}, {2452, 9},
10967		{2457, 10}, {2462, 11}},
10968	 .a_channels = 13,
10969	 .a = {{5180, 36},
10970	       {5200, 40},
10971	       {5220, 44},
10972	       {5240, 48},
10973	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10974	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10975	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10976	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10977	       {5745, 149},
10978	       {5765, 153},
10979	       {5785, 157},
10980	       {5805, 161},
10981	       {5825, 165}},
10982	 },
10983
10984	{			/* Custom NA & Europe */
10985	 "ZZB",
10986	 .bg_channels = 11,
10987	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10988		{2427, 4}, {2432, 5}, {2437, 6},
10989		{2442, 7}, {2447, 8}, {2452, 9},
10990		{2457, 10}, {2462, 11}},
10991	 .a_channels = 13,
10992	 .a = {{5180, 36},
10993	       {5200, 40},
10994	       {5220, 44},
10995	       {5240, 48},
10996	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10997	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10998	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10999	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11000	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11001	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11002	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11003	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11004	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11005	 },
11006
11007	{			/* Custom Japan */
11008	 "ZZC",
11009	 .bg_channels = 11,
11010	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11011		{2427, 4}, {2432, 5}, {2437, 6},
11012		{2442, 7}, {2447, 8}, {2452, 9},
11013		{2457, 10}, {2462, 11}},
11014	 .a_channels = 4,
11015	 .a = {{5170, 34}, {5190, 38},
11016	       {5210, 42}, {5230, 46}},
11017	 },
11018
11019	{			/* Custom */
11020	 "ZZM",
11021	 .bg_channels = 11,
11022	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11023		{2427, 4}, {2432, 5}, {2437, 6},
11024		{2442, 7}, {2447, 8}, {2452, 9},
11025		{2457, 10}, {2462, 11}},
11026	 },
11027
11028	{			/* Europe */
11029	 "ZZE",
11030	 .bg_channels = 13,
11031	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11032		{2427, 4}, {2432, 5}, {2437, 6},
11033		{2442, 7}, {2447, 8}, {2452, 9},
11034		{2457, 10}, {2462, 11}, {2467, 12},
11035		{2472, 13}},
11036	 .a_channels = 19,
11037	 .a = {{5180, 36},
11038	       {5200, 40},
11039	       {5220, 44},
11040	       {5240, 48},
11041	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11042	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11043	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11044	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11045	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11046	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11047	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11048	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11049	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11050	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11051	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11052	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11053	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11054	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11055	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11056	 },
11057
11058	{			/* Custom Japan */
11059	 "ZZJ",
11060	 .bg_channels = 14,
11061	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11062		{2427, 4}, {2432, 5}, {2437, 6},
11063		{2442, 7}, {2447, 8}, {2452, 9},
11064		{2457, 10}, {2462, 11}, {2467, 12},
11065		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11066	 .a_channels = 4,
11067	 .a = {{5170, 34}, {5190, 38},
11068	       {5210, 42}, {5230, 46}},
11069	 },
11070
11071	{			/* Rest of World */
11072	 "ZZR",
11073	 .bg_channels = 14,
11074	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11075		{2427, 4}, {2432, 5}, {2437, 6},
11076		{2442, 7}, {2447, 8}, {2452, 9},
11077		{2457, 10}, {2462, 11}, {2467, 12},
11078		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11079			     LIBIPW_CH_PASSIVE_ONLY}},
11080	 },
11081
11082	{			/* High Band */
11083	 "ZZH",
11084	 .bg_channels = 13,
11085	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11086		{2427, 4}, {2432, 5}, {2437, 6},
11087		{2442, 7}, {2447, 8}, {2452, 9},
11088		{2457, 10}, {2462, 11},
11089		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11090		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11091	 .a_channels = 4,
11092	 .a = {{5745, 149}, {5765, 153},
11093	       {5785, 157}, {5805, 161}},
11094	 },
11095
11096	{			/* Custom Europe */
11097	 "ZZG",
11098	 .bg_channels = 13,
11099	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11100		{2427, 4}, {2432, 5}, {2437, 6},
11101		{2442, 7}, {2447, 8}, {2452, 9},
11102		{2457, 10}, {2462, 11},
11103		{2467, 12}, {2472, 13}},
11104	 .a_channels = 4,
11105	 .a = {{5180, 36}, {5200, 40},
11106	       {5220, 44}, {5240, 48}},
11107	 },
11108
11109	{			/* Europe */
11110	 "ZZK",
11111	 .bg_channels = 13,
11112	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11113		{2427, 4}, {2432, 5}, {2437, 6},
11114		{2442, 7}, {2447, 8}, {2452, 9},
11115		{2457, 10}, {2462, 11},
11116		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11117		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11118	 .a_channels = 24,
11119	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11120	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11121	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11122	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11123	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11124	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11125	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11126	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11127	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11128	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11129	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11130	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11131	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11132	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11133	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11134	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11135	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11136	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11137	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11138	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11139	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11140	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11141	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11142	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11143	 },
11144
11145	{			/* Europe */
11146	 "ZZL",
11147	 .bg_channels = 11,
11148	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11149		{2427, 4}, {2432, 5}, {2437, 6},
11150		{2442, 7}, {2447, 8}, {2452, 9},
11151		{2457, 10}, {2462, 11}},
11152	 .a_channels = 13,
11153	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11154	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11155	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11156	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11157	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11158	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11159	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11160	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11161	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11162	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11163	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11164	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11165	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11166	 }
11167};
11168
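/* Match the three-character country/SKU code stored in the EEPROM against
 * the ipw_geos[] table above; unrecognized codes fall back to entry 0, the
 * restricted "---" geography (11 b/g channels, no 802.11a). */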
11169static void ipw_set_geo(struct ipw_priv *priv)
11170{
11171	int j;
11172
11173	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11174		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11175			    ipw_geos[j].name, 3))
11176			break;
11177	}
11178
11179	if (j == ARRAY_SIZE(ipw_geos)) {
11180		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11181			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11182			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11183			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11184		j = 0;
11185	}
11186
11187	libipw_set_geo(priv->ieee, &ipw_geos[j]);
11188}
11189
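/*
 * Bring the adapter up: age cached scan results from before a suspend,
 * allocate the optional firmware command log, then (re)load the firmware,
 * parse the EEPROM MAC address and geography, and configure the device,
 * retrying up to MAX_HW_RESTARTS times.  Returns 0 early when RF-kill is
 * asserted, since the radio cannot be used until the switch is released.
 */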
11190#define MAX_HW_RESTARTS 5
11191static int ipw_up(struct ipw_priv *priv)
11192{
11193	int rc, i;
11194
11195	/* Age scan list entries found before suspend */
11196	if (priv->suspend_time) {
11197		libipw_networks_age(priv->ieee, priv->suspend_time);
11198		priv->suspend_time = 0;
11199	}
11200
11201	if (priv->status & STATUS_EXIT_PENDING)
11202		return -EIO;
11203
11204	if (cmdlog && !priv->cmdlog) {
11205		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11206				       GFP_KERNEL);
		if (priv->cmdlog == NULL) {
			IPW_ERROR("Error allocating %d command log entries.\n",
				  cmdlog);
			return -ENOMEM;
		}
		priv->cmdlog_len = cmdlog;
11214	}
11215
11216	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11217		/* Load the microcode, firmware, and eeprom.
11218		 * Also start the clocks. */
11219		rc = ipw_load(priv);
11220		if (rc) {
11221			IPW_ERROR("Unable to load firmware: %d\n", rc);
11222			return rc;
11223		}
11224
11225		ipw_init_ordinals(priv);
11226		if (!(priv->config & CFG_CUSTOM_MAC))
11227			eeprom_parse_mac(priv, priv->mac_addr);
11228		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11229
11230		ipw_set_geo(priv);
11231
11232		if (priv->status & STATUS_RF_KILL_SW) {
11233			IPW_WARNING("Radio disabled by module parameter.\n");
11234			return 0;
11235		} else if (rf_kill_active(priv)) {
11236			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11237				    "Kill switch must be turned off for "
11238				    "wireless networking to work.\n");
11239			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11240			return 0;
11241		}
11242
11243		rc = ipw_config(priv);
11244		if (!rc) {
11245			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11246
			/* If configured to try to auto-associate, kick
			 * off a scan. */
11249			schedule_delayed_work(&priv->request_scan, 0);
11250
11251			return 0;
11252		}
11253
11254		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11255		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11256			       i, MAX_HW_RESTARTS);
11257
11258		/* We had an error bringing up the hardware, so take it
11259		 * all the way back down so we can try again */
11260		ipw_down(priv);
11261	}
11262
	/* We tried to restart and configure the device for as long as our
	 * patience could withstand. */
11265	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11266
11267	return -EIO;
11268}
11269
11270static void ipw_bg_up(struct work_struct *work)
11271{
11272	struct ipw_priv *priv =
11273		container_of(work, struct ipw_priv, up);
11274	mutex_lock(&priv->mutex);
11275	ipw_up(priv);
11276	mutex_unlock(&priv->mutex);
11277}
11278
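/* Orderly shutdown of an initialized device: abort any scan in progress,
 * disassociate, wait for those operations to complete, then ask the
 * firmware to disable the card and clear STATUS_INIT. */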
11279static void ipw_deinit(struct ipw_priv *priv)
11280{
11281	int i;
11282
11283	if (priv->status & STATUS_SCANNING) {
11284		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11285		ipw_abort_scan(priv);
11286	}
11287
11288	if (priv->status & STATUS_ASSOCIATED) {
11289		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11290		ipw_disassociate(priv);
11291	}
11292
11293	ipw_led_shutdown(priv);
11294
	/* Wait up to 1s for the status to change to neither scanning nor
	 * associated (disassociation can take a while for a full 802.11
	 * exchange). */
11298	for (i = 1000; i && (priv->status &
11299			     (STATUS_DISASSOCIATING |
11300			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11301		udelay(10);
11302
11303	if (priv->status & (STATUS_DISASSOCIATING |
11304			    STATUS_ASSOCIATED | STATUS_SCANNING))
11305		IPW_DEBUG_INFO("Still associated or scanning...\n");
11306	else
11307		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11308
11309	/* Attempt to disable the card */
11310	ipw_send_card_disable(priv, 0);
11311
11312	priv->status &= ~STATUS_INIT;
11313}
11314
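/* Take the device all the way down: deinitialize it if needed, disable
 * interrupts, stop the NIC and turn the radio LED off.  All status bits
 * except RF-kill (and EXIT_PENDING during module unload) are cleared. */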
11315static void ipw_down(struct ipw_priv *priv)
11316{
11317	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11318
11319	priv->status |= STATUS_EXIT_PENDING;
11320
11321	if (ipw_is_init(priv))
11322		ipw_deinit(priv);
11323
11324	/* Wipe out the EXIT_PENDING status bit if we are not actually
11325	 * exiting the module */
11326	if (!exit_pending)
11327		priv->status &= ~STATUS_EXIT_PENDING;
11328
11329	/* tell the device to stop sending interrupts */
11330	ipw_disable_interrupts(priv);
11331
11332	/* Clear all bits but the RF Kill */
11333	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11334	netif_carrier_off(priv->net_dev);
11335
11336	ipw_stop_nic(priv);
11337
11338	ipw_led_radio_off(priv);
11339}
11340
11341static void ipw_bg_down(struct work_struct *work)
11342{
11343	struct ipw_priv *priv =
11344		container_of(work, struct ipw_priv, down);
11345	mutex_lock(&priv->mutex);
11346	ipw_down(priv);
11347	mutex_unlock(&priv->mutex);
11348}
11349
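/* Translate the selected libipw geography into cfg80211 band structures
 * (channel lists, flags and bitrates for 2.4GHz and 5GHz), advertise the
 * supported cipher suites, and register the wiphy with cfg80211. */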
11350static int ipw_wdev_init(struct net_device *dev)
11351{
11352	int i, rc = 0;
11353	struct ipw_priv *priv = libipw_priv(dev);
11354	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11355	struct wireless_dev *wdev = &priv->ieee->wdev;
11356
11357	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11358
11359	/* fill-out priv->ieee->bg_band */
11360	if (geo->bg_channels) {
11361		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11362
11363		bg_band->band = IEEE80211_BAND_2GHZ;
11364		bg_band->n_channels = geo->bg_channels;
11365		bg_band->channels = kcalloc(geo->bg_channels,
11366					    sizeof(struct ieee80211_channel),
11367					    GFP_KERNEL);
11368		if (!bg_band->channels) {
11369			rc = -ENOMEM;
11370			goto out;
11371		}
11372		/* translate geo->bg to bg_band.channels */
11373		for (i = 0; i < geo->bg_channels; i++) {
11374			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11375			bg_band->channels[i].center_freq = geo->bg[i].freq;
11376			bg_band->channels[i].hw_value = geo->bg[i].channel;
11377			bg_band->channels[i].max_power = geo->bg[i].max_power;
11378			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11379				bg_band->channels[i].flags |=
11380					IEEE80211_CHAN_NO_IR;
11381			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11382				bg_band->channels[i].flags |=
11383					IEEE80211_CHAN_NO_IR;
11384			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11385				bg_band->channels[i].flags |=
11386					IEEE80211_CHAN_RADAR;
11387			/* No equivalent for LIBIPW_CH_80211H_RULES,
11388			   LIBIPW_CH_UNIFORM_SPREADING, or
11389			   LIBIPW_CH_B_ONLY... */
11390		}
11391		/* point at bitrate info */
11392		bg_band->bitrates = ipw2200_bg_rates;
11393		bg_band->n_bitrates = ipw2200_num_bg_rates;
11394
11395		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11396	}
11397
11398	/* fill-out priv->ieee->a_band */
11399	if (geo->a_channels) {
11400		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11401
11402		a_band->band = IEEE80211_BAND_5GHZ;
11403		a_band->n_channels = geo->a_channels;
11404		a_band->channels = kcalloc(geo->a_channels,
11405					   sizeof(struct ieee80211_channel),
11406					   GFP_KERNEL);
11407		if (!a_band->channels) {
11408			rc = -ENOMEM;
11409			goto out;
11410		}
11411		/* translate geo->a to a_band.channels */
11412		for (i = 0; i < geo->a_channels; i++) {
11413			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11414			a_band->channels[i].center_freq = geo->a[i].freq;
11415			a_band->channels[i].hw_value = geo->a[i].channel;
11416			a_band->channels[i].max_power = geo->a[i].max_power;
11417			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11418				a_band->channels[i].flags |=
11419					IEEE80211_CHAN_NO_IR;
11420			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11421				a_band->channels[i].flags |=
11422					IEEE80211_CHAN_NO_IR;
11423			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11424				a_band->channels[i].flags |=
11425					IEEE80211_CHAN_RADAR;
11426			/* No equivalent for LIBIPW_CH_80211H_RULES,
11427			   LIBIPW_CH_UNIFORM_SPREADING, or
11428			   LIBIPW_CH_B_ONLY... */
11429		}
11430		/* point at bitrate info */
11431		a_band->bitrates = ipw2200_a_rates;
11432		a_band->n_bitrates = ipw2200_num_a_rates;
11433
11434		wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11435	}
11436
11437	wdev->wiphy->cipher_suites = ipw_cipher_suites;
11438	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11439
11440	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11441
11442	/* With that information in place, we can now register the wiphy... */
11443	if (wiphy_register(wdev->wiphy))
11444		rc = -EIO;
11445out:
11446	return rc;
11447}
11448
11449/* PCI driver stuff */
11450static const struct pci_device_id card_ids[] = {
11451	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11452	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11453	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11454	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11455	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11456	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11457	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11458	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11459	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11460	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11461	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11462	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11463	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11464	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11465	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11466	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11467	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11468	{PCI_VDEVICE(INTEL, 0x104f), 0},
11469	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11470	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11471	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11472	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11473
11474	/* required last entry */
11475	{0,}
11476};
11477
11478MODULE_DEVICE_TABLE(pci, card_ids);
11479
11480static struct attribute *ipw_sysfs_entries[] = {
11481	&dev_attr_rf_kill.attr,
11482	&dev_attr_direct_dword.attr,
11483	&dev_attr_indirect_byte.attr,
11484	&dev_attr_indirect_dword.attr,
11485	&dev_attr_mem_gpio_reg.attr,
11486	&dev_attr_command_event_reg.attr,
11487	&dev_attr_nic_type.attr,
11488	&dev_attr_status.attr,
11489	&dev_attr_cfg.attr,
11490	&dev_attr_error.attr,
11491	&dev_attr_event_log.attr,
11492	&dev_attr_cmd_log.attr,
11493	&dev_attr_eeprom_delay.attr,
11494	&dev_attr_ucode_version.attr,
11495	&dev_attr_rtc.attr,
11496	&dev_attr_scan_age.attr,
11497	&dev_attr_led.attr,
11498	&dev_attr_speed_scan.attr,
11499	&dev_attr_net_stats.attr,
11500	&dev_attr_channels.attr,
11501#ifdef CONFIG_IPW2200_PROMISCUOUS
11502	&dev_attr_rtap_iface.attr,
11503	&dev_attr_rtap_filter.attr,
11504#endif
11505	NULL
11506};
11507
11508static struct attribute_group ipw_attribute_group = {
11509	.name = NULL,		/* put in device directory */
11510	.attrs = ipw_sysfs_entries,
11511};
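/*
 * These attributes are created directly in the PCI device's sysfs
 * directory; for example (the device address below is illustrative only):
 *
 *   cat /sys/bus/pci/devices/0000:02:03.0/rf_kill
 */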
11512
11513#ifdef CONFIG_IPW2200_PROMISCUOUS
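/*
 * Optional promiscuous "rtap" interface (created when the rtap_iface module
 * parameter is set): a second net_device of type ARPHRD_IEEE80211_RADIOTAP
 * that, while open, asks the firmware to accept all data and management
 * frames unless the adapter is already in monitor mode.  Frames are only
 * delivered on it; anything transmitted is silently dropped.
 */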
11514static int ipw_prom_open(struct net_device *dev)
11515{
11516	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11517	struct ipw_priv *priv = prom_priv->priv;
11518
11519	IPW_DEBUG_INFO("prom dev->open\n");
11520	netif_carrier_off(dev);
11521
11522	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11523		priv->sys_config.accept_all_data_frames = 1;
11524		priv->sys_config.accept_non_directed_frames = 1;
11525		priv->sys_config.accept_all_mgmt_bcpr = 1;
11526		priv->sys_config.accept_all_mgmt_frames = 1;
11527
11528		ipw_send_system_config(priv);
11529	}
11530
11531	return 0;
11532}
11533
11534static int ipw_prom_stop(struct net_device *dev)
11535{
11536	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11537	struct ipw_priv *priv = prom_priv->priv;
11538
11539	IPW_DEBUG_INFO("prom dev->stop\n");
11540
11541	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11542		priv->sys_config.accept_all_data_frames = 0;
11543		priv->sys_config.accept_non_directed_frames = 0;
11544		priv->sys_config.accept_all_mgmt_bcpr = 0;
11545		priv->sys_config.accept_all_mgmt_frames = 0;
11546
11547		ipw_send_system_config(priv);
11548	}
11549
11550	return 0;
11551}
11552
11553static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11554					    struct net_device *dev)
11555{
11556	IPW_DEBUG_INFO("prom dev->xmit\n");
11557	dev_kfree_skb(skb);
11558	return NETDEV_TX_OK;
11559}
11560
11561static const struct net_device_ops ipw_prom_netdev_ops = {
11562	.ndo_open 		= ipw_prom_open,
11563	.ndo_stop		= ipw_prom_stop,
11564	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11565	.ndo_change_mtu		= libipw_change_mtu,
11566	.ndo_set_mac_address 	= eth_mac_addr,
11567	.ndo_validate_addr	= eth_validate_addr,
11568};
11569
11570static int ipw_prom_alloc(struct ipw_priv *priv)
11571{
11572	int rc = 0;
11573
11574	if (priv->prom_net_dev)
11575		return -EPERM;
11576
11577	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11578	if (priv->prom_net_dev == NULL)
11579		return -ENOMEM;
11580
11581	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11582	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11583	priv->prom_priv->priv = priv;
11584
11585	strcpy(priv->prom_net_dev->name, "rtap%d");
11586	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11587
11588	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11589	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11590
11591	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11592	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11593
11594	rc = register_netdev(priv->prom_net_dev);
11595	if (rc) {
11596		free_libipw(priv->prom_net_dev, 1);
11597		priv->prom_net_dev = NULL;
11598		return rc;
11599	}
11600
11601	return 0;
11602}
11603
11604static void ipw_prom_free(struct ipw_priv *priv)
11605{
11606	if (!priv->prom_net_dev)
11607		return;
11608
11609	unregister_netdev(priv->prom_net_dev);
11610	free_libipw(priv->prom_net_dev, 1);
11611
11612	priv->prom_net_dev = NULL;
11613}
11614
11615#endif
11616
11617static const struct net_device_ops ipw_netdev_ops = {
11618	.ndo_open		= ipw_net_open,
11619	.ndo_stop		= ipw_net_stop,
11620	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
11621	.ndo_set_mac_address	= ipw_net_set_mac_address,
11622	.ndo_start_xmit		= libipw_xmit,
11623	.ndo_change_mtu		= libipw_change_mtu,
11624	.ndo_validate_addr	= eth_validate_addr,
11625};
11626
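/*
 * PCI probe: allocate the libipw device, enable the PCI device with a
 * 32-bit DMA mask, disable the PCI retry timeout, map BAR 0, set up the
 * deferred work items and the shared IRQ, hook the libipw callbacks,
 * create the sysfs attribute group, bring the adapter up, and finally
 * register the wiphy and net_device (plus the optional rtap interface).
 */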
11627static int ipw_pci_probe(struct pci_dev *pdev,
11628				   const struct pci_device_id *ent)
11629{
11630	int err = 0;
11631	struct net_device *net_dev;
11632	void __iomem *base;
11633	u32 length, val;
11634	struct ipw_priv *priv;
11635	int i;
11636
11637	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11638	if (net_dev == NULL) {
11639		err = -ENOMEM;
11640		goto out;
11641	}
11642
11643	priv = libipw_priv(net_dev);
11644	priv->ieee = netdev_priv(net_dev);
11645
11646	priv->net_dev = net_dev;
11647	priv->pci_dev = pdev;
11648	ipw_debug_level = debug;
11649	spin_lock_init(&priv->irq_lock);
11650	spin_lock_init(&priv->lock);
11651	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11652		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11653
11654	mutex_init(&priv->mutex);
11655	if (pci_enable_device(pdev)) {
11656		err = -ENODEV;
11657		goto out_free_libipw;
11658	}
11659
11660	pci_set_master(pdev);
11661
11662	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11663	if (!err)
11664		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11665	if (err) {
11666		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11667		goto out_pci_disable_device;
11668	}
11669
11670	pci_set_drvdata(pdev, priv);
11671
11672	err = pci_request_regions(pdev, DRV_NAME);
11673	if (err)
11674		goto out_pci_disable_device;
11675
11676	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11677	 * PCI Tx retries from interfering with C3 CPU state */
11678	pci_read_config_dword(pdev, 0x40, &val);
11679	if ((val & 0x0000ff00) != 0)
11680		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11681
11682	length = pci_resource_len(pdev, 0);
11683	priv->hw_len = length;
11684
11685	base = pci_ioremap_bar(pdev, 0);
11686	if (!base) {
11687		err = -ENODEV;
11688		goto out_pci_release_regions;
11689	}
11690
11691	priv->hw_base = base;
11692	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11693	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11694
11695	err = ipw_setup_deferred_work(priv);
11696	if (err) {
11697		IPW_ERROR("Unable to setup deferred work\n");
11698		goto out_iounmap;
11699	}
11700
11701	ipw_sw_reset(priv, 1);
11702
11703	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11704	if (err) {
11705		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11706		goto out_iounmap;
11707	}
11708
11709	SET_NETDEV_DEV(net_dev, &pdev->dev);
11710
11711	mutex_lock(&priv->mutex);
11712
11713	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11714	priv->ieee->set_security = shim__set_security;
11715	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11716
11717#ifdef CONFIG_IPW2200_QOS
11718	priv->ieee->is_qos_active = ipw_is_qos_active;
11719	priv->ieee->handle_probe_response = ipw_handle_beacon;
11720	priv->ieee->handle_beacon = ipw_handle_probe_response;
11721	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11722#endif				/* CONFIG_IPW2200_QOS */
11723
11724	priv->ieee->perfect_rssi = -20;
11725	priv->ieee->worst_rssi = -85;
11726
11727	net_dev->netdev_ops = &ipw_netdev_ops;
11728	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11729	net_dev->wireless_data = &priv->wireless_data;
11730	net_dev->wireless_handlers = &ipw_wx_handler_def;
11731	net_dev->ethtool_ops = &ipw_ethtool_ops;
11732
11733	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11734	if (err) {
11735		IPW_ERROR("failed to create sysfs device attributes\n");
11736		mutex_unlock(&priv->mutex);
11737		goto out_release_irq;
11738	}
11739
11740	if (ipw_up(priv)) {
11741		mutex_unlock(&priv->mutex);
11742		err = -EIO;
11743		goto out_remove_sysfs;
11744	}
11745
11746	mutex_unlock(&priv->mutex);
11747
11748	err = ipw_wdev_init(net_dev);
11749	if (err) {
11750		IPW_ERROR("failed to register wireless device\n");
11751		goto out_remove_sysfs;
11752	}
11753
11754	err = register_netdev(net_dev);
11755	if (err) {
11756		IPW_ERROR("failed to register network device\n");
11757		goto out_unregister_wiphy;
11758	}
11759
11760#ifdef CONFIG_IPW2200_PROMISCUOUS
11761	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
11763		if (err) {
11764			IPW_ERROR("Failed to register promiscuous network "
11765				  "device (error %d).\n", err);
11766			unregister_netdev(priv->net_dev);
11767			goto out_unregister_wiphy;
11768		}
11769	}
11770#endif
11771
11772	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11773	       "channels, %d 802.11a channels)\n",
11774	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11775	       priv->ieee->geo.a_channels);
11776
11777	return 0;
11778
11779      out_unregister_wiphy:
11780	wiphy_unregister(priv->ieee->wdev.wiphy);
11781	kfree(priv->ieee->a_band.channels);
11782	kfree(priv->ieee->bg_band.channels);
11783      out_remove_sysfs:
11784	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11785      out_release_irq:
11786	free_irq(pdev->irq, priv);
11787      out_iounmap:
11788	iounmap(priv->hw_base);
11789      out_pci_release_regions:
11790	pci_release_regions(pdev);
11791      out_pci_disable_device:
11792	pci_disable_device(pdev);
11793      out_free_libipw:
11794	free_libipw(priv->net_dev, 0);
11795      out:
11796	return err;
11797}
11798
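/*
 * PCI remove: take the device down, remove the sysfs group, unregister the
 * net_device, free the Rx/Tx queues and command log, cancel all deferred
 * work, free the IBSS MAC hash lists, then release the IRQ, MMIO mapping
 * and PCI resources before unregistering the wiphy and freeing the libipw
 * device and cached firmware.
 */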
11799static void ipw_pci_remove(struct pci_dev *pdev)
11800{
11801	struct ipw_priv *priv = pci_get_drvdata(pdev);
11802	struct list_head *p, *q;
11803	int i;
11804
11805	if (!priv)
11806		return;
11807
11808	mutex_lock(&priv->mutex);
11809
11810	priv->status |= STATUS_EXIT_PENDING;
11811	ipw_down(priv);
11812	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11813
11814	mutex_unlock(&priv->mutex);
11815
11816	unregister_netdev(priv->net_dev);
11817
11818	if (priv->rxq) {
11819		ipw_rx_queue_free(priv, priv->rxq);
11820		priv->rxq = NULL;
11821	}
11822	ipw_tx_queue_free(priv);
11823
11824	if (priv->cmdlog) {
11825		kfree(priv->cmdlog);
11826		priv->cmdlog = NULL;
11827	}
11828
	/* make sure all work items are inactive */
11830	cancel_delayed_work_sync(&priv->adhoc_check);
11831	cancel_work_sync(&priv->associate);
11832	cancel_work_sync(&priv->disassociate);
11833	cancel_work_sync(&priv->system_config);
11834	cancel_work_sync(&priv->rx_replenish);
11835	cancel_work_sync(&priv->adapter_restart);
11836	cancel_delayed_work_sync(&priv->rf_kill);
11837	cancel_work_sync(&priv->up);
11838	cancel_work_sync(&priv->down);
11839	cancel_delayed_work_sync(&priv->request_scan);
11840	cancel_delayed_work_sync(&priv->request_direct_scan);
11841	cancel_delayed_work_sync(&priv->request_passive_scan);
11842	cancel_delayed_work_sync(&priv->scan_event);
11843	cancel_delayed_work_sync(&priv->gather_stats);
11844	cancel_work_sync(&priv->abort_scan);
11845	cancel_work_sync(&priv->roam);
11846	cancel_delayed_work_sync(&priv->scan_check);
11847	cancel_work_sync(&priv->link_up);
11848	cancel_work_sync(&priv->link_down);
11849	cancel_delayed_work_sync(&priv->led_link_on);
11850	cancel_delayed_work_sync(&priv->led_link_off);
11851	cancel_delayed_work_sync(&priv->led_act_off);
11852	cancel_work_sync(&priv->merge_networks);
11853
11854	/* Free MAC hash list for ADHOC */
11855	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11856		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11857			list_del(p);
11858			kfree(list_entry(p, struct ipw_ibss_seq, list));
11859		}
11860	}
11861
11862	kfree(priv->error);
11863	priv->error = NULL;
11864
11865#ifdef CONFIG_IPW2200_PROMISCUOUS
11866	ipw_prom_free(priv);
11867#endif
11868
11869	free_irq(pdev->irq, priv);
11870	iounmap(priv->hw_base);
11871	pci_release_regions(pdev);
11872	pci_disable_device(pdev);
11873	/* wiphy_unregister needs to be here, before free_libipw */
11874	wiphy_unregister(priv->ieee->wdev.wiphy);
11875	kfree(priv->ieee->a_band.channels);
11876	kfree(priv->ieee->bg_band.channels);
11877	free_libipw(priv->net_dev, 0);
11878	free_firmware();
11879}
11880
11881#ifdef CONFIG_PM
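/* Legacy PCI power-management hooks: suspend takes the device down and
 * records the time so cached scan results can be aged on resume; resume
 * re-applies the RETRY_TIMEOUT workaround and schedules ipw_up() via the
 * priv->up work item. */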
11882static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11883{
11884	struct ipw_priv *priv = pci_get_drvdata(pdev);
11885	struct net_device *dev = priv->net_dev;
11886
11887	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11888
	/* Take the device down; this powers it off, etc. */
11890	ipw_down(priv);
11891
11892	/* Remove the PRESENT state of the device */
11893	netif_device_detach(dev);
11894
11895	pci_save_state(pdev);
11896	pci_disable_device(pdev);
11897	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11898
11899	priv->suspend_at = get_seconds();
11900
11901	return 0;
11902}
11903
11904static int ipw_pci_resume(struct pci_dev *pdev)
11905{
11906	struct ipw_priv *priv = pci_get_drvdata(pdev);
11907	struct net_device *dev = priv->net_dev;
11908	int err;
11909	u32 val;
11910
11911	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11912
11913	pci_set_power_state(pdev, PCI_D0);
11914	err = pci_enable_device(pdev);
11915	if (err) {
11916		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11917		       dev->name);
11918		return err;
11919	}
11920	pci_restore_state(pdev);
11921
11922	/*
11923	 * Suspend/Resume resets the PCI configuration space, so we have to
11924	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11925	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
11927	 */
11928	pci_read_config_dword(pdev, 0x40, &val);
11929	if ((val & 0x0000ff00) != 0)
11930		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11931
	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed. */
11934	netif_device_attach(dev);
11935
11936	priv->suspend_time = get_seconds() - priv->suspend_at;
11937
11938	/* Bring the device back up */
11939	schedule_work(&priv->up);
11940
11941	return 0;
11942}
11943#endif
11944
11945static void ipw_pci_shutdown(struct pci_dev *pdev)
11946{
11947	struct ipw_priv *priv = pci_get_drvdata(pdev);
11948
	/* Take the device down; this powers it off, etc. */
11950	ipw_down(priv);
11951
11952	pci_disable_device(pdev);
11953}
11954
11955/* driver initialization stuff */
11956static struct pci_driver ipw_driver = {
11957	.name = DRV_NAME,
11958	.id_table = card_ids,
11959	.probe = ipw_pci_probe,
11960	.remove = ipw_pci_remove,
11961#ifdef CONFIG_PM
11962	.suspend = ipw_pci_suspend,
11963	.resume = ipw_pci_resume,
11964#endif
11965	.shutdown = ipw_pci_shutdown,
11966};
11967
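/* Module init/exit: register the PCI driver and expose the debug_level
 * attribute in the driver's sysfs directory; exit reverses both steps. */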
11968static int __init ipw_init(void)
11969{
11970	int ret;
11971
11972	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11973	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11974
11975	ret = pci_register_driver(&ipw_driver);
11976	if (ret) {
11977		IPW_ERROR("Unable to initialize PCI module\n");
11978		return ret;
11979	}
11980
11981	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11982	if (ret) {
11983		IPW_ERROR("Unable to create driver sysfs file\n");
11984		pci_unregister_driver(&ipw_driver);
11985		return ret;
11986	}
11987
11988	return ret;
11989}
11990
11991static void __exit ipw_exit(void)
11992{
11993	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11994	pci_unregister_driver(&ipw_driver);
11995}
11996
11997module_param(disable, int, 0444);
11998MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11999
12000module_param(associate, int, 0444);
12001MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12002
12003module_param(auto_create, int, 0444);
12004MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12005
12006module_param_named(led, led_support, int, 0444);
12007MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12008
12009module_param(debug, int, 0444);
12010MODULE_PARM_DESC(debug, "debug output mask");
12011
12012module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12014
12015#ifdef CONFIG_IPW2200_PROMISCUOUS
12016module_param(rtap_iface, int, 0444);
12017MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12018#endif
12019
12020#ifdef CONFIG_IPW2200_QOS
12021module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
12023
12024module_param(qos_burst_enable, int, 0444);
12025MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12026
12027module_param(qos_no_ack_mask, int, 0444);
12028MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12029
12030module_param(burst_duration_CCK, int, 0444);
12031MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12032
12033module_param(burst_duration_OFDM, int, 0444);
12034MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12035#endif				/* CONFIG_IPW2200_QOS */
12036
12037#ifdef CONFIG_IPW2200_MONITOR
12038module_param_named(mode, network_mode, int, 0444);
12039MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12040#else
12041module_param_named(mode, network_mode, int, 0444);
12042MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12043#endif
12044
12045module_param(bt_coexist, int, 0444);
12046MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12047
12048module_param(hwcrypto, int, 0444);
12049MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12050
12051module_param(cmdlog, int, 0444);
12052MODULE_PARM_DESC(cmdlog,
12053		 "allocate a ring buffer for logging firmware commands");
12054
12055module_param(roaming, int, 0444);
12056MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12057
12058module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
12060
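/*
 * Example module load (values are illustrative only): keep a 32-entry
 * firmware command log, enable hardware crypto, and limit association to
 * channel 6:
 *
 *   modprobe ipw2200 cmdlog=32 hwcrypto=1 channel=6
 */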
12061module_exit(ipw_exit);
12062module_init(ipw_init);
12063