1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/kthread.h>
20#include <linux/printk.h>
21#include <linux/pci_ids.h>
22#include <linux/netdevice.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/mmc/sdio.h>
26#include <linux/mmc/sdio_ids.h>
27#include <linux/mmc/sdio_func.h>
28#include <linux/mmc/card.h>
29#include <linux/semaphore.h>
30#include <linux/firmware.h>
31#include <linux/module.h>
32#include <linux/bcma/bcma.h>
33#include <linux/debugfs.h>
34#include <linux/vmalloc.h>
35#include <linux/platform_data/brcmfmac-sdio.h>
36#include <linux/moduleparam.h>
37#include <asm/unaligned.h>
38#include <defs.h>
39#include <brcmu_wifi.h>
40#include <brcmu_utils.h>
41#include <brcm_hw_ids.h>
42#include <soc.h>
43#include "sdio.h"
44#include "chip.h"
45#include "firmware.h"
46
#define DCMD_RESP_TIMEOUT	2000	/* In milliseconds */
#define CTL_DONE_TIMEOUT	2000	/* In milliseconds */
49
50#ifdef DEBUG
51
52#define BRCMF_TRAP_INFO_SIZE	80
53
54#define CBUF_LEN	(128)
55
/* Device console log buffer size limit */
57#define CONSOLE_BUFFER_MAX	2024
58
59struct rte_log_le {
	__le32 buf;		/* Can't be a pointer on (64-bit) hosts */
61	__le32 buf_size;
62	__le32 idx;
63	char *_buf_compat;	/* Redundant pointer for backward compat. */
64};
65
66struct rte_console {
67	/* Virtual UART
68	 * When there is no UART (e.g. Quickturn),
69	 * the host should write a complete
70	 * input line directly into cbuf and then write
71	 * the length into vcons_in.
72	 * This may also be used when there is a real UART
73	 * (at risk of conflicting with
74	 * the real UART).  vcons_out is currently unused.
75	 */
76	uint vcons_in;
77	uint vcons_out;
78
79	/* Output (logging) buffer
80	 * Console output is written to a ring buffer log_buf at index log_idx.
81	 * The host may read the output when it sees log_idx advance.
82	 * Output will be lost if the output wraps around faster than the host
83	 * polls.
84	 */
85	struct rte_log_le log_le;
86
87	/* Console input line buffer
88	 * Characters are read one at a time into cbuf
89	 * until <CR> is received, then
90	 * the buffer is processed as a command line.
91	 * Also used for virtual UART.
92	 */
93	uint cbuf_idx;
94	char cbuf[CBUF_LEN];
95};
96
97#endif				/* DEBUG */
98#include <chipcommon.h>
99
100#include "bus.h"
101#include "debug.h"
102#include "tracepoint.h"
103
104#define TXQLEN		2048	/* bulk tx queue length */
105#define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
106#define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */
107#define PRIOMASK	7
108
109#define TXRETRIES	2	/* # of retries for tx frames */
110
111#define BRCMF_RXBOUND	50	/* Default for max rx frames in
112				 one scheduling */
113
114#define BRCMF_TXBOUND	20	/* Default for max tx frames in
115				 one scheduling */
116
117#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */
118
119#define MEMBLOCK	2048	/* Block size used for downloading
120				 of dongle image */
121#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
122				 biggest possible glom */
123
124#define BRCMF_FIRSTREAD	(1 << 6)
125
126
127/* SBSDIO_DEVICE_CTL */
128
129/* 1: device will assert busy signal when receiving CMD53 */
130#define SBSDIO_DEVCTL_SETBUSY		0x01
131/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
132#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
133/* 1: mask all interrupts to host except the chipActive (rev 8) */
134#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
135/* 1: isolate internal sdio signals, put external pads in tri-state; requires
136 * sdio bus power cycle to clear (rev 9) */
137#define SBSDIO_DEVCTL_PADS_ISO		0x08
138/* Force SD->SB reset mapping (rev 11) */
139#define SBSDIO_DEVCTL_SB_RST_CTL	0x30
140/*   Determined by CoreControl bit */
141#define SBSDIO_DEVCTL_RST_CORECTL	0x00
142/*   Force backplane reset */
143#define SBSDIO_DEVCTL_RST_BPRESET	0x10
144/*   Force no backplane reset */
145#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20
146
147/* direct(mapped) cis space */
148
149/* MAPPED common CIS address */
150#define SBSDIO_CIS_BASE_COMMON		0x1000
151/* maximum bytes in one CIS */
152#define SBSDIO_CIS_SIZE_LIMIT		0x200
153/* cis offset addr is < 17 bits */
154#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF
155
156/* manfid tuple length, include tuple, link bytes */
157#define SBSDIO_CIS_MANFID_TUPLE_LEN	6
158
159#define CORE_BUS_REG(base, field) \
160		(base + offsetof(struct sdpcmd_regs, field))
161
162/* SDIO function 1 register CHIPCLKCSR */
163/* Force ALP request to backplane */
164#define SBSDIO_FORCE_ALP		0x01
165/* Force HT request to backplane */
166#define SBSDIO_FORCE_HT			0x02
167/* Force ILP request to backplane */
168#define SBSDIO_FORCE_ILP		0x04
169/* Make ALP ready (power up xtal) */
170#define SBSDIO_ALP_AVAIL_REQ		0x08
171/* Make HT ready (power up PLL) */
172#define SBSDIO_HT_AVAIL_REQ		0x10
173/* Squelch clock requests from HW */
174#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
175/* Status: ALP is ready */
176#define SBSDIO_ALP_AVAIL		0x40
177/* Status: HT is ready */
178#define SBSDIO_HT_AVAIL			0x80
179#define SBSDIO_CSR_MASK			0x1F
180#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
181#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
182#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
183#define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
184#define SBSDIO_CLKAV(regval, alponly) \
185	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
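
/*
 * Illustrative readings of CHIPCLKCSR against the macros above (values are
 * examples only): 0xd0 has both SBSDIO_ALP_AVAIL and SBSDIO_HT_AVAIL set, so
 * SBSDIO_CLKAV() is true whether or not alponly is requested; 0x48 has
 * SBSDIO_ALP_AVAIL but not SBSDIO_HT_AVAIL set, so SBSDIO_CLKAV() is true
 * only when the caller passes alponly.
 */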
186
187/* intstatus */
188#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
189#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
190#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
191#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
192#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
193#define I_SMB_SW_SHIFT	0	/* To SB Mail S/W interrupts shift */
194#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
195#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
196#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
197#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
198#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
199#define I_HMB_SW_SHIFT	4	/* To Host Mail S/W interrupts shift */
200#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
201#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
202#define	I_PC		(1 << 10)	/* descriptor error */
203#define	I_PD		(1 << 11)	/* data error */
204#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
205#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
206#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
207#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
208#define	I_RI		(1 << 16)	/* Receive Interrupt */
209#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
210#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
211#define	I_XI		(1 << 24)	/* Transmit Interrupt */
212#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
213#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
214#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
215#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
216#define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
217#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
218#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
219#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
220#define I_DMA		(I_RI | I_XI | I_ERRORS)
221
222/* corecontrol */
223#define CC_CISRDY		(1 << 0)	/* CIS Ready */
224#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
225#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
226#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
227#define CC_XMTDATAAVAIL_MODE	(1 << 4)
228#define CC_XMTDATAAVAIL_CTRL	(1 << 5)
229
230/* SDA_FRAMECTRL */
231#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
232#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
233#define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
234#define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
235
236/*
237 * Software allocation of To SB Mailbox resources
238 */
239
240/* tosbmailbox bits corresponding to intstatus bits */
241#define SMB_NAK		(1 << 0)	/* Frame NAK */
242#define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
243#define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
244#define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */
245
246/* tosbmailboxdata */
247#define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */
248
249/*
250 * Software allocation of To Host Mailbox resources
251 */
252
253/* intstatus bits */
254#define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
255#define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
256#define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
257#define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */
258
259/* tohostmailboxdata */
260#define HMB_DATA_NAKHANDLED	1	/* retransmit NAK'd frame */
261#define HMB_DATA_DEVREADY	2	/* talk to host after enable */
262#define HMB_DATA_FC		4	/* per prio flowcontrol update flag */
263#define HMB_DATA_FWREADY	8	/* fw ready for protocol activity */
264
265#define HMB_DATA_FCDATA_MASK	0xff000000
266#define HMB_DATA_FCDATA_SHIFT	24
267
268#define HMB_DATA_VERSION_MASK	0x00ff0000
269#define HMB_DATA_VERSION_SHIFT	16
270
271/*
272 * Software-defined protocol header
273 */
274
275/* Current protocol version */
276#define SDPCM_PROT_VERSION	4
277
278/*
279 * Shared structure between dongle and the host.
280 * The structure contains pointers to trap or assert information.
281 */
282#define SDPCM_SHARED_VERSION       0x0003
283#define SDPCM_SHARED_VERSION_MASK  0x00FF
284#define SDPCM_SHARED_ASSERT_BUILT  0x0100
285#define SDPCM_SHARED_ASSERT        0x0200
286#define SDPCM_SHARED_TRAP          0x0400
287
288/* Space for header read, limit for data packets */
289#define MAX_HDR_READ	(1 << 6)
290#define MAX_RX_DATASZ	2048
291
292/* Bump up limit on waiting for HT to account for first startup;
293 * if the image is doing a CRC calculation before programming the PMU
294 * for HT availability, it could take a couple hundred ms more, so
 * max out at 1 second (1000000us).
296 */
297#undef PMU_MAX_TRANSITION_DLY
298#define PMU_MAX_TRANSITION_DLY 1000000
299
300/* Value for ChipClockCSR during initial setup */
301#define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
302					SBSDIO_ALP_AVAIL_REQ)
303
304/* Flags for SDH calls */
305#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
306
307#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
308					 * when idle
309					 */
310#define BRCMF_IDLE_INTERVAL	1
311
312#define KSO_WAIT_US 50
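/* With KSO_WAIT_US at 50 us and the 1 s PMU_MAX_TRANSITION_DLY above, this
 * evaluates to 20000 polling attempts in brcmf_sdio_kso_control().
 */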
313#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
314
315/*
316 * Conversion of 802.1D priority to precedence level
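 *
 * Only PRIO_8021D_NONE and PRIO_8021D_BE are remapped (by XOR with 2); all
 * other priorities pass through unchanged. Assuming the usual brcmu_wifi.h
 * values (PRIO_8021D_BE == 0, PRIO_8021D_NONE == 2), the two are swapped so
 * that precedence grows with priority, e.g.:
 *
 *	prio2prec(0) == 2	(best effort)
 *	prio2prec(2) == 0	(none/spare)
 *	prio2prec(5) == 5	(video, unchanged)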
317 */
318static uint prio2prec(u32 prio)
319{
320	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
321	       (prio^2) : prio;
322}
323
324#ifdef DEBUG
325/* Device console log buffer state */
326struct brcmf_console {
327	uint count;		/* Poll interval msec counter */
328	uint log_addr;		/* Log struct address (fixed) */
329	struct rte_log_le log_le;	/* Log struct (host copy) */
330	uint bufsize;		/* Size of log buffer */
331	u8 *buf;		/* Log buffer (host copy) */
332	uint last;		/* Last buffer read index */
333};
334
335struct brcmf_trap_info {
336	__le32		type;
337	__le32		epc;
338	__le32		cpsr;
339	__le32		spsr;
340	__le32		r0;	/* a1 */
341	__le32		r1;	/* a2 */
342	__le32		r2;	/* a3 */
343	__le32		r3;	/* a4 */
344	__le32		r4;	/* v1 */
345	__le32		r5;	/* v2 */
346	__le32		r6;	/* v3 */
347	__le32		r7;	/* v4 */
348	__le32		r8;	/* v5 */
349	__le32		r9;	/* sb/v6 */
350	__le32		r10;	/* sl/v7 */
351	__le32		r11;	/* fp/v8 */
352	__le32		r12;	/* ip */
353	__le32		r13;	/* sp */
354	__le32		r14;	/* lr */
355	__le32		pc;	/* r15 */
356};
357#endif				/* DEBUG */
358
359struct sdpcm_shared {
360	u32 flags;
361	u32 trap_addr;
362	u32 assert_exp_addr;
363	u32 assert_file_addr;
364	u32 assert_line;
365	u32 console_addr;	/* Address of struct rte_console */
366	u32 msgtrace_addr;
367	u8 tag[32];
368	u32 brpt_addr;
369};
370
371struct sdpcm_shared_le {
372	__le32 flags;
373	__le32 trap_addr;
374	__le32 assert_exp_addr;
375	__le32 assert_file_addr;
376	__le32 assert_line;
377	__le32 console_addr;	/* Address of struct rte_console */
378	__le32 msgtrace_addr;
379	u8 tag[32];
380	__le32 brpt_addr;
381};
382
383/* dongle SDIO bus specific header info */
384struct brcmf_sdio_hdrinfo {
385	u8 seq_num;
386	u8 channel;
387	u16 len;
388	u16 len_left;
389	u16 len_nxtfrm;
390	u8 dat_offset;
391	bool lastfrm;
392	u16 tail_pad;
393};
394
395/*
396 * hold counter variables
397 */
398struct brcmf_sdio_count {
399	uint intrcount;		/* Count of device interrupt callbacks */
400	uint lastintrs;		/* Count as of last watchdog timer */
401	uint pollcnt;		/* Count of active polls */
402	uint regfails;		/* Count of R_REG failures */
403	uint tx_sderrs;		/* Count of tx attempts with sd errors */
404	uint fcqueued;		/* Tx packets that got queued */
405	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
	uint rx_toolong;	/* Received frames too long to handle */
407	uint rxc_errors;	/* SDIO errors when reading control frames */
408	uint rx_hdrfail;	/* SDIO errors on header reads */
409	uint rx_badhdr;		/* Bad received headers (roosync?) */
410	uint rx_badseq;		/* Mismatched rx sequence number */
411	uint fc_rcvd;		/* Number of flow-control events received */
412	uint fc_xoff;		/* Number which turned on flow-control */
413	uint fc_xon;		/* Number which turned off flow-control */
414	uint rxglomfail;	/* Failed deglom attempts */
415	uint rxglomframes;	/* Number of glom frames (superframes) */
416	uint rxglompkts;	/* Number of packets from glom frames */
417	uint f2rxhdrs;		/* Number of header reads */
418	uint f2rxdata;		/* Number of frame data reads */
419	uint f2txdata;		/* Number of f2 frame writes */
420	uint f1regdata;		/* Number of f1 register accesses */
	uint tickcnt;		/* Number of times the watchdog was scheduled */
	ulong tx_ctlerrs;	/* Errors sending ctrl frames */
	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
	ulong rx_ctlerrs;	/* Errors processing rx ctrl frames */
425	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
426	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
427};
428
429/* misc chip info needed by some of the routines */
430/* Private data for SDIO bus interaction */
431struct brcmf_sdio {
432	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
433	struct brcmf_chip *ci;	/* Chip info struct */
434
435	u32 hostintmask;	/* Copy of Host Interrupt Mask */
436	atomic_t intstatus;	/* Intstatus bits (events) pending */
437	atomic_t fcstate;	/* State of dongle flow-control */
438
439	uint blocksize;		/* Block size of SDIO transfers */
440	uint roundup;		/* Max roundup limit */
441
	struct pktq txq;	/* Tx queue; its length drives flow-control */
443	u8 flowcontrol;	/* per prio flow control bitmask */
444	u8 tx_seq;		/* Transmit sequence number (next) */
445	u8 tx_max;		/* Maximum transmit sequence allowed */
446
447	u8 *hdrbuf;		/* buffer for handling rx frame */
448	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
449	u8 rx_seq;		/* Receive sequence number (expected) */
450	struct brcmf_sdio_hdrinfo cur_read;
451				/* info of current read frame */
452	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
453	bool rxpending;		/* Data frame pending in dongle */
454
455	uint rxbound;		/* Rx frames to read before resched */
456	uint txbound;		/* Tx frames to send before resched */
457	uint txminmax;
458
459	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
460	struct sk_buff_head glom; /* Packet list for glommed superframe */
461	uint glomerr;		/* Glom packet read errors */
462
463	u8 *rxbuf;		/* Buffer for receiving control packets */
464	uint rxblen;		/* Allocated length of rxbuf */
465	u8 *rxctl;		/* Aligned pointer into rxbuf */
466	u8 *rxctl_orig;		/* pointer for freeing rxctl */
467	uint rxlen;		/* Length of valid data in buffer */
468	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */
469
470	u8 sdpcm_ver;	/* Bus protocol reported by dongle */
471
472	bool intr;		/* Use interrupts */
473	bool poll;		/* Use polling */
474	atomic_t ipend;		/* Device interrupt is pending */
475	uint spurious;		/* Count of spurious interrupts */
476	uint pollrate;		/* Ticks between device polls */
477	uint polltick;		/* Tick counter */
478
479#ifdef DEBUG
480	uint console_interval;
481	struct brcmf_console console;	/* Console output polling support */
482	uint console_addr;	/* Console address from shared struct */
483#endif				/* DEBUG */
484
485	uint clkstate;		/* State of sd and backplane clock(s) */
486	s32 idletime;		/* Control for activity timeout */
487	s32 idlecount;		/* Activity timeout counter */
488	s32 idleclock;		/* How to set bus driver when idle */
489	bool rxflow_mode;	/* Rx flow control mode */
490	bool rxflow;		/* Is rx flow control on */
491	bool alp_only;		/* Don't use HT clock (ALP only) */
492
493	u8 *ctrl_frame_buf;
494	u16 ctrl_frame_len;
495	bool ctrl_frame_stat;
496	int ctrl_frame_err;
497
498	spinlock_t txq_lock;		/* protect bus->txq */
499	wait_queue_head_t ctrl_wait;
500	wait_queue_head_t dcmd_resp_wait;
501
502	struct timer_list timer;
503	struct completion watchdog_wait;
504	struct task_struct *watchdog_tsk;
505	bool wd_timer_valid;
506	uint save_ms;
507
508	struct workqueue_struct *brcmf_wq;
509	struct work_struct datawork;
510	bool dpc_triggered;
511	bool dpc_running;
512
513	bool txoff;		/* Transmit flow-controlled */
514	struct brcmf_sdio_count sdcnt;
515	bool sr_enabled; /* SaveRestore enabled */
516	bool sleeping;
517
518	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
519	bool txglom;		/* host tx glomming enable flag */
520	u16 head_align;		/* buffer pointer alignment */
521	u16 sgentry_align;	/* scatter-gather buffer alignment */
522};
523
524/* clkstate */
525#define CLK_NONE	0
526#define CLK_SDONLY	1
527#define CLK_PENDING	2
528#define CLK_AVAIL	3
529
530#ifdef DEBUG
531static int qcount[NUMPRIO];
532#endif				/* DEBUG */
533
534#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */
535
536#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
537
538/* Retry count for register access failures */
539static const uint retry_limit = 2;
540
541/* Limit on rounding up frames */
542static const uint max_roundup = 512;
543
544#define ALIGNMENT  4
545
546enum brcmf_sdio_frmtype {
547	BRCMF_SDIO_FT_NORMAL,
548	BRCMF_SDIO_FT_SUPER,
549	BRCMF_SDIO_FT_SUB,
550};
551
552#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
553
554/* SDIO Pad drive strength to select value mappings */
555struct sdiod_drive_str {
556	u8 strength;	/* Pad Drive Strength in mA */
557	u8 sel;		/* Chip-specific select value */
558};
559
560/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
561static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
562	{32, 0x6},
563	{26, 0x7},
564	{22, 0x4},
565	{16, 0x5},
566	{12, 0x2},
567	{8, 0x3},
568	{4, 0x0},
569	{0, 0x1}
570};
571
572/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
573static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
574	{6, 0x7},
575	{5, 0x6},
576	{4, 0x5},
577	{3, 0x4},
578	{2, 0x2},
579	{1, 0x1},
580	{0, 0x0}
581};
582
583/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
584static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
585	{3, 0x3},
586	{2, 0x2},
587	{1, 0x1},
	{0, 0x0}
};
589
590/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
591static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
592	{16, 0x7},
593	{12, 0x5},
594	{8,  0x3},
595	{4,  0x1}
596};
597
598#define BCM43143_FIRMWARE_NAME		"brcm/brcmfmac43143-sdio.bin"
599#define BCM43143_NVRAM_NAME		"brcm/brcmfmac43143-sdio.txt"
600#define BCM43241B0_FIRMWARE_NAME	"brcm/brcmfmac43241b0-sdio.bin"
601#define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
602#define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
603#define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
604#define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
605#define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
606#define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
607#define BCM4330_NVRAM_NAME		"brcm/brcmfmac4330-sdio.txt"
608#define BCM4334_FIRMWARE_NAME		"brcm/brcmfmac4334-sdio.bin"
609#define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
610#define BCM43340_FIRMWARE_NAME		"brcm/brcmfmac43340-sdio.bin"
611#define BCM43340_NVRAM_NAME		"brcm/brcmfmac43340-sdio.txt"
612#define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
613#define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
614#define BCM43362_FIRMWARE_NAME		"brcm/brcmfmac43362-sdio.bin"
615#define BCM43362_NVRAM_NAME		"brcm/brcmfmac43362-sdio.txt"
616#define BCM4339_FIRMWARE_NAME		"brcm/brcmfmac4339-sdio.bin"
617#define BCM4339_NVRAM_NAME		"brcm/brcmfmac4339-sdio.txt"
618#define BCM43430_FIRMWARE_NAME		"brcm/brcmfmac43430-sdio.bin"
619#define BCM43430_NVRAM_NAME		"brcm/brcmfmac43430-sdio.txt"
620#define BCM43455_FIRMWARE_NAME		"brcm/brcmfmac43455-sdio.bin"
621#define BCM43455_NVRAM_NAME		"brcm/brcmfmac43455-sdio.txt"
622#define BCM4354_FIRMWARE_NAME		"brcm/brcmfmac4354-sdio.bin"
623#define BCM4354_NVRAM_NAME		"brcm/brcmfmac4354-sdio.txt"
624
625MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
626MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
627MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
628MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
629MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
630MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
631MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
632MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
633MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
634MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
635MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
636MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
637MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME);
638MODULE_FIRMWARE(BCM43340_NVRAM_NAME);
639MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
640MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
641MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
642MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
643MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
644MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
645MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
646MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
647MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
648MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
649MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
650MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
651
652struct brcmf_firmware_names {
653	u32 chipid;
654	u32 revmsk;
655	const char *bin;
656	const char *nv;
657};
658
659enum brcmf_firmware_type {
660	BRCMF_FIRMWARE_BIN,
661	BRCMF_FIRMWARE_NVRAM
662};
663
664#define BRCMF_FIRMWARE_NVRAM(name) \
665	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
666
667static const struct brcmf_firmware_names brcmf_fwname_data[] = {
668	{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
669	{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
670	{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
671	{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
672	{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
673	{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
674	{ BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) },
675	{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
676	{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
677	{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
678	{ BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
679	{ BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
680	{ BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
681};
682
683static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
684				  struct brcmf_sdio_dev *sdiodev)
685{
686	int i;
687	char end;
688
689	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
690		if (brcmf_fwname_data[i].chipid == ci->chip &&
691		    brcmf_fwname_data[i].revmsk & BIT(ci->chiprev))
692			break;
693	}
694
695	if (i == ARRAY_SIZE(brcmf_fwname_data)) {
696		brcmf_err("Unknown chipid %d [%d]\n", ci->chip, ci->chiprev);
697		return -ENODEV;
698	}
699
700	/* check if firmware path is provided by module parameter */
701	if (brcmf_firmware_path[0] != '\0') {
702		strlcpy(sdiodev->fw_name, brcmf_firmware_path,
703			sizeof(sdiodev->fw_name));
704		strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
705			sizeof(sdiodev->nvram_name));
706
707		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
708		if (end != '/') {
709			strlcat(sdiodev->fw_name, "/",
710				sizeof(sdiodev->fw_name));
711			strlcat(sdiodev->nvram_name, "/",
712				sizeof(sdiodev->nvram_name));
713		}
714	}
715	strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
716		sizeof(sdiodev->fw_name));
717	strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
718		sizeof(sdiodev->nvram_name));
719
720	return 0;
721}
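
/* Example of the resulting names (hypothetical module parameter value): with
 * brcmf_firmware_path set to "/lib/firmware/updates" and a BCM43143, the code
 * above yields fw_name "/lib/firmware/updates/brcm/brcmfmac43143-sdio.bin"
 * and nvram_name "/lib/firmware/updates/brcm/brcmfmac43143-sdio.txt".
 */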
722
723static void pkt_align(struct sk_buff *p, int len, int align)
724{
725	uint datalign;
726	datalign = (unsigned long)(p->data);
727	datalign = roundup(datalign, (align)) - datalign;
728	if (datalign)
729		skb_pull(p, datalign);
730	__skb_trim(p, len);
731}
732
/* Check whether the dongle has offered tx window (sequence-number credits) */
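/*
 * Worked example of the 8-bit wrap-around arithmetic below (illustrative
 * values only): with tx_max == 0x03 and tx_seq == 0xfe, (u8)(0x03 - 0xfe) is
 * 0x05, which is non-zero and has bit 7 clear, so up to five more frames may
 * be sent; a difference with bit 7 set would indicate a bogus window and is
 * treated as closed.
 */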
734static bool data_ok(struct brcmf_sdio *bus)
735{
736	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
737	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
738}
739
740/*
741 * Reads a register in the SDIO hardware block. This block occupies a series of
 * addresses on the 32-bit backplane bus.
743 */
744static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
745{
746	struct brcmf_core *core;
747	int ret;
748
749	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
750	*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
751
752	return ret;
753}
754
755static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
756{
757	struct brcmf_core *core;
758	int ret;
759
760	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
761	brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
762
763	return ret;
764}
765
766static int
767brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
768{
769	u8 wr_val = 0, rd_val, cmp_val, bmask;
770	int err = 0;
771	int try_cnt = 0;
772
773	brcmf_dbg(TRACE, "Enter: on=%d\n", on);
774
775	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
776	/* 1st KSO write goes to AOS wake up core if device is asleep  */
777	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
778			  wr_val, &err);
779
780	if (on) {
781		/* device WAKEUP through KSO:
782		 * write bit 0 & read back until
783		 * both bits 0 (kso bit) & 1 (dev on status) are set
784		 */
785		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
786			  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
787		bmask = cmp_val;
788		usleep_range(2000, 3000);
789	} else {
790		/* Put device to sleep, turn off KSO */
791		cmp_val = 0;
792		/* only check for bit0, bit1(dev on status) may not
793		 * get cleared right away
794		 */
795		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
796	}
797
798	do {
799		/* reliable KSO bit set/clr:
800		 * the sdiod sleep write access is synced to PMU 32khz clk
801		 * just one write attempt may fail,
802		 * read it back until it matches written value
803		 */
804		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
805					   &err);
806		if (((rd_val & bmask) == cmp_val) && !err)
807			break;
808
809		udelay(KSO_WAIT_US);
810		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
811				  wr_val, &err);
812	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
813
814	if (try_cnt > 2)
815		brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
816			  rd_val, err);
817
818	if (try_cnt > MAX_KSO_ATTEMPTS)
819		brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
820
821	return err;
822}
823
824#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
825
826/* Turn backplane clock on or off */
827static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
828{
829	int err;
830	u8 clkctl, clkreq, devctl;
831	unsigned long timeout;
832
833	brcmf_dbg(SDIO, "Enter\n");
834
835	clkctl = 0;
836
837	if (bus->sr_enabled) {
838		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
839		return 0;
840	}
841
842	if (on) {
843		/* Request HT Avail */
844		clkreq =
845		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
846
847		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
848				  clkreq, &err);
849		if (err) {
850			brcmf_err("HT Avail request error: %d\n", err);
851			return -EBADE;
852		}
853
854		/* Check current status */
855		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
856					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
857		if (err) {
858			brcmf_err("HT Avail read error: %d\n", err);
859			return -EBADE;
860		}
861
862		/* Go to pending and await interrupt if appropriate */
863		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
864			/* Allow only clock-available interrupt */
865			devctl = brcmf_sdiod_regrb(bus->sdiodev,
866						   SBSDIO_DEVICE_CTL, &err);
867			if (err) {
868				brcmf_err("Devctl error setting CA: %d\n",
869					  err);
870				return -EBADE;
871			}
872
873			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
874			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
875					  devctl, &err);
876			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
877			bus->clkstate = CLK_PENDING;
878
879			return 0;
880		} else if (bus->clkstate == CLK_PENDING) {
881			/* Cancel CA-only interrupt filter */
882			devctl = brcmf_sdiod_regrb(bus->sdiodev,
883						   SBSDIO_DEVICE_CTL, &err);
884			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
885			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
886					  devctl, &err);
887		}
888
889		/* Otherwise, wait here (polling) for HT Avail */
890		timeout = jiffies +
891			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
892		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
893			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
894						   SBSDIO_FUNC1_CHIPCLKCSR,
895						   &err);
896			if (time_after(jiffies, timeout))
897				break;
898			else
899				usleep_range(5000, 10000);
900		}
901		if (err) {
902			brcmf_err("HT Avail request error: %d\n", err);
903			return -EBADE;
904		}
905		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
906			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
907				  PMU_MAX_TRANSITION_DLY, clkctl);
908			return -EBADE;
909		}
910
911		/* Mark clock available */
912		bus->clkstate = CLK_AVAIL;
913		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
914
915#if defined(DEBUG)
916		if (!bus->alp_only) {
917			if (SBSDIO_ALPONLY(clkctl))
918				brcmf_err("HT Clock should be on\n");
919		}
920#endif				/* defined (DEBUG) */
921
922	} else {
923		clkreq = 0;
924
925		if (bus->clkstate == CLK_PENDING) {
926			/* Cancel CA-only interrupt filter */
927			devctl = brcmf_sdiod_regrb(bus->sdiodev,
928						   SBSDIO_DEVICE_CTL, &err);
929			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
930			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
931					  devctl, &err);
932		}
933
934		bus->clkstate = CLK_SDONLY;
935		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
936				  clkreq, &err);
937		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
938		if (err) {
939			brcmf_err("Failed access turning clock off: %d\n",
940				  err);
941			return -EBADE;
942		}
943	}
944	return 0;
945}
946
947/* Change idle/active SD state */
948static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
949{
950	brcmf_dbg(SDIO, "Enter\n");
951
952	if (on)
953		bus->clkstate = CLK_SDONLY;
954	else
955		bus->clkstate = CLK_NONE;
956
957	return 0;
958}
959
960/* Transition SD and backplane clock readiness */
961static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
962{
963#ifdef DEBUG
964	uint oldstate = bus->clkstate;
965#endif				/* DEBUG */
966
967	brcmf_dbg(SDIO, "Enter\n");
968
969	/* Early exit if we're already there */
970	if (bus->clkstate == target)
971		return 0;
972
973	switch (target) {
974	case CLK_AVAIL:
975		/* Make sure SD clock is available */
976		if (bus->clkstate == CLK_NONE)
977			brcmf_sdio_sdclk(bus, true);
978		/* Now request HT Avail on the backplane */
979		brcmf_sdio_htclk(bus, true, pendok);
980		break;
981
982	case CLK_SDONLY:
983		/* Remove HT request, or bring up SD clock */
984		if (bus->clkstate == CLK_NONE)
985			brcmf_sdio_sdclk(bus, true);
986		else if (bus->clkstate == CLK_AVAIL)
987			brcmf_sdio_htclk(bus, false, false);
988		else
989			brcmf_err("request for %d -> %d\n",
990				  bus->clkstate, target);
991		break;
992
993	case CLK_NONE:
994		/* Make sure to remove HT request */
995		if (bus->clkstate == CLK_AVAIL)
996			brcmf_sdio_htclk(bus, false, false);
997		/* Now remove the SD clock */
998		brcmf_sdio_sdclk(bus, false);
999		break;
1000	}
1001#ifdef DEBUG
1002	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
1003#endif				/* DEBUG */
1004
1005	return 0;
1006}
1007
1008static int
1009brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
1010{
1011	int err = 0;
1012	u8 clkcsr;
1013
1014	brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
1015		  (sleep ? "SLEEP" : "WAKE"),
1016		  (bus->sleeping ? "SLEEP" : "WAKE"));
1017
1018	/* If SR is enabled control bus state with KSO */
1019	if (bus->sr_enabled) {
1020		/* Done if we're already in the requested state */
1021		if (sleep == bus->sleeping)
1022			goto end;
1023
1024		/* Going to sleep */
1025		if (sleep) {
1026			clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
1027						   SBSDIO_FUNC1_CHIPCLKCSR,
1028						   &err);
1029			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
1030				brcmf_dbg(SDIO, "no clock, set ALP\n");
1031				brcmf_sdiod_regwb(bus->sdiodev,
1032						  SBSDIO_FUNC1_CHIPCLKCSR,
1033						  SBSDIO_ALP_AVAIL_REQ, &err);
1034			}
1035			err = brcmf_sdio_kso_control(bus, false);
1036		} else {
1037			err = brcmf_sdio_kso_control(bus, true);
1038		}
1039		if (err) {
1040			brcmf_err("error while changing bus sleep state %d\n",
1041				  err);
1042			goto done;
1043		}
1044	}
1045
1046end:
1047	/* control clocks */
1048	if (sleep) {
1049		if (!bus->sr_enabled)
1050			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
1051	} else {
1052		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
1053		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
1054	}
1055	bus->sleeping = sleep;
1056	brcmf_dbg(SDIO, "new state %s\n",
1057		  (sleep ? "SLEEP" : "WAKE"));
1058done:
1059	brcmf_dbg(SDIO, "Exit: err=%d\n", err);
1060	return err;
1061
1062}
1063
1064#ifdef DEBUG
1065static inline bool brcmf_sdio_valid_shared_address(u32 addr)
1066{
1067	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
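
/*
 * Illustrative note: per the comment in brcmf_sdio_readshared() below, the
 * last word of RAM initially holds the NVRAM length token, whose lower and
 * upper 16-bit halves are bitwise complements of each other (e.g.
 * 0xf7ff0800). The check above rejects zero and any such complement pattern,
 * so only a word that firmware has overwritten with a real sdpcm_shared
 * address is accepted.
 */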
1068}
1069
1070static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1071				 struct sdpcm_shared *sh)
1072{
1073	u32 addr = 0;
1074	int rv;
1075	u32 shaddr = 0;
1076	struct sdpcm_shared_le sh_le;
1077	__le32 addr_le;
1078
1079	sdio_claim_host(bus->sdiodev->func[1]);
1080	brcmf_sdio_bus_sleep(bus, false, false);
1081
1082	/*
1083	 * Read last word in socram to determine
1084	 * address of sdpcm_shared structure
1085	 */
1086	shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
1087	if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
1088		shaddr -= bus->ci->srsize;
1089	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
1090			       (u8 *)&addr_le, 4);
1091	if (rv < 0)
1092		goto fail;
1093
1094	/*
1095	 * Check if addr is valid.
1096	 * NVRAM length at the end of memory should have been overwritten.
1097	 */
1098	addr = le32_to_cpu(addr_le);
1099	if (!brcmf_sdio_valid_shared_address(addr)) {
1100		brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
1101		rv = -EINVAL;
1102		goto fail;
1103	}
1104
1105	brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
1106
1107	/* Read hndrte_shared structure */
1108	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
1109			       sizeof(struct sdpcm_shared_le));
1110	if (rv < 0)
1111		goto fail;
1112
1113	sdio_release_host(bus->sdiodev->func[1]);
1114
1115	/* Endianness */
1116	sh->flags = le32_to_cpu(sh_le.flags);
1117	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
1118	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
1119	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
1120	sh->assert_line = le32_to_cpu(sh_le.assert_line);
1121	sh->console_addr = le32_to_cpu(sh_le.console_addr);
1122	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
1123
1124	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
1125		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
1126			  SDPCM_SHARED_VERSION,
1127			  sh->flags & SDPCM_SHARED_VERSION_MASK);
1128		return -EPROTO;
1129	}
1130	return 0;
1131
1132fail:
1133	brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
1134		  rv, addr);
1135	sdio_release_host(bus->sdiodev->func[1]);
1136	return rv;
1137}
1138
1139static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1140{
1141	struct sdpcm_shared sh;
1142
1143	if (brcmf_sdio_readshared(bus, &sh) == 0)
1144		bus->console_addr = sh.console_addr;
1145}
1146#else
1147static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1148{
1149}
1150#endif /* DEBUG */
1151
1152static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
1153{
1154	u32 intstatus = 0;
1155	u32 hmb_data;
1156	u8 fcbits;
1157	int ret;
1158
1159	brcmf_dbg(SDIO, "Enter\n");
1160
1161	/* Read mailbox data and ack that we did so */
1162	ret = r_sdreg32(bus, &hmb_data,
1163			offsetof(struct sdpcmd_regs, tohostmailboxdata));
1164
1165	if (ret == 0)
1166		w_sdreg32(bus, SMB_INT_ACK,
1167			  offsetof(struct sdpcmd_regs, tosbmailbox));
1168	bus->sdcnt.f1regdata += 2;
1169
1170	/* Dongle recomposed rx frames, accept them again */
1171	if (hmb_data & HMB_DATA_NAKHANDLED) {
1172		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
1173			  bus->rx_seq);
1174		if (!bus->rxskip)
1175			brcmf_err("unexpected NAKHANDLED!\n");
1176
1177		bus->rxskip = false;
1178		intstatus |= I_HMB_FRAME_IND;
1179	}
1180
1181	/*
1182	 * DEVREADY does not occur with gSPI.
1183	 */
1184	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
1185		bus->sdpcm_ver =
1186		    (hmb_data & HMB_DATA_VERSION_MASK) >>
1187		    HMB_DATA_VERSION_SHIFT;
1188		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
			brcmf_err("Version mismatch, dongle reports %d, expecting %d\n",
				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
1192		else
1193			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
1194				  bus->sdpcm_ver);
1195
1196		/*
1197		 * Retrieve console state address now that firmware should have
1198		 * updated it.
1199		 */
1200		brcmf_sdio_get_console_addr(bus);
1201	}
1202
1203	/*
	 * Flow Control has been moved into the RX headers and this out-of-band
	 * method isn't used any more; it is handled here only to stay backward
	 * compatible with older dongles.
1207	 */
1208	if (hmb_data & HMB_DATA_FC) {
1209		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
1210							HMB_DATA_FCDATA_SHIFT;
1211
1212		if (fcbits & ~bus->flowcontrol)
1213			bus->sdcnt.fc_xoff++;
1214
1215		if (bus->flowcontrol & ~fcbits)
1216			bus->sdcnt.fc_xon++;
1217
1218		bus->sdcnt.fc_rcvd++;
1219		bus->flowcontrol = fcbits;
1220	}
1221
1222	/* Shouldn't be any others */
1223	if (hmb_data & ~(HMB_DATA_DEVREADY |
1224			 HMB_DATA_NAKHANDLED |
1225			 HMB_DATA_FC |
1226			 HMB_DATA_FWREADY |
1227			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
1228		brcmf_err("Unknown mailbox data content: 0x%02x\n",
1229			  hmb_data);
1230
1231	return intstatus;
1232}
1233
1234static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1235{
1236	uint retries = 0;
1237	u16 lastrbc;
1238	u8 hi, lo;
1239	int err;
1240
1241	brcmf_err("%sterminate frame%s\n",
1242		  abort ? "abort command, " : "",
1243		  rtx ? ", send NAK" : "");
1244
1245	if (abort)
1246		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
1247
1248	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1249			  SFC_RF_TERM, &err);
1250	bus->sdcnt.f1regdata++;
1251
1252	/* Wait until the packet has been flushed (device/FIFO stable) */
1253	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
1254		hi = brcmf_sdiod_regrb(bus->sdiodev,
1255				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
1256		lo = brcmf_sdiod_regrb(bus->sdiodev,
1257				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
1258		bus->sdcnt.f1regdata += 2;
1259
1260		if ((hi == 0) && (lo == 0))
1261			break;
1262
1263		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1264			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1265				  lastrbc, (hi << 8) + lo);
1266		}
1267		lastrbc = (hi << 8) + lo;
1268	}
1269
1270	if (!retries)
1271		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1272	else
1273		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1274
1275	if (rtx) {
1276		bus->sdcnt.rxrtx++;
1277		err = w_sdreg32(bus, SMB_NAK,
1278				offsetof(struct sdpcmd_regs, tosbmailbox));
1279
1280		bus->sdcnt.f1regdata++;
1281		if (err == 0)
1282			bus->rxskip = true;
1283	}
1284
1285	/* Clear partial in any case */
1286	bus->cur_read.len = 0;
1287}
1288
1289static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
1290{
1291	struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
1292	u8 i, hi, lo;
1293
1294	/* On failure, abort the command and terminate the frame */
1295	brcmf_err("sdio error, abort command and terminate frame\n");
1296	bus->sdcnt.tx_sderrs++;
1297
1298	brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
1299	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
1300	bus->sdcnt.f1regdata++;
1301
1302	for (i = 0; i < 3; i++) {
1303		hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1304		lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1305		bus->sdcnt.f1regdata += 2;
1306		if ((hi == 0) && (lo == 0))
1307			break;
1308	}
1309}
1310
1311/* return total length of buffer chain */
1312static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
1313{
1314	struct sk_buff *p;
1315	uint total;
1316
1317	total = 0;
1318	skb_queue_walk(&bus->glom, p)
1319		total += p->len;
1320	return total;
1321}
1322
1323static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
1324{
1325	struct sk_buff *cur, *next;
1326
1327	skb_queue_walk_safe(&bus->glom, cur, next) {
1328		skb_unlink(cur, &bus->glom);
1329		brcmu_pkt_buf_free_skb(cur);
1330	}
1331}
1332
1333/**
1334 * brcmfmac sdio bus specific header
 * This is the lowest layer header wrapped on the packets transmitted between
 * the host and the WiFi dongle; it carries information needed by the SDIO
 * core and the firmware
1338 *
1339 * It consists of 3 parts: hardware header, hardware extension header and
1340 * software header
1341 * hardware header (frame tag) - 4 bytes
1342 * Byte 0~1: Frame length
1343 * Byte 2~3: Checksum, bit-wise inverse of frame length
1344 * hardware extension header - 8 bytes
1345 * Tx glom mode only, N/A for Rx or normal Tx
1346 * Byte 0~1: Packet length excluding hw frame tag
1347 * Byte 2: Reserved
1348 * Byte 3: Frame flags, bit 0: last frame indication
1349 * Byte 4~5: Reserved
1350 * Byte 6~7: Tail padding length
1351 * software header - 8 bytes
1352 * Byte 0: Rx/Tx sequence number
1353 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1354 * Byte 2: Length of next data frame, reserved for Tx
1355 * Byte 3: Data offset
1356 * Byte 4: Flow control bits, reserved for Tx
 * Byte 5: Maximum sequence number allowed by firmware for Tx; N/A in Tx packets
1358 * Byte 6~7: Reserved
1359 */
1360#define SDPCM_HWHDR_LEN			4
1361#define SDPCM_HWEXT_LEN			8
1362#define SDPCM_SWHDR_LEN			8
1363#define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1364/* software header */
1365#define SDPCM_SEQ_MASK			0x000000ff
1366#define SDPCM_SEQ_WRAP			256
1367#define SDPCM_CHANNEL_MASK		0x00000f00
1368#define SDPCM_CHANNEL_SHIFT		8
1369#define SDPCM_CONTROL_CHANNEL		0	/* Control */
#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
1371#define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
1372#define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
1373#define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
1374#define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
1375#define SDPCM_NEXTLEN_MASK		0x00ff0000
1376#define SDPCM_NEXTLEN_SHIFT		16
1377#define SDPCM_DOFFSET_MASK		0xff000000
1378#define SDPCM_DOFFSET_SHIFT		24
1379#define SDPCM_FCMASK_MASK		0x000000ff
1380#define SDPCM_WINDOW_MASK		0x0000ff00
1381#define SDPCM_WINDOW_SHIFT		8
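
/*
 * Example decode of the first software-header word using the masks above
 * (illustrative value): 0x1208020f has sequence number 0x0f, channel 2
 * (data), next-frame length 0x08 (in units of 16 bytes, see the << 4 in
 * brcmf_sdio_hdparse()) and data offset 0x12.
 */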
1382
1383static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1384{
1385	u32 hdrvalue;
1386	hdrvalue = *(u32 *)swheader;
1387	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1388}
1389
1390static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1391			      struct brcmf_sdio_hdrinfo *rd,
1392			      enum brcmf_sdio_frmtype type)
1393{
1394	u16 len, checksum;
1395	u8 rx_seq, fc, tx_seq_max;
1396	u32 swheader;
1397
1398	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
1399
1400	/* hw header */
1401	len = get_unaligned_le16(header);
1402	checksum = get_unaligned_le16(header + sizeof(u16));
1403	/* All zero means no more to read */
1404	if (!(len | checksum)) {
1405		bus->rxpending = false;
1406		return -ENODATA;
1407	}
1408	if ((u16)(~(len ^ checksum))) {
1409		brcmf_err("HW header checksum error\n");
1410		bus->sdcnt.rx_badhdr++;
1411		brcmf_sdio_rxfail(bus, false, false);
1412		return -EIO;
1413	}
1414	if (len < SDPCM_HDRLEN) {
1415		brcmf_err("HW header length error\n");
1416		return -EPROTO;
1417	}
1418	if (type == BRCMF_SDIO_FT_SUPER &&
1419	    (roundup(len, bus->blocksize) != rd->len)) {
1420		brcmf_err("HW superframe header length error\n");
1421		return -EPROTO;
1422	}
1423	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1424		brcmf_err("HW subframe header length error\n");
1425		return -EPROTO;
1426	}
1427	rd->len = len;
1428
1429	/* software header */
1430	header += SDPCM_HWHDR_LEN;
1431	swheader = le32_to_cpu(*(__le32 *)header);
1432	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1433		brcmf_err("Glom descriptor found in superframe head\n");
1434		rd->len = 0;
1435		return -EINVAL;
1436	}
1437	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1438	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1439	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1440	    type != BRCMF_SDIO_FT_SUPER) {
1441		brcmf_err("HW header length too long\n");
1442		bus->sdcnt.rx_toolong++;
1443		brcmf_sdio_rxfail(bus, false, false);
1444		rd->len = 0;
1445		return -EPROTO;
1446	}
1447	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1448		brcmf_err("Wrong channel for superframe\n");
1449		rd->len = 0;
1450		return -EINVAL;
1451	}
1452	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1453	    rd->channel != SDPCM_EVENT_CHANNEL) {
1454		brcmf_err("Wrong channel for subframe\n");
1455		rd->len = 0;
1456		return -EINVAL;
1457	}
1458	rd->dat_offset = brcmf_sdio_getdatoffset(header);
1459	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1460		brcmf_err("seq %d: bad data offset\n", rx_seq);
1461		bus->sdcnt.rx_badhdr++;
1462		brcmf_sdio_rxfail(bus, false, false);
1463		rd->len = 0;
1464		return -ENXIO;
1465	}
1466	if (rd->seq_num != rx_seq) {
1467		brcmf_err("seq %d: sequence number error, expect %d\n",
1468			  rx_seq, rd->seq_num);
1469		bus->sdcnt.rx_badseq++;
1470		rd->seq_num = rx_seq;
1471	}
	/* no need to check the rest for subframes */
1473	if (type == BRCMF_SDIO_FT_SUB)
1474		return 0;
1475	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1476	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
		/* only warn for non-glom packets */
1478		if (rd->channel != SDPCM_GLOM_CHANNEL)
1479			brcmf_err("seq %d: next length error\n", rx_seq);
1480		rd->len_nxtfrm = 0;
1481	}
1482	swheader = le32_to_cpu(*(__le32 *)(header + 4));
1483	fc = swheader & SDPCM_FCMASK_MASK;
1484	if (bus->flowcontrol != fc) {
1485		if (~bus->flowcontrol & fc)
1486			bus->sdcnt.fc_xoff++;
1487		if (bus->flowcontrol & ~fc)
1488			bus->sdcnt.fc_xon++;
1489		bus->sdcnt.fc_rcvd++;
1490		bus->flowcontrol = fc;
1491	}
1492	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1493	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1494		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1495		tx_seq_max = bus->tx_seq + 2;
1496	}
1497	bus->tx_max = tx_seq_max;
1498
1499	return 0;
1500}
1501
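/*
 * Example (illustrative): a 64-byte frame produces the little-endian hardware
 * header bytes 40 00 bf ff - the length 0x0040 followed by its bit-wise
 * inverse 0xffbf, which is what brcmf_sdio_hdparse() verifies by requiring
 * (u16)(~(len ^ checksum)) to be zero.
 */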
1502static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1503{
1504	*(__le16 *)header = cpu_to_le16(frm_length);
1505	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1506}
1507
1508static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1509			      struct brcmf_sdio_hdrinfo *hd_info)
1510{
1511	u32 hdrval;
1512	u8 hdr_offset;
1513
1514	brcmf_sdio_update_hwhdr(header, hd_info->len);
1515	hdr_offset = SDPCM_HWHDR_LEN;
1516
1517	if (bus->txglom) {
1518		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
1519		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1520		hdrval = (u16)hd_info->tail_pad << 16;
1521		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
1522		hdr_offset += SDPCM_HWEXT_LEN;
1523	}
1524
1525	hdrval = hd_info->seq_num;
1526	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1527		  SDPCM_CHANNEL_MASK;
1528	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1529		  SDPCM_DOFFSET_MASK;
1530	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1531	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
1532	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
1533}
1534
1535static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1536{
1537	u16 dlen, totlen;
1538	u8 *dptr, num = 0;
1539	u16 sublen;
1540	struct sk_buff *pfirst, *pnext;
1541
1542	int errcode;
1543	u8 doff, sfdoff;
1544
1545	struct brcmf_sdio_hdrinfo rd_new;
1546
1547	/* If packets, issue read(s) and send up packet chain */
1548	/* Return sequence numbers consumed? */
1549
1550	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1551		  bus->glomd, skb_peek(&bus->glom));
1552
1553	/* If there's a descriptor, generate the packet chain */
1554	if (bus->glomd) {
1555		pfirst = pnext = NULL;
1556		dlen = (u16) (bus->glomd->len);
1557		dptr = bus->glomd->data;
1558		if (!dlen || (dlen & 1)) {
1559			brcmf_err("bad glomd len(%d), ignore descriptor\n",
1560				  dlen);
1561			dlen = 0;
1562		}
1563
1564		for (totlen = num = 0; dlen; num++) {
1565			/* Get (and move past) next length */
1566			sublen = get_unaligned_le16(dptr);
1567			dlen -= sizeof(u16);
1568			dptr += sizeof(u16);
1569			if ((sublen < SDPCM_HDRLEN) ||
1570			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1571				brcmf_err("descriptor len %d bad: %d\n",
1572					  num, sublen);
1573				pnext = NULL;
1574				break;
1575			}
1576			if (sublen % bus->sgentry_align) {
1577				brcmf_err("sublen %d not multiple of %d\n",
1578					  sublen, bus->sgentry_align);
1579			}
1580			totlen += sublen;
1581
			/* For the last frame, adjust read len so the
			 * total is a block multiple
			 */
1584			if (!dlen) {
1585				sublen +=
1586				    (roundup(totlen, bus->blocksize) - totlen);
1587				totlen = roundup(totlen, bus->blocksize);
1588			}
1589
1590			/* Allocate/chain packet for next subframe */
1591			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
1592			if (pnext == NULL) {
1593				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1594					  num, sublen);
1595				break;
1596			}
1597			skb_queue_tail(&bus->glom, pnext);
1598
1599			/* Adhere to start alignment requirements */
1600			pkt_align(pnext, sublen, bus->sgentry_align);
1601		}
1602
1603		/* If all allocations succeeded, save packet chain
1604			 in bus structure */
1605		if (pnext) {
1606			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1607				  totlen, num);
1608			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1609			    totlen != bus->cur_read.len) {
1610				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1611					  bus->cur_read.len, totlen, rxseq);
1612			}
1613			pfirst = pnext = NULL;
1614		} else {
1615			brcmf_sdio_free_glom(bus);
1616			num = 0;
1617		}
1618
1619		/* Done with descriptor packet */
1620		brcmu_pkt_buf_free_skb(bus->glomd);
1621		bus->glomd = NULL;
1622		bus->cur_read.len = 0;
1623	}
1624
1625	/* Ok -- either we just generated a packet chain,
1626		 or had one from before */
1627	if (!skb_queue_empty(&bus->glom)) {
1628		if (BRCMF_GLOM_ON()) {
1629			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1630			skb_queue_walk(&bus->glom, pnext) {
1631				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
1632					  pnext, (u8 *) (pnext->data),
1633					  pnext->len, pnext->len);
1634			}
1635		}
1636
1637		pfirst = skb_peek(&bus->glom);
1638		dlen = (u16) brcmf_sdio_glom_len(bus);
1639
1640		/* Do an SDIO read for the superframe.  Configurable iovar to
1641		 * read directly into the chained packet, or allocate a large
		 * packet and copy into the chain.
1643		 */
1644		sdio_claim_host(bus->sdiodev->func[1]);
1645		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
1646						 &bus->glom, dlen);
1647		sdio_release_host(bus->sdiodev->func[1]);
1648		bus->sdcnt.f2rxdata++;
1649
1650		/* On failure, kill the superframe, allow a couple retries */
1651		if (errcode < 0) {
1652			brcmf_err("glom read of %d bytes failed: %d\n",
1653				  dlen, errcode);
1654
1655			sdio_claim_host(bus->sdiodev->func[1]);
1656			if (bus->glomerr++ < 3) {
1657				brcmf_sdio_rxfail(bus, true, true);
1658			} else {
1659				bus->glomerr = 0;
1660				brcmf_sdio_rxfail(bus, true, false);
1661				bus->sdcnt.rxglomfail++;
1662				brcmf_sdio_free_glom(bus);
1663			}
1664			sdio_release_host(bus->sdiodev->func[1]);
1665			return 0;
1666		}
1667
1668		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1669				   pfirst->data, min_t(int, pfirst->len, 48),
1670				   "SUPERFRAME:\n");
1671
1672		rd_new.seq_num = rxseq;
1673		rd_new.len = dlen;
1674		sdio_claim_host(bus->sdiodev->func[1]);
1675		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1676					     BRCMF_SDIO_FT_SUPER);
1677		sdio_release_host(bus->sdiodev->func[1]);
1678		bus->cur_read.len = rd_new.len_nxtfrm << 4;
1679
1680		/* Remove superframe header, remember offset */
1681		skb_pull(pfirst, rd_new.dat_offset);
1682		sfdoff = rd_new.dat_offset;
1683		num = 0;
1684
1685		/* Validate all the subframe headers */
1686		skb_queue_walk(&bus->glom, pnext) {
1687			/* leave when invalid subframe is found */
1688			if (errcode)
1689				break;
1690
1691			rd_new.len = pnext->len;
1692			rd_new.seq_num = rxseq++;
1693			sdio_claim_host(bus->sdiodev->func[1]);
1694			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1695						     BRCMF_SDIO_FT_SUB);
1696			sdio_release_host(bus->sdiodev->func[1]);
1697			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1698					   pnext->data, 32, "subframe:\n");
1699
1700			num++;
1701		}
1702
1703		if (errcode) {
1704			/* Terminate frame on error, request
1705				 a couple retries */
1706			sdio_claim_host(bus->sdiodev->func[1]);
1707			if (bus->glomerr++ < 3) {
1708				/* Restore superframe header space */
1709				skb_push(pfirst, sfdoff);
1710				brcmf_sdio_rxfail(bus, true, true);
1711			} else {
1712				bus->glomerr = 0;
1713				brcmf_sdio_rxfail(bus, true, false);
1714				bus->sdcnt.rxglomfail++;
1715				brcmf_sdio_free_glom(bus);
1716			}
1717			sdio_release_host(bus->sdiodev->func[1]);
1718			bus->cur_read.len = 0;
1719			return 0;
1720		}
1721
1722		/* Basic SD framing looks ok - process each packet (header) */
1723
1724		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1725			dptr = (u8 *) (pfirst->data);
1726			sublen = get_unaligned_le16(dptr);
1727			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1728
1729			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1730					   dptr, pfirst->len,
1731					   "Rx Subframe Data:\n");
1732
1733			__skb_trim(pfirst, sublen);
1734			skb_pull(pfirst, doff);
1735
1736			if (pfirst->len == 0) {
1737				skb_unlink(pfirst, &bus->glom);
1738				brcmu_pkt_buf_free_skb(pfirst);
1739				continue;
1740			}
1741
1742			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1743					   pfirst->data,
1744					   min_t(int, pfirst->len, 32),
1745					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1746					   bus->glom.qlen, pfirst, pfirst->data,
1747					   pfirst->len, pfirst->next,
1748					   pfirst->prev);
1749			skb_unlink(pfirst, &bus->glom);
1750			brcmf_rx_frame(bus->sdiodev->dev, pfirst);
1751			bus->sdcnt.rxglompkts++;
1752		}
1753
1754		bus->sdcnt.rxglomframes++;
1755	}
1756	return num;
1757}
1758
1759static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1760				     bool *pending)
1761{
1762	DECLARE_WAITQUEUE(wait, current);
1763	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1764
1765	/* Wait until control frame is available */
1766	add_wait_queue(&bus->dcmd_resp_wait, &wait);
1767	set_current_state(TASK_INTERRUPTIBLE);
1768
1769	while (!(*condition) && (!signal_pending(current) && timeout))
1770		timeout = schedule_timeout(timeout);
1771
1772	if (signal_pending(current))
1773		*pending = true;
1774
1775	set_current_state(TASK_RUNNING);
1776	remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1777
1778	return timeout;
1779}
1780
1781static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
1782{
1783	if (waitqueue_active(&bus->dcmd_resp_wait))
1784		wake_up_interruptible(&bus->dcmd_resp_wait);
1785
1786	return 0;
1787}

1788static void
1789brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1790{
1791	uint rdlen, pad;
1792	u8 *buf = NULL, *rbuf;
1793	int sdret;
1794
1795	brcmf_dbg(TRACE, "Enter\n");
1796
1797	if (bus->rxblen)
1798		buf = vzalloc(bus->rxblen);
1799	if (!buf)
1800		goto done;
1801
1802	rbuf = bus->rxbuf;
1803	pad = ((unsigned long)rbuf % bus->head_align);
1804	if (pad)
1805		rbuf += (bus->head_align - pad);
1806
1807	/* Copy the already-read portion over */
1808	memcpy(buf, hdr, BRCMF_FIRSTREAD);
1809	if (len <= BRCMF_FIRSTREAD)
1810		goto gotpkt;
1811
1812	/* Raise rdlen to next SDIO block to avoid tail command */
1813	rdlen = len - BRCMF_FIRSTREAD;
1814	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1815		pad = bus->blocksize - (rdlen % bus->blocksize);
1816		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1817		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
1818			rdlen += pad;
1819	} else if (rdlen % bus->head_align) {
1820		rdlen += bus->head_align - (rdlen % bus->head_align);
1821	}
1822
1823	/* Drop if the read is too big or it exceeds our maximum */
1824	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1825		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1826			  rdlen, bus->sdiodev->bus_if->maxctl);
1827		brcmf_sdio_rxfail(bus, false, false);
1828		goto done;
1829	}
1830
1831	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1832		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1833			  len, len - doff, bus->sdiodev->bus_if->maxctl);
1834		bus->sdcnt.rx_toolong++;
1835		brcmf_sdio_rxfail(bus, false, false);
1836		goto done;
1837	}
1838
1839	/* Read the remainder of the frame body */
1840	sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
1841	bus->sdcnt.f2rxdata++;
1842
1843	/* Control frame failures need retransmission */
1844	if (sdret < 0) {
1845		brcmf_err("read %d control bytes failed: %d\n",
1846			  rdlen, sdret);
1847		bus->sdcnt.rxc_errors++;
1848		brcmf_sdio_rxfail(bus, true, true);
1849		goto done;
1850	} else
1851		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1852
1853gotpkt:
1854
1855	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1856			   buf, len, "RxCtrl:\n");
1857
1858	/* Point to valid data and indicate its length */
1859	spin_lock_bh(&bus->rxctl_lock);
1860	if (bus->rxctl) {
1861		brcmf_err("last control frame is being processed.\n");
1862		spin_unlock_bh(&bus->rxctl_lock);
1863		vfree(buf);
1864		goto done;
1865	}
1866	bus->rxctl = buf + doff;
1867	bus->rxctl_orig = buf;
1868	bus->rxlen = len - doff;
1869	spin_unlock_bh(&bus->rxctl_lock);
1870
1871done:
1872	/* Awake any waiters */
1873	brcmf_sdio_dcmd_resp_wake(bus);
1874}
1875
1876/* Pad read to blocksize for efficiency */
1877static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1878{
1879	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1880		*pad = bus->blocksize - (*rdlen % bus->blocksize);
1881		if (*pad <= bus->roundup && *pad < bus->blocksize &&
1882		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1883			*rdlen += *pad;
1884	} else if (*rdlen % bus->head_align) {
1885		*rdlen += bus->head_align - (*rdlen % bus->head_align);
1886	}
1887}
1888
1889static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1890{
1891	struct sk_buff *pkt;		/* Packet for event or data frames */
1892	u16 pad;		/* Number of pad bytes to read */
1893	uint rxleft = 0;	/* Remaining number of frames allowed */
1894	int ret;		/* Return code from calls */
1895	uint rxcount = 0;	/* Total frames read */
1896	struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1897	u8 head_read = 0;
1898
1899	brcmf_dbg(TRACE, "Enter\n");
1900
1901	/* Not finished until we see the no-more-frames indication */
1902	bus->rxpending = true;
1903
1904	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1905	     !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
1906	     rd->seq_num++, rxleft--) {
1907
1908		/* Handle glomming separately */
1909		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1910			u8 cnt;
1911			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1912				  bus->glomd, skb_peek(&bus->glom));
1913			cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
1914			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1915			rd->seq_num += cnt - 1;
1916			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1917			continue;
1918		}
1919
1920		rd->len_left = rd->len;
1921		/* read header first for unknown frame length */
1922		sdio_claim_host(bus->sdiodev->func[1]);
1923		if (!rd->len) {
1924			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
1925						   bus->rxhdr, BRCMF_FIRSTREAD);
1926			bus->sdcnt.f2rxhdrs++;
1927			if (ret < 0) {
1928				brcmf_err("RXHEADER FAILED: %d\n",
1929					  ret);
1930				bus->sdcnt.rx_hdrfail++;
1931				brcmf_sdio_rxfail(bus, true, true);
1932				sdio_release_host(bus->sdiodev->func[1]);
1933				continue;
1934			}
1935
1936			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1937					   bus->rxhdr, SDPCM_HDRLEN,
1938					   "RxHdr:\n");
1939
1940			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1941					       BRCMF_SDIO_FT_NORMAL)) {
1942				sdio_release_host(bus->sdiodev->func[1]);
1943				if (!bus->rxpending)
1944					break;
1945				else
1946					continue;
1947			}
1948
1949			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1950				brcmf_sdio_read_control(bus, bus->rxhdr,
1951							rd->len,
1952							rd->dat_offset);
1953				/* prepare the descriptor for the next read */
1954				rd->len = rd->len_nxtfrm << 4;
1955				rd->len_nxtfrm = 0;
1956				/* treat all packets as events if we don't know */
1957				rd->channel = SDPCM_EVENT_CHANNEL;
1958				sdio_release_host(bus->sdiodev->func[1]);
1959				continue;
1960			}
1961			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1962				       rd->len - BRCMF_FIRSTREAD : 0;
1963			head_read = BRCMF_FIRSTREAD;
1964		}
1965
1966		brcmf_sdio_pad(bus, &pad, &rd->len_left);
1967
1968		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1969					    bus->head_align);
1970		if (!pkt) {
1971			/* Give up on data, request rtx of events */
1972			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1973			brcmf_sdio_rxfail(bus, false,
1974					    RETRYCHAN(rd->channel));
1975			sdio_release_host(bus->sdiodev->func[1]);
1976			continue;
1977		}
1978		skb_pull(pkt, head_read);
1979		pkt_align(pkt, rd->len_left, bus->head_align);
1980
1981		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
1982		bus->sdcnt.f2rxdata++;
1983		sdio_release_host(bus->sdiodev->func[1]);
1984
1985		if (ret < 0) {
1986			brcmf_err("read %d bytes from channel %d failed: %d\n",
1987				  rd->len, rd->channel, ret);
1988			brcmu_pkt_buf_free_skb(pkt);
1989			sdio_claim_host(bus->sdiodev->func[1]);
1990			brcmf_sdio_rxfail(bus, true,
1991					    RETRYCHAN(rd->channel));
1992			sdio_release_host(bus->sdiodev->func[1]);
1993			continue;
1994		}
1995
1996		if (head_read) {
1997			skb_push(pkt, head_read);
1998			memcpy(pkt->data, bus->rxhdr, head_read);
1999			head_read = 0;
2000		} else {
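			/* Frame was read in full using the length hint from
			 * the previous header; parse and validate the header
			 * that came with the data.
			 */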
2001			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
2002			rd_new.seq_num = rd->seq_num;
2003			sdio_claim_host(bus->sdiodev->func[1]);
2004			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
2005					       BRCMF_SDIO_FT_NORMAL)) {
2006				rd->len = 0;
2007				brcmu_pkt_buf_free_skb(pkt);
				sdio_release_host(bus->sdiodev->func[1]);
				continue;
2008			}
2009			bus->sdcnt.rx_readahead_cnt++;
2010			if (rd->len != roundup(rd_new.len, 16)) {
2011				brcmf_err("frame length mismatch:read %d, should be %d\n",
2012					  rd->len,
2013					  roundup(rd_new.len, 16) >> 4);
2014				rd->len = 0;
2015				brcmf_sdio_rxfail(bus, true, true);
2016				sdio_release_host(bus->sdiodev->func[1]);
2017				brcmu_pkt_buf_free_skb(pkt);
2018				continue;
2019			}
2020			sdio_release_host(bus->sdiodev->func[1]);
2021			rd->len_nxtfrm = rd_new.len_nxtfrm;
2022			rd->channel = rd_new.channel;
2023			rd->dat_offset = rd_new.dat_offset;
2024
2025			brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
2026					     BRCMF_DATA_ON()) &&
2027					   BRCMF_HDRS_ON(),
2028					   bus->rxhdr, SDPCM_HDRLEN,
2029					   "RxHdr:\n");
2030
2031			if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
2032				brcmf_err("readahead on control packet %d?\n",
2033					  rd_new.seq_num);
2034				/* Force retry w/normal header read */
2035				rd->len = 0;
2036				sdio_claim_host(bus->sdiodev->func[1]);
2037				brcmf_sdio_rxfail(bus, false, true);
2038				sdio_release_host(bus->sdiodev->func[1]);
2039				brcmu_pkt_buf_free_skb(pkt);
2040				continue;
2041			}
2042		}
2043
2044		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
2045				   pkt->data, rd->len, "Rx Data:\n");
2046
2047		/* Save superframe descriptor and allocate packet frame */
2048		if (rd->channel == SDPCM_GLOM_CHANNEL) {
2049			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
2050				brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
2051					  rd->len);
2052				brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
2053						   pkt->data, rd->len,
2054						   "Glom Data:\n");
2055				__skb_trim(pkt, rd->len);
2056				skb_pull(pkt, SDPCM_HDRLEN);
2057				bus->glomd = pkt;
2058			} else {
2059				brcmf_err("%s: glom superframe w/o descriptor!\n",
2060					  __func__);
2061				sdio_claim_host(bus->sdiodev->func[1]);
2062				brcmf_sdio_rxfail(bus, false, false);
2063				sdio_release_host(bus->sdiodev->func[1]);
2064			}
2065			/* prepare the descriptor for the next read */
2066			rd->len = rd->len_nxtfrm << 4;
2067			rd->len_nxtfrm = 0;
2068			/* treat all packets as events if we don't know */
2069			rd->channel = SDPCM_EVENT_CHANNEL;
2070			continue;
2071		}
2072
2073		/* Fill in packet len and prio, deliver upward */
2074		__skb_trim(pkt, rd->len);
2075		skb_pull(pkt, rd->dat_offset);
2076
2077		/* prepare the descriptor for the next read */
2078		rd->len = rd->len_nxtfrm << 4;
2079		rd->len_nxtfrm = 0;
2080		/* treat all packets as events if we don't know */
2081		rd->channel = SDPCM_EVENT_CHANNEL;
2082
2083		if (pkt->len == 0) {
2084			brcmu_pkt_buf_free_skb(pkt);
2085			continue;
2086		}
2087
2088		brcmf_rx_frame(bus->sdiodev->dev, pkt);
2089	}
2090
2091	rxcount = maxframes - rxleft;
2092	/* Message if we hit the limit */
2093	if (!rxleft)
2094		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
2095	else
2096		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
2097	/* Back off rxseq if awaiting rtx, update rx_seq */
2098	if (bus->rxskip)
2099		rd->seq_num--;
2100	bus->rx_seq = rd->seq_num;
2101
2102	return rxcount;
2103}
2104
2105static void
2106brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
2107{
2108	if (waitqueue_active(&bus->ctrl_wait))
2109		wake_up_interruptible(&bus->ctrl_wait);
2111}
2112
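/* Push head padding so the packet data pointer meets bus->head_align.
 * Returns the number of pad bytes pushed (possibly zero) or a negative
 * errno if the buffer could not be reallocated.
 */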
2113static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2114{
2115	u16 head_pad;
2116	u8 *dat_buf;
2117
2118	dat_buf = (u8 *)(pkt->data);
2119
2120	/* Check head padding */
2121	head_pad = ((unsigned long)dat_buf % bus->head_align);
2122	if (head_pad) {
2123		if (skb_headroom(pkt) < head_pad) {
2124			bus->sdiodev->bus_if->tx_realloc++;
2125			head_pad = 0;
2126			if (skb_cow(pkt, head_pad))
2127				return -ENOMEM;
2128		}
2129		skb_push(pkt, head_pad);
2130		dat_buf = (u8 *)(pkt->data);
2131		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
2132	}
2133	return head_pad;
2134}
2135
2136/**
2137 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2138 * bus layer usage.
2139 */
2140/* flag marking a dummy skb added for DMA alignment requirement */
2141#define ALIGN_SKB_FLAG		0x8000
2142/* bit mask of data length chopped from the previous packet */
2143#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
2144
2145static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
2146				    struct sk_buff_head *pktq,
2147				    struct sk_buff *pkt, u16 total_len)
2148{
2149	struct brcmf_sdio_dev *sdiodev;
2150	struct sk_buff *pkt_pad;
2151	u16 tail_pad, tail_chop, chain_pad;
2152	unsigned int blksize;
2153	bool lastfrm;
2154	int ntail, ret;
2155
2156	sdiodev = bus->sdiodev;
2157	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
2158	/* sg entry alignment should be a divisor of block size */
2159	WARN_ON(blksize % bus->sgentry_align);
2160
2161	/* Check tail padding */
2162	lastfrm = skb_queue_is_last(pktq, pkt);
2163	tail_pad = 0;
2164	tail_chop = pkt->len % bus->sgentry_align;
2165	if (tail_chop)
2166		tail_pad = bus->sgentry_align - tail_chop;
2167	chain_pad = (total_len + tail_pad) % blksize;
2168	if (lastfrm && chain_pad)
2169		tail_pad += blksize - chain_pad;
2170	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
2171		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
2172						bus->head_align);
2173		if (pkt_pad == NULL)
2174			return -ENOMEM;
2175		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
2176		if (unlikely(ret < 0)) {
2177			kfree_skb(pkt_pad);
2178			return ret;
2179		}
2180		memcpy(pkt_pad->data,
2181		       pkt->data + pkt->len - tail_chop,
2182		       tail_chop);
2183		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
2184		skb_trim(pkt, pkt->len - tail_chop);
2185		skb_trim(pkt_pad, tail_pad + tail_chop);
2186		__skb_queue_after(pktq, pkt, pkt_pad);
2187	} else {
2188		ntail = pkt->data_len + tail_pad -
2189			(pkt->end - pkt->tail);
2190		if (skb_cloned(pkt) || ntail > 0)
2191			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
2192				return -ENOMEM;
2193		if (skb_linearize(pkt))
2194			return -ENOMEM;
2195		__skb_put(pkt, tail_pad);
2196	}
2197
2198	return tail_pad;
2199}
2200
2201/**
2202 * brcmf_sdio_txpkt_prep - packet preparation for transmit
2203 * @bus: brcmf_sdio structure pointer
2204 * @pktq: packet list pointer
2205 * @chan: virtual channel to transmit the packet
2206 *
2207 * Processes to be applied to the packet
2208 *	- Align data buffer pointer
2209 *	- Align data buffer length
2210 *	- Prepare header
2211 * Return: negative value if there is an error
2212 */
2213static int
2214brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2215		      uint chan)
2216{
2217	u16 head_pad, total_len;
2218	struct sk_buff *pkt_next;
2219	u8 txseq;
2220	int ret;
2221	struct brcmf_sdio_hdrinfo hd_info = {0};
2222
2223	txseq = bus->tx_seq;
2224	total_len = 0;
2225	skb_queue_walk(pktq, pkt_next) {
2226		/* alignment packet inserted in previous
2227		 * loop cycle can be skipped as it is
2228		 * already properly aligned and does not
2229		 * need an sdpcm header.
2230		 */
2231		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
2232			continue;
2233
2234		/* align packet data pointer */
2235		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
2236		if (ret < 0)
2237			return ret;
2238		head_pad = (u16)ret;
2239		if (head_pad)
2240			memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
2241
2242		total_len += pkt_next->len;
2243
2244		hd_info.len = pkt_next->len;
2245		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
2246		if (bus->txglom && pktq->qlen > 1) {
2247			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
2248						       pkt_next, total_len);
2249			if (ret < 0)
2250				return ret;
2251			hd_info.tail_pad = (u16)ret;
2252			total_len += (u16)ret;
2253		}
2254
2255		hd_info.channel = chan;
2256		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
2257		hd_info.seq_num = txseq++;
2258
2259		/* Now fill the header */
2260		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
2261
2262		if (BRCMF_BYTES_ON() &&
2263		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
2264		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
2265			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
2266					   "Tx Frame:\n");
2267		else if (BRCMF_HDRS_ON())
2268			brcmf_dbg_hex_dump(true, pkt_next->data,
2269					   head_pad + bus->tx_hdrlen,
2270					   "Tx Header:\n");
2271	}
2272	/* Hardware length tag of the first packet should be total
2273	 * length of the chain (including padding)
2274	 */
2275	if (bus->txglom)
2276		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
2277	return 0;
2278}
2279
2280/**
2281 * brcmf_sdio_txpkt_postp - packet post processing for transmit
2282 * @bus: brcmf_sdio structure pointer
2283 * @pktq: packet list pointer
2284 *
2285 * Processes to be applied to the packet
2286 *	- Remove head padding
2287 *	- Remove tail padding
2288 */
2289static void
2290brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2291{
2292	u8 *hdr;
2293	u32 dat_offset;
2294	u16 tail_pad;
2295	u16 dummy_flags, chop_len;
2296	struct sk_buff *pkt_next, *tmp, *pkt_prev;
2297
2298	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2299		dummy_flags = *(u16 *)(pkt_next->cb);
2300		if (dummy_flags & ALIGN_SKB_FLAG) {
2301			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2302			if (chop_len) {
2303				pkt_prev = pkt_next->prev;
2304				skb_put(pkt_prev, chop_len);
2305			}
2306			__skb_unlink(pkt_next, pktq);
2307			brcmu_pkt_buf_free_skb(pkt_next);
2308		} else {
2309			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
2310			dat_offset = le32_to_cpu(*(__le32 *)hdr);
2311			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
2312				     SDPCM_DOFFSET_SHIFT;
2313			skb_pull(pkt_next, dat_offset);
2314			if (bus->txglom) {
2315				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
2316				skb_trim(pkt_next, pkt_next->len - tail_pad);
2317			}
2318		}
2319	}
2320}
2321
2322/* Writes a HW/SW header into the packet and sends it. */
2323/* Assumes: (a) header space already there, (b) caller holds lock */
2324static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2325			    uint chan)
2326{
2327	int ret;
2328	struct sk_buff *pkt_next, *tmp;
2329
2330	brcmf_dbg(TRACE, "Enter\n");
2331
2332	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
2333	if (ret)
2334		goto done;
2335
2336	sdio_claim_host(bus->sdiodev->func[1]);
2337	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
2338	bus->sdcnt.f2txdata++;
2339
2340	if (ret < 0)
2341		brcmf_sdio_txfail(bus);
2342
2343	sdio_release_host(bus->sdiodev->func[1]);
2344
2345done:
2346	brcmf_sdio_txpkt_postp(bus, pktq);
2347	if (ret == 0)
2348		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
2349	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2350		__skb_unlink(pkt_next, pktq);
2351		brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
2352	}
2353	return ret;
2354}
2355
2356static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2357{
2358	struct sk_buff *pkt;
2359	struct sk_buff_head pktq;
2360	u32 intstatus = 0;
2361	int ret = 0, prec_out, i;
2362	uint cnt = 0;
2363	u8 tx_prec_map, pkt_num;
2364
2365	brcmf_dbg(TRACE, "Enter\n");
2366
2367	tx_prec_map = ~bus->flowcontrol;
2368
2369	/* Send frames until the limit or some other event */
2370	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2371		pkt_num = 1;
2372		if (bus->txglom)
2373			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2374					bus->sdiodev->txglomsz);
2375		pkt_num = min_t(u32, pkt_num,
2376				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
2377		__skb_queue_head_init(&pktq);
2378		spin_lock_bh(&bus->txq_lock);
2379		for (i = 0; i < pkt_num; i++) {
2380			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
2381					      &prec_out);
2382			if (pkt == NULL)
2383				break;
2384			__skb_queue_tail(&pktq, pkt);
2385		}
2386		spin_unlock_bh(&bus->txq_lock);
2387		if (i == 0)
2388			break;
2389
2390		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2391
2392		cnt += i;
2393
2394		/* In poll mode, need to check for other events */
2395		if (!bus->intr) {
2396			/* Check device status, signal pending interrupt */
2397			sdio_claim_host(bus->sdiodev->func[1]);
2398			ret = r_sdreg32(bus, &intstatus,
2399					offsetof(struct sdpcmd_regs,
2400						 intstatus));
2401			sdio_release_host(bus->sdiodev->func[1]);
2402			bus->sdcnt.f2txdata++;
2403			if (ret != 0)
2404				break;
2405			if (intstatus & bus->hostintmask)
2406				atomic_set(&bus->ipend, 1);
2407		}
2408	}
2409
2410	/* Deflow-control stack if needed */
2411	if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
2412	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2413		bus->txoff = false;
2414		brcmf_txflowblock(bus->sdiodev->dev, false);
2415	}
2416
2417	return cnt;
2418}
2419
2420static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
2421{
2422	u8 doff;
2423	u16 pad;
2424	uint retries = 0;
2425	struct brcmf_sdio_hdrinfo hd_info = {0};
2426	int ret;
2427
2428	brcmf_dbg(TRACE, "Enter\n");
2429
2430	/* Back the pointer to make room for bus header */
2431	frame -= bus->tx_hdrlen;
2432	len += bus->tx_hdrlen;
2433
2434	/* Add alignment padding (optional for ctl frames) */
2435	doff = ((unsigned long)frame % bus->head_align);
2436	if (doff) {
2437		frame -= doff;
2438		len += doff;
2439		memset(frame + bus->tx_hdrlen, 0, doff);
2440	}
2441
2442	/* Round send length to next SDIO block */
2443	pad = 0;
2444	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2445		pad = bus->blocksize - (len % bus->blocksize);
2446		if ((pad > bus->roundup) || (pad >= bus->blocksize))
2447			pad = 0;
2448	} else if (len % bus->head_align) {
2449		pad = bus->head_align - (len % bus->head_align);
2450	}
2451	len += pad;
2452
2453	hd_info.len = len - pad;
2454	hd_info.channel = SDPCM_CONTROL_CHANNEL;
2455	hd_info.dat_offset = doff + bus->tx_hdrlen;
2456	hd_info.seq_num = bus->tx_seq;
2457	hd_info.lastfrm = true;
2458	hd_info.tail_pad = pad;
2459	brcmf_sdio_hdpack(bus, frame, &hd_info);
2460
2461	if (bus->txglom)
2462		brcmf_sdio_update_hwhdr(frame, len);
2463
2464	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2465			   frame, len, "Tx Frame:\n");
2466	brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2467			   BRCMF_HDRS_ON(),
2468			   frame, min_t(u16, len, 16), "TxHdr:\n");
2469
2470	do {
2471		ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
2472
2473		if (ret < 0)
2474			brcmf_sdio_txfail(bus);
2475		else
2476			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2477	} while (ret < 0 && retries++ < TXRETRIES);
2478
2479	return ret;
2480}
2481
2482static void brcmf_sdio_bus_stop(struct device *dev)
2483{
2484	u32 local_hostintmask;
2485	u8 saveclk;
2486	int err;
2487	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2488	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2489	struct brcmf_sdio *bus = sdiodev->bus;
2490
2491	brcmf_dbg(TRACE, "Enter\n");
2492
2493	if (bus->watchdog_tsk) {
2494		send_sig(SIGTERM, bus->watchdog_tsk, 1);
2495		kthread_stop(bus->watchdog_tsk);
2496		bus->watchdog_tsk = NULL;
2497	}
2498
2499	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
2500		sdio_claim_host(sdiodev->func[1]);
2501
2502		/* Enable clock for device interrupts */
2503		brcmf_sdio_bus_sleep(bus, false, false);
2504
2505		/* Disable and clear interrupts at the chip level also */
2506		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
2507		local_hostintmask = bus->hostintmask;
2508		bus->hostintmask = 0;
2509
2510		/* Force backplane clocks to assure F2 interrupt propagates */
2511		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2512					    &err);
2513		if (!err)
2514			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2515					  (saveclk | SBSDIO_FORCE_HT), &err);
2516		if (err)
2517			brcmf_err("Failed to force clock for F2: err %d\n",
2518				  err);
2519
2520		/* Turn off the bus (F2), free any pending packets */
2521		brcmf_dbg(INTR, "disable SDIO interrupts\n");
2522		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
2523
2524		/* Clear any pending interrupts now that F2 is disabled */
2525		w_sdreg32(bus, local_hostintmask,
2526			  offsetof(struct sdpcmd_regs, intstatus));
2527
2528		sdio_release_host(sdiodev->func[1]);
2529	}
2530	/* Clear the data packet queues */
2531	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2532
2533	/* Clear any held glomming stuff */
2534	brcmu_pkt_buf_free_skb(bus->glomd);
2535	brcmf_sdio_free_glom(bus);
2536
2537	/* Clear rx control and wake any waiters */
2538	spin_lock_bh(&bus->rxctl_lock);
2539	bus->rxlen = 0;
2540	spin_unlock_bh(&bus->rxctl_lock);
2541	brcmf_sdio_dcmd_resp_wake(bus);
2542
2543	/* Reset some F2 state stuff */
2544	bus->rxskip = false;
2545	bus->tx_seq = bus->rx_seq = 0;
2546}
2547
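/* Re-enable the out-of-band interrupt line when it is currently masked
 * and no interrupt is pending.
 */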
2548static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
2549{
2550	unsigned long flags;
2551
2552	if (bus->sdiodev->oob_irq_requested) {
2553		spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2554		if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2555			enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2556			bus->sdiodev->irq_en = true;
2557		}
2558		spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2559	}
2560}
2561
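/* Atomically OR @val into @v using a cmpxchg retry loop */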
2562static void atomic_orr(int val, atomic_t *v)
2563{
2564	int old_val;
2565
2566	old_val = atomic_read(v);
2567	while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
2568		old_val = atomic_read(v);
2569}
2570
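/* Read the SDIO core interrupt status, acknowledge the handled bits in
 * the device and latch them into bus->intstatus for the DPC.
 */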
2571static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2572{
2573	struct brcmf_core *buscore;
2574	u32 addr;
2575	unsigned long val;
2576	int ret;
2577
2578	buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
2579	addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
2580
2581	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
2582	bus->sdcnt.f1regdata++;
2583	if (ret != 0)
2584		return ret;
2585
2586	val &= bus->hostintmask;
2587	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2588
2589	/* Clear interrupts */
2590	if (val) {
2591		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
2592		bus->sdcnt.f1regdata++;
2593		atomic_orr(val, &bus->intstatus);
2594	}
2595
2596	return ret;
2597}
2598
2599static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2600{
2601	u32 newstatus = 0;
2602	unsigned long intstatus;
2603	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
2604	uint framecnt;			/* Temporary counter of tx/rx frames */
2605	int err = 0;
2606
2607	brcmf_dbg(TRACE, "Enter\n");
2608
2609	sdio_claim_host(bus->sdiodev->func[1]);
2610
2611	/* If waiting for HTAVAIL, check status */
2612	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2613		u8 clkctl, devctl = 0;
2614
2615#ifdef DEBUG
2616		/* Check for inconsistent device control */
2617		devctl = brcmf_sdiod_regrb(bus->sdiodev,
2618					   SBSDIO_DEVICE_CTL, &err);
2619#endif				/* DEBUG */
2620
2621		/* Read CSR, if clock on switch to AVAIL, else ignore */
2622		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
2623					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
2624
2625		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2626			  devctl, clkctl);
2627
2628		if (SBSDIO_HTAV(clkctl)) {
2629			devctl = brcmf_sdiod_regrb(bus->sdiodev,
2630						   SBSDIO_DEVICE_CTL, &err);
2631			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2632			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2633					  devctl, &err);
2634			bus->clkstate = CLK_AVAIL;
2635		}
2636	}
2637
2638	/* Make sure backplane clock is on */
2639	brcmf_sdio_bus_sleep(bus, false, true);
2640
2641	/* Pending interrupt indicates new device status */
2642	if (atomic_read(&bus->ipend) > 0) {
2643		atomic_set(&bus->ipend, 0);
2644		err = brcmf_sdio_intr_rstatus(bus);
2645	}
2646
2647	/* Start with leftover status bits */
2648	intstatus = atomic_xchg(&bus->intstatus, 0);
2649
2650	/* Handle flow-control change: read new state in case our ack
2651	 * crossed another change interrupt.  If change still set, assume
2652	 * FC ON for safety, let next loop through do the debounce.
2653	 */
2654	if (intstatus & I_HMB_FC_CHANGE) {
2655		intstatus &= ~I_HMB_FC_CHANGE;
2656		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2657				offsetof(struct sdpcmd_regs, intstatus));
2658
2659		err = r_sdreg32(bus, &newstatus,
2660				offsetof(struct sdpcmd_regs, intstatus));
2661		bus->sdcnt.f1regdata += 2;
2662		atomic_set(&bus->fcstate,
2663			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2664		intstatus |= (newstatus & bus->hostintmask);
2665	}
2666
2667	/* Handle host mailbox indication */
2668	if (intstatus & I_HMB_HOST_INT) {
2669		intstatus &= ~I_HMB_HOST_INT;
2670		intstatus |= brcmf_sdio_hostmail(bus);
2671	}
2672
2673	sdio_release_host(bus->sdiodev->func[1]);
2674
2675	/* Generally don't ask for these, can get CRC errors... */
2676	if (intstatus & I_WR_OOSYNC) {
2677		brcmf_err("Dongle reports WR_OOSYNC\n");
2678		intstatus &= ~I_WR_OOSYNC;
2679	}
2680
2681	if (intstatus & I_RD_OOSYNC) {
2682		brcmf_err("Dongle reports RD_OOSYNC\n");
2683		intstatus &= ~I_RD_OOSYNC;
2684	}
2685
2686	if (intstatus & I_SBINT) {
2687		brcmf_err("Dongle reports SBINT\n");
2688		intstatus &= ~I_SBINT;
2689	}
2690
2691	/* Would be active due to wake-wlan in gSPI */
2692	if (intstatus & I_CHIPACTIVE) {
2693		brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2694		intstatus &= ~I_CHIPACTIVE;
2695	}
2696
2697	/* Ignore frame indications if rxskip is set */
2698	if (bus->rxskip)
2699		intstatus &= ~I_HMB_FRAME_IND;
2700
2701	/* On frame indication, read available frames */
2702	if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
2703		brcmf_sdio_readframes(bus, bus->rxbound);
2704		if (!bus->rxpending)
2705			intstatus &= ~I_HMB_FRAME_IND;
2706	}
2707
2708	/* Keep still-pending events for next scheduling */
2709	if (intstatus)
2710		atomic_orr(intstatus, &bus->intstatus);
2711
2712	brcmf_sdio_clrintr(bus);
2713
2714	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
2715	    data_ok(bus)) {
2716		sdio_claim_host(bus->sdiodev->func[1]);
2717		if (bus->ctrl_frame_stat) {
2718			err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
2719						      bus->ctrl_frame_len);
2720			bus->ctrl_frame_err = err;
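			/* make ctrl_frame_err visible before
			 * ctrl_frame_stat is cleared
			 */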
2721			wmb();
2722			bus->ctrl_frame_stat = false;
2723		}
2724		sdio_release_host(bus->sdiodev->func[1]);
2725		brcmf_sdio_wait_event_wakeup(bus);
2726	}
2727	/* Send queued frames (limit 1 if rx may still be pending) */
2728	if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2729	    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
2730	    data_ok(bus)) {
2731		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2732					    txlimit;
2733		brcmf_sdio_sendfromq(bus, framecnt);
2734	}
2735
2736	if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
2737		brcmf_err("failed backplane access over SDIO, halting operation\n");
2738		atomic_set(&bus->intstatus, 0);
2739		if (bus->ctrl_frame_stat) {
2740			sdio_claim_host(bus->sdiodev->func[1]);
2741			if (bus->ctrl_frame_stat) {
2742				bus->ctrl_frame_err = -ENODEV;
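				/* publish the error code before
				 * ctrl_frame_stat is cleared
				 */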
2743				wmb();
2744				bus->ctrl_frame_stat = false;
2745				brcmf_sdio_wait_event_wakeup(bus);
2746			}
2747			sdio_release_host(bus->sdiodev->func[1]);
2748		}
2749	} else if (atomic_read(&bus->intstatus) ||
2750		   atomic_read(&bus->ipend) > 0 ||
2751		   (!atomic_read(&bus->fcstate) &&
2752		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2753		    data_ok(bus))) {
2754		bus->dpc_triggered = true;
2755	}
2756}
2757
2758static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
2759{
2760	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2761	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2762	struct brcmf_sdio *bus = sdiodev->bus;
2763
2764	return &bus->txq;
2765}
2766
2767static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
2768{
2769	struct sk_buff *p;
2770	int eprec = -1;		/* precedence to evict from */
2771
2772	/* Fast case, precedence queue is not full and we are also not
2773	 * exceeding total queue length
2774	 */
2775	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
2776		brcmu_pktq_penq(q, prec, pkt);
2777		return true;
2778	}
2779
2780	/* Determine precedence from which to evict packet, if any */
2781	if (pktq_pfull(q, prec)) {
2782		eprec = prec;
2783	} else if (pktq_full(q)) {
2784		p = brcmu_pktq_peek_tail(q, &eprec);
2785		if (eprec > prec)
2786			return false;
2787	}
2788
2789	/* Evict if needed */
2790	if (eprec >= 0) {
2791		/* Detect queueing to unconfigured precedence */
2792		if (eprec == prec)
2793			return false;	/* refuse newer (incoming) packet */
2794		/* Evict packet according to discard policy */
2795		p = brcmu_pktq_pdeq_tail(q, eprec);
2796		if (p == NULL)
2797			brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
2798		brcmu_pkt_buf_free_skb(p);
2799	}
2800
2801	/* Enqueue */
2802	p = brcmu_pktq_penq(q, prec, pkt);
2803	if (p == NULL)
2804		brcmf_err("brcmu_pktq_penq() failed\n");
2805
2806	return p != NULL;
2807}
2808
2809static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2810{
2811	int ret = -EBADE;
2812	uint prec;
2813	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2814	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2815	struct brcmf_sdio *bus = sdiodev->bus;
2816
2817	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
2818
2819	/* Add space for the header */
2820	skb_push(pkt, bus->tx_hdrlen);
2821	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2822
2823	prec = prio2prec((pkt->priority & PRIOMASK));
2824
2825	/* Check for existing queue, current flow-control,
2826	 * pending event, or pending clock */
2827	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2828	bus->sdcnt.fcqueued++;
2829
2830	/* Priority based enq */
2831	spin_lock_bh(&bus->txq_lock);
2832	/* reset bus_flags in packet cb */
2833	*(u16 *)(pkt->cb) = 0;
2834	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
2835		skb_pull(pkt, bus->tx_hdrlen);
2836		brcmf_err("out of bus->txq !!!\n");
2837		ret = -ENOSR;
2838	} else {
2839		ret = 0;
2840	}
2841
2842	if (pktq_len(&bus->txq) >= TXHI) {
2843		bus->txoff = true;
2844		brcmf_txflowblock(dev, true);
2845	}
2846	spin_unlock_bh(&bus->txq_lock);
2847
2848#ifdef DEBUG
2849	if (pktq_plen(&bus->txq, prec) > qcount[prec])
2850		qcount[prec] = pktq_plen(&bus->txq, prec);
2851#endif
2852
2853	brcmf_sdio_trigger_dpc(bus);
2854	return ret;
2855}
2856
2857#ifdef DEBUG
2858#define CONSOLE_LINE_MAX	192
2859
2860static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
2861{
2862	struct brcmf_console *c = &bus->console;
2863	u8 line[CONSOLE_LINE_MAX], ch;
2864	u32 n, idx, addr;
2865	int rv;
2866
2867	/* Don't do anything until FWREADY updates console address */
2868	if (bus->console_addr == 0)
2869		return 0;
2870
2871	/* Read console log struct */
2872	addr = bus->console_addr + offsetof(struct rte_console, log_le);
2873	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2874			       sizeof(c->log_le));
2875	if (rv < 0)
2876		return rv;
2877
2878	/* Allocate console buffer (one time only) */
2879	if (c->buf == NULL) {
2880		c->bufsize = le32_to_cpu(c->log_le.buf_size);
2881		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2882		if (c->buf == NULL)
2883			return -ENOMEM;
2884	}
2885
2886	idx = le32_to_cpu(c->log_le.idx);
2887
2888	/* Protect against corrupt value */
2889	if (idx > c->bufsize)
2890		return -EBADE;
2891
2892	/* Skip reading the console buffer if the index pointer
2893	 * has not moved */
2894	if (idx == c->last)
2895		return 0;
2896
2897	/* Read the console buffer */
2898	addr = le32_to_cpu(c->log_le.buf);
2899	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2900	if (rv < 0)
2901		return rv;
2902
2903	while (c->last != idx) {
2904		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2905			if (c->last == idx) {
2906				/* This would output a partial line.
2907				 * Instead, back up
2908				 * the buffer pointer and output this
2909				 * line next time around.
2910				 */
2911				if (c->last >= n)
2912					c->last -= n;
2913				else
2914					c->last = c->bufsize - n;
2915				goto break2;
2916			}
2917			ch = c->buf[c->last];
2918			c->last = (c->last + 1) % c->bufsize;
2919			if (ch == '\n')
2920				break;
2921			line[n] = ch;
2922		}
2923
2924		if (n > 0) {
2925			if (line[n - 1] == '\r')
2926				n--;
2927			line[n] = 0;
2928			pr_debug("CONSOLE: %s\n", line);
2929		}
2930	}
2931break2:
2932
2933	return 0;
2934}
2935#endif				/* DEBUG */
2936
2937static int
2938brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2939{
2940	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2941	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2942	struct brcmf_sdio *bus = sdiodev->bus;
2943	int ret;
2944
2945	brcmf_dbg(TRACE, "Enter\n");
2946
2947	/* Send from dpc */
2948	bus->ctrl_frame_buf = msg;
2949	bus->ctrl_frame_len = msglen;
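	/* make the control frame buffer and length visible before
	 * ctrl_frame_stat is set for the DPC
	 */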
2950	wmb();
2951	bus->ctrl_frame_stat = true;
2952
2953	brcmf_sdio_trigger_dpc(bus);
2954	wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
2955					 msecs_to_jiffies(CTL_DONE_TIMEOUT));
2956	ret = 0;
2957	if (bus->ctrl_frame_stat) {
2958		sdio_claim_host(bus->sdiodev->func[1]);
2959		if (bus->ctrl_frame_stat) {
2960			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
2961			bus->ctrl_frame_stat = false;
2962			ret = -ETIMEDOUT;
2963		}
2964		sdio_release_host(bus->sdiodev->func[1]);
2965	}
2966	if (!ret) {
2967		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
2968			  bus->ctrl_frame_err);
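		/* pairs with the wmb() in the DPC: read the result only
		 * after ctrl_frame_stat was seen cleared
		 */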
2969		rmb();
2970		ret = bus->ctrl_frame_err;
2971	}
2972
2973	if (ret)
2974		bus->sdcnt.tx_ctlerrs++;
2975	else
2976		bus->sdcnt.tx_ctlpkts++;
2977
2978	return ret;
2979}
2980
2981#ifdef DEBUG
2982static int brcmf_sdio_dump_console(struct seq_file *seq, struct brcmf_sdio *bus,
2983				   struct sdpcm_shared *sh)
2984{
2985	u32 addr, console_ptr, console_size, console_index;
2986	char *conbuf = NULL;
2987	__le32 sh_val;
2988	int rv;
2989
2990	/* obtain console information from device memory */
2991	addr = sh->console_addr + offsetof(struct rte_console, log_le);
2992	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2993			       (u8 *)&sh_val, sizeof(u32));
2994	if (rv < 0)
2995		return rv;
2996	console_ptr = le32_to_cpu(sh_val);
2997
2998	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2999	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
3000			       (u8 *)&sh_val, sizeof(u32));
3001	if (rv < 0)
3002		return rv;
3003	console_size = le32_to_cpu(sh_val);
3004
3005	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
3006	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
3007			       (u8 *)&sh_val, sizeof(u32));
3008	if (rv < 0)
3009		return rv;
3010	console_index = le32_to_cpu(sh_val);
3011
3012	/* allocate buffer for console data */
3013	if (console_size <= CONSOLE_BUFFER_MAX)
3014		conbuf = vzalloc(console_size+1);
3015
3016	if (!conbuf)
3017		return -ENOMEM;
3018
3019	/* obtain the console data from device */
3020	conbuf[console_size] = '\0';
3021	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
3022			       console_size);
3023	if (rv < 0)
3024		goto done;
3025
3026	rv = seq_write(seq, conbuf + console_index,
3027		       console_size - console_index);
3028	if (rv < 0)
3029		goto done;
3030
3031	if (console_index > 0)
3032		rv = seq_write(seq, conbuf, console_index - 1);
3033
3034done:
3035	vfree(conbuf);
3036	return rv;
3037}
3038
3039static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
3040				struct sdpcm_shared *sh)
3041{
3042	int error;
3043	struct brcmf_trap_info tr;
3044
3045	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
3046		brcmf_dbg(INFO, "no trap in firmware\n");
3047		return 0;
3048	}
3049
3050	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
3051				  sizeof(struct brcmf_trap_info));
3052	if (error < 0)
3053		return error;
3054
3055	seq_printf(seq,
3056		   "dongle trap info: type 0x%x @ epc 0x%08x\n"
3057		   "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3058		   "  lr   0x%08x pc   0x%08x offset 0x%x\n"
3059		   "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
3060		   "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
3061		   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3062		   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3063		   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3064		   le32_to_cpu(tr.pc), sh->trap_addr,
3065		   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3066		   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3067		   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3068		   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3069
3070	return 0;
3071}
3072
3073static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
3074				  struct sdpcm_shared *sh)
3075{
3076	int error = 0;
3077	char file[80] = "?";
3078	char expr[80] = "<???>";
3079
3080	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3081		brcmf_dbg(INFO, "firmware not built with -assert\n");
3082		return 0;
3083	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3084		brcmf_dbg(INFO, "no assert in dongle\n");
3085		return 0;
3086	}
3087
3088	sdio_claim_host(bus->sdiodev->func[1]);
3089	if (sh->assert_file_addr != 0) {
3090		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3091					  sh->assert_file_addr, (u8 *)file, 80);
3092		if (error < 0) {
			sdio_release_host(bus->sdiodev->func[1]);
3093			return error;
		}
3094	}
3095	if (sh->assert_exp_addr != 0) {
3096		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3097					  sh->assert_exp_addr, (u8 *)expr, 80);
3098		if (error < 0) {
			sdio_release_host(bus->sdiodev->func[1]);
3099			return error;
		}
3100	}
3101	sdio_release_host(bus->sdiodev->func[1]);
3102
3103	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
3104		   file, sh->assert_line, expr);
3105	return 0;
3106}
3107
3108static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3109{
3110	int error;
3111	struct sdpcm_shared sh;
3112
3113	error = brcmf_sdio_readshared(bus, &sh);
3114
3115	if (error < 0)
3116		return error;
3117
3118	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3119		brcmf_dbg(INFO, "firmware not built with -assert\n");
3120	else if (sh.flags & SDPCM_SHARED_ASSERT)
3121		brcmf_err("assertion in dongle\n");
3122
3123	if (sh.flags & SDPCM_SHARED_TRAP)
3124		brcmf_err("firmware trap in dongle\n");
3125
3126	return 0;
3127}
3128
3129static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus)
3130{
3131	int error = 0;
3132	struct sdpcm_shared sh;
3133
3134	error = brcmf_sdio_readshared(bus, &sh);
3135	if (error < 0)
3136		goto done;
3137
3138	error = brcmf_sdio_assert_info(seq, bus, &sh);
3139	if (error < 0)
3140		goto done;
3141
3142	error = brcmf_sdio_trap_info(seq, bus, &sh);
3143	if (error < 0)
3144		goto done;
3145
3146	error = brcmf_sdio_dump_console(seq, bus, &sh);
3147
3148done:
3149	return error;
3150}
3151
3152static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data)
3153{
3154	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
3155	struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus;
3156
3157	return brcmf_sdio_died_dump(seq, bus);
3158}
3159
3160static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
3161{
3162	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
3163	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3164	struct brcmf_sdio_count *sdcnt = &sdiodev->bus->sdcnt;
3165
3166	seq_printf(seq,
3167		   "intrcount:    %u\nlastintrs:    %u\n"
3168		   "pollcnt:      %u\nregfails:     %u\n"
3169		   "tx_sderrs:    %u\nfcqueued:     %u\n"
3170		   "rxrtx:        %u\nrx_toolong:   %u\n"
3171		   "rxc_errors:   %u\nrx_hdrfail:   %u\n"
3172		   "rx_badhdr:    %u\nrx_badseq:    %u\n"
3173		   "fc_rcvd:      %u\nfc_xoff:      %u\n"
3174		   "fc_xon:       %u\nrxglomfail:   %u\n"
3175		   "rxglomframes: %u\nrxglompkts:   %u\n"
3176		   "f2rxhdrs:     %u\nf2rxdata:     %u\n"
3177		   "f2txdata:     %u\nf1regdata:    %u\n"
3178		   "tickcnt:      %u\ntx_ctlerrs:   %lu\n"
3179		   "tx_ctlpkts:   %lu\nrx_ctlerrs:   %lu\n"
3180		   "rx_ctlpkts:   %lu\nrx_readahead: %lu\n",
3181		   sdcnt->intrcount, sdcnt->lastintrs,
3182		   sdcnt->pollcnt, sdcnt->regfails,
3183		   sdcnt->tx_sderrs, sdcnt->fcqueued,
3184		   sdcnt->rxrtx, sdcnt->rx_toolong,
3185		   sdcnt->rxc_errors, sdcnt->rx_hdrfail,
3186		   sdcnt->rx_badhdr, sdcnt->rx_badseq,
3187		   sdcnt->fc_rcvd, sdcnt->fc_xoff,
3188		   sdcnt->fc_xon, sdcnt->rxglomfail,
3189		   sdcnt->rxglomframes, sdcnt->rxglompkts,
3190		   sdcnt->f2rxhdrs, sdcnt->f2rxdata,
3191		   sdcnt->f2txdata, sdcnt->f1regdata,
3192		   sdcnt->tickcnt, sdcnt->tx_ctlerrs,
3193		   sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
3194		   sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
3195
3196	return 0;
3197}
3198
3199static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3200{
3201	struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3202	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3203
3204	if (IS_ERR_OR_NULL(dentry))
3205		return;
3206
3207	brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
3208	brcmf_debugfs_add_entry(drvr, "counters",
3209				brcmf_debugfs_sdio_count_read);
3210	debugfs_create_u32("console_interval", 0644, dentry,
3211			   &bus->console_interval);
3212}
3213#else
3214static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3215{
3216	return 0;
3217}
3218
3219static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3220{
3221}
3222#endif /* DEBUG */
3223
3224static int
3225brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3226{
3227	int timeleft;
3228	uint rxlen = 0;
3229	bool pending;
3230	u8 *buf;
3231	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3232	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3233	struct brcmf_sdio *bus = sdiodev->bus;
3234
3235	brcmf_dbg(TRACE, "Enter\n");
3236
3237	/* Wait until control frame is available */
3238	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3239
3240	spin_lock_bh(&bus->rxctl_lock);
3241	rxlen = bus->rxlen;
3242	memcpy(msg, bus->rxctl, min(msglen, rxlen));
3243	bus->rxctl = NULL;
3244	buf = bus->rxctl_orig;
3245	bus->rxctl_orig = NULL;
3246	bus->rxlen = 0;
3247	spin_unlock_bh(&bus->rxctl_lock);
3248	vfree(buf);
3249
3250	if (rxlen) {
3251		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3252			  rxlen, msglen);
3253	} else if (timeleft == 0) {
3254		brcmf_err("resumed on timeout\n");
3255		brcmf_sdio_checkdied(bus);
3256	} else if (pending) {
3257		brcmf_dbg(CTL, "cancelled\n");
3258		return -ERESTARTSYS;
3259	} else {
3260		brcmf_dbg(CTL, "resumed for unknown reason?\n");
3261		brcmf_sdio_checkdied(bus);
3262	}
3263
3264	if (rxlen)
3265		bus->sdcnt.rx_ctlpkts++;
3266	else
3267		bus->sdcnt.rx_ctlerrs++;
3268
3269	return rxlen ? (int)rxlen : -ETIMEDOUT;
3270}
3271
3272#ifdef DEBUG
3273static bool
3274brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3275			u8 *ram_data, uint ram_sz)
3276{
3277	char *ram_cmp;
3278	int err;
3279	bool ret = true;
3280	int address;
3281	int offset;
3282	int len;
3283
3284	/* read back and verify */
3285	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
3286		  ram_sz);
3287	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
3288	/* if the compare buffer cannot be allocated, skip verification */
3289	if (!ram_cmp)
3290		return true;
3291
3292	address = ram_addr;
3293	offset = 0;
3294	while (offset < ram_sz) {
3295		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
3296		      ram_sz - offset;
3297		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
3298		if (err) {
3299			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3300				  err, len, address);
3301			ret = false;
3302			break;
3303		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
3304			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
3305				  offset, len);
3306			ret = false;
3307			break;
3308		}
3309		offset += len;
3310		address += len;
3311	}
3312
3313	kfree(ram_cmp);
3314
3315	return ret;
3316}
3317#else	/* DEBUG */
3318static bool
3319brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3320			u8 *ram_data, uint ram_sz)
3321{
3322	return true;
3323}
3324#endif	/* DEBUG */
3325
3326static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
3327					 const struct firmware *fw)
3328{
3329	int err;
3330
3331	brcmf_dbg(TRACE, "Enter\n");
3332
3333	err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
3334				(u8 *)fw->data, fw->size);
3335	if (err)
3336		brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3337			  err, (int)fw->size, bus->ci->rambase);
3338	else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
3339					  (u8 *)fw->data, fw->size))
3340		err = -EIO;
3341
3342	return err;
3343}
3344
3345static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
3346				     void *vars, u32 varsz)
3347{
3348	int address;
3349	int err;
3350
3351	brcmf_dbg(TRACE, "Enter\n");
3352
3353	address = bus->ci->ramsize - varsz + bus->ci->rambase;
3354	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
3355	if (err)
3356		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
3357			  err, varsz, address);
3358	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
3359		err = -EIO;
3360
3361	return err;
3362}
3363
3364static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3365					const struct firmware *fw,
3366					void *nvram, u32 nvlen)
3367{
3368	int bcmerror = -EFAULT;
3369	u32 rstvec;
3370
3371	sdio_claim_host(bus->sdiodev->func[1]);
3372	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3373
3374	rstvec = get_unaligned_le32(fw->data);
3375	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
3376
3377	bcmerror = brcmf_sdio_download_code_file(bus, fw);
3378	release_firmware(fw);
3379	if (bcmerror) {
3380		brcmf_err("dongle image file download failed\n");
3381		brcmf_fw_nvram_free(nvram);
3382		goto err;
3383	}
3384
3385	bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
3386	brcmf_fw_nvram_free(nvram);
3387	if (bcmerror) {
3388		brcmf_err("dongle nvram file download failed\n");
3389		goto err;
3390	}
3391
3392	/* Take arm out of reset */
3393	if (!brcmf_chip_set_active(bus->ci, rstvec)) {
3394		brcmf_err("error getting out of ARM core reset\n");
3395		goto err;
3396	}
3397
3398	/* Allow full data communication using DPC from now on. */
3399	brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
3400	bcmerror = 0;
3401
3402err:
3403	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
3404	sdio_release_host(bus->sdiodev->func[1]);
3405	return bcmerror;
3406}
3407
3408static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
3409{
3410	int err = 0;
3411	u8 val;
3412
3413	brcmf_dbg(TRACE, "Enter\n");
3414
3415	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
3416	if (err) {
3417		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3418		return;
3419	}
3420
3421	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3422	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
3423	if (err) {
3424		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3425		return;
3426	}
3427
3428	/* Add CMD14 Support */
3429	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3430			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3431			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3432			  &err);
3433	if (err) {
3434		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3435		return;
3436	}
3437
3438	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3439			  SBSDIO_FORCE_HT, &err);
3440	if (err) {
3441		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3442		return;
3443	}
3444
3445	/* set flag */
3446	bus->sr_enabled = true;
3447	brcmf_dbg(INFO, "SR enabled\n");
3448}
3449
3450/* enable KSO bit */
3451static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
3452{
3453	u8 val;
3454	int err = 0;
3455
3456	brcmf_dbg(TRACE, "Enter\n");
3457
3458	/* KSO bit added in SDIO core rev 12 */
3459	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
3460		return 0;
3461
3462	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
3463	if (err) {
3464		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3465		return err;
3466	}
3467
3468	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3469		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3470			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3471		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3472				  val, &err);
3473		if (err) {
3474			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3475			return err;
3476		}
3477	}
3478
3479	return 0;
3480}
3481
3482
3483static int brcmf_sdio_bus_preinit(struct device *dev)
3484{
3485	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3486	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3487	struct brcmf_sdio *bus = sdiodev->bus;
3488	uint pad_size;
3489	u32 value;
3490	int err;
3491
3492	/* the commands below use the terms tx and rx from
3493	 * a device perspective, i.e. bus:txglom affects the
3494	 * bus transfers from device to host.
3495	 */
3496	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
3497		/* for sdio core rev < 12, disable txgloming */
3498		value = 0;
3499		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
3500					   sizeof(u32));
3501	} else {
3502		/* otherwise, set txglomalign */
3503		value = 4;
3504		if (sdiodev->pdata)
3505			value = sdiodev->pdata->sd_sgentry_align;
3506		/* SDIO ADMA requires at least 32 bit alignment */
3507		value = max_t(u32, value, 4);
3508		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
3509					   sizeof(u32));
3510	}
3511
3512	if (err < 0)
3513		goto done;
3514
3515	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
3516	if (sdiodev->sg_support) {
3517		bus->txglom = false;
3518		value = 1;
3519		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
3520		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
3521					   &value, sizeof(u32));
3522		if (err < 0) {
3523			/* bus:rxglom is allowed to fail */
3524			err = 0;
3525		} else {
3526			bus->txglom = true;
3527			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
3528		}
3529	}
3530	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
3531
3532done:
3533	return err;
3534}
3535
3536void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
3537{
3538	if (!bus->dpc_triggered) {
3539		bus->dpc_triggered = true;
3540		queue_work(bus->brcmf_wq, &bus->datawork);
3541	}
3542}
3543
3544void brcmf_sdio_isr(struct brcmf_sdio *bus)
3545{
3546	brcmf_dbg(TRACE, "Enter\n");
3547
3548	if (!bus) {
3549		brcmf_err("bus is null pointer, exiting\n");
3550		return;
3551	}
3552
3553	if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
3554		brcmf_err("bus is down. we have nothing to do\n");
3555		return;
3556	}
3557	/* Count the interrupt call */
3558	bus->sdcnt.intrcount++;
3559	if (in_interrupt())
3560		atomic_set(&bus->ipend, 1);
3561	else
3562		if (brcmf_sdio_intr_rstatus(bus)) {
3563			brcmf_err("failed backplane access\n");
3564		}
3565
3566	/* Disable additional interrupts (is this needed now)? */
3567	if (!bus->intr)
3568		brcmf_err("isr w/o interrupt configured!\n");
3569
3570	bus->dpc_triggered = true;
3571	queue_work(bus->brcmf_wq, &bus->datawork);
3572}
3573
3574static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3575{
3576	brcmf_dbg(TIMER, "Enter\n");
3577
3578	/* Poll period: check device if appropriate. */
3579	if (!bus->sr_enabled &&
3580	    bus->poll && (++bus->polltick >= bus->pollrate)) {
3581		u32 intstatus = 0;
3582
3583		/* Reset poll tick */
3584		bus->polltick = 0;
3585
3586		/* Check device if no interrupts */
3587		if (!bus->intr ||
3588		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3589
3590			if (!bus->dpc_triggered) {
3591				u8 devpend;
3592
3593				sdio_claim_host(bus->sdiodev->func[1]);
3594				devpend = brcmf_sdiod_regrb(bus->sdiodev,
3595							    SDIO_CCCR_INTx,
3596							    NULL);
3597				sdio_release_host(bus->sdiodev->func[1]);
3598				intstatus = devpend & (INTR_STATUS_FUNC1 |
3599						       INTR_STATUS_FUNC2);
3600			}
3601
3602			/* If there is something, make like the ISR and
3603			 * schedule the DPC */
3604			if (intstatus) {
3605				bus->sdcnt.pollcnt++;
3606				atomic_set(&bus->ipend, 1);
3607
3608				bus->dpc_triggered = true;
3609				queue_work(bus->brcmf_wq, &bus->datawork);
3610			}
3611		}
3612
3613		/* Update interrupt tracking */
3614		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3615	}
3616#ifdef DEBUG
3617	/* Poll for console output periodically */
3618	if (bus->sdiodev->state == BRCMF_SDIOD_DATA &&
3619	    bus->console_interval != 0) {
3620		bus->console.count += BRCMF_WD_POLL_MS;
3621		if (bus->console.count >= bus->console_interval) {
3622			bus->console.count -= bus->console_interval;
3623			sdio_claim_host(bus->sdiodev->func[1]);
3624			/* Make sure backplane clock is on */
3625			brcmf_sdio_bus_sleep(bus, false, false);
3626			if (brcmf_sdio_readconsole(bus) < 0)
3627				/* stop on error */
3628				bus->console_interval = 0;
3629			sdio_release_host(bus->sdiodev->func[1]);
3630		}
3631	}
3632#endif				/* DEBUG */
3633
3634	/* On idle timeout clear activity flag and/or turn off clock */
3635	if (!bus->dpc_triggered) {
3636		rmb();
3637		if ((!bus->dpc_running) && (bus->idletime > 0) &&
3638		    (bus->clkstate == CLK_AVAIL)) {
3639			bus->idlecount++;
3640			if (bus->idlecount > bus->idletime) {
3641				brcmf_dbg(SDIO, "idle\n");
3642				sdio_claim_host(bus->sdiodev->func[1]);
3643				brcmf_sdio_wd_timer(bus, 0);
3644				bus->idlecount = 0;
3645				brcmf_sdio_bus_sleep(bus, true, false);
3646				sdio_release_host(bus->sdiodev->func[1]);
3647			}
3648		} else {
3649			bus->idlecount = 0;
3650		}
3651	} else {
3652		bus->idlecount = 0;
3653	}
3654}
3655
3656static void brcmf_sdio_dataworker(struct work_struct *work)
3657{
3658	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3659					      datawork);
3660
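	/* Publish dpc_running before consuming the trigger flag; this pairs
	 * with the rmb() in the watchdog idle check so the bus is not put
	 * to sleep while the worker is still active.
	 */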
3661	bus->dpc_running = true;
3662	wmb();
3663	while (ACCESS_ONCE(bus->dpc_triggered)) {
3664		bus->dpc_triggered = false;
3665		brcmf_sdio_dpc(bus);
3666		bus->idlecount = 0;
3667	}
3668	bus->dpc_running = false;
3669	if (brcmf_sdiod_freezing(bus->sdiodev)) {
3670		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
3671		brcmf_sdiod_try_freeze(bus->sdiodev);
3672		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
3673	}
3674}
3675
3676static void
3677brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
3678			     struct brcmf_chip *ci, u32 drivestrength)
3679{
3680	const struct sdiod_drive_str *str_tab = NULL;
3681	u32 str_mask;
3682	u32 str_shift;
3683	u32 base;
3684	u32 i;
3685	u32 drivestrength_sel = 0;
3686	u32 cc_data_temp;
3687	u32 addr;
3688
3689	if (!(ci->cc_caps & CC_CAP_PMU))
3690		return;
3691
3692	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
3693	case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID, 12):
3694		str_tab = sdiod_drvstr_tab1_1v8;
3695		str_mask = 0x00003800;
3696		str_shift = 11;
3697		break;
3698	case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID, 17):
3699		str_tab = sdiod_drvstr_tab6_1v8;
3700		str_mask = 0x00001800;
3701		str_shift = 11;
3702		break;
3703	case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID, 17):
3704		/* note: 43143 does not support tristate */
3705		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
3706		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
3707			str_tab = sdiod_drvstr_tab2_3v3;
3708			str_mask = 0x00000007;
3709			str_shift = 0;
3710		} else
3711			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
3712				  ci->name, drivestrength);
3713		break;
3714	case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID, 13):
3715		str_tab = sdiod_drvstr_tab5_1v8;
3716		str_mask = 0x00003800;
3717		str_shift = 11;
3718		break;
3719	default:
3720		brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
3721			  ci->name, ci->chiprev, ci->pmurev);
3722		break;
3723	}
3724
3725	if (str_tab != NULL) {
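		/* The tables are ordered from strongest to weakest; pick the
		 * first entry that does not exceed the requested strength.
		 */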
3726		for (i = 0; str_tab[i].strength != 0; i++) {
3727			if (drivestrength >= str_tab[i].strength) {
3728				drivestrength_sel = str_tab[i].sel;
3729				break;
3730			}
3731		}
3732		base = brcmf_chip_get_chipcommon(ci)->base;
3733		addr = CORE_CC_REG(base, chipcontrol_addr);
3734		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
3735		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
3736		cc_data_temp &= ~str_mask;
3737		drivestrength_sel <<= str_shift;
3738		cc_data_temp |= drivestrength_sel;
3739		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
3740
3741		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
3742			  str_tab[i].strength, drivestrength, cc_data_temp);
3743	}
3744}
3745
3746static int brcmf_sdio_buscoreprep(void *ctx)
3747{
3748	struct brcmf_sdio_dev *sdiodev = ctx;
3749	int err = 0;
3750	u8 clkval, clkset;
3751
3752	/* Try forcing SDIO core to do ALPAvail request only */
3753	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
3754	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3755	if (err) {
3756		brcmf_err("error writing for HT off\n");
3757		return err;
3758	}
3759
3760	/* If register supported, wait for ALPAvail and then force ALP */
3761	/* This may take up to 15 milliseconds */
3762	clkval = brcmf_sdiod_regrb(sdiodev,
3763				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);
3764
3765	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
3766		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
3767			  clkset, clkval);
3768		return -EACCES;
3769	}
3770
3771	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
3772					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
3773			!SBSDIO_ALPAV(clkval)),
3774			PMU_MAX_TRANSITION_DLY);
3775	if (!SBSDIO_ALPAV(clkval)) {
3776		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
3777			  clkval);
3778		return -EBUSY;
3779	}
3780
3781	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
3782	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
3783	udelay(65);
3784
3785	/* Also, disable the extra SDIO pull-ups */
3786	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
3787
3788	return 0;
3789}
3790
3791static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
3792					u32 rstvec)
3793{
3794	struct brcmf_sdio_dev *sdiodev = ctx;
3795	struct brcmf_core *core;
3796	u32 reg_addr;
3797
3798	/* clear all interrupts */
3799	core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
3800	reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
3801	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
3802
3803	if (rstvec)
3804		/* Write reset vector to address 0 */
3805		brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
3806				  sizeof(rstvec));
3807}
3808
3809static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
3810{
3811	struct brcmf_sdio_dev *sdiodev = ctx;
3812	u32 val, rev;
3813
3814	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
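	/* 4335 and 4339 share an SDIO device id; chip revisions >= 2 are
	 * really 4339, so rewrite the chip id read from chipcommon.
	 */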
3815	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
3816	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
3817		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
3818		if (rev >= 2) {
3819			val &= ~CID_ID_MASK;
3820			val |= BRCM_CC_4339_CHIP_ID;
3821		}
3822	}
3823	return val;
3824}
3825
3826static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
3827{
3828	struct brcmf_sdio_dev *sdiodev = ctx;
3829
3830	brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
3831}
3832
3833static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
3834	.prepare = brcmf_sdio_buscoreprep,
3835	.activate = brcmf_sdio_buscore_activate,
3836	.read32 = brcmf_sdio_buscore_read32,
3837	.write32 = brcmf_sdio_buscore_write32,
3838};
3839
3840static bool
3841brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3842{
3843	u8 clkctl = 0;
3844	int err = 0;
3845	int reg_addr;
3846	u32 reg_val;
3847	u32 drivestrength;
3848
3849	sdio_claim_host(bus->sdiodev->func[1]);
3850
3851	pr_debug("F1 signature read @0x18000000=0x%4x\n",
3852		 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3853
3854	/*
3855	 * Force PLL off until brcmf_chip_attach()
3856	 * programs PLL control regs
3857	 */
3858
3859	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3860			  BRCMF_INIT_CLKCTL1, &err);
3861	if (!err)
3862		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
3863					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
3864
3865	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3866		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3867			  err, BRCMF_INIT_CLKCTL1, clkctl);
3868		goto fail;
3869	}
3870
3871	bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
3872	if (IS_ERR(bus->ci)) {
3873		brcmf_err("brcmf_chip_attach failed!\n");
3874		bus->ci = NULL;
3875		goto fail;
3876	}
3877
3878	if (brcmf_sdio_kso_init(bus)) {
3879		brcmf_err("error enabling KSO\n");
3880		goto fail;
3881	}
3882
3883	if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3884		drivestrength = bus->sdiodev->pdata->drive_strength;
3885	else
3886		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3887	brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3888
3889	/* Set card control so an SDIO card reset does a WLAN backplane reset */
3890	reg_val = brcmf_sdiod_regrb(bus->sdiodev,
3891				    SDIO_CCCR_BRCM_CARDCTRL, &err);
3892	if (err)
3893		goto fail;
3894
3895	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
3896
3897	brcmf_sdiod_regwb(bus->sdiodev,
3898			  SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
3899	if (err)
3900		goto fail;
3901
3902	/* set PMUControl so a backplane reset does PMU state reload */
3903	reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
3904			       pmucontrol);
3905	reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
3906	if (err)
3907		goto fail;
3908
3909	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3910
3911	brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
3912	if (err)
3913		goto fail;
3914
3915	sdio_release_host(bus->sdiodev->func[1]);
3916
3917	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3918
3919	/* allocate header buffer */
3920	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
3921	if (!bus->hdrbuf)
3922		return false;
3923	/* Locate an appropriately-aligned portion of hdrbuf */
3924	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
3925				    bus->head_align);
3926
3927	/* Set the poll and/or interrupt flags */
3928	bus->intr = true;
3929	bus->poll = false;
3930	if (bus->poll)
3931		bus->pollrate = 1;
3932
3933	return true;
3934
3935fail:
3936	sdio_release_host(bus->sdiodev->func[1]);
3937	return false;
3938}
3939
3940static int
3941brcmf_sdio_watchdog_thread(void *data)
3942{
3943	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3944	int wait;
3945
3946	allow_signal(SIGTERM);
3947	/* Run until signal received */
3948	brcmf_sdiod_freezer_count(bus->sdiodev);
3949	while (1) {
3950		if (kthread_should_stop())
3951			break;
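		/* Step out of the freezer accounting while waiting so a
		 * suspend in progress need not wait for an idle watchdog;
		 * rejoin and honour any freeze request once woken.
		 */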
3952		brcmf_sdiod_freezer_uncount(bus->sdiodev);
3953		wait = wait_for_completion_interruptible(&bus->watchdog_wait);
3954		brcmf_sdiod_freezer_count(bus->sdiodev);
3955		brcmf_sdiod_try_freeze(bus->sdiodev);
3956		if (!wait) {
3957			brcmf_sdio_bus_watchdog(bus);
3958			/* Count the tick for reference */
3959			bus->sdcnt.tickcnt++;
3960			reinit_completion(&bus->watchdog_wait);
3961		} else
3962			break;
3963	}
3964	return 0;
3965}
3966
3967static void
3968brcmf_sdio_watchdog(unsigned long data)
3969{
3970	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3971
3972	if (bus->watchdog_tsk) {
3973		complete(&bus->watchdog_wait);
3974		/* Reschedule the watchdog */
3975		if (bus->wd_timer_valid)
3976			mod_timer(&bus->timer,
3977				  jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
3978	}
3979}
3980
3981static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3982	.stop = brcmf_sdio_bus_stop,
3983	.preinit = brcmf_sdio_bus_preinit,
3984	.txdata = brcmf_sdio_bus_txdata,
3985	.txctl = brcmf_sdio_bus_txctl,
3986	.rxctl = brcmf_sdio_bus_rxctl,
3987	.gettxq = brcmf_sdio_bus_gettxq,
3988	.wowl_config = brcmf_sdio_wowl_config
3989};
3990
3991static void brcmf_sdio_firmware_callback(struct device *dev,
3992					 const struct firmware *code,
3993					 void *nvram, u32 nvram_len)
3994{
3995	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3996	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3997	struct brcmf_sdio *bus = sdiodev->bus;
3998	int err = 0;
3999	u8 saveclk;
4000
4001	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
4002
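	/* Firmware loading is asynchronous; if the common driver layer is
	 * already gone there is nothing left to bring up.
	 */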
4003	if (!bus_if->drvr)
4004		return;
4005
4006	/* try to download image and nvram to the dongle */
4007	bus->alp_only = true;
4008	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
4009	if (err)
4010		goto fail;
4011	bus->alp_only = false;
4012
4013	/* Start the watchdog timer */
4014	bus->sdcnt.tickcnt = 0;
4015	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
4016
4017	sdio_claim_host(sdiodev->func[1]);
4018
4019	/* Make sure backplane clock is on, needed to generate F2 interrupt */
4020	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4021	if (bus->clkstate != CLK_AVAIL)
4022		goto release;
4023
4024	/* Force clocks on backplane to be sure F2 interrupt propagates */
4025	saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
4026	if (!err) {
4027		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
4028				  (saveclk | SBSDIO_FORCE_HT), &err);
4029	}
4030	if (err) {
4031		brcmf_err("Failed to force clock for F2: err %d\n", err);
4032		goto release;
4033	}
4034
4035	/* Enable function 2 (frame transfers) */
4036	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
4037		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
4038	err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
4039
4040
4041	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
4042
4043	/* If F2 successfully enabled, set core and enable interrupts */
4044	if (!err) {
4045		/* Set up the interrupt mask and enable interrupts */
4046		bus->hostintmask = HOSTINTMASK;
4047		w_sdreg32(bus, bus->hostintmask,
4048			  offsetof(struct sdpcmd_regs, hostintmask));
4049
4050		brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
4051	} else {
4052		/* Disable F2 again */
4053		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
4054		goto release;
4055	}
4056
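	/* Initialise save/restore if the chip supports it; otherwise put
	 * back the clock register value saved before forcing HT.
	 */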
4057	if (brcmf_chip_sr_capable(bus->ci)) {
4058		brcmf_sdio_sr_init(bus);
4059	} else {
4060		/* Restore previous clock setting */
4061		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
4062				  saveclk, &err);
4063	}
4064
4065	if (err == 0) {
4066		err = brcmf_sdiod_intr_register(sdiodev);
4067		if (err != 0)
4068			brcmf_err("intr register failed:%d\n", err);
4069	}
4070
4071	/* If we didn't come up, turn off backplane clock */
4072	if (err != 0)
4073		brcmf_sdio_clkctl(bus, CLK_NONE, false);
4074
4075	sdio_release_host(sdiodev->func[1]);
4076
4077	err = brcmf_bus_start(dev);
4078	if (err != 0) {
4079		brcmf_err("dongle is not responding\n");
4080		goto fail;
4081	}
4082	return;
4083
4084release:
4085	sdio_release_host(sdiodev->func[1]);
4086fail:
4087	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
4088	device_release_driver(dev);
4089}
4090
4091struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4092{
4093	int ret;
4094	struct brcmf_sdio *bus;
4095	struct workqueue_struct *wq;
4096
4097	brcmf_dbg(TRACE, "Enter\n");
4098
4099	/* Allocate private bus interface state */
4100	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
4101	if (!bus)
4102		goto fail;
4103
4104	bus->sdiodev = sdiodev;
4105	sdiodev->bus = bus;
4106	skb_queue_head_init(&bus->glom);
4107	bus->txbound = BRCMF_TXBOUND;
4108	bus->rxbound = BRCMF_RXBOUND;
4109	bus->txminmax = BRCMF_TXMINMAX;
4110	bus->tx_seq = SDPCM_SEQ_WRAP - 1;
4111
4112	/* platform specific configuration:
4113	 *   alignments must be at least 4 bytes for ADMA
4114	 */
4115	bus->head_align = ALIGNMENT;
4116	bus->sgentry_align = ALIGNMENT;
4117	if (sdiodev->pdata) {
4118		if (sdiodev->pdata->sd_head_align > ALIGNMENT)
4119			bus->head_align = sdiodev->pdata->sd_head_align;
4120		if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
4121			bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
4122	}
4123
4124	/* single-threaded workqueue */
4125	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
4126				     dev_name(&sdiodev->func[1]->dev));
4127	if (!wq) {
4128		brcmf_err("insufficient memory to create txworkqueue\n");
4129		goto fail;
4130	}
4131	brcmf_sdiod_freezer_count(sdiodev);
4132	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
4133	bus->brcmf_wq = wq;
4134
4135	/* attempt to attach to the dongle */
4136	if (!(brcmf_sdio_probe_attach(bus))) {
4137		brcmf_err("brcmf_sdio_probe_attach failed\n");
4138		goto fail;
4139	}
4140
4141	spin_lock_init(&bus->rxctl_lock);
4142	spin_lock_init(&bus->txq_lock);
4143	init_waitqueue_head(&bus->ctrl_wait);
4144	init_waitqueue_head(&bus->dcmd_resp_wait);
4145
4146	/* Set up the watchdog timer */
4147	init_timer(&bus->timer);
4148	bus->timer.data = (unsigned long)bus;
4149	bus->timer.function = brcmf_sdio_watchdog;
4150
4151	/* Initialize watchdog thread */
4152	init_completion(&bus->watchdog_wait);
4153	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
4154					bus, "brcmf_wdog/%s",
4155					dev_name(&sdiodev->func[1]->dev));
4156	if (IS_ERR(bus->watchdog_tsk)) {
4157		pr_warn("brcmf_watchdog thread failed to start\n");
4158		bus->watchdog_tsk = NULL;
4159	}
4160	/* Initialize DPC thread */
4161	bus->dpc_triggered = false;
4162	bus->dpc_running = false;
4163
4164	/* Assign bus interface call back */
4165	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
4166	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
4167	bus->sdiodev->bus_if->chip = bus->ci->chip;
4168	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
4169
4170	/* default sdio bus header length for tx packet */
4171	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
4172
4173	/* Attach to the common layer, reserve hdr space */
4174	ret = brcmf_attach(bus->sdiodev->dev);
4175	if (ret != 0) {
4176		brcmf_err("brcmf_attach failed\n");
4177		goto fail;
4178	}
4179
4180	/* Query the F2 block size, set roundup accordingly */
4181	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
4182	bus->roundup = min(max_roundup, bus->blocksize);
4183
4184	/* Allocate buffers */
4185	if (bus->sdiodev->bus_if->maxctl) {
4186		bus->sdiodev->bus_if->maxctl += bus->roundup;
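		/* The rx buffer must hold the largest control frame plus the
		 * SDPCM header, padded for rounding and head alignment.
		 */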
4187		bus->rxblen =
4188		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
4189			    ALIGNMENT) + bus->head_align;
4190		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
4191		if (!(bus->rxbuf)) {
4192			brcmf_err("rxbuf allocation failed\n");
4193			goto fail;
4194		}
4195	}
4196
4197	sdio_claim_host(bus->sdiodev->func[1]);
4198
4199	/* Disable F2 to clear any intermediate frame state on the dongle */
4200	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
4201
4202	bus->rxflow = false;
4203
4204	/* Done with backplane-dependent accesses, can drop clock... */
4205	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
4206
4207	sdio_release_host(bus->sdiodev->func[1]);
4208
4209	/* ...and initialize clock/power states */
4210	bus->clkstate = CLK_SDONLY;
4211	bus->idletime = BRCMF_IDLE_INTERVAL;
4212	bus->idleclock = BRCMF_IDLE_ACTIVE;
4213
4214	/* SR state */
4215	bus->sr_enabled = false;
4216
4217	brcmf_sdio_debugfs_create(bus);
4218	brcmf_dbg(INFO, "completed!!\n");
4219
4220	ret = brcmf_sdio_get_fwnames(bus->ci, sdiodev);
4221	if (ret)
4222		goto fail;
4223
4224	ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
4225				     sdiodev->fw_name, sdiodev->nvram_name,
4226				     brcmf_sdio_firmware_callback);
4227	if (ret != 0) {
4228		brcmf_err("async firmware request failed: %d\n", ret);
4229		goto fail;
4230	}
4231
4232	return bus;
4233
4234fail:
4235	brcmf_sdio_remove(bus);
4236	return NULL;
4237}
4238
4239/* Detach and free everything */
4240void brcmf_sdio_remove(struct brcmf_sdio *bus)
4241{
4242	brcmf_dbg(TRACE, "Enter\n");
4243
4244	if (bus) {
4245		/* De-register interrupt handler */
4246		brcmf_sdiod_intr_unregister(bus->sdiodev);
4247
4248		brcmf_detach(bus->sdiodev->dev);
4249
4250		cancel_work_sync(&bus->datawork);
4251		if (bus->brcmf_wq)
4252			destroy_workqueue(bus->brcmf_wq);
4253
4254		if (bus->ci) {
4255			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
4256				sdio_claim_host(bus->sdiodev->func[1]);
4257				brcmf_sdio_wd_timer(bus, 0);
4258				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4259				/* Leave the device in state where it is
4260				 * 'passive'. This is done by resetting all
4261				 * necessary cores.
4262				 */
4263				msleep(20);
4264				brcmf_chip_set_passive(bus->ci);
4265				brcmf_sdio_clkctl(bus, CLK_NONE, false);
4266				sdio_release_host(bus->sdiodev->func[1]);
4267			}
4268			brcmf_chip_detach(bus->ci);
4269		}
4270
4271		kfree(bus->rxbuf);
4272		kfree(bus->hdrbuf);
4273		kfree(bus);
4274	}
4275
4276	brcmf_dbg(TRACE, "Disconnected\n");
4277}
4278
4279void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
4280{
4281	/* Totally stop the timer */
4282	if (!wdtick && bus->wd_timer_valid) {
4283		del_timer_sync(&bus->timer);
4284		bus->wd_timer_valid = false;
4285		bus->save_ms = wdtick;
4286		return;
4287	}
4288
4289	/* don't start the wd until fw is loaded */
4290	if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
4291		return;
4292
4293	if (wdtick) {
4294		if (bus->save_ms != BRCMF_WD_POLL_MS) {
4295			if (bus->wd_timer_valid)
4296				/* Stop timer and restart at new value */
4297				del_timer_sync(&bus->timer);
4298
4299			/* Create the timer again when the watchdog period is
4300			 * dynamically changed or on first use
4301			 */
4302			bus->timer.expires =
4303				jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
4304			add_timer(&bus->timer);
4305
4306		} else {
4307			/* Re-arm the timer at the last watchdog period */
4308			mod_timer(&bus->timer,
4309				jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
4310		}
4311
4312		bus->wd_timer_valid = true;
4313		bus->save_ms = wdtick;
4314	}
4315}
4316
4317int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
4318{
4319	int ret;
4320
4321	sdio_claim_host(bus->sdiodev->func[1]);
4322	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
4323	sdio_release_host(bus->sdiodev->func[1]);
4324
4325	return ret;
4326}
4327
4328