1 /*
2  * Copyright (c) 2010 Broadcom Corporation
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/types.h>
18 #include <linux/atomic.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/printk.h>
22 #include <linux/pci_ids.h>
23 #include <linux/netdevice.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched.h>
26 #include <linux/mmc/sdio.h>
27 #include <linux/mmc/sdio_ids.h>
28 #include <linux/mmc/sdio_func.h>
29 #include <linux/mmc/card.h>
30 #include <linux/semaphore.h>
31 #include <linux/firmware.h>
32 #include <linux/module.h>
33 #include <linux/bcma/bcma.h>
34 #include <linux/debugfs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/platform_data/brcmfmac-sdio.h>
37 #include <linux/moduleparam.h>
38 #include <asm/unaligned.h>
39 #include <defs.h>
40 #include <brcmu_wifi.h>
41 #include <brcmu_utils.h>
42 #include <brcm_hw_ids.h>
43 #include <soc.h>
44 #include "sdio.h"
45 #include "chip.h"
46 #include "firmware.h"
47 
48 #define DCMD_RESP_TIMEOUT	2000	/* In milliseconds */
49 #define CTL_DONE_TIMEOUT	2000	/* In milliseconds */
50 
51 #ifdef DEBUG
52 
53 #define BRCMF_TRAP_INFO_SIZE	80
54 
55 #define CBUF_LEN	(128)
56 
57 /* Device console log buffer state */
58 #define CONSOLE_BUFFER_MAX	2024
59 
60 struct rte_log_le {
61 	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
62 	__le32 buf_size;
63 	__le32 idx;
64 	char *_buf_compat;	/* Redundant pointer for backward compat. */
65 };
66 
67 struct rte_console {
68 	/* Virtual UART
69 	 * When there is no UART (e.g. Quickturn),
70 	 * the host should write a complete
71 	 * input line directly into cbuf and then write
72 	 * the length into vcons_in.
73 	 * This may also be used when there is a real UART
74 	 * (at risk of conflicting with
75 	 * the real UART).  vcons_out is currently unused.
76 	 */
77 	uint vcons_in;
78 	uint vcons_out;
79 
80 	/* Output (logging) buffer
81 	 * Console output is written to a ring buffer log_buf at index log_idx.
82 	 * The host may read the output when it sees log_idx advance.
83 	 * Output will be lost if the output wraps around faster than the host
84 	 * polls.
85 	 */
86 	struct rte_log_le log_le;
87 
88 	/* Console input line buffer
89 	 * Characters are read one at a time into cbuf
90 	 * until <CR> is received, then
91 	 * the buffer is processed as a command line.
92 	 * Also used for virtual UART.
93 	 */
94 	uint cbuf_idx;
95 	char cbuf[CBUF_LEN];
96 };
97 
98 #endif				/* DEBUG */
99 #include <chipcommon.h>
100 
101 #include "bus.h"
102 #include "debug.h"
103 #include "tracepoint.h"
104 
105 #define TXQLEN		2048	/* bulk tx queue length */
106 #define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
107 #define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */
108 #define PRIOMASK	7
109 
110 #define TXRETRIES	2	/* # of retries for tx frames */
111 
112 #define BRCMF_RXBOUND	50	/* Default for max rx frames in
113 				 one scheduling */
114 
115 #define BRCMF_TXBOUND	20	/* Default for max tx frames in
116 				 one scheduling */
117 
118 #define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */
119 
120 #define MEMBLOCK	2048	/* Block size used for downloading
121 				 of dongle image */
122 #define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
123 				 biggest possible glom */
124 
125 #define BRCMF_FIRSTREAD	(1 << 6)
126 
127 #define BRCMF_CONSOLE	10	/* watchdog interval to poll console */
128 
129 /* SBSDIO_DEVICE_CTL */
130 
131 /* 1: device will assert busy signal when receiving CMD53 */
132 #define SBSDIO_DEVCTL_SETBUSY		0x01
133 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
134 #define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
135 /* 1: mask all interrupts to host except the chipActive (rev 8) */
136 #define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
137 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
138  * sdio bus power cycle to clear (rev 9) */
139 #define SBSDIO_DEVCTL_PADS_ISO		0x08
140 /* Force SD->SB reset mapping (rev 11) */
141 #define SBSDIO_DEVCTL_SB_RST_CTL	0x30
142 /*   Determined by CoreControl bit */
143 #define SBSDIO_DEVCTL_RST_CORECTL	0x00
144 /*   Force backplane reset */
145 #define SBSDIO_DEVCTL_RST_BPRESET	0x10
146 /*   Force no backplane reset */
147 #define SBSDIO_DEVCTL_RST_NOBPRESET	0x20
148 
149 /* direct(mapped) cis space */
150 
151 /* MAPPED common CIS address */
152 #define SBSDIO_CIS_BASE_COMMON		0x1000
153 /* maximum bytes in one CIS */
154 #define SBSDIO_CIS_SIZE_LIMIT		0x200
155 /* cis offset addr is < 17 bits */
156 #define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF
157 
158 /* manfid tuple length, include tuple, link bytes */
159 #define SBSDIO_CIS_MANFID_TUPLE_LEN	6
160 
161 #define CORE_BUS_REG(base, field) \
162 		(base + offsetof(struct sdpcmd_regs, field))
163 
164 /* SDIO function 1 register CHIPCLKCSR */
165 /* Force ALP request to backplane */
166 #define SBSDIO_FORCE_ALP		0x01
167 /* Force HT request to backplane */
168 #define SBSDIO_FORCE_HT			0x02
169 /* Force ILP request to backplane */
170 #define SBSDIO_FORCE_ILP		0x04
171 /* Make ALP ready (power up xtal) */
172 #define SBSDIO_ALP_AVAIL_REQ		0x08
173 /* Make HT ready (power up PLL) */
174 #define SBSDIO_HT_AVAIL_REQ		0x10
175 /* Squelch clock requests from HW */
176 #define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
177 /* Status: ALP is ready */
178 #define SBSDIO_ALP_AVAIL		0x40
179 /* Status: HT is ready */
180 #define SBSDIO_HT_AVAIL			0x80
181 #define SBSDIO_CSR_MASK			0x1F
182 #define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
183 #define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
184 #define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
185 #define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
186 #define SBSDIO_CLKAV(regval, alponly) \
187 	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
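
/* Worked example (illustrative, derived from the bit definitions above): a
 * CHIPCLKCSR readback of 0xC0 has both SBSDIO_ALP_AVAIL (0x40) and
 * SBSDIO_HT_AVAIL (0x80) set, so SBSDIO_CLKAV(0xC0, false) is true. A
 * readback of 0x40 (ALP only) satisfies SBSDIO_CLKAV() only when the caller
 * passes alponly == true.
 */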
188 
189 /* intstatus */
190 #define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
191 #define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
192 #define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
193 #define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
194 #define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
195 #define I_SMB_SW_SHIFT	0	/* To SB Mail S/W interrupts shift */
196 #define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
197 #define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
198 #define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
199 #define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
200 #define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
201 #define I_HMB_SW_SHIFT	4	/* To Host Mail S/W interrupts shift */
202 #define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
203 #define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
204 #define	I_PC		(1 << 10)	/* descriptor error */
205 #define	I_PD		(1 << 11)	/* data error */
206 #define	I_DE		(1 << 12)	/* Descriptor protocol Error */
207 #define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
208 #define	I_RO		(1 << 14)	/* Receive fifo Overflow */
209 #define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
210 #define	I_RI		(1 << 16)	/* Receive Interrupt */
211 #define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
212 #define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
213 #define	I_XI		(1 << 24)	/* Transmit Interrupt */
214 #define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
215 #define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
216 #define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
217 #define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
218 #define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
219 #define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
220 #define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
221 #define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
222 #define I_DMA		(I_RI | I_XI | I_ERRORS)
223 
224 /* corecontrol */
225 #define CC_CISRDY		(1 << 0)	/* CIS Ready */
226 #define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
227 #define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
228 #define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
229 #define CC_XMTDATAAVAIL_MODE	(1 << 4)
230 #define CC_XMTDATAAVAIL_CTRL	(1 << 5)
231 
232 /* SDA_FRAMECTRL */
233 #define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
234 #define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
235 #define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
236 #define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
237 
238 /*
239  * Software allocation of To SB Mailbox resources
240  */
241 
242 /* tosbmailbox bits corresponding to intstatus bits */
243 #define SMB_NAK		(1 << 0)	/* Frame NAK */
244 #define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
245 #define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
246 #define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */
247 
248 /* tosbmailboxdata */
249 #define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */
250 
251 /*
252  * Software allocation of To Host Mailbox resources
253  */
254 
255 /* intstatus bits */
256 #define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
257 #define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
258 #define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
259 #define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */
260 
261 /* tohostmailboxdata */
262 #define HMB_DATA_NAKHANDLED	1	/* retransmit NAK'd frame */
263 #define HMB_DATA_DEVREADY	2	/* talk to host after enable */
264 #define HMB_DATA_FC		4	/* per prio flowcontrol update flag */
265 #define HMB_DATA_FWREADY	8	/* fw ready for protocol activity */
266 
267 #define HMB_DATA_FCDATA_MASK	0xff000000
268 #define HMB_DATA_FCDATA_SHIFT	24
269 
270 #define HMB_DATA_VERSION_MASK	0x00ff0000
271 #define HMB_DATA_VERSION_SHIFT	16
272 
273 /*
274  * Software-defined protocol header
275  */
276 
277 /* Current protocol version */
278 #define SDPCM_PROT_VERSION	4
279 
280 /*
281  * Shared structure between dongle and the host.
282  * The structure contains pointers to trap or assert information.
283  */
284 #define SDPCM_SHARED_VERSION       0x0003
285 #define SDPCM_SHARED_VERSION_MASK  0x00FF
286 #define SDPCM_SHARED_ASSERT_BUILT  0x0100
287 #define SDPCM_SHARED_ASSERT        0x0200
288 #define SDPCM_SHARED_TRAP          0x0400
289 
290 /* Space for header read, limit for data packets */
291 #define MAX_HDR_READ	(1 << 6)
292 #define MAX_RX_DATASZ	2048
293 
294 /* Bump up limit on waiting for HT to account for first startup;
295  * if the image is doing a CRC calculation before programming the PMU
296  * for HT availability, it could take a couple hundred ms more, so
297  * max out at 1 second (1000000 us).
298  */
299 #undef PMU_MAX_TRANSITION_DLY
300 #define PMU_MAX_TRANSITION_DLY 1000000
301 
302 /* Value for ChipClockCSR during initial setup */
303 #define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
304 					SBSDIO_ALP_AVAIL_REQ)
305 
306 /* Flags for SDH calls */
307 #define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
308 
309 #define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
310 					 * when idle
311 					 */
312 #define BRCMF_IDLE_INTERVAL	1
313 
314 #define KSO_WAIT_US 50
315 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
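
/* With PMU_MAX_TRANSITION_DLY redefined to 1000000 us above, this works out
 * to 20000 polls with a 50 us delay between attempts, i.e. the KSO handshake
 * is retried for at least about one second before giving up.
 */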
316 
317 /*
318  * Conversion of 802.1D priority to precedence level
319  */
320 static uint prio2prec(u32 prio)
321 {
322 	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
323 	       (prio^2) : prio;
324 }
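
/* Illustrative mapping, assuming the brcmu_wifi.h values PRIO_8021D_BE == 0
 * and PRIO_8021D_NONE == 2: the XOR with 2 swaps those two priorities
 * (0 -> 2, 2 -> 0) so precedence levels increase monotonically, while all
 * other 802.1D priorities map to themselves.
 */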
325 
326 #ifdef DEBUG
327 /* Device console log buffer state */
328 struct brcmf_console {
329 	uint count;		/* Poll interval msec counter */
330 	uint log_addr;		/* Log struct address (fixed) */
331 	struct rte_log_le log_le;	/* Log struct (host copy) */
332 	uint bufsize;		/* Size of log buffer */
333 	u8 *buf;		/* Log buffer (host copy) */
334 	uint last;		/* Last buffer read index */
335 };
336 
337 struct brcmf_trap_info {
338 	__le32		type;
339 	__le32		epc;
340 	__le32		cpsr;
341 	__le32		spsr;
342 	__le32		r0;	/* a1 */
343 	__le32		r1;	/* a2 */
344 	__le32		r2;	/* a3 */
345 	__le32		r3;	/* a4 */
346 	__le32		r4;	/* v1 */
347 	__le32		r5;	/* v2 */
348 	__le32		r6;	/* v3 */
349 	__le32		r7;	/* v4 */
350 	__le32		r8;	/* v5 */
351 	__le32		r9;	/* sb/v6 */
352 	__le32		r10;	/* sl/v7 */
353 	__le32		r11;	/* fp/v8 */
354 	__le32		r12;	/* ip */
355 	__le32		r13;	/* sp */
356 	__le32		r14;	/* lr */
357 	__le32		pc;	/* r15 */
358 };
359 #endif				/* DEBUG */
360 
361 struct sdpcm_shared {
362 	u32 flags;
363 	u32 trap_addr;
364 	u32 assert_exp_addr;
365 	u32 assert_file_addr;
366 	u32 assert_line;
367 	u32 console_addr;	/* Address of struct rte_console */
368 	u32 msgtrace_addr;
369 	u8 tag[32];
370 	u32 brpt_addr;
371 };
372 
373 struct sdpcm_shared_le {
374 	__le32 flags;
375 	__le32 trap_addr;
376 	__le32 assert_exp_addr;
377 	__le32 assert_file_addr;
378 	__le32 assert_line;
379 	__le32 console_addr;	/* Address of struct rte_console */
380 	__le32 msgtrace_addr;
381 	u8 tag[32];
382 	__le32 brpt_addr;
383 };
384 
385 /* dongle SDIO bus specific header info */
386 struct brcmf_sdio_hdrinfo {
387 	u8 seq_num;
388 	u8 channel;
389 	u16 len;
390 	u16 len_left;
391 	u16 len_nxtfrm;
392 	u8 dat_offset;
393 	bool lastfrm;
394 	u16 tail_pad;
395 };
396 
397 /*
398  * hold counter variables
399  */
400 struct brcmf_sdio_count {
401 	uint intrcount;		/* Count of device interrupt callbacks */
402 	uint lastintrs;		/* Count as of last watchdog timer */
403 	uint pollcnt;		/* Count of active polls */
404 	uint regfails;		/* Count of R_REG failures */
405 	uint tx_sderrs;		/* Count of tx attempts with sd errors */
406 	uint fcqueued;		/* Tx packets that got queued */
407 	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
408 	uint rx_toolong;	/* Receive frames too long to receive */
409 	uint rxc_errors;	/* SDIO errors when reading control frames */
410 	uint rx_hdrfail;	/* SDIO errors on header reads */
411 	uint rx_badhdr;		/* Bad received headers (roosync?) */
412 	uint rx_badseq;		/* Mismatched rx sequence number */
413 	uint fc_rcvd;		/* Number of flow-control events received */
414 	uint fc_xoff;		/* Number which turned on flow-control */
415 	uint fc_xon;		/* Number which turned off flow-control */
416 	uint rxglomfail;	/* Failed deglom attempts */
417 	uint rxglomframes;	/* Number of glom frames (superframes) */
418 	uint rxglompkts;	/* Number of packets from glom frames */
419 	uint f2rxhdrs;		/* Number of header reads */
420 	uint f2rxdata;		/* Number of frame data reads */
421 	uint f2txdata;		/* Number of f2 frame writes */
422 	uint f1regdata;		/* Number of f1 register accesses */
423 	uint tickcnt;		/* Number of times the watchdog was scheduled */
424 	ulong tx_ctlerrs;	/* Errors sending ctrl frames */
425 	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
426 	ulong rx_ctlerrs;	/* Errors processing rx ctrl frames */
427 	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
428 	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
429 };
430 
431 /* misc chip info needed by some of the routines */
432 /* Private data for SDIO bus interaction */
433 struct brcmf_sdio {
434 	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
435 	struct brcmf_chip *ci;	/* Chip info struct */
436 
437 	u32 hostintmask;	/* Copy of Host Interrupt Mask */
438 	atomic_t intstatus;	/* Intstatus bits (events) pending */
439 	atomic_t fcstate;	/* State of dongle flow-control */
440 
441 	uint blocksize;		/* Block size of SDIO transfers */
442 	uint roundup;		/* Max roundup limit */
443 
444 	struct pktq txq;	/* Queue length used for flow-control */
445 	u8 flowcontrol;	/* per prio flow control bitmask */
446 	u8 tx_seq;		/* Transmit sequence number (next) */
447 	u8 tx_max;		/* Maximum transmit sequence allowed */
448 
449 	u8 *hdrbuf;		/* buffer for handling rx frame */
450 	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
451 	u8 rx_seq;		/* Receive sequence number (expected) */
452 	struct brcmf_sdio_hdrinfo cur_read;
453 				/* info of current read frame */
454 	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
455 	bool rxpending;		/* Data frame pending in dongle */
456 
457 	uint rxbound;		/* Rx frames to read before resched */
458 	uint txbound;		/* Tx frames to send before resched */
459 	uint txminmax;
460 
461 	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
462 	struct sk_buff_head glom; /* Packet list for glommed superframe */
463 	uint glomerr;		/* Glom packet read errors */
464 
465 	u8 *rxbuf;		/* Buffer for receiving control packets */
466 	uint rxblen;		/* Allocated length of rxbuf */
467 	u8 *rxctl;		/* Aligned pointer into rxbuf */
468 	u8 *rxctl_orig;		/* pointer for freeing rxctl */
469 	uint rxlen;		/* Length of valid data in buffer */
470 	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */
471 
472 	u8 sdpcm_ver;	/* Bus protocol reported by dongle */
473 
474 	bool intr;		/* Use interrupts */
475 	bool poll;		/* Use polling */
476 	atomic_t ipend;		/* Device interrupt is pending */
477 	uint spurious;		/* Count of spurious interrupts */
478 	uint pollrate;		/* Ticks between device polls */
479 	uint polltick;		/* Tick counter */
480 
481 #ifdef DEBUG
482 	uint console_interval;
483 	struct brcmf_console console;	/* Console output polling support */
484 	uint console_addr;	/* Console address from shared struct */
485 #endif				/* DEBUG */
486 
487 	uint clkstate;		/* State of sd and backplane clock(s) */
488 	s32 idletime;		/* Control for activity timeout */
489 	s32 idlecount;		/* Activity timeout counter */
490 	s32 idleclock;		/* How to set bus driver when idle */
491 	bool rxflow_mode;	/* Rx flow control mode */
492 	bool rxflow;		/* Is rx flow control on */
493 	bool alp_only;		/* Don't use HT clock (ALP only) */
494 
495 	u8 *ctrl_frame_buf;
496 	u16 ctrl_frame_len;
497 	bool ctrl_frame_stat;
498 	int ctrl_frame_err;
499 
500 	spinlock_t txq_lock;		/* protect bus->txq */
501 	wait_queue_head_t ctrl_wait;
502 	wait_queue_head_t dcmd_resp_wait;
503 
504 	struct timer_list timer;
505 	struct completion watchdog_wait;
506 	struct task_struct *watchdog_tsk;
507 	bool wd_timer_valid;
508 	uint save_ms;
509 
510 	struct workqueue_struct *brcmf_wq;
511 	struct work_struct datawork;
512 	bool dpc_triggered;
513 	bool dpc_running;
514 
515 	bool txoff;		/* Transmit flow-controlled */
516 	struct brcmf_sdio_count sdcnt;
517 	bool sr_enabled; /* SaveRestore enabled */
518 	bool sleeping;
519 
520 	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
521 	bool txglom;		/* host tx glomming enable flag */
522 	u16 head_align;		/* buffer pointer alignment */
523 	u16 sgentry_align;	/* scatter-gather buffer alignment */
524 };
525 
526 /* clkstate */
527 #define CLK_NONE	0
528 #define CLK_SDONLY	1
529 #define CLK_PENDING	2
530 #define CLK_AVAIL	3
531 
532 #ifdef DEBUG
533 static int qcount[NUMPRIO];
534 #endif				/* DEBUG */
535 
536 #define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */
537 
538 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
539 
540 /* Retry count for register access failures */
541 static const uint retry_limit = 2;
542 
543 /* Limit on rounding up frames */
544 static const uint max_roundup = 512;
545 
546 #define ALIGNMENT  4
547 
548 enum brcmf_sdio_frmtype {
549 	BRCMF_SDIO_FT_NORMAL,
550 	BRCMF_SDIO_FT_SUPER,
551 	BRCMF_SDIO_FT_SUB,
552 };
553 
554 #define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
555 
556 /* SDIO Pad drive strength to select value mappings */
557 struct sdiod_drive_str {
558 	u8 strength;	/* Pad Drive Strength in mA */
559 	u8 sel;		/* Chip-specific select value */
560 };
561 
562 /* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
563 static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
564 	{32, 0x6},
565 	{26, 0x7},
566 	{22, 0x4},
567 	{16, 0x5},
568 	{12, 0x2},
569 	{8, 0x3},
570 	{4, 0x0},
571 	{0, 0x1}
572 };
573 
574 /* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
575 static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
576 	{6, 0x7},
577 	{5, 0x6},
578 	{4, 0x5},
579 	{3, 0x4},
580 	{2, 0x2},
581 	{1, 0x1},
582 	{0, 0x0}
583 };
584 
585 /* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
586 static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
587 	{3, 0x3},
588 	{2, 0x2},
589 	{1, 0x1},
590 	{0, 0x0} };
591 
592 /* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
593 static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
594 	{16, 0x7},
595 	{12, 0x5},
596 	{8,  0x3},
597 	{4,  0x1}
598 };
599 
600 #define BCM43143_FIRMWARE_NAME		"brcm/brcmfmac43143-sdio.bin"
601 #define BCM43143_NVRAM_NAME		"brcm/brcmfmac43143-sdio.txt"
602 #define BCM43241B0_FIRMWARE_NAME	"brcm/brcmfmac43241b0-sdio.bin"
603 #define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
604 #define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
605 #define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
606 #define BCM43241B5_FIRMWARE_NAME	"brcm/brcmfmac43241b5-sdio.bin"
607 #define BCM43241B5_NVRAM_NAME		"brcm/brcmfmac43241b5-sdio.txt"
608 #define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
609 #define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
610 #define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
611 #define BCM4330_NVRAM_NAME		"brcm/brcmfmac4330-sdio.txt"
612 #define BCM4334_FIRMWARE_NAME		"brcm/brcmfmac4334-sdio.bin"
613 #define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
614 #define BCM43340_FIRMWARE_NAME		"brcm/brcmfmac43340-sdio.bin"
615 #define BCM43340_NVRAM_NAME		"brcm/brcmfmac43340-sdio.txt"
616 #define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
617 #define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
618 #define BCM43362_FIRMWARE_NAME		"brcm/brcmfmac43362-sdio.bin"
619 #define BCM43362_NVRAM_NAME		"brcm/brcmfmac43362-sdio.txt"
620 #define BCM4339_FIRMWARE_NAME		"brcm/brcmfmac4339-sdio.bin"
621 #define BCM4339_NVRAM_NAME		"brcm/brcmfmac4339-sdio.txt"
622 #define BCM43430_FIRMWARE_NAME		"brcm/brcmfmac43430-sdio.bin"
623 #define BCM43430_NVRAM_NAME		"brcm/brcmfmac43430-sdio.txt"
624 #define BCM43455_FIRMWARE_NAME		"brcm/brcmfmac43455-sdio.bin"
625 #define BCM43455_NVRAM_NAME		"brcm/brcmfmac43455-sdio.txt"
626 #define BCM4354_FIRMWARE_NAME		"brcm/brcmfmac4354-sdio.bin"
627 #define BCM4354_NVRAM_NAME		"brcm/brcmfmac4354-sdio.txt"
628 
629 MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
630 MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
631 MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
632 MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
633 MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
634 MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
635 MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
636 MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
637 MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
638 MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
639 MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
640 MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
641 MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
642 MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
643 MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME);
644 MODULE_FIRMWARE(BCM43340_NVRAM_NAME);
645 MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
646 MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
647 MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
648 MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
649 MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
650 MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
651 MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
652 MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
653 MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
654 MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
655 MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
656 MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
657 
658 struct brcmf_firmware_names {
659 	u32 chipid;
660 	u32 revmsk;
661 	const char *bin;
662 	const char *nv;
663 };
664 
665 enum brcmf_firmware_type {
666 	BRCMF_FIRMWARE_BIN,
667 	BRCMF_FIRMWARE_NVRAM
668 };
669 
670 #define BRCMF_FIRMWARE_NVRAM(name) \
671 	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
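
/* Example expansion: BRCMF_FIRMWARE_NVRAM(BCM4329) yields the pair
 * "brcm/brcmfmac4329-sdio.bin", "brcm/brcmfmac4329-sdio.txt", matching the
 * bin/nv member order of struct brcmf_firmware_names in the table below.
 */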
672 
673 static const struct brcmf_firmware_names brcmf_fwname_data[] = {
674 	{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
675 	{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
676 	{ BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
677 	{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
678 	{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
679 	{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
680 	{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
681 	{ BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) },
682 	{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
683 	{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
684 	{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
685 	{ BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
686 	{ BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
687 	{ BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
688 };
689 
690 static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
691 				  struct brcmf_sdio_dev *sdiodev)
692 {
693 	int i;
694 	char end;
695 
696 	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
697 		if (brcmf_fwname_data[i].chipid == ci->chip &&
698 		    brcmf_fwname_data[i].revmsk & BIT(ci->chiprev))
699 			break;
700 	}
701 
702 	if (i == ARRAY_SIZE(brcmf_fwname_data)) {
703 		brcmf_err("Unknown chipid %d [%d]\n", ci->chip, ci->chiprev);
704 		return -ENODEV;
705 	}
706 
707 	/* check if firmware path is provided by module parameter */
708 	if (brcmf_firmware_path[0] != '\0') {
709 		strlcpy(sdiodev->fw_name, brcmf_firmware_path,
710 			sizeof(sdiodev->fw_name));
711 		strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
712 			sizeof(sdiodev->nvram_name));
713 
714 		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
715 		if (end != '/') {
716 			strlcat(sdiodev->fw_name, "/",
717 				sizeof(sdiodev->fw_name));
718 			strlcat(sdiodev->nvram_name, "/",
719 				sizeof(sdiodev->nvram_name));
720 		}
721 	}
722 	strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
723 		sizeof(sdiodev->fw_name));
724 	strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
725 		sizeof(sdiodev->nvram_name));
726 
727 	return 0;
728 }
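
/* Example (hypothetical path): with the module parameter brcmf_firmware_path
 * set to "/opt/fw" and a BCM4329 chip, the code above would produce
 * fw_name = "/opt/fw/brcm/brcmfmac4329-sdio.bin" and
 * nvram_name = "/opt/fw/brcm/brcmfmac4329-sdio.txt".
 */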
729 
730 static void pkt_align(struct sk_buff *p, int len, int align)
731 {
732 	uint datalign;
733 	datalign = (unsigned long)(p->data);
734 	datalign = roundup(datalign, (align)) - datalign;
735 	if (datalign)
736 		skb_pull(p, datalign);
737 	__skb_trim(p, len);
738 }
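
/* Worked example: for a packet whose data pointer ends in ...0x02 and
 * align == 4, datalign becomes 2, so two bytes are pulled to make the
 * payload start 4-byte aligned before the buffer is trimmed to len.
 */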
739 
740 /* To check if there's window offered */
741 static bool data_ok(struct brcmf_sdio *bus)
742 {
743 	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
744 	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
745 }
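
/* Example of the modulo-256 window check above: with tx_max == 0x03 and
 * tx_seq == 0xFE the difference (u8)(0x03 - 0xFE) == 5, which is non-zero
 * and has bit 7 clear, so up to five more frames may be sent.
 */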
746 
747 /*
748  * Reads a register in the SDIO hardware block. This block occupies a series of
749  * addresses on the 32-bit backplane bus.
750  */
751 static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
752 {
753 	struct brcmf_core *core;
754 	int ret;
755 
756 	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
757 	*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
758 
759 	return ret;
760 }
761 
762 static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
763 {
764 	struct brcmf_core *core;
765 	int ret;
766 
767 	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
768 	brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
769 
770 	return ret;
771 }
772 
773 static int
774 brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
775 {
776 	u8 wr_val = 0, rd_val, cmp_val, bmask;
777 	int err = 0;
778 	int try_cnt = 0;
779 
780 	brcmf_dbg(TRACE, "Enter: on=%d\n", on);
781 
782 	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
783 	/* 1st KSO write goes to the AOS wake-up core if the device is asleep */
784 	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
785 			  wr_val, &err);
786 
787 	if (on) {
788 		/* device WAKEUP through KSO:
789 		 * write bit 0 & read back until
790 		 * both bits 0 (kso bit) & 1 (dev on status) are set
791 		 */
792 		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
793 			  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
794 		bmask = cmp_val;
795 		usleep_range(2000, 3000);
796 	} else {
797 		/* Put device to sleep, turn off KSO */
798 		cmp_val = 0;
799 		/* only check for bit0, bit1(dev on status) may not
800 		 * get cleared right away
801 		 */
802 		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
803 	}
804 
805 	do {
806 		/* reliable KSO bit set/clr:
807 		 * the sdiod sleep write access is synced to the PMU 32 kHz clock,
808 		 * so a single write attempt may fail;
809 		 * read it back until it matches the written value
810 		 */
811 		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
812 					   &err);
813 		if (((rd_val & bmask) == cmp_val) && !err)
814 			break;
815 
816 		udelay(KSO_WAIT_US);
817 		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
818 				  wr_val, &err);
819 	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
820 
821 	if (try_cnt > 2)
822 		brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
823 			  rd_val, err);
824 
825 	if (try_cnt > MAX_KSO_ATTEMPTS)
826 		brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
827 
828 	return err;
829 }
830 
831 #define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
832 
833 /* Turn backplane clock on or off */
834 static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
835 {
836 	int err;
837 	u8 clkctl, clkreq, devctl;
838 	unsigned long timeout;
839 
840 	brcmf_dbg(SDIO, "Enter\n");
841 
842 	clkctl = 0;
843 
844 	if (bus->sr_enabled) {
845 		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
846 		return 0;
847 	}
848 
849 	if (on) {
850 		/* Request HT Avail */
851 		clkreq =
852 		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
853 
854 		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
855 				  clkreq, &err);
856 		if (err) {
857 			brcmf_err("HT Avail request error: %d\n", err);
858 			return -EBADE;
859 		}
860 
861 		/* Check current status */
862 		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
863 					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
864 		if (err) {
865 			brcmf_err("HT Avail read error: %d\n", err);
866 			return -EBADE;
867 		}
868 
869 		/* Go to pending and await interrupt if appropriate */
870 		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
871 			/* Allow only clock-available interrupt */
872 			devctl = brcmf_sdiod_regrb(bus->sdiodev,
873 						   SBSDIO_DEVICE_CTL, &err);
874 			if (err) {
875 				brcmf_err("Devctl error setting CA: %d\n",
876 					  err);
877 				return -EBADE;
878 			}
879 
880 			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
881 			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
882 					  devctl, &err);
883 			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
884 			bus->clkstate = CLK_PENDING;
885 
886 			return 0;
887 		} else if (bus->clkstate == CLK_PENDING) {
888 			/* Cancel CA-only interrupt filter */
889 			devctl = brcmf_sdiod_regrb(bus->sdiodev,
890 						   SBSDIO_DEVICE_CTL, &err);
891 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
892 			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
893 					  devctl, &err);
894 		}
895 
896 		/* Otherwise, wait here (polling) for HT Avail */
897 		timeout = jiffies +
898 			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
899 		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
900 			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
901 						   SBSDIO_FUNC1_CHIPCLKCSR,
902 						   &err);
903 			if (time_after(jiffies, timeout))
904 				break;
905 			else
906 				usleep_range(5000, 10000);
907 		}
908 		if (err) {
909 			brcmf_err("HT Avail request error: %d\n", err);
910 			return -EBADE;
911 		}
912 		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
913 			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
914 				  PMU_MAX_TRANSITION_DLY, clkctl);
915 			return -EBADE;
916 		}
917 
918 		/* Mark clock available */
919 		bus->clkstate = CLK_AVAIL;
920 		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
921 
922 #if defined(DEBUG)
923 		if (!bus->alp_only) {
924 			if (SBSDIO_ALPONLY(clkctl))
925 				brcmf_err("HT Clock should be on\n");
926 		}
927 #endif				/* defined (DEBUG) */
928 
929 	} else {
930 		clkreq = 0;
931 
932 		if (bus->clkstate == CLK_PENDING) {
933 			/* Cancel CA-only interrupt filter */
934 			devctl = brcmf_sdiod_regrb(bus->sdiodev,
935 						   SBSDIO_DEVICE_CTL, &err);
936 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
937 			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
938 					  devctl, &err);
939 		}
940 
941 		bus->clkstate = CLK_SDONLY;
942 		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
943 				  clkreq, &err);
944 		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
945 		if (err) {
946 			brcmf_err("Failed access turning clock off: %d\n",
947 				  err);
948 			return -EBADE;
949 		}
950 	}
951 	return 0;
952 }
953 
954 /* Change idle/active SD state */
955 static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
956 {
957 	brcmf_dbg(SDIO, "Enter\n");
958 
959 	if (on)
960 		bus->clkstate = CLK_SDONLY;
961 	else
962 		bus->clkstate = CLK_NONE;
963 
964 	return 0;
965 }
966 
967 /* Transition SD and backplane clock readiness */
968 static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
969 {
970 #ifdef DEBUG
971 	uint oldstate = bus->clkstate;
972 #endif				/* DEBUG */
973 
974 	brcmf_dbg(SDIO, "Enter\n");
975 
976 	/* Early exit if we're already there */
977 	if (bus->clkstate == target)
978 		return 0;
979 
980 	switch (target) {
981 	case CLK_AVAIL:
982 		/* Make sure SD clock is available */
983 		if (bus->clkstate == CLK_NONE)
984 			brcmf_sdio_sdclk(bus, true);
985 		/* Now request HT Avail on the backplane */
986 		brcmf_sdio_htclk(bus, true, pendok);
987 		break;
988 
989 	case CLK_SDONLY:
990 		/* Remove HT request, or bring up SD clock */
991 		if (bus->clkstate == CLK_NONE)
992 			brcmf_sdio_sdclk(bus, true);
993 		else if (bus->clkstate == CLK_AVAIL)
994 			brcmf_sdio_htclk(bus, false, false);
995 		else
996 			brcmf_err("request for %d -> %d\n",
997 				  bus->clkstate, target);
998 		break;
999 
1000 	case CLK_NONE:
1001 		/* Make sure to remove HT request */
1002 		if (bus->clkstate == CLK_AVAIL)
1003 			brcmf_sdio_htclk(bus, false, false);
1004 		/* Now remove the SD clock */
1005 		brcmf_sdio_sdclk(bus, false);
1006 		break;
1007 	}
1008 #ifdef DEBUG
1009 	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
1010 #endif				/* DEBUG */
1011 
1012 	return 0;
1013 }
1014 
1015 static int
1016 brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
1017 {
1018 	int err = 0;
1019 	u8 clkcsr;
1020 
1021 	brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
1022 		  (sleep ? "SLEEP" : "WAKE"),
1023 		  (bus->sleeping ? "SLEEP" : "WAKE"));
1024 
1025 	/* If SR is enabled control bus state with KSO */
1026 	if (bus->sr_enabled) {
1027 		/* Done if we're already in the requested state */
1028 		if (sleep == bus->sleeping)
1029 			goto end;
1030 
1031 		/* Going to sleep */
1032 		if (sleep) {
1033 			clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
1034 						   SBSDIO_FUNC1_CHIPCLKCSR,
1035 						   &err);
1036 			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
1037 				brcmf_dbg(SDIO, "no clock, set ALP\n");
1038 				brcmf_sdiod_regwb(bus->sdiodev,
1039 						  SBSDIO_FUNC1_CHIPCLKCSR,
1040 						  SBSDIO_ALP_AVAIL_REQ, &err);
1041 			}
1042 			err = brcmf_sdio_kso_control(bus, false);
1043 		} else {
1044 			err = brcmf_sdio_kso_control(bus, true);
1045 		}
1046 		if (err) {
1047 			brcmf_err("error while changing bus sleep state %d\n",
1048 				  err);
1049 			goto done;
1050 		}
1051 	}
1052 
1053 end:
1054 	/* control clocks */
1055 	if (sleep) {
1056 		if (!bus->sr_enabled)
1057 			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
1058 	} else {
1059 		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
1060 		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
1061 	}
1062 	bus->sleeping = sleep;
1063 	brcmf_dbg(SDIO, "new state %s\n",
1064 		  (sleep ? "SLEEP" : "WAKE"));
1065 done:
1066 	brcmf_dbg(SDIO, "Exit: err=%d\n", err);
1067 	return err;
1068 
1069 }
1070 
1071 #ifdef DEBUG
1072 static inline bool brcmf_sdio_valid_shared_address(u32 addr)
1073 {
1074 	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
1075 }
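
/* Note: before firmware writes the real sdpcm_shared address, the last word
 * of RAM is expected to hold the NVRAM length token, whose upper halfword is
 * the bitwise inverse of its lower halfword; such a value (or zero) is
 * rejected here as "not yet overwritten" (see the check in
 * brcmf_sdio_readshared() below).
 */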
1076 
1077 static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1078 				 struct sdpcm_shared *sh)
1079 {
1080 	u32 addr = 0;
1081 	int rv;
1082 	u32 shaddr = 0;
1083 	struct sdpcm_shared_le sh_le;
1084 	__le32 addr_le;
1085 
1086 	sdio_claim_host(bus->sdiodev->func[1]);
1087 	brcmf_sdio_bus_sleep(bus, false, false);
1088 
1089 	/*
1090 	 * Read last word in socram to determine
1091 	 * address of sdpcm_shared structure
1092 	 */
1093 	shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
1094 	if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
1095 		shaddr -= bus->ci->srsize;
1096 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
1097 			       (u8 *)&addr_le, 4);
1098 	if (rv < 0)
1099 		goto fail;
1100 
1101 	/*
1102 	 * Check if addr is valid.
1103 	 * NVRAM length at the end of memory should have been overwritten.
1104 	 */
1105 	addr = le32_to_cpu(addr_le);
1106 	if (!brcmf_sdio_valid_shared_address(addr)) {
1107 		brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
1108 		rv = -EINVAL;
1109 		goto fail;
1110 	}
1111 
1112 	brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
1113 
1114 	/* Read hndrte_shared structure */
1115 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
1116 			       sizeof(struct sdpcm_shared_le));
1117 	if (rv < 0)
1118 		goto fail;
1119 
1120 	sdio_release_host(bus->sdiodev->func[1]);
1121 
1122 	/* Endianness */
1123 	sh->flags = le32_to_cpu(sh_le.flags);
1124 	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
1125 	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
1126 	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
1127 	sh->assert_line = le32_to_cpu(sh_le.assert_line);
1128 	sh->console_addr = le32_to_cpu(sh_le.console_addr);
1129 	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
1130 
1131 	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
1132 		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
1133 			  SDPCM_SHARED_VERSION,
1134 			  sh->flags & SDPCM_SHARED_VERSION_MASK);
1135 		return -EPROTO;
1136 	}
1137 	return 0;
1138 
1139 fail:
1140 	brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
1141 		  rv, addr);
1142 	sdio_release_host(bus->sdiodev->func[1]);
1143 	return rv;
1144 }
1145 
1146 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1147 {
1148 	struct sdpcm_shared sh;
1149 
1150 	if (brcmf_sdio_readshared(bus, &sh) == 0)
1151 		bus->console_addr = sh.console_addr;
1152 }
1153 #else
1154 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
1155 {
1156 }
1157 #endif /* DEBUG */
1158 
1159 static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
1160 {
1161 	u32 intstatus = 0;
1162 	u32 hmb_data;
1163 	u8 fcbits;
1164 	int ret;
1165 
1166 	brcmf_dbg(SDIO, "Enter\n");
1167 
1168 	/* Read mailbox data and ack that we did so */
1169 	ret = r_sdreg32(bus, &hmb_data,
1170 			offsetof(struct sdpcmd_regs, tohostmailboxdata));
1171 
1172 	if (ret == 0)
1173 		w_sdreg32(bus, SMB_INT_ACK,
1174 			  offsetof(struct sdpcmd_regs, tosbmailbox));
1175 	bus->sdcnt.f1regdata += 2;
1176 
1177 	/* Dongle recomposed rx frames, accept them again */
1178 	if (hmb_data & HMB_DATA_NAKHANDLED) {
1179 		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
1180 			  bus->rx_seq);
1181 		if (!bus->rxskip)
1182 			brcmf_err("unexpected NAKHANDLED!\n");
1183 
1184 		bus->rxskip = false;
1185 		intstatus |= I_HMB_FRAME_IND;
1186 	}
1187 
1188 	/*
1189 	 * DEVREADY does not occur with gSPI.
1190 	 */
1191 	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
1192 		bus->sdpcm_ver =
1193 		    (hmb_data & HMB_DATA_VERSION_MASK) >>
1194 		    HMB_DATA_VERSION_SHIFT;
1195 		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
1196 			brcmf_err("Version mismatch, dongle reports %d, "
1197 				  "expecting %d\n",
1198 				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
1199 		else
1200 			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
1201 				  bus->sdpcm_ver);
1202 
1203 		/*
1204 		 * Retrieve console state address now that firmware should have
1205 		 * updated it.
1206 		 */
1207 		brcmf_sdio_get_console_addr(bus);
1208 	}
1209 
1210 	/*
1211 	 * Flow Control has been moved into the RX headers and this out of band
1212 	 * method isn't used any more. It is handled here only to remain
1213 	 * backward compatible with older dongles.
1214 	 */
1215 	if (hmb_data & HMB_DATA_FC) {
1216 		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
1217 							HMB_DATA_FCDATA_SHIFT;
1218 
1219 		if (fcbits & ~bus->flowcontrol)
1220 			bus->sdcnt.fc_xoff++;
1221 
1222 		if (bus->flowcontrol & ~fcbits)
1223 			bus->sdcnt.fc_xon++;
1224 
1225 		bus->sdcnt.fc_rcvd++;
1226 		bus->flowcontrol = fcbits;
1227 	}
1228 
1229 	/* Shouldn't be any others */
1230 	if (hmb_data & ~(HMB_DATA_DEVREADY |
1231 			 HMB_DATA_NAKHANDLED |
1232 			 HMB_DATA_FC |
1233 			 HMB_DATA_FWREADY |
1234 			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
1235 		brcmf_err("Unknown mailbox data content: 0x%02x\n",
1236 			  hmb_data);
1237 
1238 	return intstatus;
1239 }
1240 
1241 static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1242 {
1243 	uint retries = 0;
1244 	u16 lastrbc;
1245 	u8 hi, lo;
1246 	int err;
1247 
1248 	brcmf_err("%sterminate frame%s\n",
1249 		  abort ? "abort command, " : "",
1250 		  rtx ? ", send NAK" : "");
1251 
1252 	if (abort)
1253 		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
1254 
1255 	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1256 			  SFC_RF_TERM, &err);
1257 	bus->sdcnt.f1regdata++;
1258 
1259 	/* Wait until the packet has been flushed (device/FIFO stable) */
1260 	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
1261 		hi = brcmf_sdiod_regrb(bus->sdiodev,
1262 				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
1263 		lo = brcmf_sdiod_regrb(bus->sdiodev,
1264 				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
1265 		bus->sdcnt.f1regdata += 2;
1266 
1267 		if ((hi == 0) && (lo == 0))
1268 			break;
1269 
1270 		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
1271 			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1272 				  lastrbc, (hi << 8) + lo);
1273 		}
1274 		lastrbc = (hi << 8) + lo;
1275 	}
1276 
1277 	if (!retries)
1278 		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
1279 	else
1280 		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
1281 
1282 	if (rtx) {
1283 		bus->sdcnt.rxrtx++;
1284 		err = w_sdreg32(bus, SMB_NAK,
1285 				offsetof(struct sdpcmd_regs, tosbmailbox));
1286 
1287 		bus->sdcnt.f1regdata++;
1288 		if (err == 0)
1289 			bus->rxskip = true;
1290 	}
1291 
1292 	/* Clear partial in any case */
1293 	bus->cur_read.len = 0;
1294 }
1295 
1296 static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
1297 {
1298 	struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
1299 	u8 i, hi, lo;
1300 
1301 	/* On failure, abort the command and terminate the frame */
1302 	brcmf_err("sdio error, abort command and terminate frame\n");
1303 	bus->sdcnt.tx_sderrs++;
1304 
1305 	brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
1306 	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
1307 	bus->sdcnt.f1regdata++;
1308 
1309 	for (i = 0; i < 3; i++) {
1310 		hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
1311 		lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
1312 		bus->sdcnt.f1regdata += 2;
1313 		if ((hi == 0) && (lo == 0))
1314 			break;
1315 	}
1316 }
1317 
1318 /* return total length of buffer chain */
1319 static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
1320 {
1321 	struct sk_buff *p;
1322 	uint total;
1323 
1324 	total = 0;
1325 	skb_queue_walk(&bus->glom, p)
1326 		total += p->len;
1327 	return total;
1328 }
1329 
1330 static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
1331 {
1332 	struct sk_buff *cur, *next;
1333 
1334 	skb_queue_walk_safe(&bus->glom, cur, next) {
1335 		skb_unlink(cur, &bus->glom);
1336 		brcmu_pkt_buf_free_skb(cur);
1337 	}
1338 }
1339 
1340 /**
1341  * brcmfmac sdio bus specific header
1342  * This is the lowest layer header wrapped around the packets transmitted
1343  * between the host and the WiFi dongle. It contains information needed by
1344  * the SDIO core and the firmware.
1345  *
1346  * It consists of 3 parts: hardware header, hardware extension header and
1347  * software header
1348  * hardware header (frame tag) - 4 bytes
1349  * Byte 0~1: Frame length
1350  * Byte 2~3: Checksum, bit-wise inverse of frame length
1351  * hardware extension header - 8 bytes
1352  * Tx glom mode only, N/A for Rx or normal Tx
1353  * Byte 0~1: Packet length excluding hw frame tag
1354  * Byte 2: Reserved
1355  * Byte 3: Frame flags, bit 0: last frame indication
1356  * Byte 4~5: Reserved
1357  * Byte 6~7: Tail padding length
1358  * software header - 8 bytes
1359  * Byte 0: Rx/Tx sequence number
1360  * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
1361  * Byte 2: Length of next data frame, reserved for Tx
1362  * Byte 3: Data offset
1363  * Byte 4: Flow control bits, reserved for Tx
1364  * Byte 5: Maximum sequence number allowed by firmware for Tx; N/A for Tx packets
1365  * Byte 6~7: Reserved
1366  */
1367 #define SDPCM_HWHDR_LEN			4
1368 #define SDPCM_HWEXT_LEN			8
1369 #define SDPCM_SWHDR_LEN			8
1370 #define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
1371 /* software header */
1372 #define SDPCM_SEQ_MASK			0x000000ff
1373 #define SDPCM_SEQ_WRAP			256
1374 #define SDPCM_CHANNEL_MASK		0x00000f00
1375 #define SDPCM_CHANNEL_SHIFT		8
1376 #define SDPCM_CONTROL_CHANNEL		0	/* Control */
1377 #define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
1378 #define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
1379 #define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
1380 #define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
1381 #define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
1382 #define SDPCM_NEXTLEN_MASK		0x00ff0000
1383 #define SDPCM_NEXTLEN_SHIFT		16
1384 #define SDPCM_DOFFSET_MASK		0xff000000
1385 #define SDPCM_DOFFSET_SHIFT		24
1386 #define SDPCM_FCMASK_MASK		0x000000ff
1387 #define SDPCM_WINDOW_MASK		0x0000ff00
1388 #define SDPCM_WINDOW_SHIFT		8
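
/* Illustrative decode of a little-endian software header word using the
 * masks above: swheader == 0x0c000205 gives sequence number 0x05, channel 2
 * (SDPCM_DATA_CHANNEL), next-frame length 0 and data offset 12 bytes.
 */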
1389 
1390 static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
1391 {
1392 	u32 hdrvalue;
1393 	hdrvalue = *(u32 *)swheader;
1394 	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
1395 }
1396 
1397 static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
1398 			      struct brcmf_sdio_hdrinfo *rd,
1399 			      enum brcmf_sdio_frmtype type)
1400 {
1401 	u16 len, checksum;
1402 	u8 rx_seq, fc, tx_seq_max;
1403 	u32 swheader;
1404 
1405 	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
1406 
1407 	/* hw header */
1408 	len = get_unaligned_le16(header);
1409 	checksum = get_unaligned_le16(header + sizeof(u16));
1410 	/* All zero means no more to read */
1411 	if (!(len | checksum)) {
1412 		bus->rxpending = false;
1413 		return -ENODATA;
1414 	}
1415 	if ((u16)(~(len ^ checksum))) {
1416 		brcmf_err("HW header checksum error\n");
1417 		bus->sdcnt.rx_badhdr++;
1418 		brcmf_sdio_rxfail(bus, false, false);
1419 		return -EIO;
1420 	}
1421 	if (len < SDPCM_HDRLEN) {
1422 		brcmf_err("HW header length error\n");
1423 		return -EPROTO;
1424 	}
1425 	if (type == BRCMF_SDIO_FT_SUPER &&
1426 	    (roundup(len, bus->blocksize) != rd->len)) {
1427 		brcmf_err("HW superframe header length error\n");
1428 		return -EPROTO;
1429 	}
1430 	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1431 		brcmf_err("HW subframe header length error\n");
1432 		return -EPROTO;
1433 	}
1434 	rd->len = len;
1435 
1436 	/* software header */
1437 	header += SDPCM_HWHDR_LEN;
1438 	swheader = le32_to_cpu(*(__le32 *)header);
1439 	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
1440 		brcmf_err("Glom descriptor found in superframe head\n");
1441 		rd->len = 0;
1442 		return -EINVAL;
1443 	}
1444 	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
1445 	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
1446 	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1447 	    type != BRCMF_SDIO_FT_SUPER) {
1448 		brcmf_err("HW header length too long\n");
1449 		bus->sdcnt.rx_toolong++;
1450 		brcmf_sdio_rxfail(bus, false, false);
1451 		rd->len = 0;
1452 		return -EPROTO;
1453 	}
1454 	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1455 		brcmf_err("Wrong channel for superframe\n");
1456 		rd->len = 0;
1457 		return -EINVAL;
1458 	}
1459 	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1460 	    rd->channel != SDPCM_EVENT_CHANNEL) {
1461 		brcmf_err("Wrong channel for subframe\n");
1462 		rd->len = 0;
1463 		return -EINVAL;
1464 	}
1465 	rd->dat_offset = brcmf_sdio_getdatoffset(header);
1466 	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1467 		brcmf_err("seq %d: bad data offset\n", rx_seq);
1468 		bus->sdcnt.rx_badhdr++;
1469 		brcmf_sdio_rxfail(bus, false, false);
1470 		rd->len = 0;
1471 		return -ENXIO;
1472 	}
1473 	if (rd->seq_num != rx_seq) {
1474 		brcmf_err("seq %d: sequence number error, expect %d\n",
1475 			  rx_seq, rd->seq_num);
1476 		bus->sdcnt.rx_badseq++;
1477 		rd->seq_num = rx_seq;
1478 	}
1479 	/* no need to check the rest for a subframe */
1480 	if (type == BRCMF_SDIO_FT_SUB)
1481 		return 0;
1482 	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
1483 	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1484 		/* only warn for non-glom packets */
1485 		if (rd->channel != SDPCM_GLOM_CHANNEL)
1486 			brcmf_err("seq %d: next length error\n", rx_seq);
1487 		rd->len_nxtfrm = 0;
1488 	}
1489 	swheader = le32_to_cpu(*(__le32 *)(header + 4));
1490 	fc = swheader & SDPCM_FCMASK_MASK;
1491 	if (bus->flowcontrol != fc) {
1492 		if (~bus->flowcontrol & fc)
1493 			bus->sdcnt.fc_xoff++;
1494 		if (bus->flowcontrol & ~fc)
1495 			bus->sdcnt.fc_xon++;
1496 		bus->sdcnt.fc_rcvd++;
1497 		bus->flowcontrol = fc;
1498 	}
1499 	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
1500 	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1501 		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1502 		tx_seq_max = bus->tx_seq + 2;
1503 	}
1504 	bus->tx_max = tx_seq_max;
1505 
1506 	return 0;
1507 }
1508 
1509 static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
1510 {
1511 	*(__le16 *)header = cpu_to_le16(frm_length);
1512 	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
1513 }
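
/* Example: for frm_length == 0x0800 the hardware header written above is
 * 00 08 ff f7 on the wire, i.e. the little-endian length followed by its
 * bit-wise inverse (0xf7ff) used as the checksum.
 */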
1514 
1515 static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
1516 			      struct brcmf_sdio_hdrinfo *hd_info)
1517 {
1518 	u32 hdrval;
1519 	u8 hdr_offset;
1520 
1521 	brcmf_sdio_update_hwhdr(header, hd_info->len);
1522 	hdr_offset = SDPCM_HWHDR_LEN;
1523 
1524 	if (bus->txglom) {
1525 		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
1526 		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1527 		hdrval = (u16)hd_info->tail_pad << 16;
1528 		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
1529 		hdr_offset += SDPCM_HWEXT_LEN;
1530 	}
1531 
1532 	hdrval = hd_info->seq_num;
1533 	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
1534 		  SDPCM_CHANNEL_MASK;
1535 	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
1536 		  SDPCM_DOFFSET_MASK;
1537 	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
1538 	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
1539 	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
1540 }
1541 
1542 static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1543 {
1544 	u16 dlen, totlen;
1545 	u8 *dptr, num = 0;
1546 	u16 sublen;
1547 	struct sk_buff *pfirst, *pnext;
1548 
1549 	int errcode;
1550 	u8 doff, sfdoff;
1551 
1552 	struct brcmf_sdio_hdrinfo rd_new;
1553 
1554 	/* If packets, issue read(s) and send up packet chain */
1555 	/* Return sequence numbers consumed? */
1556 
1557 	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1558 		  bus->glomd, skb_peek(&bus->glom));
1559 
1560 	/* If there's a descriptor, generate the packet chain */
1561 	if (bus->glomd) {
1562 		pfirst = pnext = NULL;
1563 		dlen = (u16) (bus->glomd->len);
1564 		dptr = bus->glomd->data;
1565 		if (!dlen || (dlen & 1)) {
1566 			brcmf_err("bad glomd len(%d), ignore descriptor\n",
1567 				  dlen);
1568 			dlen = 0;
1569 		}
1570 
1571 		for (totlen = num = 0; dlen; num++) {
1572 			/* Get (and move past) next length */
1573 			sublen = get_unaligned_le16(dptr);
1574 			dlen -= sizeof(u16);
1575 			dptr += sizeof(u16);
1576 			if ((sublen < SDPCM_HDRLEN) ||
1577 			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1578 				brcmf_err("descriptor len %d bad: %d\n",
1579 					  num, sublen);
1580 				pnext = NULL;
1581 				break;
1582 			}
1583 			if (sublen % bus->sgentry_align) {
1584 				brcmf_err("sublen %d not multiple of %d\n",
1585 					  sublen, bus->sgentry_align);
1586 			}
1587 			totlen += sublen;
1588 
1589 			/* For last frame, adjust read len so total
1590 				 is a block multiple */
1591 			if (!dlen) {
1592 				sublen +=
1593 				    (roundup(totlen, bus->blocksize) - totlen);
1594 				totlen = roundup(totlen, bus->blocksize);
1595 			}
1596 
1597 			/* Allocate/chain packet for next subframe */
1598 			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
1599 			if (pnext == NULL) {
1600 				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1601 					  num, sublen);
1602 				break;
1603 			}
1604 			skb_queue_tail(&bus->glom, pnext);
1605 
1606 			/* Adhere to start alignment requirements */
1607 			pkt_align(pnext, sublen, bus->sgentry_align);
1608 		}
1609 
1610 		/* If all allocations succeeded, save packet chain
1611 			 in bus structure */
1612 		if (pnext) {
1613 			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
1614 				  totlen, num);
1615 			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
1616 			    totlen != bus->cur_read.len) {
1617 				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1618 					  bus->cur_read.len, totlen, rxseq);
1619 			}
1620 			pfirst = pnext = NULL;
1621 		} else {
1622 			brcmf_sdio_free_glom(bus);
1623 			num = 0;
1624 		}
1625 
1626 		/* Done with descriptor packet */
1627 		brcmu_pkt_buf_free_skb(bus->glomd);
1628 		bus->glomd = NULL;
1629 		bus->cur_read.len = 0;
1630 	}
1631 
1632 	/* Ok -- either we just generated a packet chain,
1633 		 or had one from before */
1634 	if (!skb_queue_empty(&bus->glom)) {
1635 		if (BRCMF_GLOM_ON()) {
1636 			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
1637 			skb_queue_walk(&bus->glom, pnext) {
1638 				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
1639 					  pnext, (u8 *) (pnext->data),
1640 					  pnext->len, pnext->len);
1641 			}
1642 		}
1643 
1644 		pfirst = skb_peek(&bus->glom);
1645 		dlen = (u16) brcmf_sdio_glom_len(bus);
1646 
1647 		/* Do an SDIO read for the superframe.  Configurable iovar to
1648 		 * read directly into the chained packet, or allocate a large
1649 		 * packet and copy into the chain.
1650 		 */
1651 		sdio_claim_host(bus->sdiodev->func[1]);
1652 		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
1653 						 &bus->glom, dlen);
1654 		sdio_release_host(bus->sdiodev->func[1]);
1655 		bus->sdcnt.f2rxdata++;
1656 
1657 		/* On failure, kill the superframe, allow a couple retries */
1658 		if (errcode < 0) {
1659 			brcmf_err("glom read of %d bytes failed: %d\n",
1660 				  dlen, errcode);
1661 
1662 			sdio_claim_host(bus->sdiodev->func[1]);
1663 			if (bus->glomerr++ < 3) {
1664 				brcmf_sdio_rxfail(bus, true, true);
1665 			} else {
1666 				bus->glomerr = 0;
1667 				brcmf_sdio_rxfail(bus, true, false);
1668 				bus->sdcnt.rxglomfail++;
1669 				brcmf_sdio_free_glom(bus);
1670 			}
1671 			sdio_release_host(bus->sdiodev->func[1]);
1672 			return 0;
1673 		}
1674 
1675 		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1676 				   pfirst->data, min_t(int, pfirst->len, 48),
1677 				   "SUPERFRAME:\n");
1678 
1679 		rd_new.seq_num = rxseq;
1680 		rd_new.len = dlen;
1681 		sdio_claim_host(bus->sdiodev->func[1]);
1682 		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
1683 					     BRCMF_SDIO_FT_SUPER);
1684 		sdio_release_host(bus->sdiodev->func[1]);
1685 		bus->cur_read.len = rd_new.len_nxtfrm << 4;
1686 
1687 		/* Remove superframe header, remember offset */
1688 		skb_pull(pfirst, rd_new.dat_offset);
1689 		sfdoff = rd_new.dat_offset;
1690 		num = 0;
1691 
1692 		/* Validate all the subframe headers */
1693 		skb_queue_walk(&bus->glom, pnext) {
1694 			/* leave when invalid subframe is found */
1695 			if (errcode)
1696 				break;
1697 
1698 			rd_new.len = pnext->len;
1699 			rd_new.seq_num = rxseq++;
1700 			sdio_claim_host(bus->sdiodev->func[1]);
1701 			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
1702 						     BRCMF_SDIO_FT_SUB);
1703 			sdio_release_host(bus->sdiodev->func[1]);
1704 			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1705 					   pnext->data, 32, "subframe:\n");
1706 
1707 			num++;
1708 		}
1709 
1710 		if (errcode) {
1711 			/* Terminate frame on error, request
1712 			 * a couple retries */
1713 			sdio_claim_host(bus->sdiodev->func[1]);
1714 			if (bus->glomerr++ < 3) {
1715 				/* Restore superframe header space */
1716 				skb_push(pfirst, sfdoff);
1717 				brcmf_sdio_rxfail(bus, true, true);
1718 			} else {
1719 				bus->glomerr = 0;
1720 				brcmf_sdio_rxfail(bus, true, false);
1721 				bus->sdcnt.rxglomfail++;
1722 				brcmf_sdio_free_glom(bus);
1723 			}
1724 			sdio_release_host(bus->sdiodev->func[1]);
1725 			bus->cur_read.len = 0;
1726 			return 0;
1727 		}
1728 
1729 		/* Basic SD framing looks ok - process each packet (header) */
1730 
1731 		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1732 			dptr = (u8 *) (pfirst->data);
1733 			sublen = get_unaligned_le16(dptr);
1734 			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
1735 
1736 			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1737 					   dptr, pfirst->len,
1738 					   "Rx Subframe Data:\n");
1739 
1740 			__skb_trim(pfirst, sublen);
1741 			skb_pull(pfirst, doff);
1742 
1743 			if (pfirst->len == 0) {
1744 				skb_unlink(pfirst, &bus->glom);
1745 				brcmu_pkt_buf_free_skb(pfirst);
1746 				continue;
1747 			}
1748 
1749 			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1750 					   pfirst->data,
1751 					   min_t(int, pfirst->len, 32),
1752 					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1753 					   bus->glom.qlen, pfirst, pfirst->data,
1754 					   pfirst->len, pfirst->next,
1755 					   pfirst->prev);
1756 			skb_unlink(pfirst, &bus->glom);
1757 			brcmf_rx_frame(bus->sdiodev->dev, pfirst);
1758 			bus->sdcnt.rxglompkts++;
1759 		}
1760 
1761 		bus->sdcnt.rxglomframes++;
1762 	}
1763 	return num;
1764 }
1765 
1766 static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1767 				     bool *pending)
1768 {
1769 	DECLARE_WAITQUEUE(wait, current);
1770 	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
1771 
1772 	/* Wait until control frame is available */
1773 	add_wait_queue(&bus->dcmd_resp_wait, &wait);
1774 	set_current_state(TASK_INTERRUPTIBLE);
1775 
1776 	while (!(*condition) && (!signal_pending(current) && timeout))
1777 		timeout = schedule_timeout(timeout);
1778 
1779 	if (signal_pending(current))
1780 		*pending = true;
1781 
1782 	set_current_state(TASK_RUNNING);
1783 	remove_wait_queue(&bus->dcmd_resp_wait, &wait);
1784 
1785 	return timeout;
1786 }
1787 
1788 static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
1789 {
1790 	if (waitqueue_active(&bus->dcmd_resp_wait))
1791 		wake_up_interruptible(&bus->dcmd_resp_wait);
1792 
1793 	return 0;
1794 }
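
/* Receive a control frame.  The first BRCMF_FIRSTREAD bytes were already
 * read into @hdr by the caller; the remainder is read into the aligned
 * rx buffer, the complete frame is assembled in a vzalloc'd buffer and
 * published through bus->rxctl/rxlen for brcmf_sdio_bus_rxctl(), and any
 * waiter blocked in brcmf_sdio_dcmd_resp_wait() is woken.
 */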
1795 static void
1796 brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1797 {
1798 	uint rdlen, pad;
1799 	u8 *buf = NULL, *rbuf;
1800 	int sdret;
1801 
1802 	brcmf_dbg(TRACE, "Enter\n");
1803 
1804 	if (bus->rxblen)
1805 		buf = vzalloc(bus->rxblen);
1806 	if (!buf)
1807 		goto done;
1808 
1809 	rbuf = bus->rxbuf;
1810 	pad = ((unsigned long)rbuf % bus->head_align);
1811 	if (pad)
1812 		rbuf += (bus->head_align - pad);
1813 
1814 	/* Copy the already-read portion over */
1815 	memcpy(buf, hdr, BRCMF_FIRSTREAD);
1816 	if (len <= BRCMF_FIRSTREAD)
1817 		goto gotpkt;
1818 
1819 	/* Raise rdlen to next SDIO block to avoid tail command */
1820 	rdlen = len - BRCMF_FIRSTREAD;
1821 	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
1822 		pad = bus->blocksize - (rdlen % bus->blocksize);
1823 		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
1824 		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
1825 			rdlen += pad;
1826 	} else if (rdlen % bus->head_align) {
1827 		rdlen += bus->head_align - (rdlen % bus->head_align);
1828 	}
1829 
1830 	/* Drop if the read is too big or it exceeds our maximum */
1831 	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1832 		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1833 			  rdlen, bus->sdiodev->bus_if->maxctl);
1834 		brcmf_sdio_rxfail(bus, false, false);
1835 		goto done;
1836 	}
1837 
1838 	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1839 		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1840 			  len, len - doff, bus->sdiodev->bus_if->maxctl);
1841 		bus->sdcnt.rx_toolong++;
1842 		brcmf_sdio_rxfail(bus, false, false);
1843 		goto done;
1844 	}
1845 
1846 	/* Read the remainder of the frame body */
1847 	sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
1848 	bus->sdcnt.f2rxdata++;
1849 
1850 	/* Control frame failures need retransmission */
1851 	if (sdret < 0) {
1852 		brcmf_err("read %d control bytes failed: %d\n",
1853 			  rdlen, sdret);
1854 		bus->sdcnt.rxc_errors++;
1855 		brcmf_sdio_rxfail(bus, true, true);
1856 		goto done;
1857 	} else
1858 		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1859 
1860 gotpkt:
1861 
1862 	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1863 			   buf, len, "RxCtrl:\n");
1864 
1865 	/* Point to valid data and indicate its length */
1866 	spin_lock_bh(&bus->rxctl_lock);
1867 	if (bus->rxctl) {
1868 		brcmf_err("last control frame is being processed.\n");
1869 		spin_unlock_bh(&bus->rxctl_lock);
1870 		vfree(buf);
1871 		goto done;
1872 	}
1873 	bus->rxctl = buf + doff;
1874 	bus->rxctl_orig = buf;
1875 	bus->rxlen = len - doff;
1876 	spin_unlock_bh(&bus->rxctl_lock);
1877 
1878 done:
1879 	/* Awake any waiters */
1880 	brcmf_sdio_dcmd_resp_wake(bus);
1881 }
1882 
1883 /* Pad read to blocksize for efficiency */
1884 static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1885 {
1886 	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1887 		*pad = bus->blocksize - (*rdlen % bus->blocksize);
1888 		if (*pad <= bus->roundup && *pad < bus->blocksize &&
1889 		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
1890 			*rdlen += *pad;
1891 	} else if (*rdlen % bus->head_align) {
1892 		*rdlen += bus->head_align - (*rdlen % bus->head_align);
1893 	}
1894 }
1895 
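/* Read up to @maxframes frames from the dongle.  Glommed frames are
 * handled by brcmf_sdio_rxglom(), control frames by
 * brcmf_sdio_read_control(); data and event frames are read into newly
 * allocated packets (using the next-frame length hint from the previous
 * software header when available) and delivered via brcmf_rx_frame().
 * Returns the number of frames processed.
 */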
1896 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1897 {
1898 	struct sk_buff *pkt;		/* Packet for event or data frames */
1899 	u16 pad;		/* Number of pad bytes to read */
1900 	uint rxleft = 0;	/* Remaining number of frames allowed */
1901 	int ret;		/* Return code from calls */
1902 	uint rxcount = 0;	/* Total frames read */
1903 	struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
1904 	u8 head_read = 0;
1905 
1906 	brcmf_dbg(TRACE, "Enter\n");
1907 
1908 	/* Not finished unless we encounter no more frames indication */
1909 	bus->rxpending = true;
1910 
1911 	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1912 	     !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
1913 	     rd->seq_num++, rxleft--) {
1914 
1915 		/* Handle glomming separately */
1916 		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
1917 			u8 cnt;
1918 			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
1919 				  bus->glomd, skb_peek(&bus->glom));
1920 			cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
1921 			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
1922 			rd->seq_num += cnt - 1;
1923 			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
1924 			continue;
1925 		}
1926 
1927 		rd->len_left = rd->len;
1928 		/* read the header first when the frame length is unknown */
1929 		sdio_claim_host(bus->sdiodev->func[1]);
1930 		if (!rd->len) {
1931 			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
1932 						   bus->rxhdr, BRCMF_FIRSTREAD);
1933 			bus->sdcnt.f2rxhdrs++;
1934 			if (ret < 0) {
1935 				brcmf_err("RXHEADER FAILED: %d\n",
1936 					  ret);
1937 				bus->sdcnt.rx_hdrfail++;
1938 				brcmf_sdio_rxfail(bus, true, true);
1939 				sdio_release_host(bus->sdiodev->func[1]);
1940 				continue;
1941 			}
1942 
1943 			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1944 					   bus->rxhdr, SDPCM_HDRLEN,
1945 					   "RxHdr:\n");
1946 
1947 			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
1948 					       BRCMF_SDIO_FT_NORMAL)) {
1949 				sdio_release_host(bus->sdiodev->func[1]);
1950 				if (!bus->rxpending)
1951 					break;
1952 				else
1953 					continue;
1954 			}
1955 
1956 			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
1957 				brcmf_sdio_read_control(bus, bus->rxhdr,
1958 							rd->len,
1959 							rd->dat_offset);
1960 				/* prepare the descriptor for the next read */
1961 				rd->len = rd->len_nxtfrm << 4;
1962 				rd->len_nxtfrm = 0;
1963 				/* treat all packets as events if we don't know */
1964 				rd->channel = SDPCM_EVENT_CHANNEL;
1965 				sdio_release_host(bus->sdiodev->func[1]);
1966 				continue;
1967 			}
1968 			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
1969 				       rd->len - BRCMF_FIRSTREAD : 0;
1970 			head_read = BRCMF_FIRSTREAD;
1971 		}
1972 
1973 		brcmf_sdio_pad(bus, &pad, &rd->len_left);
1974 
1975 		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
1976 					    bus->head_align);
1977 		if (!pkt) {
1978 			/* Give up on data, request rtx of events */
1979 			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1980 			brcmf_sdio_rxfail(bus, false,
1981 					    RETRYCHAN(rd->channel));
1982 			sdio_release_host(bus->sdiodev->func[1]);
1983 			continue;
1984 		}
1985 		skb_pull(pkt, head_read);
1986 		pkt_align(pkt, rd->len_left, bus->head_align);
1987 
1988 		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
1989 		bus->sdcnt.f2rxdata++;
1990 		sdio_release_host(bus->sdiodev->func[1]);
1991 
1992 		if (ret < 0) {
1993 			brcmf_err("read %d bytes from channel %d failed: %d\n",
1994 				  rd->len, rd->channel, ret);
1995 			brcmu_pkt_buf_free_skb(pkt);
1996 			sdio_claim_host(bus->sdiodev->func[1]);
1997 			brcmf_sdio_rxfail(bus, true,
1998 					    RETRYCHAN(rd->channel));
1999 			sdio_release_host(bus->sdiodev->func[1]);
2000 			continue;
2001 		}
2002 
2003 		if (head_read) {
2004 			skb_push(pkt, head_read);
2005 			memcpy(pkt->data, bus->rxhdr, head_read);
2006 			head_read = 0;
2007 		} else {
2008 			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
2009 			rd_new.seq_num = rd->seq_num;
2010 			sdio_claim_host(bus->sdiodev->func[1]);
2011 			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
2012 					       BRCMF_SDIO_FT_NORMAL)) {
2013 				rd->len = 0;
2014 				brcmu_pkt_buf_free_skb(pkt);
2015 			}
2016 			bus->sdcnt.rx_readahead_cnt++;
2017 			if (rd->len != roundup(rd_new.len, 16)) {
2018 				brcmf_err("frame length mismatch: read %d, should be %d\n",
2019 					  rd->len,
2020 					  roundup(rd_new.len, 16) >> 4);
2021 				rd->len = 0;
2022 				brcmf_sdio_rxfail(bus, true, true);
2023 				sdio_release_host(bus->sdiodev->func[1]);
2024 				brcmu_pkt_buf_free_skb(pkt);
2025 				continue;
2026 			}
2027 			sdio_release_host(bus->sdiodev->func[1]);
2028 			rd->len_nxtfrm = rd_new.len_nxtfrm;
2029 			rd->channel = rd_new.channel;
2030 			rd->dat_offset = rd_new.dat_offset;
2031 
2032 			brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
2033 					     BRCMF_DATA_ON()) &&
2034 					   BRCMF_HDRS_ON(),
2035 					   bus->rxhdr, SDPCM_HDRLEN,
2036 					   "RxHdr:\n");
2037 
2038 			if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
2039 				brcmf_err("readahead on control packet %d?\n",
2040 					  rd_new.seq_num);
2041 				/* Force retry w/normal header read */
2042 				rd->len = 0;
2043 				sdio_claim_host(bus->sdiodev->func[1]);
2044 				brcmf_sdio_rxfail(bus, false, true);
2045 				sdio_release_host(bus->sdiodev->func[1]);
2046 				brcmu_pkt_buf_free_skb(pkt);
2047 				continue;
2048 			}
2049 		}
2050 
2051 		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
2052 				   pkt->data, rd->len, "Rx Data:\n");
2053 
2054 		/* Save superframe descriptor and allocate packet frame */
2055 		if (rd->channel == SDPCM_GLOM_CHANNEL) {
2056 			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
2057 				brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
2058 					  rd->len);
2059 				brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
2060 						   pkt->data, rd->len,
2061 						   "Glom Data:\n");
2062 				__skb_trim(pkt, rd->len);
2063 				skb_pull(pkt, SDPCM_HDRLEN);
2064 				bus->glomd = pkt;
2065 			} else {
2066 				brcmf_err("%s: glom superframe w/o descriptor!\n",
2067 					  __func__);
2068 				sdio_claim_host(bus->sdiodev->func[1]);
2069 				brcmf_sdio_rxfail(bus, false, false);
2070 				sdio_release_host(bus->sdiodev->func[1]);
2071 			}
2072 			/* prepare the descriptor for the next read */
2073 			rd->len = rd->len_nxtfrm << 4;
2074 			rd->len_nxtfrm = 0;
2075 			/* treat all packets as events if we don't know */
2076 			rd->channel = SDPCM_EVENT_CHANNEL;
2077 			continue;
2078 		}
2079 
2080 		/* Fill in packet len and prio, deliver upward */
2081 		__skb_trim(pkt, rd->len);
2082 		skb_pull(pkt, rd->dat_offset);
2083 
2084 		/* prepare the descriptor for the next read */
2085 		rd->len = rd->len_nxtfrm << 4;
2086 		rd->len_nxtfrm = 0;
2087 		/* treat all packets as events if we don't know */
2088 		rd->channel = SDPCM_EVENT_CHANNEL;
2089 
2090 		if (pkt->len == 0) {
2091 			brcmu_pkt_buf_free_skb(pkt);
2092 			continue;
2093 		}
2094 
2095 		brcmf_rx_frame(bus->sdiodev->dev, pkt);
2096 	}
2097 
2098 	rxcount = maxframes - rxleft;
2099 	/* Message if we hit the limit */
2100 	if (!rxleft)
2101 		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
2102 	else
2103 		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
2104 	/* Back off rxseq if awaiting rtx, update rx_seq */
2105 	if (bus->rxskip)
2106 		rd->seq_num--;
2107 	bus->rx_seq = rd->seq_num;
2108 
2109 	return rxcount;
2110 }
2111 
2112 static void
2113 brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
2114 {
2115 	if (waitqueue_active(&bus->ctrl_wait))
2116 		wake_up_interruptible(&bus->ctrl_wait);
2117 	return;
2118 }
2119 
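/* Align the packet data pointer to bus->head_align by pushing pad bytes
 * into the headroom and zeroing them together with the bus header area.
 * If the headroom is too small the packet is only made writable via
 * skb_cow() and no padding is added.  Returns the number of head pad
 * bytes used (possibly 0) or -ENOMEM.
 */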
2120 static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2121 {
2122 	u16 head_pad;
2123 	u8 *dat_buf;
2124 
2125 	dat_buf = (u8 *)(pkt->data);
2126 
2127 	/* Check head padding */
2128 	head_pad = ((unsigned long)dat_buf % bus->head_align);
2129 	if (head_pad) {
2130 		if (skb_headroom(pkt) < head_pad) {
2131 			bus->sdiodev->bus_if->tx_realloc++;
2132 			head_pad = 0;
2133 			if (skb_cow(pkt, head_pad))
2134 				return -ENOMEM;
2135 		}
2136 		skb_push(pkt, head_pad);
2137 		dat_buf = (u8 *)(pkt->data);
2138 		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
2139 	}
2140 	return head_pad;
2141 }
2142 
2143 /**
2144  * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
2145  * bus layer usage.
2146  */
2147 /* flag marking a dummy skb added for DMA alignment requirement */
2148 #define ALIGN_SKB_FLAG		0x8000
2149 /* bit mask of data length chopped from the previous packet */
2150 #define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
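/* Illustrative example of the cb encoding: a dummy tail packet that
 * absorbed 6 bytes chopped from its predecessor carries 0x8006 here;
 * brcmf_sdio_txpkt_postp() tests ALIGN_SKB_FLAG, hands the 6 bytes back
 * to the preceding packet with skb_put() and frees the dummy skb.
 */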
2151 
2152 static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
2153 				    struct sk_buff_head *pktq,
2154 				    struct sk_buff *pkt, u16 total_len)
2155 {
2156 	struct brcmf_sdio_dev *sdiodev;
2157 	struct sk_buff *pkt_pad;
2158 	u16 tail_pad, tail_chop, chain_pad;
2159 	unsigned int blksize;
2160 	bool lastfrm;
2161 	int ntail, ret;
2162 
2163 	sdiodev = bus->sdiodev;
2164 	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
2165 	/* sg entry alignment should be a divisor of block size */
2166 	WARN_ON(blksize % bus->sgentry_align);
2167 
2168 	/* Check tail padding */
2169 	lastfrm = skb_queue_is_last(pktq, pkt);
2170 	tail_pad = 0;
2171 	tail_chop = pkt->len % bus->sgentry_align;
2172 	if (tail_chop)
2173 		tail_pad = bus->sgentry_align - tail_chop;
2174 	chain_pad = (total_len + tail_pad) % blksize;
2175 	if (lastfrm && chain_pad)
2176 		tail_pad += blksize - chain_pad;
2177 	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
2178 		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
2179 						bus->head_align);
2180 		if (pkt_pad == NULL)
2181 			return -ENOMEM;
2182 		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
2183 		if (unlikely(ret < 0)) {
2184 			kfree_skb(pkt_pad);
2185 			return ret;
2186 		}
2187 		memcpy(pkt_pad->data,
2188 		       pkt->data + pkt->len - tail_chop,
2189 		       tail_chop);
2190 		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
2191 		skb_trim(pkt, pkt->len - tail_chop);
2192 		skb_trim(pkt_pad, tail_pad + tail_chop);
2193 		__skb_queue_after(pktq, pkt, pkt_pad);
2194 	} else {
2195 		ntail = pkt->data_len + tail_pad -
2196 			(pkt->end - pkt->tail);
2197 		if (skb_cloned(pkt) || ntail > 0)
2198 			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
2199 				return -ENOMEM;
2200 		if (skb_linearize(pkt))
2201 			return -ENOMEM;
2202 		__skb_put(pkt, tail_pad);
2203 	}
2204 
2205 	return tail_pad;
2206 }
2207 
2208 /**
2209  * brcmf_sdio_txpkt_prep - packet preparation for transmit
2210  * @bus: brcmf_sdio structure pointer
2211  * @pktq: packet list pointer
2212  * @chan: virtual channel to transmit the packet
2213  *
2214  * Processing applied to the packet:
2215  *	- Align data buffer pointer
2216  *	- Align data buffer length
2217  *	- Prepare header
2218  * Return: negative value on error
2219  */
2220 static int
2221 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2222 		      uint chan)
2223 {
2224 	u16 head_pad, total_len;
2225 	struct sk_buff *pkt_next;
2226 	u8 txseq;
2227 	int ret;
2228 	struct brcmf_sdio_hdrinfo hd_info = {0};
2229 
2230 	txseq = bus->tx_seq;
2231 	total_len = 0;
2232 	skb_queue_walk(pktq, pkt_next) {
2233 		/* alignment packet inserted in previous
2234 		 * loop cycle can be skipped as it is
2235 		 * already properly aligned and does not
2236 		 * need an sdpcm header.
2237 		 */
2238 		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
2239 			continue;
2240 
2241 		/* align packet data pointer */
2242 		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
2243 		if (ret < 0)
2244 			return ret;
2245 		head_pad = (u16)ret;
2246 		if (head_pad)
2247 			memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
2248 
2249 		total_len += pkt_next->len;
2250 
2251 		hd_info.len = pkt_next->len;
2252 		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
2253 		if (bus->txglom && pktq->qlen > 1) {
2254 			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
2255 						       pkt_next, total_len);
2256 			if (ret < 0)
2257 				return ret;
2258 			hd_info.tail_pad = (u16)ret;
2259 			total_len += (u16)ret;
2260 		}
2261 
2262 		hd_info.channel = chan;
2263 		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
2264 		hd_info.seq_num = txseq++;
2265 
2266 		/* Now fill the header */
2267 		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
2268 
2269 		if (BRCMF_BYTES_ON() &&
2270 		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
2271 		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
2272 			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
2273 					   "Tx Frame:\n");
2274 		else if (BRCMF_HDRS_ON())
2275 			brcmf_dbg_hex_dump(true, pkt_next->data,
2276 					   head_pad + bus->tx_hdrlen,
2277 					   "Tx Header:\n");
2278 	}
2279 	/* Hardware length tag of the first packet should be total
2280 	 * length of the chain (including padding)
2281 	 */
2282 	if (bus->txglom)
2283 		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
2284 	return 0;
2285 }
2286 
2287 /**
2288  * brcmf_sdio_txpkt_postp - packet post processing for transmit
2289  * @bus: brcmf_sdio structure pointer
2290  * @pktq: packet list pointer
2291  *
2292  * Processing applied to the packet:
2293  *	- Remove head padding
2294  *	- Remove tail padding
2295  */
2296 static void
2297 brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
2298 {
2299 	u8 *hdr;
2300 	u32 dat_offset;
2301 	u16 tail_pad;
2302 	u16 dummy_flags, chop_len;
2303 	struct sk_buff *pkt_next, *tmp, *pkt_prev;
2304 
2305 	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2306 		dummy_flags = *(u16 *)(pkt_next->cb);
2307 		if (dummy_flags & ALIGN_SKB_FLAG) {
2308 			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
2309 			if (chop_len) {
2310 				pkt_prev = pkt_next->prev;
2311 				skb_put(pkt_prev, chop_len);
2312 			}
2313 			__skb_unlink(pkt_next, pktq);
2314 			brcmu_pkt_buf_free_skb(pkt_next);
2315 		} else {
2316 			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
2317 			dat_offset = le32_to_cpu(*(__le32 *)hdr);
2318 			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
2319 				     SDPCM_DOFFSET_SHIFT;
2320 			skb_pull(pkt_next, dat_offset);
2321 			if (bus->txglom) {
2322 				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
2323 				skb_trim(pkt_next, pkt_next->len - tail_pad);
2324 			}
2325 		}
2326 	}
2327 }
2328 
2329 /* Writes a HW/SW header into the packet and sends it. */
2330 /* Assumes: (a) header space already there, (b) caller holds lock */
2331 static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
2332 			    uint chan)
2333 {
2334 	int ret;
2335 	struct sk_buff *pkt_next, *tmp;
2336 
2337 	brcmf_dbg(TRACE, "Enter\n");
2338 
2339 	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
2340 	if (ret)
2341 		goto done;
2342 
2343 	sdio_claim_host(bus->sdiodev->func[1]);
2344 	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
2345 	bus->sdcnt.f2txdata++;
2346 
2347 	if (ret < 0)
2348 		brcmf_sdio_txfail(bus);
2349 
2350 	sdio_release_host(bus->sdiodev->func[1]);
2351 
2352 done:
2353 	brcmf_sdio_txpkt_postp(bus, pktq);
2354 	if (ret == 0)
2355 		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
2356 	skb_queue_walk_safe(pktq, pkt_next, tmp) {
2357 		__skb_unlink(pkt_next, pktq);
2358 		brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
2359 	}
2360 	return ret;
2361 }
2362 
2363 static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2364 {
2365 	struct sk_buff *pkt;
2366 	struct sk_buff_head pktq;
2367 	u32 intstatus = 0;
2368 	int ret = 0, prec_out, i;
2369 	uint cnt = 0;
2370 	u8 tx_prec_map, pkt_num;
2371 
2372 	brcmf_dbg(TRACE, "Enter\n");
2373 
2374 	tx_prec_map = ~bus->flowcontrol;
2375 
2376 	/* Send frames until the limit or some other event */
2377 	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2378 		pkt_num = 1;
2379 		if (bus->txglom)
2380 			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2381 					bus->sdiodev->txglomsz);
2382 		pkt_num = min_t(u32, pkt_num,
2383 				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
2384 		__skb_queue_head_init(&pktq);
2385 		spin_lock_bh(&bus->txq_lock);
2386 		for (i = 0; i < pkt_num; i++) {
2387 			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
2388 					      &prec_out);
2389 			if (pkt == NULL)
2390 				break;
2391 			__skb_queue_tail(&pktq, pkt);
2392 		}
2393 		spin_unlock_bh(&bus->txq_lock);
2394 		if (i == 0)
2395 			break;
2396 
2397 		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2398 
2399 		cnt += i;
2400 
2401 		/* In poll mode, need to check for other events */
2402 		if (!bus->intr) {
2403 			/* Check device status, signal pending interrupt */
2404 			sdio_claim_host(bus->sdiodev->func[1]);
2405 			ret = r_sdreg32(bus, &intstatus,
2406 					offsetof(struct sdpcmd_regs,
2407 						 intstatus));
2408 			sdio_release_host(bus->sdiodev->func[1]);
2409 			bus->sdcnt.f2txdata++;
2410 			if (ret != 0)
2411 				break;
2412 			if (intstatus & bus->hostintmask)
2413 				atomic_set(&bus->ipend, 1);
2414 		}
2415 	}
2416 
2417 	/* Deflow-control stack if needed */
2418 	if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
2419 	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2420 		bus->txoff = false;
2421 		brcmf_txflowblock(bus->sdiodev->dev, false);
2422 	}
2423 
2424 	return cnt;
2425 }
2426 
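/* Transmit a single control frame.  The caller reserved bus->tx_hdrlen
 * bytes of headroom in front of @frame; the SDPCM header is written
 * there, the length is padded up to the SDIO block size (or the head
 * alignment), and the send is retried up to TXRETRIES times on failure.
 */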
2427 static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
2428 {
2429 	u8 doff;
2430 	u16 pad;
2431 	uint retries = 0;
2432 	struct brcmf_sdio_hdrinfo hd_info = {0};
2433 	int ret;
2434 
2435 	brcmf_dbg(TRACE, "Enter\n");
2436 
2437 	/* Back the pointer to make room for bus header */
2438 	frame -= bus->tx_hdrlen;
2439 	len += bus->tx_hdrlen;
2440 
2441 	/* Add alignment padding (optional for ctl frames) */
2442 	doff = ((unsigned long)frame % bus->head_align);
2443 	if (doff) {
2444 		frame -= doff;
2445 		len += doff;
2446 		memset(frame + bus->tx_hdrlen, 0, doff);
2447 	}
2448 
2449 	/* Round send length to next SDIO block */
2450 	pad = 0;
2451 	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
2452 		pad = bus->blocksize - (len % bus->blocksize);
2453 		if ((pad > bus->roundup) || (pad >= bus->blocksize))
2454 			pad = 0;
2455 	} else if (len % bus->head_align) {
2456 		pad = bus->head_align - (len % bus->head_align);
2457 	}
2458 	len += pad;
2459 
2460 	hd_info.len = len - pad;
2461 	hd_info.channel = SDPCM_CONTROL_CHANNEL;
2462 	hd_info.dat_offset = doff + bus->tx_hdrlen;
2463 	hd_info.seq_num = bus->tx_seq;
2464 	hd_info.lastfrm = true;
2465 	hd_info.tail_pad = pad;
2466 	brcmf_sdio_hdpack(bus, frame, &hd_info);
2467 
2468 	if (bus->txglom)
2469 		brcmf_sdio_update_hwhdr(frame, len);
2470 
2471 	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
2472 			   frame, len, "Tx Frame:\n");
2473 	brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
2474 			   BRCMF_HDRS_ON(),
2475 			   frame, min_t(u16, len, 16), "TxHdr:\n");
2476 
2477 	do {
2478 		ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
2479 
2480 		if (ret < 0)
2481 			brcmf_sdio_txfail(bus);
2482 		else
2483 			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
2484 	} while (ret < 0 && retries++ < TXRETRIES);
2485 
2486 	return ret;
2487 }
2488 
2489 static void brcmf_sdio_bus_stop(struct device *dev)
2490 {
2491 	u32 local_hostintmask;
2492 	u8 saveclk;
2493 	int err;
2494 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2495 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2496 	struct brcmf_sdio *bus = sdiodev->bus;
2497 
2498 	brcmf_dbg(TRACE, "Enter\n");
2499 
2500 	if (bus->watchdog_tsk) {
2501 		send_sig(SIGTERM, bus->watchdog_tsk, 1);
2502 		kthread_stop(bus->watchdog_tsk);
2503 		bus->watchdog_tsk = NULL;
2504 	}
2505 
2506 	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
2507 		sdio_claim_host(sdiodev->func[1]);
2508 
2509 		/* Enable clock for device interrupts */
2510 		brcmf_sdio_bus_sleep(bus, false, false);
2511 
2512 		/* Disable and clear interrupts at the chip level also */
2513 		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
2514 		local_hostintmask = bus->hostintmask;
2515 		bus->hostintmask = 0;
2516 
2517 		/* Force backplane clocks to assure F2 interrupt propagates */
2518 		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2519 					    &err);
2520 		if (!err)
2521 			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
2522 					  (saveclk | SBSDIO_FORCE_HT), &err);
2523 		if (err)
2524 			brcmf_err("Failed to force clock for F2: err %d\n",
2525 				  err);
2526 
2527 		/* Turn off the bus (F2), free any pending packets */
2528 		brcmf_dbg(INTR, "disable SDIO interrupts\n");
2529 		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
2530 
2531 		/* Clear any pending interrupts now that F2 is disabled */
2532 		w_sdreg32(bus, local_hostintmask,
2533 			  offsetof(struct sdpcmd_regs, intstatus));
2534 
2535 		sdio_release_host(sdiodev->func[1]);
2536 	}
2537 	/* Clear the data packet queues */
2538 	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2539 
2540 	/* Clear any held glomming stuff */
2541 	brcmu_pkt_buf_free_skb(bus->glomd);
2542 	brcmf_sdio_free_glom(bus);
2543 
2544 	/* Clear rx control and wake any waiters */
2545 	spin_lock_bh(&bus->rxctl_lock);
2546 	bus->rxlen = 0;
2547 	spin_unlock_bh(&bus->rxctl_lock);
2548 	brcmf_sdio_dcmd_resp_wake(bus);
2549 
2550 	/* Reset some F2 state stuff */
2551 	bus->rxskip = false;
2552 	bus->tx_seq = bus->rx_seq = 0;
2553 }
2554 
2555 static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
2556 {
2557 	unsigned long flags;
2558 
2559 	if (bus->sdiodev->oob_irq_requested) {
2560 		spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2561 		if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2562 			enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2563 			bus->sdiodev->irq_en = true;
2564 		}
2565 		spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2566 	}
2567 }
2568 
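/* Read the SDIO core interrupt status, latch the flow-control state,
 * acknowledge the active bits in the device and accumulate them into
 * bus->intstatus for the DPC to process.
 */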
2569 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2570 {
2571 	struct brcmf_core *buscore;
2572 	u32 addr;
2573 	unsigned long val;
2574 	int ret;
2575 
2576 	buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
2577 	addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
2578 
2579 	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
2580 	bus->sdcnt.f1regdata++;
2581 	if (ret != 0)
2582 		return ret;
2583 
2584 	val &= bus->hostintmask;
2585 	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
2586 
2587 	/* Clear interrupts */
2588 	if (val) {
2589 		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
2590 		bus->sdcnt.f1regdata++;
2591 		atomic_or(val, &bus->intstatus);
2592 	}
2593 
2594 	return ret;
2595 }
2596 
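/* Main bus worker.  Completes a pending clock transition if needed,
 * picks up new interrupt status, handles flow-control and host mailbox
 * events, reads available receive frames, sends a queued control frame,
 * drains the tx queue, and sets dpc_triggered again if work remains.
 */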
2597 static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2598 {
2599 	u32 newstatus = 0;
2600 	unsigned long intstatus;
2601 	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
2602 	uint framecnt;			/* Temporary counter of tx/rx frames */
2603 	int err = 0;
2604 
2605 	brcmf_dbg(TRACE, "Enter\n");
2606 
2607 	sdio_claim_host(bus->sdiodev->func[1]);
2608 
2609 	/* If waiting for HTAVAIL, check status */
2610 	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2611 		u8 clkctl, devctl = 0;
2612 
2613 #ifdef DEBUG
2614 		/* Check for inconsistent device control */
2615 		devctl = brcmf_sdiod_regrb(bus->sdiodev,
2616 					   SBSDIO_DEVICE_CTL, &err);
2617 #endif				/* DEBUG */
2618 
2619 		/* Read CSR, if clock on switch to AVAIL, else ignore */
2620 		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
2621 					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
2622 
2623 		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2624 			  devctl, clkctl);
2625 
2626 		if (SBSDIO_HTAV(clkctl)) {
2627 			devctl = brcmf_sdiod_regrb(bus->sdiodev,
2628 						   SBSDIO_DEVICE_CTL, &err);
2629 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2630 			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2631 					  devctl, &err);
2632 			bus->clkstate = CLK_AVAIL;
2633 		}
2634 	}
2635 
2636 	/* Make sure backplane clock is on */
2637 	brcmf_sdio_bus_sleep(bus, false, true);
2638 
2639 	/* Pending interrupt indicates new device status */
2640 	if (atomic_read(&bus->ipend) > 0) {
2641 		atomic_set(&bus->ipend, 0);
2642 		err = brcmf_sdio_intr_rstatus(bus);
2643 	}
2644 
2645 	/* Start with leftover status bits */
2646 	intstatus = atomic_xchg(&bus->intstatus, 0);
2647 
2648 	/* Handle flow-control change: read new state in case our ack
2649 	 * crossed another change interrupt.  If change still set, assume
2650 	 * FC ON for safety, let next loop through do the debounce.
2651 	 */
2652 	if (intstatus & I_HMB_FC_CHANGE) {
2653 		intstatus &= ~I_HMB_FC_CHANGE;
2654 		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
2655 				offsetof(struct sdpcmd_regs, intstatus));
2656 
2657 		err = r_sdreg32(bus, &newstatus,
2658 				offsetof(struct sdpcmd_regs, intstatus));
2659 		bus->sdcnt.f1regdata += 2;
2660 		atomic_set(&bus->fcstate,
2661 			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
2662 		intstatus |= (newstatus & bus->hostintmask);
2663 	}
2664 
2665 	/* Handle host mailbox indication */
2666 	if (intstatus & I_HMB_HOST_INT) {
2667 		intstatus &= ~I_HMB_HOST_INT;
2668 		intstatus |= brcmf_sdio_hostmail(bus);
2669 	}
2670 
2671 	sdio_release_host(bus->sdiodev->func[1]);
2672 
2673 	/* Generally don't ask for these, can get CRC errors... */
2674 	if (intstatus & I_WR_OOSYNC) {
2675 		brcmf_err("Dongle reports WR_OOSYNC\n");
2676 		intstatus &= ~I_WR_OOSYNC;
2677 	}
2678 
2679 	if (intstatus & I_RD_OOSYNC) {
2680 		brcmf_err("Dongle reports RD_OOSYNC\n");
2681 		intstatus &= ~I_RD_OOSYNC;
2682 	}
2683 
2684 	if (intstatus & I_SBINT) {
2685 		brcmf_err("Dongle reports SBINT\n");
2686 		intstatus &= ~I_SBINT;
2687 	}
2688 
2689 	/* Would be active due to wake-wlan in gSPI */
2690 	if (intstatus & I_CHIPACTIVE) {
2691 		brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
2692 		intstatus &= ~I_CHIPACTIVE;
2693 	}
2694 
2695 	/* Ignore frame indications if rxskip is set */
2696 	if (bus->rxskip)
2697 		intstatus &= ~I_HMB_FRAME_IND;
2698 
2699 	/* On frame indication, read available frames */
2700 	if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
2701 		brcmf_sdio_readframes(bus, bus->rxbound);
2702 		if (!bus->rxpending)
2703 			intstatus &= ~I_HMB_FRAME_IND;
2704 	}
2705 
2706 	/* Keep still-pending events for next scheduling */
2707 	if (intstatus)
2708 		atomic_or(intstatus, &bus->intstatus);
2709 
2710 	brcmf_sdio_clrintr(bus);
2711 
2712 	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
2713 	    data_ok(bus)) {
2714 		sdio_claim_host(bus->sdiodev->func[1]);
2715 		if (bus->ctrl_frame_stat) {
2716 			err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
2717 						      bus->ctrl_frame_len);
2718 			bus->ctrl_frame_err = err;
2719 			wmb();
2720 			bus->ctrl_frame_stat = false;
2721 		}
2722 		sdio_release_host(bus->sdiodev->func[1]);
2723 		brcmf_sdio_wait_event_wakeup(bus);
2724 	}
2725 	/* Send queued frames (limit 1 if rx may still be pending) */
2726 	if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
2727 	    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
2728 	    data_ok(bus)) {
2729 		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
2730 					    txlimit;
2731 		brcmf_sdio_sendfromq(bus, framecnt);
2732 	}
2733 
2734 	if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
2735 		brcmf_err("failed backplane access over SDIO, halting operation\n");
2736 		atomic_set(&bus->intstatus, 0);
2737 		if (bus->ctrl_frame_stat) {
2738 			sdio_claim_host(bus->sdiodev->func[1]);
2739 			if (bus->ctrl_frame_stat) {
2740 				bus->ctrl_frame_err = -ENODEV;
2741 				wmb();
2742 				bus->ctrl_frame_stat = false;
2743 				brcmf_sdio_wait_event_wakeup(bus);
2744 			}
2745 			sdio_release_host(bus->sdiodev->func[1]);
2746 		}
2747 	} else if (atomic_read(&bus->intstatus) ||
2748 		   atomic_read(&bus->ipend) > 0 ||
2749 		   (!atomic_read(&bus->fcstate) &&
2750 		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2751 		    data_ok(bus))) {
2752 		bus->dpc_triggered = true;
2753 	}
2754 }
2755 
2756 static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
2757 {
2758 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2759 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2760 	struct brcmf_sdio *bus = sdiodev->bus;
2761 
2762 	return &bus->txq;
2763 }
2764 
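/* Enqueue @pkt on precedence queue @prec.  When the queue is full a
 * packet of lower precedence is evicted from the tail to make room; the
 * new packet is refused if no such victim exists.  Returns false if the
 * packet was not queued.
 */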
2765 static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
2766 {
2767 	struct sk_buff *p;
2768 	int eprec = -1;		/* precedence to evict from */
2769 
2770 	/* Fast case, precedence queue is not full and we are also not
2771 	 * exceeding total queue length
2772 	 */
2773 	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
2774 		brcmu_pktq_penq(q, prec, pkt);
2775 		return true;
2776 	}
2777 
2778 	/* Determine precedence from which to evict packet, if any */
2779 	if (pktq_pfull(q, prec)) {
2780 		eprec = prec;
2781 	} else if (pktq_full(q)) {
2782 		p = brcmu_pktq_peek_tail(q, &eprec);
2783 		if (eprec > prec)
2784 			return false;
2785 	}
2786 
2787 	/* Evict if needed */
2788 	if (eprec >= 0) {
2789 		/* Detect queueing to unconfigured precedence */
2790 		if (eprec == prec)
2791 			return false;	/* refuse newer (incoming) packet */
2792 		/* Evict packet according to discard policy */
2793 		p = brcmu_pktq_pdeq_tail(q, eprec);
2794 		if (p == NULL)
2795 			brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
2796 		brcmu_pkt_buf_free_skb(p);
2797 	}
2798 
2799 	/* Enqueue */
2800 	p = brcmu_pktq_penq(q, prec, pkt);
2801 	if (p == NULL)
2802 		brcmf_err("brcmu_pktq_penq() failed\n");
2803 
2804 	return p != NULL;
2805 }
2806 
2807 static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
2808 {
2809 	int ret = -EBADE;
2810 	uint prec;
2811 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2812 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2813 	struct brcmf_sdio *bus = sdiodev->bus;
2814 
2815 	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
2816 	if (sdiodev->state != BRCMF_SDIOD_DATA)
2817 		return -EIO;
2818 
2819 	/* Add space for the header */
2820 	skb_push(pkt, bus->tx_hdrlen);
2821 	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
2822 
2823 	prec = prio2prec((pkt->priority & PRIOMASK));
2824 
2825 	/* Check for existing queue, current flow-control,
2826 	 * pending event, or pending clock */
2827 	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2828 	bus->sdcnt.fcqueued++;
2829 
2830 	/* Priority based enq */
2831 	spin_lock_bh(&bus->txq_lock);
2832 	/* reset bus_flags in packet cb */
2833 	*(u16 *)(pkt->cb) = 0;
2834 	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
2835 		skb_pull(pkt, bus->tx_hdrlen);
2836 		brcmf_err("out of bus->txq !!!\n");
2837 		ret = -ENOSR;
2838 	} else {
2839 		ret = 0;
2840 	}
2841 
2842 	if (pktq_len(&bus->txq) >= TXHI) {
2843 		bus->txoff = true;
2844 		brcmf_txflowblock(dev, true);
2845 	}
2846 	spin_unlock_bh(&bus->txq_lock);
2847 
2848 #ifdef DEBUG
2849 	if (pktq_plen(&bus->txq, prec) > qcount[prec])
2850 		qcount[prec] = pktq_plen(&bus->txq, prec);
2851 #endif
2852 
2853 	brcmf_sdio_trigger_dpc(bus);
2854 	return ret;
2855 }
2856 
2857 #ifdef DEBUG
2858 #define CONSOLE_LINE_MAX	192
2859 
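/* Poll the firmware console: read the rte_console log state from device
 * memory, fetch the ring buffer once the write index has advanced and
 * emit complete lines through pr_debug().
 */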
2860 static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
2861 {
2862 	struct brcmf_console *c = &bus->console;
2863 	u8 line[CONSOLE_LINE_MAX], ch;
2864 	u32 n, idx, addr;
2865 	int rv;
2866 
2867 	/* Don't do anything until FWREADY updates console address */
2868 	if (bus->console_addr == 0)
2869 		return 0;
2870 
2871 	/* Read console log struct */
2872 	addr = bus->console_addr + offsetof(struct rte_console, log_le);
2873 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2874 			       sizeof(c->log_le));
2875 	if (rv < 0)
2876 		return rv;
2877 
2878 	/* Allocate console buffer (one time only) */
2879 	if (c->buf == NULL) {
2880 		c->bufsize = le32_to_cpu(c->log_le.buf_size);
2881 		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
2882 		if (c->buf == NULL)
2883 			return -ENOMEM;
2884 	}
2885 
2886 	idx = le32_to_cpu(c->log_le.idx);
2887 
2888 	/* Protect against corrupt value */
2889 	if (idx > c->bufsize)
2890 		return -EBADE;
2891 
2892 	/* Skip reading the console buffer if the index pointer
2893 	 * has not moved */
2894 	if (idx == c->last)
2895 		return 0;
2896 
2897 	/* Read the console buffer */
2898 	addr = le32_to_cpu(c->log_le.buf);
2899 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2900 	if (rv < 0)
2901 		return rv;
2902 
2903 	while (c->last != idx) {
2904 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2905 			if (c->last == idx) {
2906 				/* This would output a partial line.
2907 				 * Instead, back up
2908 				 * the buffer pointer and output this
2909 				 * line next time around.
2910 				 */
2911 				if (c->last >= n)
2912 					c->last -= n;
2913 				else
2914 					c->last = c->bufsize - n;
2915 				goto break2;
2916 			}
2917 			ch = c->buf[c->last];
2918 			c->last = (c->last + 1) % c->bufsize;
2919 			if (ch == '\n')
2920 				break;
2921 			line[n] = ch;
2922 		}
2923 
2924 		if (n > 0) {
2925 			if (line[n - 1] == '\r')
2926 				n--;
2927 			line[n] = 0;
2928 			pr_debug("CONSOLE: %s\n", line);
2929 		}
2930 	}
2931 break2:
2932 
2933 	return 0;
2934 }
2935 #endif				/* DEBUG */
2936 
2937 static int
2938 brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2939 {
2940 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2941 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2942 	struct brcmf_sdio *bus = sdiodev->bus;
2943 	int ret;
2944 
2945 	brcmf_dbg(TRACE, "Enter\n");
2946 	if (sdiodev->state != BRCMF_SDIOD_DATA)
2947 		return -EIO;
2948 
2949 	/* Send from dpc */
2950 	bus->ctrl_frame_buf = msg;
2951 	bus->ctrl_frame_len = msglen;
2952 	wmb();
2953 	bus->ctrl_frame_stat = true;
2954 
2955 	brcmf_sdio_trigger_dpc(bus);
2956 	wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
2957 					 msecs_to_jiffies(CTL_DONE_TIMEOUT));
2958 	ret = 0;
2959 	if (bus->ctrl_frame_stat) {
2960 		sdio_claim_host(bus->sdiodev->func[1]);
2961 		if (bus->ctrl_frame_stat) {
2962 			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
2963 			bus->ctrl_frame_stat = false;
2964 			ret = -ETIMEDOUT;
2965 		}
2966 		sdio_release_host(bus->sdiodev->func[1]);
2967 	}
2968 	if (!ret) {
2969 		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
2970 			  bus->ctrl_frame_err);
2971 		rmb();
2972 		ret = bus->ctrl_frame_err;
2973 	}
2974 
2975 	if (ret)
2976 		bus->sdcnt.tx_ctlerrs++;
2977 	else
2978 		bus->sdcnt.tx_ctlpkts++;
2979 
2980 	return ret;
2981 }
2982 
2983 #ifdef DEBUG
2984 static int brcmf_sdio_dump_console(struct seq_file *seq, struct brcmf_sdio *bus,
2985 				   struct sdpcm_shared *sh)
2986 {
2987 	u32 addr, console_ptr, console_size, console_index;
2988 	char *conbuf = NULL;
2989 	__le32 sh_val;
2990 	int rv;
2991 
2992 	/* obtain console information from device memory */
2993 	addr = sh->console_addr + offsetof(struct rte_console, log_le);
2994 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
2995 			       (u8 *)&sh_val, sizeof(u32));
2996 	if (rv < 0)
2997 		return rv;
2998 	console_ptr = le32_to_cpu(sh_val);
2999 
3000 	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
3001 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
3002 			       (u8 *)&sh_val, sizeof(u32));
3003 	if (rv < 0)
3004 		return rv;
3005 	console_size = le32_to_cpu(sh_val);
3006 
3007 	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
3008 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
3009 			       (u8 *)&sh_val, sizeof(u32));
3010 	if (rv < 0)
3011 		return rv;
3012 	console_index = le32_to_cpu(sh_val);
3013 
3014 	/* allocate buffer for console data */
3015 	if (console_size <= CONSOLE_BUFFER_MAX)
3016 		conbuf = vzalloc(console_size+1);
3017 
3018 	if (!conbuf)
3019 		return -ENOMEM;
3020 
3021 	/* obtain the console data from device */
3022 	conbuf[console_size] = '\0';
3023 	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
3024 			       console_size);
3025 	if (rv < 0)
3026 		goto done;
3027 
3028 	rv = seq_write(seq, conbuf + console_index,
3029 		       console_size - console_index);
3030 	if (rv < 0)
3031 		goto done;
3032 
3033 	if (console_index > 0)
3034 		rv = seq_write(seq, conbuf, console_index - 1);
3035 
3036 done:
3037 	vfree(conbuf);
3038 	return rv;
3039 }
3040 
3041 static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
3042 				struct sdpcm_shared *sh)
3043 {
3044 	int error;
3045 	struct brcmf_trap_info tr;
3046 
3047 	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
3048 		brcmf_dbg(INFO, "no trap in firmware\n");
3049 		return 0;
3050 	}
3051 
3052 	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
3053 				  sizeof(struct brcmf_trap_info));
3054 	if (error < 0)
3055 		return error;
3056 
3057 	seq_printf(seq,
3058 		   "dongle trap info: type 0x%x @ epc 0x%08x\n"
3059 		   "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
3060 		   "  lr   0x%08x pc   0x%08x offset 0x%x\n"
3061 		   "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
3062 		   "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
3063 		   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
3064 		   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
3065 		   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
3066 		   le32_to_cpu(tr.pc), sh->trap_addr,
3067 		   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
3068 		   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
3069 		   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
3070 		   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
3071 
3072 	return 0;
3073 }
3074 
3075 static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
3076 				  struct sdpcm_shared *sh)
3077 {
3078 	int error = 0;
3079 	char file[80] = "?";
3080 	char expr[80] = "<???>";
3081 
3082 	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
3083 		brcmf_dbg(INFO, "firmware not built with -assert\n");
3084 		return 0;
3085 	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
3086 		brcmf_dbg(INFO, "no assert in dongle\n");
3087 		return 0;
3088 	}
3089 
3090 	sdio_claim_host(bus->sdiodev->func[1]);
3091 	if (sh->assert_file_addr != 0) {
3092 		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3093 					  sh->assert_file_addr, (u8 *)file, 80);
3094 		if (error < 0)
3095 			return error;
3096 	}
3097 	if (sh->assert_exp_addr != 0) {
3098 		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
3099 					  sh->assert_exp_addr, (u8 *)expr, 80);
3100 		if (error < 0)
3101 			return error;
3102 	}
3103 	sdio_release_host(bus->sdiodev->func[1]);
3104 
3105 	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
3106 		   file, sh->assert_line, expr);
3107 	return 0;
3108 }
3109 
3110 static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3111 {
3112 	int error;
3113 	struct sdpcm_shared sh;
3114 
3115 	error = brcmf_sdio_readshared(bus, &sh);
3116 
3117 	if (error < 0)
3118 		return error;
3119 
3120 	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
3121 		brcmf_dbg(INFO, "firmware not built with -assert\n");
3122 	else if (sh.flags & SDPCM_SHARED_ASSERT)
3123 		brcmf_err("assertion in dongle\n");
3124 
3125 	if (sh.flags & SDPCM_SHARED_TRAP)
3126 		brcmf_err("firmware trap in dongle\n");
3127 
3128 	return 0;
3129 }
3130 
3131 static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus)
3132 {
3133 	int error = 0;
3134 	struct sdpcm_shared sh;
3135 
3136 	error = brcmf_sdio_readshared(bus, &sh);
3137 	if (error < 0)
3138 		goto done;
3139 
3140 	error = brcmf_sdio_assert_info(seq, bus, &sh);
3141 	if (error < 0)
3142 		goto done;
3143 
3144 	error = brcmf_sdio_trap_info(seq, bus, &sh);
3145 	if (error < 0)
3146 		goto done;
3147 
3148 	error = brcmf_sdio_dump_console(seq, bus, &sh);
3149 
3150 done:
3151 	return error;
3152 }
3153 
3154 static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data)
3155 {
3156 	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
3157 	struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus;
3158 
3159 	return brcmf_sdio_died_dump(seq, bus);
3160 }
3161 
3162 static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
3163 {
3164 	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
3165 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3166 	struct brcmf_sdio_count *sdcnt = &sdiodev->bus->sdcnt;
3167 
3168 	seq_printf(seq,
3169 		   "intrcount:    %u\nlastintrs:    %u\n"
3170 		   "pollcnt:      %u\nregfails:     %u\n"
3171 		   "tx_sderrs:    %u\nfcqueued:     %u\n"
3172 		   "rxrtx:        %u\nrx_toolong:   %u\n"
3173 		   "rxc_errors:   %u\nrx_hdrfail:   %u\n"
3174 		   "rx_badhdr:    %u\nrx_badseq:    %u\n"
3175 		   "fc_rcvd:      %u\nfc_xoff:      %u\n"
3176 		   "fc_xon:       %u\nrxglomfail:   %u\n"
3177 		   "rxglomframes: %u\nrxglompkts:   %u\n"
3178 		   "f2rxhdrs:     %u\nf2rxdata:     %u\n"
3179 		   "f2txdata:     %u\nf1regdata:    %u\n"
3180 		   "tickcnt:      %u\ntx_ctlerrs:   %lu\n"
3181 		   "tx_ctlpkts:   %lu\nrx_ctlerrs:   %lu\n"
3182 		   "rx_ctlpkts:   %lu\nrx_readahead: %lu\n",
3183 		   sdcnt->intrcount, sdcnt->lastintrs,
3184 		   sdcnt->pollcnt, sdcnt->regfails,
3185 		   sdcnt->tx_sderrs, sdcnt->fcqueued,
3186 		   sdcnt->rxrtx, sdcnt->rx_toolong,
3187 		   sdcnt->rxc_errors, sdcnt->rx_hdrfail,
3188 		   sdcnt->rx_badhdr, sdcnt->rx_badseq,
3189 		   sdcnt->fc_rcvd, sdcnt->fc_xoff,
3190 		   sdcnt->fc_xon, sdcnt->rxglomfail,
3191 		   sdcnt->rxglomframes, sdcnt->rxglompkts,
3192 		   sdcnt->f2rxhdrs, sdcnt->f2rxdata,
3193 		   sdcnt->f2txdata, sdcnt->f1regdata,
3194 		   sdcnt->tickcnt, sdcnt->tx_ctlerrs,
3195 		   sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
3196 		   sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
3197 
3198 	return 0;
3199 }
3200 
3201 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3202 {
3203 	struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
3204 	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
3205 
3206 	if (IS_ERR_OR_NULL(dentry))
3207 		return;
3208 
3209 	bus->console_interval = BRCMF_CONSOLE;
3210 
3211 	brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
3212 	brcmf_debugfs_add_entry(drvr, "counters",
3213 				brcmf_debugfs_sdio_count_read);
3214 	debugfs_create_u32("console_interval", 0644, dentry,
3215 			   &bus->console_interval);
3216 }
3217 #else
3218 static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
3219 {
3220 	return 0;
3221 }
3222 
3223 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
3224 {
3225 }
3226 #endif /* DEBUG */
3227 
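/* Receive a control (dcmd) response.  Blocks on dcmd_resp_wait until
 * brcmf_sdio_read_control() publishes a frame, the wait times out or a
 * signal is pending; then copies at most @msglen bytes into @msg and
 * releases the receive buffer.
 */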
3228 static int
3229 brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3230 {
3231 	int timeleft;
3232 	uint rxlen = 0;
3233 	bool pending;
3234 	u8 *buf;
3235 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3236 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3237 	struct brcmf_sdio *bus = sdiodev->bus;
3238 
3239 	brcmf_dbg(TRACE, "Enter\n");
3240 	if (sdiodev->state != BRCMF_SDIOD_DATA)
3241 		return -EIO;
3242 
3243 	/* Wait until control frame is available */
3244 	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3245 
3246 	spin_lock_bh(&bus->rxctl_lock);
3247 	rxlen = bus->rxlen;
3248 	memcpy(msg, bus->rxctl, min(msglen, rxlen));
3249 	bus->rxctl = NULL;
3250 	buf = bus->rxctl_orig;
3251 	bus->rxctl_orig = NULL;
3252 	bus->rxlen = 0;
3253 	spin_unlock_bh(&bus->rxctl_lock);
3254 	vfree(buf);
3255 
3256 	if (rxlen) {
3257 		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3258 			  rxlen, msglen);
3259 	} else if (timeleft == 0) {
3260 		brcmf_err("resumed on timeout\n");
3261 		brcmf_sdio_checkdied(bus);
3262 	} else if (pending) {
3263 		brcmf_dbg(CTL, "cancelled\n");
3264 		return -ERESTARTSYS;
3265 	} else {
3266 		brcmf_dbg(CTL, "resumed for unknown reason?\n");
3267 		brcmf_sdio_checkdied(bus);
3268 	}
3269 
3270 	if (rxlen)
3271 		bus->sdcnt.rx_ctlpkts++;
3272 	else
3273 		bus->sdcnt.rx_ctlerrs++;
3274 
3275 	return rxlen ? (int)rxlen : -ETIMEDOUT;
3276 }
3277 
3278 #ifdef DEBUG
3279 static bool
3280 brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3281 			u8 *ram_data, uint ram_sz)
3282 {
3283 	char *ram_cmp;
3284 	int err;
3285 	bool ret = true;
3286 	int address;
3287 	int offset;
3288 	int len;
3289 
3290 	/* read back and verify */
3291 	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
3292 		  ram_sz);
3293 	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
3294 	/* cannot verify without a compare buffer; assume the image is good */
3295 	if (!ram_cmp)
3296 		return true;
3297 
3298 	address = ram_addr;
3299 	offset = 0;
3300 	while (offset < ram_sz) {
3301 		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
3302 		      ram_sz - offset;
3303 		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
3304 		if (err) {
3305 			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3306 				  err, len, address);
3307 			ret = false;
3308 			break;
3309 		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
3310 			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
3311 				  offset, len);
3312 			ret = false;
3313 			break;
3314 		}
3315 		offset += len;
3316 		address += len;
3317 	}
3318 
3319 	kfree(ram_cmp);
3320 
3321 	return ret;
3322 }
3323 #else	/* DEBUG */
3324 static bool
3325 brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
3326 			u8 *ram_data, uint ram_sz)
3327 {
3328 	return true;
3329 }
3330 #endif	/* DEBUG */
3331 
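/* Write the firmware image into device RAM at the chip's rambase and,
 * in DEBUG builds, read it back via brcmf_sdio_verifymemory() to confirm
 * the download.
 */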
3332 static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
3333 					 const struct firmware *fw)
3334 {
3335 	int err;
3336 
3337 	brcmf_dbg(TRACE, "Enter\n");
3338 
3339 	err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
3340 				(u8 *)fw->data, fw->size);
3341 	if (err)
3342 		brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3343 			  err, (int)fw->size, bus->ci->rambase);
3344 	else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
3345 					  (u8 *)fw->data, fw->size))
3346 		err = -EIO;
3347 
3348 	return err;
3349 }
3350 
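/* Place the NVRAM contents at the top end of device RAM and verify the
 * transfer (DEBUG builds only).
 */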
static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
				     void *vars, u32 varsz)
{
	int address;
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	address = bus->ci->ramsize - varsz + bus->ci->rambase;
	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
	if (err)
		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
			  err, varsz, address);
	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
		err = -EIO;

	return err;
}

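/* Download the firmware image and NVRAM to the dongle and take the ARM core
 * out of reset. Both the firmware and the nvram buffer are released here,
 * regardless of the outcome.
 */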
static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
					const struct firmware *fw,
					void *nvram, u32 nvlen)
{
	int bcmerror = -EFAULT;
	u32 rstvec;

	sdio_claim_host(bus->sdiodev->func[1]);
	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);

	rstvec = get_unaligned_le32(fw->data);
	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);

	bcmerror = brcmf_sdio_download_code_file(bus, fw);
	release_firmware(fw);
	if (bcmerror) {
		brcmf_err("dongle image file download failed\n");
		brcmf_fw_nvram_free(nvram);
		goto err;
	}

	bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
	brcmf_fw_nvram_free(nvram);
	if (bcmerror) {
		brcmf_err("dongle nvram file download failed\n");
		goto err;
	}

	/* Take arm out of reset */
	if (!brcmf_chip_set_active(bus->ci, rstvec)) {
		brcmf_err("error getting out of ARM core reset\n");
		goto err;
	}

	/* Allow full data communication using DPC from now on. */
	brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
	bcmerror = 0;

err:
	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
	sdio_release_host(bus->sdiodev->func[1]);
	return bcmerror;
}

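/* Enable save/restore (SR) operation: make wake-up wait for the HT clock,
 * set the CMD14 support bits in the Broadcom card capability register and
 * force the HT clock on.
 */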
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
	int err = 0;
	u8 val;

	brcmf_dbg(TRACE, "Enter\n");

	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
	if (err) {
		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
		return;
	}

	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
	if (err) {
		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
		return;
	}

	/* Add CMD14 Support */
	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
			  &err);
	if (err) {
		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
		return;
	}

	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			  SBSDIO_FORCE_HT, &err);
	if (err) {
		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
		return;
	}

	/* set flag */
	bus->sr_enabled = true;
	brcmf_dbg(INFO, "SR enabled\n");
}

/* enable KSO bit */
static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
{
	u8 val;
	int err = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* KSO bit added in SDIO core rev 12 */
	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
		return 0;

	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
	if (err) {
		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
		return err;
	}

	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
				  val, &err);
		if (err) {
			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
			return err;
		}
	}

	return 0;
}

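/* Called by the common layer before the interface is brought up: configure
 * the bus:txglom / bus:txglomalign and bus:rxglom firmware iovars and report
 * the resulting TX header length back to the bus layer.
 */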
static int brcmf_sdio_bus_preinit(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	uint pad_size;
	u32 value;
	int err;

	/* the commands below use the terms tx and rx from
	 * a device perspective, i.e. bus:txglom affects the
	 * bus transfers from device to host.
	 */
	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
		/* for sdio core rev < 12, disable txglomming */
		value = 0;
		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
					   sizeof(u32));
	} else {
		/* otherwise, set txglomalign */
		value = 4;
		if (sdiodev->pdata)
			value = sdiodev->pdata->sd_sgentry_align;
		/* SDIO ADMA requires at least 32 bit alignment */
		value = max_t(u32, value, 4);
		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
					   sizeof(u32));
	}

	if (err < 0)
		goto done;

	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
	if (sdiodev->sg_support) {
		bus->txglom = false;
		value = 1;
		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
					   &value, sizeof(u32));
		if (err < 0) {
			/* bus:rxglom is allowed to fail */
			err = 0;
		} else {
			bus->txglom = true;
			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
		}
	}
	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);

done:
	return err;
}

static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	return bus->ci->ramsize - bus->ci->srsize;
}

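/* Copy the device RAM into the caller-supplied buffer in MEMBLOCK-sized
 * chunks, starting at the chip's RAM base.
 */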
static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
				      size_t mem_size)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	int err;
	int address;
	int offset;
	int len;

	brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
		  mem_size);

	address = bus->ci->rambase;
	offset = err = 0;
	sdio_claim_host(sdiodev->func[1]);
	while (offset < mem_size) {
		len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
		      mem_size - offset;
		err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
		if (err) {
			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
				  err, len, address);
			goto done;
		}
		data += len;
		offset += len;
		address += len;
	}

done:
	sdio_release_host(sdiodev->func[1]);
	return err;
}

void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
{
	if (!bus->dpc_triggered) {
		bus->dpc_triggered = true;
		queue_work(bus->brcmf_wq, &bus->datawork);
	}
}

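/* Interrupt service routine called from the SDIO host layer: count the
 * interrupt, latch the pending interrupt status and schedule the DPC work.
 */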
void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (!bus) {
		brcmf_err("bus is null pointer, exiting\n");
		return;
	}

	/* Count the interrupt call */
	bus->sdcnt.intrcount++;
	if (in_interrupt())
		atomic_set(&bus->ipend, 1);
	else if (brcmf_sdio_intr_rstatus(bus))
		brcmf_err("failed backplane access\n");

	/* Disable additional interrupts (is this needed now?) */
	if (!bus->intr)
		brcmf_err("isr w/o interrupt configured!\n");

	bus->dpc_triggered = true;
	queue_work(bus->brcmf_wq, &bus->datawork);
}

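/* Periodic watchdog work, driven by the watchdog thread. Polls the device
 * for pending interrupts when none have arrived, periodically reads the
 * firmware console (DEBUG builds) and turns off the backplane clock after
 * the configured idle time.
 */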
static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
{
	brcmf_dbg(TIMER, "Enter\n");

	/* Poll period: check device if appropriate. */
	if (!bus->sr_enabled &&
	    bus->poll && (++bus->polltick >= bus->pollrate)) {
		u32 intstatus = 0;

		/* Reset poll tick */
		bus->polltick = 0;

		/* Check device if no interrupts */
		if (!bus->intr ||
		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {

			if (!bus->dpc_triggered) {
				u8 devpend;

				sdio_claim_host(bus->sdiodev->func[1]);
				devpend = brcmf_sdiod_regrb(bus->sdiodev,
							    SDIO_CCCR_INTx,
							    NULL);
				sdio_release_host(bus->sdiodev->func[1]);
				intstatus = devpend & (INTR_STATUS_FUNC1 |
						       INTR_STATUS_FUNC2);
			}

			/* If there is something, make like the ISR
			 * and schedule the DPC
			 */
			if (intstatus) {
				bus->sdcnt.pollcnt++;
				atomic_set(&bus->ipend, 1);

				bus->dpc_triggered = true;
				queue_work(bus->brcmf_wq, &bus->datawork);
			}
		}

		/* Update interrupt tracking */
		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
	}
#ifdef DEBUG
	/* Poll for console output periodically */
	if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() &&
	    bus->console_interval != 0) {
		bus->console.count += BRCMF_WD_POLL_MS;
		if (bus->console.count >= bus->console_interval) {
			bus->console.count -= bus->console_interval;
			sdio_claim_host(bus->sdiodev->func[1]);
			/* Make sure backplane clock is on */
			brcmf_sdio_bus_sleep(bus, false, false);
			if (brcmf_sdio_readconsole(bus) < 0)
				/* stop on error */
				bus->console_interval = 0;
			sdio_release_host(bus->sdiodev->func[1]);
		}
	}
#endif				/* DEBUG */

	/* On idle timeout clear activity flag and/or turn off clock */
	if (!bus->dpc_triggered) {
		rmb();
		if ((!bus->dpc_running) && (bus->idletime > 0) &&
		    (bus->clkstate == CLK_AVAIL)) {
			bus->idlecount++;
			if (bus->idlecount > bus->idletime) {
				brcmf_dbg(SDIO, "idle\n");
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdio_wd_timer(bus, 0);
				bus->idlecount = 0;
				brcmf_sdio_bus_sleep(bus, true, false);
				sdio_release_host(bus->sdiodev->func[1]);
			}
		} else {
			bus->idlecount = 0;
		}
	} else {
		bus->idlecount = 0;
	}
}

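/* Workqueue handler that runs the DPC for as long as new triggers arrive
 * and cooperates with the freezer during suspend/resume.
 */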
static void brcmf_sdio_dataworker(struct work_struct *work)
{
	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
					      datawork);

	bus->dpc_running = true;
	wmb();
	while (ACCESS_ONCE(bus->dpc_triggered)) {
		bus->dpc_triggered = false;
		brcmf_sdio_dpc(bus);
		bus->idlecount = 0;
	}
	bus->dpc_running = false;
	if (brcmf_sdiod_freezing(bus->sdiodev)) {
		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
		brcmf_sdiod_try_freeze(bus->sdiodev);
		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
	}
}

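/* Program the SDIO pad drive strength via the chipcommon chipcontrol
 * register. The lookup table, mask and shift depend on chip and PMU
 * revision; chips without a PMU or without a known table are left alone.
 */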
static void
brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
			     struct brcmf_chip *ci, u32 drivestrength)
{
	const struct sdiod_drive_str *str_tab = NULL;
	u32 str_mask;
	u32 str_shift;
	u32 base;
	u32 i;
	u32 drivestrength_sel = 0;
	u32 cc_data_temp;
	u32 addr;

	if (!(ci->cc_caps & CC_CAP_PMU))
		return;

	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
	case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID, 12):
		str_tab = sdiod_drvstr_tab1_1v8;
		str_mask = 0x00003800;
		str_shift = 11;
		break;
	case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID, 17):
		str_tab = sdiod_drvstr_tab6_1v8;
		str_mask = 0x00001800;
		str_shift = 11;
		break;
	case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID, 17):
		/* note: 43143 does not support tristate */
		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
			str_tab = sdiod_drvstr_tab2_3v3;
			str_mask = 0x00000007;
			str_shift = 0;
		} else
			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
				  ci->name, drivestrength);
		break;
	case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID, 13):
		str_tab = sdiod_drive_strength_tab5_1v8;
		str_mask = 0x00003800;
		str_shift = 11;
		break;
	default:
		brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
			  ci->name, ci->chiprev, ci->pmurev);
		break;
	}

	if (str_tab != NULL) {
		for (i = 0; str_tab[i].strength != 0; i++) {
			if (drivestrength >= str_tab[i].strength) {
				drivestrength_sel = str_tab[i].sel;
				break;
			}
		}
		base = brcmf_chip_get_chipcommon(ci)->base;
		addr = CORE_CC_REG(base, chipcontrol_addr);
		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
		cc_data_temp &= ~str_mask;
		drivestrength_sel <<= str_shift;
		cc_data_temp |= drivestrength_sel;
		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);

		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
			  str_tab[i].strength, drivestrength, cc_data_temp);
	}
}

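/* buscore .prepare callback: force the chip onto the ALP clock (HT clock
 * request off), wait for ALP to become available and disable the extra
 * SDIO pull-ups before chip enumeration.
 */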
static int brcmf_sdio_buscoreprep(void *ctx)
{
	struct brcmf_sdio_dev *sdiodev = ctx;
	int err = 0;
	u8 clkval, clkset;

	/* Try forcing SDIO core to do ALPAvail request only */
	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
	if (err) {
		brcmf_err("error writing for HT off\n");
		return err;
	}

	/* If register supported, wait for ALPAvail and then force ALP */
	/* This may take up to 15 milliseconds */
	clkval = brcmf_sdiod_regrb(sdiodev,
				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);

	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
			  clkset, clkval);
		return -EACCES;
	}

	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
			!SBSDIO_ALPAV(clkval)),
			PMU_MAX_TRANSITION_DLY);
	if (!SBSDIO_ALPAV(clkval)) {
		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
			  clkval);
		return -EBUSY;
	}

	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
	udelay(65);

	/* Also, disable the extra SDIO pull-ups */
	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);

	return 0;
}

static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
					u32 rstvec)
{
	struct brcmf_sdio_dev *sdiodev = ctx;
	struct brcmf_core *core;
	u32 reg_addr;

	/* clear all interrupts */
	core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
	reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);

	if (rstvec)
		/* Write reset vector to address 0 */
		brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
				  sizeof(rstvec));
}

static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
{
	struct brcmf_sdio_dev *sdiodev = ctx;
	u32 val, rev;

	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
		if (rev >= 2) {
			val &= ~CID_ID_MASK;
			val |= BRCM_CC_4339_CHIP_ID;
		}
	}
	return val;
}

static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
{
	struct brcmf_sdio_dev *sdiodev = ctx;

	brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
}

static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
	.prepare = brcmf_sdio_buscoreprep,
	.activate = brcmf_sdio_buscore_activate,
	.read32 = brcmf_sdio_buscore_read32,
	.write32 = brcmf_sdio_buscore_write32,
};

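/* First stage of probe, with the SDIO host claimed: force the PLL off,
 * attach the chip abstraction, enable KSO, set the pad drive strength and
 * configure card control / PMU control so a card reset also resets the
 * WLAN backplane and reloads the PMU state. Ends by setting up the TX
 * packet queue and an aligned receive header buffer.
 */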
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
	u8 clkctl = 0;
	int err = 0;
	int reg_addr;
	u32 reg_val;
	u32 drivestrength;

	sdio_claim_host(bus->sdiodev->func[1]);

	pr_debug("F1 signature read @0x18000000=0x%4x\n",
		 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));

	/*
	 * Force PLL off until brcmf_chip_attach()
	 * programs PLL control regs
	 */

	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			  BRCMF_INIT_CLKCTL1, &err);
	if (!err)
		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
					   SBSDIO_FUNC1_CHIPCLKCSR, &err);

	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
			  err, BRCMF_INIT_CLKCTL1, clkctl);
		goto fail;
	}

	bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
	if (IS_ERR(bus->ci)) {
		brcmf_err("brcmf_chip_attach failed!\n");
		bus->ci = NULL;
		goto fail;
	}

	if (brcmf_sdio_kso_init(bus)) {
		brcmf_err("error enabling KSO\n");
		goto fail;
	}

	if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
		drivestrength = bus->sdiodev->pdata->drive_strength;
	else
		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
	brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);

	/* Set card control so an SDIO card reset does a WLAN backplane reset */
	reg_val = brcmf_sdiod_regrb(bus->sdiodev,
				    SDIO_CCCR_BRCM_CARDCTRL, &err);
	if (err)
		goto fail;

	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;

	brcmf_sdiod_regwb(bus->sdiodev,
			  SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
	if (err)
		goto fail;

	/* set PMUControl so a backplane reset does PMU state reload */
	reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
			       pmucontrol);
	reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
	if (err)
		goto fail;

	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);

	brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
	if (err)
		goto fail;

	sdio_release_host(bus->sdiodev->func[1]);

	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);

	/* allocate header buffer */
	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
	if (!bus->hdrbuf)
		return false;
	/* Locate an appropriately-aligned portion of hdrbuf */
	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
				    bus->head_align);

	/* Set the poll and/or interrupt flags */
	bus->intr = true;
	bus->poll = false;
	if (bus->poll)
		bus->pollrate = 1;

	return true;

fail:
	sdio_release_host(bus->sdiodev->func[1]);
	return false;
}

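/* Watchdog kthread: wait for the watchdog timer to fire, run the periodic
 * watchdog work and participate in the suspend freezer. Exits on SIGTERM
 * or kthread_stop().
 */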
static int
brcmf_sdio_watchdog_thread(void *data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
	int wait;

	allow_signal(SIGTERM);
	/* Run until signal received */
	brcmf_sdiod_freezer_count(bus->sdiodev);
	while (1) {
		if (kthread_should_stop())
			break;
		brcmf_sdiod_freezer_uncount(bus->sdiodev);
		wait = wait_for_completion_interruptible(&bus->watchdog_wait);
		brcmf_sdiod_freezer_count(bus->sdiodev);
		brcmf_sdiod_try_freeze(bus->sdiodev);
		if (!wait) {
			brcmf_sdio_bus_watchdog(bus);
			/* Count the tick for reference */
			bus->sdcnt.tickcnt++;
			reinit_completion(&bus->watchdog_wait);
		} else
			break;
	}
	return 0;
}

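/* Watchdog timer callback: wake the watchdog thread and, while the timer
 * is valid, re-arm it for the next poll interval.
 */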
static void
brcmf_sdio_watchdog(unsigned long data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	if (bus->watchdog_tsk) {
		complete(&bus->watchdog_wait);
		/* Reschedule the watchdog */
		if (bus->wd_timer_valid)
			mod_timer(&bus->timer,
				  jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
	}
}

static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
	.stop = brcmf_sdio_bus_stop,
	.preinit = brcmf_sdio_bus_preinit,
	.txdata = brcmf_sdio_bus_txdata,
	.txctl = brcmf_sdio_bus_txctl,
	.rxctl = brcmf_sdio_bus_rxctl,
	.gettxq = brcmf_sdio_bus_gettxq,
	.wowl_config = brcmf_sdio_wowl_config,
	.get_ramsize = brcmf_sdio_bus_get_ramsize,
	.get_memdump = brcmf_sdio_bus_get_memdump,
};

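/* Completion callback of the asynchronous firmware/NVRAM request issued in
 * brcmf_sdio_probe(): download both images to the dongle, enable SDIO
 * function 2, set up interrupts and save/restore, then start the bus. On
 * failure the driver is released from the device.
 */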
static void brcmf_sdio_firmware_callback(struct device *dev,
					 const struct firmware *code,
					 void *nvram, u32 nvram_len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	int err = 0;
	u8 saveclk;

	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));

	if (!bus_if->drvr)
		return;

	/* try to download image and nvram to the dongle */
	bus->alp_only = true;
	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
	if (err)
		goto fail;
	bus->alp_only = false;

	/* Start the watchdog timer */
	bus->sdcnt.tickcnt = 0;
	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);

	sdio_claim_host(sdiodev->func[1]);

	/* Make sure backplane clock is on, needed to generate F2 interrupt */
	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
	if (bus->clkstate != CLK_AVAIL)
		goto release;

	/* Force clocks on backplane to be sure F2 interrupt propagates */
	saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err) {
		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  (saveclk | SBSDIO_FORCE_HT), &err);
	}
	if (err) {
		brcmf_err("Failed to force clock for F2: err %d\n", err);
		goto release;
	}

	/* Enable function 2 (frame transfers) */
	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
	err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);

	brcmf_dbg(INFO, "enable F2: err=%d\n", err);

	/* If F2 successfully enabled, set core and enable interrupts */
	if (!err) {
		/* Set up the interrupt mask and enable interrupts */
		bus->hostintmask = HOSTINTMASK;
		w_sdreg32(bus, bus->hostintmask,
			  offsetof(struct sdpcmd_regs, hostintmask));

		brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
	} else {
		/* Disable F2 again */
		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
		goto release;
	}

	if (brcmf_chip_sr_capable(bus->ci)) {
		brcmf_sdio_sr_init(bus);
	} else {
		/* Restore previous clock setting */
		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  saveclk, &err);
	}

	if (err == 0) {
		err = brcmf_sdiod_intr_register(sdiodev);
		if (err != 0)
			brcmf_err("intr register failed:%d\n", err);
	}

	/* If we didn't come up, turn off backplane clock */
	if (err != 0)
		brcmf_sdio_clkctl(bus, CLK_NONE, false);

	sdio_release_host(sdiodev->func[1]);

	err = brcmf_bus_start(dev);
	if (err != 0) {
		brcmf_err("dongle is not responding\n");
		goto fail;
	}
	return;

release:
	sdio_release_host(sdiodev->func[1]);
fail:
	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
	device_release_driver(dev);
}

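/* Allocate and initialise the SDIO bus state: create the ordered work
 * queue, attach to the dongle, start the watchdog timer and thread,
 * register the bus ops with the common layer and kick off the asynchronous
 * firmware request. Returns the bus handle, or NULL after cleanup via
 * brcmf_sdio_remove() on failure.
 */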
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret;
	struct brcmf_sdio *bus;
	struct workqueue_struct *wq;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate private bus interface state */
	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
	if (!bus)
		goto fail;

	bus->sdiodev = sdiodev;
	sdiodev->bus = bus;
	skb_queue_head_init(&bus->glom);
	bus->txbound = BRCMF_TXBOUND;
	bus->rxbound = BRCMF_RXBOUND;
	bus->txminmax = BRCMF_TXMINMAX;
	bus->tx_seq = SDPCM_SEQ_WRAP - 1;

	/* platform specific configuration:
	 *   alignments must be at least 4 bytes for ADMA
	 */
	bus->head_align = ALIGNMENT;
	bus->sgentry_align = ALIGNMENT;
	if (sdiodev->pdata) {
		if (sdiodev->pdata->sd_head_align > ALIGNMENT)
			bus->head_align = sdiodev->pdata->sd_head_align;
		if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
			bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
	}

	/* single-threaded workqueue */
	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
				     dev_name(&sdiodev->func[1]->dev));
	if (!wq) {
		brcmf_err("insufficient memory to create txworkqueue\n");
		goto fail;
	}
	brcmf_sdiod_freezer_count(sdiodev);
	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
	bus->brcmf_wq = wq;

	/* attempt to attach to the dongle */
	if (!(brcmf_sdio_probe_attach(bus))) {
		brcmf_err("brcmf_sdio_probe_attach failed\n");
		goto fail;
	}

	spin_lock_init(&bus->rxctl_lock);
	spin_lock_init(&bus->txq_lock);
	init_waitqueue_head(&bus->ctrl_wait);
	init_waitqueue_head(&bus->dcmd_resp_wait);

	/* Set up the watchdog timer */
	init_timer(&bus->timer);
	bus->timer.data = (unsigned long)bus;
	bus->timer.function = brcmf_sdio_watchdog;

	/* Initialize watchdog thread */
	init_completion(&bus->watchdog_wait);
	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
					bus, "brcmf_wdog/%s",
					dev_name(&sdiodev->func[1]->dev));
	if (IS_ERR(bus->watchdog_tsk)) {
		pr_warn("brcmf_watchdog thread failed to start\n");
		bus->watchdog_tsk = NULL;
	}
	/* Initialize DPC thread */
	bus->dpc_triggered = false;
	bus->dpc_running = false;

	/* Assign bus interface call back */
	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
	bus->sdiodev->bus_if->chip = bus->ci->chip;
	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;

	/* default sdio bus header length for tx packet */
	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;

	/* Attach to the common layer, reserve hdr space */
	ret = brcmf_attach(bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("brcmf_attach failed\n");
		goto fail;
	}

	/* Query the F2 block size, set roundup accordingly */
	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
	bus->roundup = min(max_roundup, bus->blocksize);

	/* Allocate buffers */
	if (bus->sdiodev->bus_if->maxctl) {
		bus->sdiodev->bus_if->maxctl += bus->roundup;
		bus->rxblen =
		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
			    ALIGNMENT) + bus->head_align;
		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
		if (!(bus->rxbuf)) {
			brcmf_err("rxbuf allocation failed\n");
			goto fail;
		}
	}

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Disable F2 to clear any intermediate frame state on the dongle */
	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);

	bus->rxflow = false;

	/* Done with backplane-dependent accesses, can drop clock... */
	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);

	sdio_release_host(bus->sdiodev->func[1]);

	/* ...and initialize clock/power states */
	bus->clkstate = CLK_SDONLY;
	bus->idletime = BRCMF_IDLE_INTERVAL;
	bus->idleclock = BRCMF_IDLE_ACTIVE;

	/* SR state */
	bus->sr_enabled = false;

	brcmf_sdio_debugfs_create(bus);
	brcmf_dbg(INFO, "completed!!\n");

	ret = brcmf_sdio_get_fwnames(bus->ci, sdiodev);
	if (ret)
		goto fail;

	ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
				     sdiodev->fw_name, sdiodev->nvram_name,
				     brcmf_sdio_firmware_callback);
	if (ret != 0) {
		brcmf_err("async firmware request failed: %d\n", ret);
		goto fail;
	}

	return bus;

fail:
	brcmf_sdio_remove(bus);
	return NULL;
}

/* Detach and free everything */
void brcmf_sdio_remove(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus) {
		/* De-register interrupt handler */
		brcmf_sdiod_intr_unregister(bus->sdiodev);

		brcmf_detach(bus->sdiodev->dev);

		cancel_work_sync(&bus->datawork);
		if (bus->brcmf_wq)
			destroy_workqueue(bus->brcmf_wq);

		if (bus->ci) {
			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdio_wd_timer(bus, 0);
				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
				/* Leave the device in state where it is
				 * 'passive'. This is done by resetting all
				 * necessary cores.
				 */
				msleep(20);
				brcmf_chip_set_passive(bus->ci);
				brcmf_sdio_clkctl(bus, CLK_NONE, false);
				sdio_release_host(bus->sdiodev->func[1]);
			}
			brcmf_chip_detach(bus->ci);
		}

		kfree(bus->rxbuf);
		kfree(bus->hdrbuf);
		kfree(bus);
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}

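/* Start, re-arm or stop the watchdog timer: a wdtick of 0 stops the timer,
 * a non-zero value (re)arms it with the BRCMF_WD_POLL_MS period once the
 * firmware is up.
 */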
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
	/* Totally stop the timer */
	if (!wdtick && bus->wd_timer_valid) {
		del_timer_sync(&bus->timer);
		bus->wd_timer_valid = false;
		bus->save_ms = wdtick;
		return;
	}

	/* don't start the wd until fw is loaded */
	if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
		return;

	if (wdtick) {
		if (bus->save_ms != BRCMF_WD_POLL_MS) {
			if (bus->wd_timer_valid)
				/* Stop timer and restart at new value */
				del_timer_sync(&bus->timer);

			/* Create timer again when watchdog period is
			 * dynamically changed or in the first instance
			 */
			bus->timer.expires =
				jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
			add_timer(&bus->timer);

		} else {
			/* Re-arm the timer at the last watchdog period */
			mod_timer(&bus->timer,
				jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
		}

		bus->wd_timer_valid = true;
		bus->save_ms = wdtick;
	}
}

int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
	int ret;

	sdio_claim_host(bus->sdiodev->func[1]);
	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
	sdio_release_host(bus->sdiodev->func[1]);

	return ret;
}