/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "of.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK	0x03

#define SDIO_FUNC1_BLOCKSIZE	64
#define SDIO_FUNC2_BLOCKSIZE	512
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY	3000

#define BRCMF_DEFAULT_TXGLOM_SIZE	32	/* max tx frames in glom chain */
#define BRCMF_DEFAULT_RXGLOM_SIZE	32	/* max rx frames in glom chain */

struct brcmf_sdiod_freezer {
        atomic_t freezing;
        atomic_t thread_count;
        u32 frozen_count;
        wait_queue_head_t thread_freeze;
        struct completion resumed;
};

static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");

static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

        brcmf_dbg(INTR, "OOB intr triggered\n");

        /* out-of-band interrupt is level-triggered which won't
         * be cleared until dpc
         */
        if (sdiodev->irq_en) {
                disable_irq_nosync(irq);
                sdiodev->irq_en = false;
        }

        brcmf_sdio_isr(sdiodev->bus);

        return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

        brcmf_dbg(INTR, "IB intr triggered\n");

        brcmf_sdio_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}

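/* Register the dongle interrupt handler: use the out-of-band interrupt
 * line described by the platform data when one is provided, otherwise
 * fall back to in-band SDIO interrupts claimed through the MMC core.
 */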
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
        int ret = 0;
        u8 data;
        u32 addr, gpiocontrol;
        unsigned long flags;

        if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
                brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
                          sdiodev->pdata->oob_irq_nr);
                ret = request_irq(sdiodev->pdata->oob_irq_nr,
                                  brcmf_sdiod_oob_irqhandler,
                                  sdiodev->pdata->oob_irq_flags,
                                  "brcmf_oob_intr",
                                  &sdiodev->func[1]->dev);
                if (ret != 0) {
                        brcmf_err("request_irq failed %d\n", ret);
                        return ret;
                }
                sdiodev->oob_irq_requested = true;
                spin_lock_init(&sdiodev->irq_en_lock);
                spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
                sdiodev->irq_en = true;
                spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

                ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
                if (ret != 0) {
                        brcmf_err("enable_irq_wake failed %d\n", ret);
                        return ret;
                }
                sdiodev->irq_wake = true;

                sdio_claim_host(sdiodev->func[1]);

                if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
                        /* assign GPIO to SDIO core */
                        addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
                        gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
                        gpiocontrol |= 0x2;
                        brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);

                        brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
                                          &ret);
                        brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
                        brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
                }

                /* must configure SDIO_CCCR_IENx to enable irq */
                data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
                data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
                brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

                /* redirect, configure and enable io for interrupt signal */
                data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
                if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
                        data |= SDIO_SEPINT_ACT_HI;
                brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);

                sdio_release_host(sdiodev->func[1]);
        } else {
                brcmf_dbg(SDIO, "Entering\n");
                sdio_claim_host(sdiodev->func[1]);
                sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
                sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
                sdio_release_host(sdiodev->func[1]);
        }

        return 0;
}

int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
        brcmf_dbg(SDIO, "Entering\n");

        if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
                sdio_claim_host(sdiodev->func[1]);
                brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
                brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
                sdio_release_host(sdiodev->func[1]);

                if (sdiodev->oob_irq_requested) {
                        sdiodev->oob_irq_requested = false;
                        if (sdiodev->irq_wake) {
                                disable_irq_wake(sdiodev->pdata->oob_irq_nr);
                                sdiodev->irq_wake = false;
                        }
                        free_irq(sdiodev->pdata->oob_irq_nr,
                                 &sdiodev->func[1]->dev);
                        sdiodev->irq_en = false;
                }
        } else {
                sdio_claim_host(sdiodev->func[1]);
                sdio_release_irq(sdiodev->func[2]);
                sdio_release_irq(sdiodev->func[1]);
                sdio_release_host(sdiodev->func[1]);
        }

        return 0;
}

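/* Track the SDIO device state and notify the bus layer when the interface
 * goes up (DOWN -> DATA) or down (DATA -> anything else). Once the device
 * has entered NOMEDIUM state no further transitions are made.
 */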
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
                              enum brcmf_sdiod_state state)
{
        if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
            state == sdiodev->state)
                return;

        brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
        switch (sdiodev->state) {
        case BRCMF_SDIOD_DATA:
                /* any other state means bus interface is down */
                brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
                break;
        case BRCMF_SDIOD_DOWN:
                /* transition from DOWN to DATA means bus interface is up */
                if (state == BRCMF_SDIOD_DATA)
                        brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
                break;
        default:
                break;
        }
        sdiodev->state = state;
}

static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
                                        uint regaddr, u8 byte)
{
        int err_ret;

        /*
         * Can only directly write to some F0 registers.
         * Handle CCCR_IENx and CCCR_ABORT command
         * as a special case.
         */
        if ((regaddr == SDIO_CCCR_ABORT) ||
            (regaddr == SDIO_CCCR_IENx))
                sdio_writeb(func, byte, regaddr, &err_ret);
        else
                sdio_f0_writeb(func, byte, regaddr, &err_ret);

        return err_ret;
}

static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
                                    u32 addr, u8 regsz, void *data, bool write)
{
        struct sdio_func *func;
        int ret;

        brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
                  write, fn, addr, regsz);

        /* only allow byte access on F0 */
        if (WARN_ON(regsz > 1 && !fn))
                return -EINVAL;
        func = sdiodev->func[fn];

        switch (regsz) {
        case sizeof(u8):
                if (write) {
                        if (fn)
                                sdio_writeb(func, *(u8 *)data, addr, &ret);
                        else
                                ret = brcmf_sdiod_f0_writeb(func, addr,
                                                            *(u8 *)data);
                } else {
                        if (fn)
                                *(u8 *)data = sdio_readb(func, addr, &ret);
                        else
                                *(u8 *)data = sdio_f0_readb(func, addr, &ret);
                }
                break;
        case sizeof(u16):
                if (write)
                        sdio_writew(func, *(u16 *)data, addr, &ret);
                else
                        *(u16 *)data = sdio_readw(func, addr, &ret);
                break;
        case sizeof(u32):
                if (write)
                        sdio_writel(func, *(u32 *)data, addr, &ret);
                else
                        *(u32 *)data = sdio_readl(func, addr, &ret);
                break;
        default:
                brcmf_err("invalid size: %d\n", regsz);
                break;
        }

        if (ret)
                brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
                          write ? "write" : "read", fn, addr, ret);

        return ret;
}

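/* Read or write a register through the SDIO function matching its address
 * range, retrying up to SDIOH_API_ACCESS_RETRY_LIMIT times before giving up.
 */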
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                                    u8 regsz, void *data, bool write)
{
        u8 func;
        s32 retry = 0;
        int ret;

        if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
                return -ENOMEDIUM;

        /*
         * figure out how to read the register based on address range
         * 0x00 ~ 0x7FF: function 0 CCCR and FBR
         * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
         * The rest: function 1 silicon backplane core registers
         */
        if ((addr & ~REG_F0_REG_MASK) == 0)
                func = SDIO_FUNC_0;
        else
                func = SDIO_FUNC_1;

        do {
                if (!write)
                        memset(data, 0, regsz);
                /* on retry, wait 1 ms to let the bus settle down */
                if (retry)
                        usleep_range(1000, 2000);
                ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
                                               data, write);
        } while (ret != 0 && ret != -ENOMEDIUM &&
                 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

        if (ret == -ENOMEDIUM)
                brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
        else if (ret != 0) {
                /*
                 * SleepCSR register access can fail when
                 * waking up the device so reduce this noise
                 * in the logs.
                 */
                if (addr != SBSDIO_FUNC1_SLEEPCSR)
                        brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
                                  write ? "write" : "read", func, addr, ret);
                else
                        brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
                                  write ? "write" : "read", func, addr, ret);
        }
        return ret;
}

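/* Program the backplane address window by writing the low, mid and high
 * window registers of function 1 one byte at a time.
 */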
static int
brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
        int err = 0, i;
        u8 addr[3];

        if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
                return -ENOMEDIUM;

        addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
        addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
        addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

        for (i = 0; i < 3; i++) {
                err = brcmf_sdiod_regrw_helper(sdiodev,
                                               SBSDIO_FUNC1_SBADDRLOW + i,
                                               sizeof(u8), &addr[i], true);
                if (err) {
                        brcmf_err("failed at addr: 0x%0x\n",
                                  SBSDIO_FUNC1_SBADDRLOW + i);
                        break;
                }
        }

        return err;
}

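/* Prepare a backplane address for a function 1 access: move the address
 * window if needed, keep only the window offset bits and set the 4-byte
 * access flag for 32-bit transfers.
 */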
static int
brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
        uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;

        if (bar0 != sdiodev->sbwad) {
                err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
                if (err)
                        return err;

                sdiodev->sbwad = bar0;
        }

        *addr &= SBSDIO_SB_OFT_ADDR_MASK;

        if (width == 4)
                *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

        return 0;
}

u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
        u8 data;
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
        retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
                                          false);
        brcmf_dbg(SDIO, "data:0x%02x\n", data);

        if (ret)
                *ret = retval;

        return data;
}

u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
        u32 data;
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
        retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
        if (retval)
                goto done;
        retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
                                          false);
        brcmf_dbg(SDIO, "data:0x%08x\n", data);

done:
        if (ret)
                *ret = retval;

        return data;
}

void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
                       u8 data, int *ret)
{
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
        retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
                                          true);
        if (ret)
                *ret = retval;
}

void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
                       u32 data, int *ret)
{
        int retval;

        brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
        retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
        if (retval)
                goto done;
        retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
                                          true);

done:
        if (ret)
                *ret = retval;
}

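/* Transfer a single skb using the standard mmc I/O helpers. The request
 * length is rounded up to a 4-byte multiple; function 2 reads use
 * sdio_readsb() since they are treated as a FIFO operation.
 */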
static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                              bool write, u32 addr, struct sk_buff *pkt)
{
        unsigned int req_sz;
        int err;

        /* Single skb use the standard mmc interface */
        req_sz = pkt->len + 3;
        req_sz &= (uint)~3;

        if (write)
                err = sdio_memcpy_toio(sdiodev->func[fn], addr,
                                       ((u8 *)(pkt->data)), req_sz);
        else if (fn == 1)
                err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
                                         addr, req_sz);
        else
                /* function 2 read is FIFO operation */
                err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
                                  req_sz);
        if (err == -ENOMEDIUM)
                brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
        return err;
}

/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: list of skbs to transfer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skbs passed down by the
 * caller have already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
                                 bool write, u32 addr,
                                 struct sk_buff_head *pktlist)
{
        unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
        unsigned int max_req_sz, orig_offset, dst_offset;
        unsigned short max_seg_cnt, seg_sz;
        unsigned char *pkt_data, *orig_data, *dst_data;
        struct sk_buff *pkt_next = NULL, *local_pkt_next;
        struct sk_buff_head local_list, *target_list;
        struct mmc_request mmc_req;
        struct mmc_command mmc_cmd;
        struct mmc_data mmc_dat;
        struct scatterlist *sgl;
        int ret = 0;

        if (!pktlist->qlen)
                return -EINVAL;

        target_list = pktlist;
        /* for host with broken sg support, prepare a page aligned list */
        __skb_queue_head_init(&local_list);
        if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
                req_sz = 0;
                skb_queue_walk(pktlist, pkt_next)
                        req_sz += pkt_next->len;
                req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
                while (req_sz > PAGE_SIZE) {
                        pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
                        if (pkt_next == NULL) {
                                ret = -ENOMEM;
                                goto exit;
                        }
                        __skb_queue_tail(&local_list, pkt_next);
                        req_sz -= PAGE_SIZE;
                }
                pkt_next = brcmu_pkt_buf_get_skb(req_sz);
                if (pkt_next == NULL) {
                        ret = -ENOMEM;
                        goto exit;
                }
                __skb_queue_tail(&local_list, pkt_next);
                target_list = &local_list;
        }

        func_blk_sz = sdiodev->func[fn]->cur_blksize;
        max_req_sz = sdiodev->max_request_size;
        max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
                            target_list->qlen);
        seg_sz = target_list->qlen;
        pkt_offset = 0;
        pkt_next = target_list->next;

        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&mmc_cmd, 0, sizeof(struct mmc_command));
        memset(&mmc_dat, 0, sizeof(struct mmc_data));

        mmc_dat.sg = sdiodev->sgtable.sgl;
        mmc_dat.blksz = func_blk_sz;
        mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
        mmc_cmd.opcode = SD_IO_RW_EXTENDED;
        mmc_cmd.arg = write ? 1<<31 : 0;        /* write flag */
        mmc_cmd.arg |= (fn & 0x7) << 28;        /* SDIO func num */
        mmc_cmd.arg |= 1<<27;                   /* block mode */
        /* for function 1 the addr will be incremented */
        mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
        mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
        mmc_req.cmd = &mmc_cmd;
        mmc_req.data = &mmc_dat;

        while (seg_sz) {
                req_sz = 0;
                sg_cnt = 0;
                sgl = sdiodev->sgtable.sgl;
                /* prep sg table */
                while (pkt_next != (struct sk_buff *)target_list) {
                        pkt_data = pkt_next->data + pkt_offset;
                        sg_data_sz = pkt_next->len - pkt_offset;
                        if (sg_data_sz > sdiodev->max_segment_size)
                                sg_data_sz = sdiodev->max_segment_size;
                        if (sg_data_sz > max_req_sz - req_sz)
                                sg_data_sz = max_req_sz - req_sz;

                        sg_set_buf(sgl, pkt_data, sg_data_sz);

                        sg_cnt++;
                        sgl = sg_next(sgl);
                        req_sz += sg_data_sz;
                        pkt_offset += sg_data_sz;
                        if (pkt_offset == pkt_next->len) {
                                pkt_offset = 0;
                                pkt_next = pkt_next->next;
                        }

                        if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
                                break;
                }
                seg_sz -= sg_cnt;

                if (req_sz % func_blk_sz != 0) {
                        brcmf_err("sg request length %u is not %u aligned\n",
                                  req_sz, func_blk_sz);
                        ret = -ENOTBLK;
                        goto exit;
                }

                mmc_dat.sg_len = sg_cnt;
                mmc_dat.blocks = req_sz / func_blk_sz;
                mmc_cmd.arg |= (addr & 0x1FFFF) << 9;   /* address */
                mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;  /* block count */
                /* incrementing addr for function 1 */
                if (fn == 1)
                        addr += req_sz;

                mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
                mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

                ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
                if (ret == -ENOMEDIUM) {
                        brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
                        break;
                } else if (ret != 0) {
                        brcmf_err("CMD53 sg block %s failed %d\n",
                                  write ? "write" : "read", ret);
                        ret = -EIO;
                        break;
                }
        }

        if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
                local_pkt_next = local_list.next;
                orig_offset = 0;
                skb_queue_walk(pktlist, pkt_next) {
                        dst_offset = 0;
                        do {
                                req_sz = local_pkt_next->len - orig_offset;
                                req_sz = min_t(uint, pkt_next->len - dst_offset,
                                               req_sz);
                                orig_data = local_pkt_next->data + orig_offset;
                                dst_data = pkt_next->data + dst_offset;
                                memcpy(dst_data, orig_data, req_sz);
                                orig_offset += req_sz;
                                dst_offset += req_sz;
                                if (orig_offset == local_pkt_next->len) {
                                        orig_offset = 0;
                                        local_pkt_next = local_pkt_next->next;
                                }
                                if (dst_offset == pkt_next->len)
                                        break;
                        } while (!skb_queue_empty(&local_list));
                }
        }

exit:
        sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
        while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
                brcmu_pkt_buf_free_skb(pkt_next);

        return ret;
}

int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
        struct sk_buff *mypkt;
        int err;

        mypkt = brcmu_pkt_buf_get_skb(nbytes);
        if (!mypkt) {
                brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
                          nbytes);
                return -EIO;
        }

        err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
        if (!err)
                memcpy(buf, mypkt->data, nbytes);

        brcmu_pkt_buf_free_skb(mypkt);
        return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
        u32 addr = sdiodev->sbwad;
        int err = 0;

        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

        err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
        if (err)
                goto done;

        err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);

done:
        return err;
}

int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
                           struct sk_buff_head *pktq, uint totlen)
{
        struct sk_buff *glom_skb;
        struct sk_buff *skb;
        u32 addr = sdiodev->sbwad;
        int err = 0;

        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
                  addr, pktq->qlen);

        err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
        if (err)
                goto done;

        if (pktq->qlen == 1)
                err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
                                         pktq->next);
        else if (!sdiodev->sg_support) {
                glom_skb = brcmu_pkt_buf_get_skb(totlen);
                if (!glom_skb)
                        return -ENOMEM;
                err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
                                         glom_skb);
                if (err)
                        goto done;

                skb_queue_walk(pktq, skb) {
                        memcpy(skb->data, glom_skb->data, skb->len);
                        skb_pull(glom_skb, skb->len);
                }
        } else
                err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
                                            pktq);

done:
        return err;
}

int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
        struct sk_buff *mypkt;
        u32 addr = sdiodev->sbwad;
        int err;

        mypkt = brcmu_pkt_buf_get_skb(nbytes);
        if (!mypkt) {
                brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
                          nbytes);
                return -EIO;
        }

        memcpy(mypkt->data, buf, nbytes);

        err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);

        if (!err)
                err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
                                         mypkt);

        brcmu_pkt_buf_free_skb(mypkt);
        return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
                         struct sk_buff_head *pktq)
{
        struct sk_buff *skb;
        u32 addr = sdiodev->sbwad;
        int err;

        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

        err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
        if (err)
                return err;

        if (pktq->qlen == 1 || !sdiodev->sg_support)
                skb_queue_walk(pktq, skb) {
                        err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
                                                 addr, skb);
                        if (err)
                                break;
                }
        else
                err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
                                            pktq);

        return err;
}

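/**
 * brcmf_sdiod_ramrw - read or write dongle memory over SDIO
 * @sdiodev: brcmfmac sdio device
 * @write: true for a write transfer, false for a read
 * @address: backplane address of the memory region
 * @data: host buffer used as source (write) or destination (read)
 * @size: number of bytes to transfer
 *
 * Splits the transfer into chunks that fit within the backplane address
 * window, moving the window as needed, and restores the window to the
 * previously saved value before returning.
 */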
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                  u8 *data, uint size)
{
        int bcmerror = 0;
        struct sk_buff *pkt;
        u32 sdaddr;
        uint dsize;

        dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
        pkt = dev_alloc_skb(dsize);
        if (!pkt) {
                brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
                return -EIO;
        }
        pkt->priority = 0;

        /* Determine initial transfer parameters */
        sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
        if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
                dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
        else
                dsize = size;

        sdio_claim_host(sdiodev->func[1]);

        /* Do the transfer(s) */
        while (size) {
                /* Set the backplane window to include the start address */
                bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
                if (bcmerror)
                        break;

                brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
                          write ? "write" : "read", dsize,
                          sdaddr, address & SBSDIO_SBWINDOW_MASK);

                sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
                sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

                skb_put(pkt, dsize);
                if (write)
                        memcpy(pkt->data, data, dsize);
                bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
                                              sdaddr, pkt);
                if (bcmerror) {
                        brcmf_err("membytes transfer failed\n");
                        break;
                }
                if (!write)
                        memcpy(data, pkt->data, dsize);
                skb_trim(pkt, 0);

                /* Adjust for next transfer (if any) */
                size -= dsize;
                if (size) {
                        data += dsize;
                        address += dsize;
                        sdaddr = 0;
                        dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
                }
        }

        dev_kfree_skb(pkt);

        /* Return the window to backplane enumeration space for core access */
        if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
                brcmf_err("FAILED to set window back to 0x%x\n",
                          sdiodev->sbwad);

        sdio_release_host(sdiodev->func[1]);

        return bcmerror;
}

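/* Abort an ongoing transfer on the given function by writing its number to
 * the CCCR abort register through function 0.
 */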
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
        char t_func = (char)fn;

        brcmf_dbg(SDIO, "Enter\n");

        /* issue abort cmd52 command through F0 */
        brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
                                 sizeof(t_func), &t_func, true);

        brcmf_dbg(SDIO, "Exit\n");
        return 0;
}

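/* Pre-allocate the scatterlist used for glom transfers; scatter-gather
 * support is disabled when the allocation fails.
 */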
static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
        uint nents;
        int err;

        if (!sdiodev->sg_support)
                return;

        nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
        nents += (nents >> 4) + 1;

        WARN_ON(nents > sdiodev->max_segment_count);

        brcmf_dbg(TRACE, "nents=%d\n", nents);
        err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
        if (err < 0) {
                brcmf_err("allocation failed: disable scatter-gather");
                sdiodev->sg_support = false;
        }

        sdiodev->txglomsz = brcmf_sdiod_txglomsz;
}

#ifdef CONFIG_PM_SLEEP
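/* The freezer synchronises the SDIO threads with system suspend: suspend
 * raises the freezing flag and waits until every counted thread has parked
 * itself, resume completes 'resumed' to release them again.
 */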
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
        sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
        if (!sdiodev->freezer)
                return -ENOMEM;
        atomic_set(&sdiodev->freezer->thread_count, 0);
        atomic_set(&sdiodev->freezer->freezing, 0);
        init_waitqueue_head(&sdiodev->freezer->thread_freeze);
        init_completion(&sdiodev->freezer->resumed);
        return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
        if (sdiodev->freezer) {
                WARN_ON(atomic_read(&sdiodev->freezer->freezing));
                kfree(sdiodev->freezer);
        }
}

static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
        atomic_t *expect = &sdiodev->freezer->thread_count;
        int res = 0;

        sdiodev->freezer->frozen_count = 0;
        reinit_completion(&sdiodev->freezer->resumed);
        atomic_set(&sdiodev->freezer->freezing, 1);
        brcmf_sdio_trigger_dpc(sdiodev->bus);
        wait_event(sdiodev->freezer->thread_freeze,
                   atomic_read(expect) == sdiodev->freezer->frozen_count);
        sdio_claim_host(sdiodev->func[1]);
        res = brcmf_sdio_sleep(sdiodev->bus, true);
        sdio_release_host(sdiodev->func[1]);
        return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
        sdio_claim_host(sdiodev->func[1]);
        brcmf_sdio_sleep(sdiodev->bus, false);
        sdio_release_host(sdiodev->func[1]);
        atomic_set(&sdiodev->freezer->freezing, 0);
        complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
        return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
        if (!brcmf_sdiod_freezing(sdiodev))
                return;
        sdiodev->freezer->frozen_count++;
        wake_up(&sdiodev->freezer->thread_freeze);
        wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
        atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
        atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
        return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */

static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
        if (sdiodev->bus) {
                brcmf_sdio_remove(sdiodev->bus);
                sdiodev->bus = NULL;
        }

        brcmf_sdiod_freezer_detach(sdiodev);

        /* Disable Function 2 */
        sdio_claim_host(sdiodev->func[2]);
        sdio_disable_func(sdiodev->func[2]);
        sdio_release_host(sdiodev->func[2]);

        /* Disable Function 1 */
        sdio_claim_host(sdiodev->func[1]);
        sdio_disable_func(sdiodev->func[1]);
        sdio_release_host(sdiodev->func[1]);

        sg_free_table(&sdiodev->sgtable);
        sdiodev->sbwad = 0;

        pm_runtime_allow(sdiodev->func[1]->card->host->parent);
        return 0;
}

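/* Set up both SDIO functions: configure the block sizes, enable function 1,
 * derive the host transfer limits used for scatter-gather support and
 * attach the higher SDIO bus layer.
 */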
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
        struct sdio_func *func;
        struct mmc_host *host;
        uint max_blocks;
        int ret = 0;

        sdiodev->num_funcs = 2;

        sdio_claim_host(sdiodev->func[1]);

        ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
        if (ret) {
                brcmf_err("Failed to set F1 blocksize\n");
                sdio_release_host(sdiodev->func[1]);
                goto out;
        }
        ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
        if (ret) {
                brcmf_err("Failed to set F2 blocksize\n");
                sdio_release_host(sdiodev->func[1]);
                goto out;
        }

        /* increase F2 timeout */
        sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

        /* Enable Function 1 */
        ret = sdio_enable_func(sdiodev->func[1]);
        sdio_release_host(sdiodev->func[1]);
        if (ret) {
                brcmf_err("Failed to enable F1: err=%d\n", ret);
                goto out;
        }

        /*
         * determine host related variables after brcmf_sdiod_probe()
         * as func->cur_blksize is properly set and F2 init has been
         * completed successfully.
         */
        func = sdiodev->func[2];
        host = func->card->host;
        sdiodev->sg_support = host->max_segs > 1;
        max_blocks = min_t(uint, host->max_blk_count, 511u);
        sdiodev->max_request_size = min_t(uint, host->max_req_size,
                                          max_blocks * func->cur_blksize);
        sdiodev->max_segment_count = min_t(uint, host->max_segs,
                                           SG_MAX_SINGLE_ALLOC);
        sdiodev->max_segment_size = host->max_seg_size;

        /* allocate scatter-gather table. sg support
         * will be disabled upon allocation failure.
         */
        brcmf_sdiod_sgtable_alloc(sdiodev);

        ret = brcmf_sdiod_freezer_attach(sdiodev);
        if (ret)
                goto out;

        /* try to attach to the target device */
        sdiodev->bus = brcmf_sdio_probe(sdiodev);
        if (!sdiodev->bus) {
                ret = -ENODEV;
                goto out;
        }
        pm_runtime_forbid(host->parent);
out:
        if (ret)
                brcmf_sdiod_remove(sdiodev);

        return ret;
}

#define BRCMF_SDIO_DEVICE(dev_id)       \
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
        { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;

static int brcmf_ops_sdio_probe(struct sdio_func *func,
                                const struct sdio_device_id *id)
{
        int err;
        struct brcmf_sdio_dev *sdiodev;
        struct brcmf_bus *bus_if;

        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "Class=%x\n", func->class);
        brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
        brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
        brcmf_dbg(SDIO, "Function#: %d\n", func->num);
        /* Consume func num 1 but don't do anything with it. */
        if (func->num == 1)
                return 0;

        /* Ignore anything but func 2 */
        if (func->num != 2)
                return -ENODEV;

        bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
        if (!bus_if)
                return -ENOMEM;
        sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
        if (!sdiodev) {
                kfree(bus_if);
                return -ENOMEM;
        }

        /* store refs to functions used. mmc_card does
         * not hold the F0 function pointer.
         */
        sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
        sdiodev->func[0]->num = 0;
        sdiodev->func[1] = func->card->sdio_func[0];
        sdiodev->func[2] = func;

        sdiodev->bus_if = bus_if;
        bus_if->bus_priv.sdio = sdiodev;
        bus_if->proto_type = BRCMF_PROTO_BCDC;
        dev_set_drvdata(&func->dev, bus_if);
        dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
        sdiodev->dev = &sdiodev->func[1]->dev;
        sdiodev->pdata = brcmfmac_sdio_pdata;

        if (!sdiodev->pdata)
                brcmf_of_probe(sdiodev);

#ifdef CONFIG_PM_SLEEP
        /* WoWL can be supported when KEEP_POWER is set and either
         * WAKE_SDIO_IRQ is set or the platform data provides an OOB
         * interrupt.
         */
        if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
            ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
             (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
                bus_if->wowl_supported = true;
#endif

        brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

        brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
        err = brcmf_sdiod_probe(sdiodev);
        if (err) {
                brcmf_err("F2 error, probe failed %d...\n", err);
                goto fail;
        }

        brcmf_dbg(SDIO, "F2 init completed...\n");
        return 0;

fail:
        dev_set_drvdata(&func->dev, NULL);
        dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
        kfree(sdiodev->func[0]);
        kfree(sdiodev);
        kfree(bus_if);
        return err;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
        struct brcmf_bus *bus_if;
        struct brcmf_sdio_dev *sdiodev;

        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
        brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
        brcmf_dbg(SDIO, "Function: %d\n", func->num);

        if (func->num != 1)
                return;

        bus_if = dev_get_drvdata(&func->dev);
        if (bus_if) {
                sdiodev = bus_if->bus_priv.sdio;
                brcmf_sdiod_remove(sdiodev);

                dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
                dev_set_drvdata(&sdiodev->func[2]->dev, NULL);

                kfree(bus_if);
                kfree(sdiodev->func[0]);
                kfree(sdiodev);
        }

        brcmf_dbg(SDIO, "Exit\n");
}

void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

        brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
        sdiodev->wowl_enabled = enabled;
}

#ifdef CONFIG_PM_SLEEP
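/* Only the function 1 callback does the suspend work and only the
 * function 2 callback does the resume work; the callback invoked for the
 * other function is a no-op.
 */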
static int brcmf_ops_sdio_suspend(struct device *dev)
{
        struct sdio_func *func;
        struct brcmf_bus *bus_if;
        struct brcmf_sdio_dev *sdiodev;
        mmc_pm_flag_t sdio_flags;

        func = container_of(dev, struct sdio_func, dev);
        brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
        if (func->num != SDIO_FUNC_1)
                return 0;

        bus_if = dev_get_drvdata(dev);
        sdiodev = bus_if->bus_priv.sdio;

        brcmf_sdiod_freezer_on(sdiodev);
        brcmf_sdio_wd_timer(sdiodev->bus, 0);

        if (sdiodev->wowl_enabled) {
                sdio_flags = MMC_PM_KEEP_POWER;
                if (sdiodev->pdata->oob_irq_supported)
                        enable_irq_wake(sdiodev->pdata->oob_irq_nr);
                else
                        sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
                if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
                        brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
        }
        return 0;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct sdio_func *func = container_of(dev, struct sdio_func, dev);

        brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
        if (func->num != SDIO_FUNC_2)
                return 0;

        brcmf_sdiod_freezer_off(sdiodev);
        return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
        .suspend = brcmf_ops_sdio_suspend,
        .resume = brcmf_ops_sdio_resume,
};
#endif /* CONFIG_PM_SLEEP */

static struct sdio_driver brcmf_sdmmc_driver = {
        .probe = brcmf_ops_sdio_probe,
        .remove = brcmf_ops_sdio_remove,
        .name = BRCMFMAC_SDIO_PDATA_NAME,
        .id_table = brcmf_sdmmc_ids,
        .drv = {
                .owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
                .pm = &brcmf_sdio_pm_ops,
#endif /* CONFIG_PM_SLEEP */
        },
};

static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
{
        brcmf_dbg(SDIO, "Enter\n");

        brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);

        if (brcmfmac_sdio_pdata->power_on)
                brcmfmac_sdio_pdata->power_on();

        return 0;
}

static int brcmf_sdio_pd_remove(struct platform_device *pdev)
{
        brcmf_dbg(SDIO, "Enter\n");

        if (brcmfmac_sdio_pdata->power_off)
                brcmfmac_sdio_pdata->power_off();

        sdio_unregister_driver(&brcmf_sdmmc_driver);

        return 0;
}

static struct platform_driver brcmf_sdio_pd = {
        .remove = brcmf_sdio_pd_remove,
        .driver = {
                .name = BRCMFMAC_SDIO_PDATA_NAME,
        }
};

void brcmf_sdio_register(void)
{
        int ret;

        ret = sdio_register_driver(&brcmf_sdmmc_driver);
        if (ret)
                brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
        brcmf_dbg(SDIO, "Enter\n");

        if (brcmfmac_sdio_pdata)
                platform_driver_unregister(&brcmf_sdio_pd);
        else
                sdio_unregister_driver(&brcmf_sdmmc_driver);
}

void __init brcmf_sdio_init(void)
{
        int ret;

        brcmf_dbg(SDIO, "Enter\n");

        ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
        if (ret == -ENODEV)
                brcmf_dbg(SDIO, "No platform data available.\n");
}