1 /*
2  * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
3  *
4  * Copyright (C) 2005-2007 AMD (http://www.amd.com)
5  * Author: Thomas Dahlmann
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 /*
14  * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
15  * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
16  * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
17  *
18  * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
19  * be used as host port) and UOC bits PAD_EN and APU are set (should be done
20  * by BIOS init).
21  *
22  * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
23  * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
24  * can be used with gadget ether.
25  */
26 
27 /* debug control */
28 /* #define UDC_VERBOSE */
29 
30 /* Driver strings */
31 #define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
32 #define UDC_DRIVER_VERSION_STRING	"01.00.0206"
33 
34 /* system */
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/kernel.h>
38 #include <linux/delay.h>
39 #include <linux/ioport.h>
40 #include <linux/sched.h>
41 #include <linux/slab.h>
42 #include <linux/errno.h>
43 #include <linux/timer.h>
44 #include <linux/list.h>
45 #include <linux/interrupt.h>
46 #include <linux/ioctl.h>
47 #include <linux/fs.h>
48 #include <linux/dmapool.h>
49 #include <linux/moduleparam.h>
50 #include <linux/device.h>
51 #include <linux/io.h>
52 #include <linux/irq.h>
53 #include <linux/prefetch.h>
54 
55 #include <asm/byteorder.h>
56 #include <asm/unaligned.h>
57 
58 /* gadget stack */
59 #include <linux/usb/ch9.h>
60 #include <linux/usb/gadget.h>
61 
62 /* udc specific */
63 #include "amd5536udc.h"
64 
65 
66 static void udc_tasklet_disconnect(unsigned long);
67 static void empty_req_queue(struct udc_ep *);
68 static int udc_probe(struct udc *dev);
69 static void udc_basic_init(struct udc *dev);
70 static void udc_setup_endpoints(struct udc *dev);
71 static void udc_soft_reset(struct udc *dev);
72 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
73 static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
74 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
75 static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
76 				unsigned long buf_len, gfp_t gfp_flags);
77 static int udc_remote_wakeup(struct udc *dev);
78 static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
79 static void udc_pci_remove(struct pci_dev *pdev);
80 
81 /* description */
82 static const char mod_desc[] = UDC_MOD_DESCRIPTION;
83 static const char name[] = "amd5536udc";
84 
85 /* structure to hold endpoint function pointers */
86 static const struct usb_ep_ops udc_ep_ops;
87 
88 /* received setup data */
89 static union udc_setup_data setup_data;
90 
91 /* pointer to device object */
92 static struct udc *udc;
93 
94 /* irq spin lock for soft reset */
95 static DEFINE_SPINLOCK(udc_irq_spinlock);
96 /* stall spin lock */
97 static DEFINE_SPINLOCK(udc_stall_spinlock);
98 
99 /*
100 * slave mode: pending bytes in rx fifo after nyet,
101 * used if EPIN irq came but no req was available
102 */
103 static unsigned int udc_rxfifo_pending;
104 
105 /* count soft resets after suspend to avoid loop */
106 static int soft_reset_occured;
107 static int soft_reset_after_usbreset_occured;
108 
109 /* timer */
110 static struct timer_list udc_timer;
111 static int stop_timer;
112 
113 /* set_rde -- Is used to control enabling of RX DMA. Problem is
114  * that UDC has only one bit (RDE) to enable/disable RX DMA for
115  * all OUT endpoints. So we have to handle race conditions like
116  * when OUT data reaches the fifo but no request was queued yet.
117  * This cannot be solved by letting the RX DMA disabled until a
118  * request gets queued because there may be other OUT packets
119  * in the FIFO (important for not blocking control traffic).
120  * The value of set_rde controls the correspondig timer.
121  *
122  * set_rde -1 == not used, means it is alloed to be set to 0 or 1
123  * set_rde  0 == do not touch RDE, do no start the RDE timer
124  * set_rde  1 == timer function will look whether FIFO has data
125  * set_rde  2 == set by timer function to enable RX DMA on next call
126  */
127 static int set_rde = -1;
128 
129 static DECLARE_COMPLETION(on_exit);
130 static struct timer_list udc_pollstall_timer;
131 static int stop_pollstall_timer;
132 static DECLARE_COMPLETION(on_pollstall_exit);
133 
134 /* tasklet for usb disconnect */
135 static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
136 		(unsigned long) &udc);
137 
138 
139 /* endpoint names used for print */
140 static const char ep0_string[] = "ep0in";
141 static const char *const ep_string[] = {
142 	ep0_string,
143 	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
144 	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
145 	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
146 	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
147 	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
148 	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
149 	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
150 };
151 
152 /* DMA usage flag */
153 static bool use_dma = 1;
154 /* packet per buffer dma */
155 static bool use_dma_ppb = 1;
156 /* with per descr. update */
157 static bool use_dma_ppb_du;
158 /* buffer fill mode */
159 static int use_dma_bufferfill_mode;
160 /* full speed only mode */
161 static bool use_fullspeed;
162 /* tx buffer size for high speed */
163 static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
164 
165 /* module parameters */
166 module_param(use_dma, bool, S_IRUGO);
167 MODULE_PARM_DESC(use_dma, "true for DMA");
168 module_param(use_dma_ppb, bool, S_IRUGO);
169 MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
170 module_param(use_dma_ppb_du, bool, S_IRUGO);
171 MODULE_PARM_DESC(use_dma_ppb_du,
172 	"true for DMA in packet per buffer mode with descriptor update");
173 module_param(use_fullspeed, bool, S_IRUGO);
174 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
175 
176 /*---------------------------------------------------------------------------*/
177 /* Prints UDC device registers and endpoint irq registers */
print_regs(struct udc * dev)178 static void print_regs(struct udc *dev)
179 {
180 	DBG(dev, "------- Device registers -------\n");
181 	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
182 	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
183 	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
184 	DBG(dev, "\n");
185 	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
186 	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
187 	DBG(dev, "\n");
188 	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
189 	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
190 	DBG(dev, "\n");
191 	DBG(dev, "USE DMA        = %d\n", use_dma);
192 	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
193 		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
194 			"WITHOUT desc. update)\n");
195 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
196 	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
197 		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
198 			"WITH desc. update)\n");
199 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
200 	}
201 	if (use_dma && use_dma_bufferfill_mode) {
202 		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
203 		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
204 	}
205 	if (!use_dma)
206 		dev_info(&dev->pdev->dev, "FIFO mode\n");
207 	DBG(dev, "-------------------------------------------------------\n");
208 }
209 
210 /* Masks unused interrupts */
udc_mask_unused_interrupts(struct udc * dev)211 static int udc_mask_unused_interrupts(struct udc *dev)
212 {
213 	u32 tmp;
214 
215 	/* mask all dev interrupts */
216 	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
217 		AMD_BIT(UDC_DEVINT_ENUM) |
218 		AMD_BIT(UDC_DEVINT_US) |
219 		AMD_BIT(UDC_DEVINT_UR) |
220 		AMD_BIT(UDC_DEVINT_ES) |
221 		AMD_BIT(UDC_DEVINT_SI) |
222 		AMD_BIT(UDC_DEVINT_SOF)|
223 		AMD_BIT(UDC_DEVINT_SC);
224 	writel(tmp, &dev->regs->irqmsk);
225 
226 	/* mask all ep interrupts */
227 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
228 
229 	return 0;
230 }
231 
232 /* Enables endpoint 0 interrupts */
udc_enable_ep0_interrupts(struct udc * dev)233 static int udc_enable_ep0_interrupts(struct udc *dev)
234 {
235 	u32 tmp;
236 
237 	DBG(dev, "udc_enable_ep0_interrupts()\n");
238 
239 	/* read irq mask */
240 	tmp = readl(&dev->regs->ep_irqmsk);
241 	/* enable ep0 irq's */
242 	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
243 		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
244 	writel(tmp, &dev->regs->ep_irqmsk);
245 
246 	return 0;
247 }
248 
249 /* Enables device interrupts for SET_INTF and SET_CONFIG */
udc_enable_dev_setup_interrupts(struct udc * dev)250 static int udc_enable_dev_setup_interrupts(struct udc *dev)
251 {
252 	u32 tmp;
253 
254 	DBG(dev, "enable device interrupts for setup data\n");
255 
256 	/* read irq mask */
257 	tmp = readl(&dev->regs->irqmsk);
258 
259 	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
260 	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
261 		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
262 		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
263 		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
264 		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
265 	writel(tmp, &dev->regs->irqmsk);
266 
267 	return 0;
268 }
269 
270 /* Calculates fifo start of endpoint based on preceding endpoints */
udc_set_txfifo_addr(struct udc_ep * ep)271 static int udc_set_txfifo_addr(struct udc_ep *ep)
272 {
273 	struct udc	*dev;
274 	u32 tmp;
275 	int i;
276 
277 	if (!ep || !(ep->in))
278 		return -EINVAL;
279 
280 	dev = ep->dev;
281 	ep->txfifo = dev->txfifo;
282 
283 	/* traverse ep's */
284 	for (i = 0; i < ep->num; i++) {
285 		if (dev->ep[i].regs) {
286 			/* read fifo size */
287 			tmp = readl(&dev->ep[i].regs->bufin_framenum);
288 			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
289 			ep->txfifo += tmp;
290 		}
291 	}
292 	return 0;
293 }
294 
295 /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
296 static u32 cnak_pending;
297 
UDC_QUEUE_CNAK(struct udc_ep * ep,unsigned num)298 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
299 {
300 	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
301 		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
302 		cnak_pending |= 1 << (num);
303 		ep->naking = 1;
304 	} else
305 		cnak_pending = cnak_pending & (~(1 << (num)));
306 }
307 
308 
309 /* Enables endpoint, is called by gadget driver */
310 static int
udc_ep_enable(struct usb_ep * usbep,const struct usb_endpoint_descriptor * desc)311 udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
312 {
313 	struct udc_ep		*ep;
314 	struct udc		*dev;
315 	u32			tmp;
316 	unsigned long		iflags;
317 	u8 udc_csr_epix;
318 	unsigned		maxpacket;
319 
320 	if (!usbep
321 			|| usbep->name == ep0_string
322 			|| !desc
323 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
324 		return -EINVAL;
325 
326 	ep = container_of(usbep, struct udc_ep, ep);
327 	dev = ep->dev;
328 
329 	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
330 
331 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
332 		return -ESHUTDOWN;
333 
334 	spin_lock_irqsave(&dev->lock, iflags);
335 	ep->ep.desc = desc;
336 
337 	ep->halted = 0;
338 
339 	/* set traffic type */
340 	tmp = readl(&dev->ep[ep->num].regs->ctl);
341 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
342 	writel(tmp, &dev->ep[ep->num].regs->ctl);
343 
344 	/* set max packet size */
345 	maxpacket = usb_endpoint_maxp(desc);
346 	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
347 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
348 	ep->ep.maxpacket = maxpacket;
349 	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
350 
351 	/* IN ep */
352 	if (ep->in) {
353 
354 		/* ep ix in UDC CSR register space */
355 		udc_csr_epix = ep->num;
356 
357 		/* set buffer size (tx fifo entries) */
358 		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
359 		/* double buffering: fifo size = 2 x max packet size */
360 		tmp = AMD_ADDBITS(
361 				tmp,
362 				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
363 					  / UDC_DWORD_BYTES,
364 				UDC_EPIN_BUFF_SIZE);
365 		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
366 
367 		/* calc. tx fifo base addr */
368 		udc_set_txfifo_addr(ep);
369 
370 		/* flush fifo */
371 		tmp = readl(&ep->regs->ctl);
372 		tmp |= AMD_BIT(UDC_EPCTL_F);
373 		writel(tmp, &ep->regs->ctl);
374 
375 	/* OUT ep */
376 	} else {
377 		/* ep ix in UDC CSR register space */
378 		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
379 
380 		/* set max packet size UDC CSR	*/
381 		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
382 		tmp = AMD_ADDBITS(tmp, maxpacket,
383 					UDC_CSR_NE_MAX_PKT);
384 		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
385 
386 		if (use_dma && !ep->in) {
387 			/* alloc and init BNA dummy request */
388 			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
389 			ep->bna_occurred = 0;
390 		}
391 
392 		if (ep->num != UDC_EP0OUT_IX)
393 			dev->data_ep_enabled = 1;
394 	}
395 
396 	/* set ep values */
397 	tmp = readl(&dev->csr->ne[udc_csr_epix]);
398 	/* max packet */
399 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
400 	/* ep number */
401 	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
402 	/* ep direction */
403 	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
404 	/* ep type */
405 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
406 	/* ep config */
407 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
408 	/* ep interface */
409 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
410 	/* ep alt */
411 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
412 	/* write reg */
413 	writel(tmp, &dev->csr->ne[udc_csr_epix]);
414 
415 	/* enable ep irq */
416 	tmp = readl(&dev->regs->ep_irqmsk);
417 	tmp &= AMD_UNMASK_BIT(ep->num);
418 	writel(tmp, &dev->regs->ep_irqmsk);
419 
420 	/*
421 	 * clear NAK by writing CNAK
422 	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
423 	 */
424 	if (!use_dma || ep->in) {
425 		tmp = readl(&ep->regs->ctl);
426 		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
427 		writel(tmp, &ep->regs->ctl);
428 		ep->naking = 0;
429 		UDC_QUEUE_CNAK(ep, ep->num);
430 	}
431 	tmp = desc->bEndpointAddress;
432 	DBG(dev, "%s enabled\n", usbep->name);
433 
434 	spin_unlock_irqrestore(&dev->lock, iflags);
435 	return 0;
436 }
437 
438 /* Resets endpoint */
ep_init(struct udc_regs __iomem * regs,struct udc_ep * ep)439 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
440 {
441 	u32		tmp;
442 
443 	VDBG(ep->dev, "ep-%d reset\n", ep->num);
444 	ep->ep.desc = NULL;
445 	ep->ep.ops = &udc_ep_ops;
446 	INIT_LIST_HEAD(&ep->queue);
447 
448 	usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0);
449 	/* set NAK */
450 	tmp = readl(&ep->regs->ctl);
451 	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
452 	writel(tmp, &ep->regs->ctl);
453 	ep->naking = 1;
454 
455 	/* disable interrupt */
456 	tmp = readl(&regs->ep_irqmsk);
457 	tmp |= AMD_BIT(ep->num);
458 	writel(tmp, &regs->ep_irqmsk);
459 
460 	if (ep->in) {
461 		/* unset P and IN bit of potential former DMA */
462 		tmp = readl(&ep->regs->ctl);
463 		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
464 		writel(tmp, &ep->regs->ctl);
465 
466 		tmp = readl(&ep->regs->sts);
467 		tmp |= AMD_BIT(UDC_EPSTS_IN);
468 		writel(tmp, &ep->regs->sts);
469 
470 		/* flush the fifo */
471 		tmp = readl(&ep->regs->ctl);
472 		tmp |= AMD_BIT(UDC_EPCTL_F);
473 		writel(tmp, &ep->regs->ctl);
474 
475 	}
476 	/* reset desc pointer */
477 	writel(0, &ep->regs->desptr);
478 }
479 
480 /* Disables endpoint, is called by gadget driver */
udc_ep_disable(struct usb_ep * usbep)481 static int udc_ep_disable(struct usb_ep *usbep)
482 {
483 	struct udc_ep	*ep = NULL;
484 	unsigned long	iflags;
485 
486 	if (!usbep)
487 		return -EINVAL;
488 
489 	ep = container_of(usbep, struct udc_ep, ep);
490 	if (usbep->name == ep0_string || !ep->ep.desc)
491 		return -EINVAL;
492 
493 	DBG(ep->dev, "Disable ep-%d\n", ep->num);
494 
495 	spin_lock_irqsave(&ep->dev->lock, iflags);
496 	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
497 	empty_req_queue(ep);
498 	ep_init(ep->dev->regs, ep);
499 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
500 
501 	return 0;
502 }
503 
504 /* Allocates request packet, called by gadget driver */
505 static struct usb_request *
udc_alloc_request(struct usb_ep * usbep,gfp_t gfp)506 udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
507 {
508 	struct udc_request	*req;
509 	struct udc_data_dma	*dma_desc;
510 	struct udc_ep	*ep;
511 
512 	if (!usbep)
513 		return NULL;
514 
515 	ep = container_of(usbep, struct udc_ep, ep);
516 
517 	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
518 	req = kzalloc(sizeof(struct udc_request), gfp);
519 	if (!req)
520 		return NULL;
521 
522 	req->req.dma = DMA_DONT_USE;
523 	INIT_LIST_HEAD(&req->queue);
524 
525 	if (ep->dma) {
526 		/* ep0 in requests are allocated from data pool here */
527 		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
528 						&req->td_phys);
529 		if (!dma_desc) {
530 			kfree(req);
531 			return NULL;
532 		}
533 
534 		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
535 				"td_phys = %lx\n",
536 				req, dma_desc,
537 				(unsigned long)req->td_phys);
538 		/* prevent from using desc. - set HOST BUSY */
539 		dma_desc->status = AMD_ADDBITS(dma_desc->status,
540 						UDC_DMA_STP_STS_BS_HOST_BUSY,
541 						UDC_DMA_STP_STS_BS);
542 		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
543 		req->td_data = dma_desc;
544 		req->td_data_last = NULL;
545 		req->chain_len = 1;
546 	}
547 
548 	return &req->req;
549 }
550 
551 /* Frees request packet, called by gadget driver */
552 static void
udc_free_request(struct usb_ep * usbep,struct usb_request * usbreq)553 udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
554 {
555 	struct udc_ep	*ep;
556 	struct udc_request	*req;
557 
558 	if (!usbep || !usbreq)
559 		return;
560 
561 	ep = container_of(usbep, struct udc_ep, ep);
562 	req = container_of(usbreq, struct udc_request, req);
563 	VDBG(ep->dev, "free_req req=%p\n", req);
564 	BUG_ON(!list_empty(&req->queue));
565 	if (req->td_data) {
566 		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
567 
568 		/* free dma chain if created */
569 		if (req->chain_len > 1)
570 			udc_free_dma_chain(ep->dev, req);
571 
572 		pci_pool_free(ep->dev->data_requests, req->td_data,
573 							req->td_phys);
574 	}
575 	kfree(req);
576 }
577 
578 /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
udc_init_bna_dummy(struct udc_request * req)579 static void udc_init_bna_dummy(struct udc_request *req)
580 {
581 	if (req) {
582 		/* set last bit */
583 		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
584 		/* set next pointer to itself */
585 		req->td_data->next = req->td_phys;
586 		/* set HOST BUSY */
587 		req->td_data->status
588 			= AMD_ADDBITS(req->td_data->status,
589 					UDC_DMA_STP_STS_BS_DMA_DONE,
590 					UDC_DMA_STP_STS_BS);
591 #ifdef UDC_VERBOSE
592 		pr_debug("bna desc = %p, sts = %08x\n",
593 			req->td_data, req->td_data->status);
594 #endif
595 	}
596 }
597 
598 /* Allocate BNA dummy descriptor */
udc_alloc_bna_dummy(struct udc_ep * ep)599 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
600 {
601 	struct udc_request *req = NULL;
602 	struct usb_request *_req = NULL;
603 
604 	/* alloc the dummy request */
605 	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
606 	if (_req) {
607 		req = container_of(_req, struct udc_request, req);
608 		ep->bna_dummy_req = req;
609 		udc_init_bna_dummy(req);
610 	}
611 	return req;
612 }
613 
614 /* Write data to TX fifo for IN packets */
615 static void
udc_txfifo_write(struct udc_ep * ep,struct usb_request * req)616 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
617 {
618 	u8			*req_buf;
619 	u32			*buf;
620 	int			i, j;
621 	unsigned		bytes = 0;
622 	unsigned		remaining = 0;
623 
624 	if (!req || !ep)
625 		return;
626 
627 	req_buf = req->buf + req->actual;
628 	prefetch(req_buf);
629 	remaining = req->length - req->actual;
630 
631 	buf = (u32 *) req_buf;
632 
633 	bytes = ep->ep.maxpacket;
634 	if (bytes > remaining)
635 		bytes = remaining;
636 
637 	/* dwords first */
638 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
639 		writel(*(buf + i), ep->txfifo);
640 
641 	/* remaining bytes must be written by byte access */
642 	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
643 		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
644 							ep->txfifo);
645 	}
646 
647 	/* dummy write confirm */
648 	writel(0, &ep->regs->confirm);
649 }
650 
651 /* Read dwords from RX fifo for OUT transfers */
udc_rxfifo_read_dwords(struct udc * dev,u32 * buf,int dwords)652 static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
653 {
654 	int i;
655 
656 	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
657 
658 	for (i = 0; i < dwords; i++)
659 		*(buf + i) = readl(dev->rxfifo);
660 	return 0;
661 }
662 
663 /* Read bytes from RX fifo for OUT transfers */
udc_rxfifo_read_bytes(struct udc * dev,u8 * buf,int bytes)664 static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
665 {
666 	int i, j;
667 	u32 tmp;
668 
669 	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
670 
671 	/* dwords first */
672 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
673 		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
674 
675 	/* remaining bytes must be read by byte access */
676 	if (bytes % UDC_DWORD_BYTES) {
677 		tmp = readl(dev->rxfifo);
678 		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
679 			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
680 			tmp = tmp >> UDC_BITS_PER_BYTE;
681 		}
682 	}
683 
684 	return 0;
685 }
686 
687 /* Read data from RX fifo for OUT transfers */
688 static int
udc_rxfifo_read(struct udc_ep * ep,struct udc_request * req)689 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
690 {
691 	u8 *buf;
692 	unsigned buf_space;
693 	unsigned bytes = 0;
694 	unsigned finished = 0;
695 
696 	/* received number bytes */
697 	bytes = readl(&ep->regs->sts);
698 	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
699 
700 	buf_space = req->req.length - req->req.actual;
701 	buf = req->req.buf + req->req.actual;
702 	if (bytes > buf_space) {
703 		if ((buf_space % ep->ep.maxpacket) != 0) {
704 			DBG(ep->dev,
705 				"%s: rx %d bytes, rx-buf space = %d bytesn\n",
706 				ep->ep.name, bytes, buf_space);
707 			req->req.status = -EOVERFLOW;
708 		}
709 		bytes = buf_space;
710 	}
711 	req->req.actual += bytes;
712 
713 	/* last packet ? */
714 	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
715 		|| ((req->req.actual == req->req.length) && !req->req.zero))
716 		finished = 1;
717 
718 	/* read rx fifo bytes */
719 	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
720 	udc_rxfifo_read_bytes(ep->dev, buf, bytes);
721 
722 	return finished;
723 }
724 
725 /* create/re-init a DMA descriptor or a DMA descriptor chain */
prep_dma(struct udc_ep * ep,struct udc_request * req,gfp_t gfp)726 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
727 {
728 	int	retval = 0;
729 	u32	tmp;
730 
731 	VDBG(ep->dev, "prep_dma\n");
732 	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
733 			ep->num, req->td_data);
734 
735 	/* set buffer pointer */
736 	req->td_data->bufptr = req->req.dma;
737 
738 	/* set last bit */
739 	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
740 
741 	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
742 	if (use_dma_ppb) {
743 
744 		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
745 		if (retval != 0) {
746 			if (retval == -ENOMEM)
747 				DBG(ep->dev, "Out of DMA memory\n");
748 			return retval;
749 		}
750 		if (ep->in) {
751 			if (req->req.length == ep->ep.maxpacket) {
752 				/* write tx bytes */
753 				req->td_data->status =
754 					AMD_ADDBITS(req->td_data->status,
755 						ep->ep.maxpacket,
756 						UDC_DMA_IN_STS_TXBYTES);
757 
758 			}
759 		}
760 
761 	}
762 
763 	if (ep->in) {
764 		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
765 				"maxpacket=%d ep%d\n",
766 				use_dma_ppb, req->req.length,
767 				ep->ep.maxpacket, ep->num);
768 		/*
769 		 * if bytes < max packet then tx bytes must
770 		 * be written in packet per buffer mode
771 		 */
772 		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
773 				|| ep->num == UDC_EP0OUT_IX
774 				|| ep->num == UDC_EP0IN_IX) {
775 			/* write tx bytes */
776 			req->td_data->status =
777 				AMD_ADDBITS(req->td_data->status,
778 						req->req.length,
779 						UDC_DMA_IN_STS_TXBYTES);
780 			/* reset frame num */
781 			req->td_data->status =
782 				AMD_ADDBITS(req->td_data->status,
783 						0,
784 						UDC_DMA_IN_STS_FRAMENUM);
785 		}
786 		/* set HOST BUSY */
787 		req->td_data->status =
788 			AMD_ADDBITS(req->td_data->status,
789 				UDC_DMA_STP_STS_BS_HOST_BUSY,
790 				UDC_DMA_STP_STS_BS);
791 	} else {
792 		VDBG(ep->dev, "OUT set host ready\n");
793 		/* set HOST READY */
794 		req->td_data->status =
795 			AMD_ADDBITS(req->td_data->status,
796 				UDC_DMA_STP_STS_BS_HOST_READY,
797 				UDC_DMA_STP_STS_BS);
798 
799 
800 			/* clear NAK by writing CNAK */
801 			if (ep->naking) {
802 				tmp = readl(&ep->regs->ctl);
803 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
804 				writel(tmp, &ep->regs->ctl);
805 				ep->naking = 0;
806 				UDC_QUEUE_CNAK(ep, ep->num);
807 			}
808 
809 	}
810 
811 	return retval;
812 }
813 
814 /* Completes request packet ... caller MUST hold lock */
815 static void
complete_req(struct udc_ep * ep,struct udc_request * req,int sts)816 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
817 __releases(ep->dev->lock)
818 __acquires(ep->dev->lock)
819 {
820 	struct udc		*dev;
821 	unsigned		halted;
822 
823 	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
824 
825 	dev = ep->dev;
826 	/* unmap DMA */
827 	if (ep->dma)
828 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
829 
830 	halted = ep->halted;
831 	ep->halted = 1;
832 
833 	/* set new status if pending */
834 	if (req->req.status == -EINPROGRESS)
835 		req->req.status = sts;
836 
837 	/* remove from ep queue */
838 	list_del_init(&req->queue);
839 
840 	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
841 		&req->req, req->req.length, ep->ep.name, sts);
842 
843 	spin_unlock(&dev->lock);
844 	usb_gadget_giveback_request(&ep->ep, &req->req);
845 	spin_lock(&dev->lock);
846 	ep->halted = halted;
847 }
848 
849 /* frees pci pool descriptors of a DMA chain */
udc_free_dma_chain(struct udc * dev,struct udc_request * req)850 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
851 {
852 
853 	int ret_val = 0;
854 	struct udc_data_dma	*td;
855 	struct udc_data_dma	*td_last = NULL;
856 	unsigned int i;
857 
858 	DBG(dev, "free chain req = %p\n", req);
859 
860 	/* do not free first desc., will be done by free for request */
861 	td_last = req->td_data;
862 	td = phys_to_virt(td_last->next);
863 
864 	for (i = 1; i < req->chain_len; i++) {
865 
866 		pci_pool_free(dev->data_requests, td,
867 				(dma_addr_t) td_last->next);
868 		td_last = td;
869 		td = phys_to_virt(td_last->next);
870 	}
871 
872 	return ret_val;
873 }
874 
875 /* Iterates to the end of a DMA chain and returns last descriptor */
udc_get_last_dma_desc(struct udc_request * req)876 static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
877 {
878 	struct udc_data_dma	*td;
879 
880 	td = req->td_data;
881 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
882 		td = phys_to_virt(td->next);
883 
884 	return td;
885 
886 }
887 
888 /* Iterates to the end of a DMA chain and counts bytes received */
udc_get_ppbdu_rxbytes(struct udc_request * req)889 static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
890 {
891 	struct udc_data_dma	*td;
892 	u32 count;
893 
894 	td = req->td_data;
895 	/* received number bytes */
896 	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
897 
898 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
899 		td = phys_to_virt(td->next);
900 		/* received number bytes */
901 		if (td) {
902 			count += AMD_GETBITS(td->status,
903 				UDC_DMA_OUT_STS_RXBYTES);
904 		}
905 	}
906 
907 	return count;
908 
909 }
910 
911 /* Creates or re-inits a DMA chain */
udc_create_dma_chain(struct udc_ep * ep,struct udc_request * req,unsigned long buf_len,gfp_t gfp_flags)912 static int udc_create_dma_chain(
913 	struct udc_ep *ep,
914 	struct udc_request *req,
915 	unsigned long buf_len, gfp_t gfp_flags
916 )
917 {
918 	unsigned long bytes = req->req.length;
919 	unsigned int i;
920 	dma_addr_t dma_addr;
921 	struct udc_data_dma	*td = NULL;
922 	struct udc_data_dma	*last = NULL;
923 	unsigned long txbytes;
924 	unsigned create_new_chain = 0;
925 	unsigned len;
926 
927 	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
928 			bytes, buf_len);
929 	dma_addr = DMA_DONT_USE;
930 
931 	/* unset L bit in first desc for OUT */
932 	if (!ep->in)
933 		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
934 
935 	/* alloc only new desc's if not already available */
936 	len = req->req.length / ep->ep.maxpacket;
937 	if (req->req.length % ep->ep.maxpacket)
938 		len++;
939 
940 	if (len > req->chain_len) {
941 		/* shorter chain already allocated before */
942 		if (req->chain_len > 1)
943 			udc_free_dma_chain(ep->dev, req);
944 		req->chain_len = len;
945 		create_new_chain = 1;
946 	}
947 
948 	td = req->td_data;
949 	/* gen. required number of descriptors and buffers */
950 	for (i = buf_len; i < bytes; i += buf_len) {
951 		/* create or determine next desc. */
952 		if (create_new_chain) {
953 
954 			td = pci_pool_alloc(ep->dev->data_requests,
955 					gfp_flags, &dma_addr);
956 			if (!td)
957 				return -ENOMEM;
958 
959 			td->status = 0;
960 		} else if (i == buf_len) {
961 			/* first td */
962 			td = (struct udc_data_dma *) phys_to_virt(
963 						req->td_data->next);
964 			td->status = 0;
965 		} else {
966 			td = (struct udc_data_dma *) phys_to_virt(last->next);
967 			td->status = 0;
968 		}
969 
970 
971 		if (td)
972 			td->bufptr = req->req.dma + i; /* assign buffer */
973 		else
974 			break;
975 
976 		/* short packet ? */
977 		if ((bytes - i) >= buf_len) {
978 			txbytes = buf_len;
979 		} else {
980 			/* short packet */
981 			txbytes = bytes - i;
982 		}
983 
984 		/* link td and assign tx bytes */
985 		if (i == buf_len) {
986 			if (create_new_chain)
987 				req->td_data->next = dma_addr;
988 			/*
989 			else
990 				req->td_data->next = virt_to_phys(td);
991 			*/
992 			/* write tx bytes */
993 			if (ep->in) {
994 				/* first desc */
995 				req->td_data->status =
996 					AMD_ADDBITS(req->td_data->status,
997 							ep->ep.maxpacket,
998 							UDC_DMA_IN_STS_TXBYTES);
999 				/* second desc */
1000 				td->status = AMD_ADDBITS(td->status,
1001 							txbytes,
1002 							UDC_DMA_IN_STS_TXBYTES);
1003 			}
1004 		} else {
1005 			if (create_new_chain)
1006 				last->next = dma_addr;
1007 			/*
1008 			else
1009 				last->next = virt_to_phys(td);
1010 			*/
1011 			if (ep->in) {
1012 				/* write tx bytes */
1013 				td->status = AMD_ADDBITS(td->status,
1014 							txbytes,
1015 							UDC_DMA_IN_STS_TXBYTES);
1016 			}
1017 		}
1018 		last = td;
1019 	}
1020 	/* set last bit */
1021 	if (td) {
1022 		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
1023 		/* last desc. points to itself */
1024 		req->td_data_last = td;
1025 	}
1026 
1027 	return 0;
1028 }
1029 
1030 /* Enabling RX DMA */
udc_set_rde(struct udc * dev)1031 static void udc_set_rde(struct udc *dev)
1032 {
1033 	u32 tmp;
1034 
1035 	VDBG(dev, "udc_set_rde()\n");
1036 	/* stop RDE timer */
1037 	if (timer_pending(&udc_timer)) {
1038 		set_rde = 0;
1039 		mod_timer(&udc_timer, jiffies - 1);
1040 	}
1041 	/* set RDE */
1042 	tmp = readl(&dev->regs->ctl);
1043 	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1044 	writel(tmp, &dev->regs->ctl);
1045 }
1046 
1047 /* Queues a request packet, called by gadget driver */
1048 static int
udc_queue(struct usb_ep * usbep,struct usb_request * usbreq,gfp_t gfp)1049 udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1050 {
1051 	int			retval = 0;
1052 	u8			open_rxfifo = 0;
1053 	unsigned long		iflags;
1054 	struct udc_ep		*ep;
1055 	struct udc_request	*req;
1056 	struct udc		*dev;
1057 	u32			tmp;
1058 
1059 	/* check the inputs */
1060 	req = container_of(usbreq, struct udc_request, req);
1061 
1062 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
1063 			|| !list_empty(&req->queue))
1064 		return -EINVAL;
1065 
1066 	ep = container_of(usbep, struct udc_ep, ep);
1067 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1068 		return -EINVAL;
1069 
1070 	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1071 	dev = ep->dev;
1072 
1073 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1074 		return -ESHUTDOWN;
1075 
1076 	/* map dma (usually done before) */
1077 	if (ep->dma) {
1078 		VDBG(dev, "DMA map req %p\n", req);
1079 		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
1080 		if (retval)
1081 			return retval;
1082 	}
1083 
1084 	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
1085 			usbep->name, usbreq, usbreq->length,
1086 			req->td_data, usbreq->buf);
1087 
1088 	spin_lock_irqsave(&dev->lock, iflags);
1089 	usbreq->actual = 0;
1090 	usbreq->status = -EINPROGRESS;
1091 	req->dma_done = 0;
1092 
1093 	/* on empty queue just do first transfer */
1094 	if (list_empty(&ep->queue)) {
1095 		/* zlp */
1096 		if (usbreq->length == 0) {
1097 			/* IN zlp's are handled by hardware */
1098 			complete_req(ep, req, 0);
1099 			VDBG(dev, "%s: zlp\n", ep->ep.name);
1100 			/*
1101 			 * if set_config or set_intf is waiting for ack by zlp
1102 			 * then set CSR_DONE
1103 			 */
1104 			if (dev->set_cfg_not_acked) {
1105 				tmp = readl(&dev->regs->ctl);
1106 				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
1107 				writel(tmp, &dev->regs->ctl);
1108 				dev->set_cfg_not_acked = 0;
1109 			}
1110 			/* setup command is ACK'ed now by zlp */
1111 			if (dev->waiting_zlp_ack_ep0in) {
1112 				/* clear NAK by writing CNAK in EP0_IN */
1113 				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1114 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1115 				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1116 				dev->ep[UDC_EP0IN_IX].naking = 0;
1117 				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1118 							UDC_EP0IN_IX);
1119 				dev->waiting_zlp_ack_ep0in = 0;
1120 			}
1121 			goto finished;
1122 		}
1123 		if (ep->dma) {
1124 			retval = prep_dma(ep, req, GFP_ATOMIC);
1125 			if (retval != 0)
1126 				goto finished;
1127 			/* write desc pointer to enable DMA */
1128 			if (ep->in) {
1129 				/* set HOST READY */
1130 				req->td_data->status =
1131 					AMD_ADDBITS(req->td_data->status,
1132 						UDC_DMA_IN_STS_BS_HOST_READY,
1133 						UDC_DMA_IN_STS_BS);
1134 			}
1135 
1136 			/* disabled rx dma while descriptor update */
1137 			if (!ep->in) {
1138 				/* stop RDE timer */
1139 				if (timer_pending(&udc_timer)) {
1140 					set_rde = 0;
1141 					mod_timer(&udc_timer, jiffies - 1);
1142 				}
1143 				/* clear RDE */
1144 				tmp = readl(&dev->regs->ctl);
1145 				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1146 				writel(tmp, &dev->regs->ctl);
1147 				open_rxfifo = 1;
1148 
1149 				/*
1150 				 * if BNA occurred then let BNA dummy desc.
1151 				 * point to current desc.
1152 				 */
1153 				if (ep->bna_occurred) {
1154 					VDBG(dev, "copy to BNA dummy desc.\n");
1155 					memcpy(ep->bna_dummy_req->td_data,
1156 						req->td_data,
1157 						sizeof(struct udc_data_dma));
1158 				}
1159 			}
1160 			/* write desc pointer */
1161 			writel(req->td_phys, &ep->regs->desptr);
1162 
1163 			/* clear NAK by writing CNAK */
1164 			if (ep->naking) {
1165 				tmp = readl(&ep->regs->ctl);
1166 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1167 				writel(tmp, &ep->regs->ctl);
1168 				ep->naking = 0;
1169 				UDC_QUEUE_CNAK(ep, ep->num);
1170 			}
1171 
1172 			if (ep->in) {
1173 				/* enable ep irq */
1174 				tmp = readl(&dev->regs->ep_irqmsk);
1175 				tmp &= AMD_UNMASK_BIT(ep->num);
1176 				writel(tmp, &dev->regs->ep_irqmsk);
1177 			}
1178 		} else if (ep->in) {
1179 				/* enable ep irq */
1180 				tmp = readl(&dev->regs->ep_irqmsk);
1181 				tmp &= AMD_UNMASK_BIT(ep->num);
1182 				writel(tmp, &dev->regs->ep_irqmsk);
1183 			}
1184 
1185 	} else if (ep->dma) {
1186 
1187 		/*
1188 		 * prep_dma not used for OUT ep's, this is not possible
1189 		 * for PPB modes, because of chain creation reasons
1190 		 */
1191 		if (ep->in) {
1192 			retval = prep_dma(ep, req, GFP_ATOMIC);
1193 			if (retval != 0)
1194 				goto finished;
1195 		}
1196 	}
1197 	VDBG(dev, "list_add\n");
1198 	/* add request to ep queue */
1199 	if (req) {
1200 
1201 		list_add_tail(&req->queue, &ep->queue);
1202 
1203 		/* open rxfifo if out data queued */
1204 		if (open_rxfifo) {
1205 			/* enable DMA */
1206 			req->dma_going = 1;
1207 			udc_set_rde(dev);
1208 			if (ep->num != UDC_EP0OUT_IX)
1209 				dev->data_ep_queued = 1;
1210 		}
1211 		/* stop OUT naking */
1212 		if (!ep->in) {
1213 			if (!use_dma && udc_rxfifo_pending) {
1214 				DBG(dev, "udc_queue(): pending bytes in "
1215 					"rxfifo after nyet\n");
1216 				/*
1217 				 * read pending bytes afer nyet:
1218 				 * referring to isr
1219 				 */
1220 				if (udc_rxfifo_read(ep, req)) {
1221 					/* finish */
1222 					complete_req(ep, req, 0);
1223 				}
1224 				udc_rxfifo_pending = 0;
1225 
1226 			}
1227 		}
1228 	}
1229 
1230 finished:
1231 	spin_unlock_irqrestore(&dev->lock, iflags);
1232 	return retval;
1233 }
1234 
1235 /* Empty request queue of an endpoint; caller holds spinlock */
empty_req_queue(struct udc_ep * ep)1236 static void empty_req_queue(struct udc_ep *ep)
1237 {
1238 	struct udc_request	*req;
1239 
1240 	ep->halted = 1;
1241 	while (!list_empty(&ep->queue)) {
1242 		req = list_entry(ep->queue.next,
1243 			struct udc_request,
1244 			queue);
1245 		complete_req(ep, req, -ESHUTDOWN);
1246 	}
1247 }
1248 
1249 /* Dequeues a request packet, called by gadget driver */
udc_dequeue(struct usb_ep * usbep,struct usb_request * usbreq)1250 static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
1251 {
1252 	struct udc_ep		*ep;
1253 	struct udc_request	*req;
1254 	unsigned		halted;
1255 	unsigned long		iflags;
1256 
1257 	ep = container_of(usbep, struct udc_ep, ep);
1258 	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
1259 				&& ep->num != UDC_EP0OUT_IX)))
1260 		return -EINVAL;
1261 
1262 	req = container_of(usbreq, struct udc_request, req);
1263 
1264 	spin_lock_irqsave(&ep->dev->lock, iflags);
1265 	halted = ep->halted;
1266 	ep->halted = 1;
1267 	/* request in processing or next one */
1268 	if (ep->queue.next == &req->queue) {
1269 		if (ep->dma && req->dma_going) {
1270 			if (ep->in)
1271 				ep->cancel_transfer = 1;
1272 			else {
1273 				u32 tmp;
1274 				u32 dma_sts;
1275 				/* stop potential receive DMA */
1276 				tmp = readl(&udc->regs->ctl);
1277 				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
1278 							&udc->regs->ctl);
1279 				/*
1280 				 * Cancel transfer later in ISR
1281 				 * if descriptor was touched.
1282 				 */
1283 				dma_sts = AMD_GETBITS(req->td_data->status,
1284 							UDC_DMA_OUT_STS_BS);
1285 				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
1286 					ep->cancel_transfer = 1;
1287 				else {
1288 					udc_init_bna_dummy(ep->req);
1289 					writel(ep->bna_dummy_req->td_phys,
1290 						&ep->regs->desptr);
1291 				}
1292 				writel(tmp, &udc->regs->ctl);
1293 			}
1294 		}
1295 	}
1296 	complete_req(ep, req, -ECONNRESET);
1297 	ep->halted = halted;
1298 
1299 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1300 	return 0;
1301 }
1302 
1303 /* Halt or clear halt of endpoint */
1304 static int
udc_set_halt(struct usb_ep * usbep,int halt)1305 udc_set_halt(struct usb_ep *usbep, int halt)
1306 {
1307 	struct udc_ep	*ep;
1308 	u32 tmp;
1309 	unsigned long iflags;
1310 	int retval = 0;
1311 
1312 	if (!usbep)
1313 		return -EINVAL;
1314 
1315 	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
1316 
1317 	ep = container_of(usbep, struct udc_ep, ep);
1318 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1319 		return -EINVAL;
1320 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1321 		return -ESHUTDOWN;
1322 
1323 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1324 	/* halt or clear halt */
1325 	if (halt) {
1326 		if (ep->num == 0)
1327 			ep->dev->stall_ep0in = 1;
1328 		else {
1329 			/*
1330 			 * set STALL
1331 			 * rxfifo empty not taken into acount
1332 			 */
1333 			tmp = readl(&ep->regs->ctl);
1334 			tmp |= AMD_BIT(UDC_EPCTL_S);
1335 			writel(tmp, &ep->regs->ctl);
1336 			ep->halted = 1;
1337 
1338 			/* setup poll timer */
1339 			if (!timer_pending(&udc_pollstall_timer)) {
1340 				udc_pollstall_timer.expires = jiffies +
1341 					HZ * UDC_POLLSTALL_TIMER_USECONDS
1342 					/ (1000 * 1000);
1343 				if (!stop_pollstall_timer) {
1344 					DBG(ep->dev, "start polltimer\n");
1345 					add_timer(&udc_pollstall_timer);
1346 				}
1347 			}
1348 		}
1349 	} else {
1350 		/* ep is halted by set_halt() before */
1351 		if (ep->halted) {
1352 			tmp = readl(&ep->regs->ctl);
1353 			/* clear stall bit */
1354 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
1355 			/* clear NAK by writing CNAK */
1356 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1357 			writel(tmp, &ep->regs->ctl);
1358 			ep->halted = 0;
1359 			UDC_QUEUE_CNAK(ep, ep->num);
1360 		}
1361 	}
1362 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1363 	return retval;
1364 }
1365 
1366 /* gadget interface */
1367 static const struct usb_ep_ops udc_ep_ops = {
1368 	.enable		= udc_ep_enable,
1369 	.disable	= udc_ep_disable,
1370 
1371 	.alloc_request	= udc_alloc_request,
1372 	.free_request	= udc_free_request,
1373 
1374 	.queue		= udc_queue,
1375 	.dequeue	= udc_dequeue,
1376 
1377 	.set_halt	= udc_set_halt,
1378 	/* fifo ops not implemented */
1379 };
1380 
1381 /*-------------------------------------------------------------------------*/
1382 
1383 /* Get frame counter (not implemented) */
udc_get_frame(struct usb_gadget * gadget)1384 static int udc_get_frame(struct usb_gadget *gadget)
1385 {
1386 	return -EOPNOTSUPP;
1387 }
1388 
1389 /* Remote wakeup gadget interface */
udc_wakeup(struct usb_gadget * gadget)1390 static int udc_wakeup(struct usb_gadget *gadget)
1391 {
1392 	struct udc		*dev;
1393 
1394 	if (!gadget)
1395 		return -EINVAL;
1396 	dev = container_of(gadget, struct udc, gadget);
1397 	udc_remote_wakeup(dev);
1398 
1399 	return 0;
1400 }
1401 
1402 static int amd5536_udc_start(struct usb_gadget *g,
1403 		struct usb_gadget_driver *driver);
1404 static int amd5536_udc_stop(struct usb_gadget *g);
1405 
1406 static const struct usb_gadget_ops udc_ops = {
1407 	.wakeup		= udc_wakeup,
1408 	.get_frame	= udc_get_frame,
1409 	.udc_start	= amd5536_udc_start,
1410 	.udc_stop	= amd5536_udc_stop,
1411 };
1412 
1413 /* Setups endpoint parameters, adds endpoints to linked list */
make_ep_lists(struct udc * dev)1414 static void make_ep_lists(struct udc *dev)
1415 {
1416 	/* make gadget ep lists */
1417 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1418 	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1419 						&dev->gadget.ep_list);
1420 	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1421 						&dev->gadget.ep_list);
1422 	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1423 						&dev->gadget.ep_list);
1424 
1425 	/* fifo config */
1426 	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1427 	if (dev->gadget.speed == USB_SPEED_FULL)
1428 		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1429 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1430 		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1431 	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1432 }
1433 
1434 /* init registers at driver load time */
startup_registers(struct udc * dev)1435 static int startup_registers(struct udc *dev)
1436 {
1437 	u32 tmp;
1438 
1439 	/* init controller by soft reset */
1440 	udc_soft_reset(dev);
1441 
1442 	/* mask not needed interrupts */
1443 	udc_mask_unused_interrupts(dev);
1444 
1445 	/* put into initial config */
1446 	udc_basic_init(dev);
1447 	/* link up all endpoints */
1448 	udc_setup_endpoints(dev);
1449 
1450 	/* program speed */
1451 	tmp = readl(&dev->regs->cfg);
1452 	if (use_fullspeed)
1453 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1454 	else
1455 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1456 	writel(tmp, &dev->regs->cfg);
1457 
1458 	return 0;
1459 }
1460 
1461 /* Inits UDC context */
udc_basic_init(struct udc * dev)1462 static void udc_basic_init(struct udc *dev)
1463 {
1464 	u32	tmp;
1465 
1466 	DBG(dev, "udc_basic_init()\n");
1467 
1468 	dev->gadget.speed = USB_SPEED_UNKNOWN;
1469 
1470 	/* stop RDE timer */
1471 	if (timer_pending(&udc_timer)) {
1472 		set_rde = 0;
1473 		mod_timer(&udc_timer, jiffies - 1);
1474 	}
1475 	/* stop poll stall timer */
1476 	if (timer_pending(&udc_pollstall_timer))
1477 		mod_timer(&udc_pollstall_timer, jiffies - 1);
1478 	/* disable DMA */
1479 	tmp = readl(&dev->regs->ctl);
1480 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1481 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
1482 	writel(tmp, &dev->regs->ctl);
1483 
1484 	/* enable dynamic CSR programming */
1485 	tmp = readl(&dev->regs->cfg);
1486 	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
1487 	/* set self powered */
1488 	tmp |= AMD_BIT(UDC_DEVCFG_SP);
1489 	/* set remote wakeupable */
1490 	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
1491 	writel(tmp, &dev->regs->cfg);
1492 
1493 	make_ep_lists(dev);
1494 
1495 	dev->data_ep_enabled = 0;
1496 	dev->data_ep_queued = 0;
1497 }
1498 
1499 /* Sets initial endpoint parameters */
udc_setup_endpoints(struct udc * dev)1500 static void udc_setup_endpoints(struct udc *dev)
1501 {
1502 	struct udc_ep	*ep;
1503 	u32	tmp;
1504 	u32	reg;
1505 
1506 	DBG(dev, "udc_setup_endpoints()\n");
1507 
1508 	/* read enum speed */
1509 	tmp = readl(&dev->regs->sts);
1510 	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
1511 	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
1512 		dev->gadget.speed = USB_SPEED_HIGH;
1513 	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
1514 		dev->gadget.speed = USB_SPEED_FULL;
1515 
1516 	/* set basic ep parameters */
1517 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1518 		ep = &dev->ep[tmp];
1519 		ep->dev = dev;
1520 		ep->ep.name = ep_string[tmp];
1521 		ep->num = tmp;
1522 		/* txfifo size is calculated at enable time */
1523 		ep->txfifo = dev->txfifo;
1524 
1525 		/* fifo size */
1526 		if (tmp < UDC_EPIN_NUM) {
1527 			ep->fifo_depth = UDC_TXFIFO_SIZE;
1528 			ep->in = 1;
1529 		} else {
1530 			ep->fifo_depth = UDC_RXFIFO_SIZE;
1531 			ep->in = 0;
1532 
1533 		}
1534 		ep->regs = &dev->ep_regs[tmp];
1535 		/*
1536 		 * ep will be reset only if ep was not enabled before to avoid
1537 		 * disabling ep interrupts when ENUM interrupt occurs but ep is
1538 		 * not enabled by gadget driver
1539 		 */
1540 		if (!ep->ep.desc)
1541 			ep_init(dev->regs, ep);
1542 
1543 		if (use_dma) {
1544 			/*
1545 			 * ep->dma is not really used, just to indicate that
1546 			 * DMA is active: remove this
1547 			 * dma regs = dev control regs
1548 			 */
1549 			ep->dma = &dev->regs->ctl;
1550 
1551 			/* nak OUT endpoints until enable - not for ep0 */
1552 			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
1553 						&& tmp > UDC_EPIN_NUM) {
1554 				/* set NAK */
1555 				reg = readl(&dev->ep[tmp].regs->ctl);
1556 				reg |= AMD_BIT(UDC_EPCTL_SNAK);
1557 				writel(reg, &dev->ep[tmp].regs->ctl);
1558 				dev->ep[tmp].naking = 1;
1559 
1560 			}
1561 		}
1562 	}
1563 	/* EP0 max packet */
1564 	if (dev->gadget.speed == USB_SPEED_FULL) {
1565 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
1566 					   UDC_FS_EP0IN_MAX_PKT_SIZE);
1567 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
1568 					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
1569 	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
1570 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
1571 					   UDC_EP0IN_MAX_PKT_SIZE);
1572 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
1573 					   UDC_EP0OUT_MAX_PKT_SIZE);
1574 	}
1575 
1576 	/*
1577 	 * with suspend bug workaround, ep0 params for gadget driver
1578 	 * are set at gadget driver bind() call
1579 	 */
1580 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1581 	dev->ep[UDC_EP0IN_IX].halted = 0;
1582 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1583 
1584 	/* init cfg/alt/int */
1585 	dev->cur_config = 0;
1586 	dev->cur_intf = 0;
1587 	dev->cur_alt = 0;
1588 }
1589 
1590 /* Bringup after Connect event, initial bringup to be ready for ep0 events */
usb_connect(struct udc * dev)1591 static void usb_connect(struct udc *dev)
1592 {
1593 
1594 	dev_info(&dev->pdev->dev, "USB Connect\n");
1595 
1596 	dev->connected = 1;
1597 
1598 	/* put into initial config */
1599 	udc_basic_init(dev);
1600 
1601 	/* enable device setup interrupts */
1602 	udc_enable_dev_setup_interrupts(dev);
1603 }
1604 
1605 /*
1606  * Calls gadget with disconnect event and resets the UDC and makes
1607  * initial bringup to be ready for ep0 events
1608  */
usb_disconnect(struct udc * dev)1609 static void usb_disconnect(struct udc *dev)
1610 {
1611 
1612 	dev_info(&dev->pdev->dev, "USB Disconnect\n");
1613 
1614 	dev->connected = 0;
1615 
1616 	/* mask interrupts */
1617 	udc_mask_unused_interrupts(dev);
1618 
1619 	/* REVISIT there doesn't seem to be a point to having this
1620 	 * talk to a tasklet ... do it directly, we already hold
1621 	 * the spinlock needed to process the disconnect.
1622 	 */
1623 
1624 	tasklet_schedule(&disconnect_tasklet);
1625 }
1626 
1627 /* Tasklet for disconnect to be outside of interrupt context */
udc_tasklet_disconnect(unsigned long par)1628 static void udc_tasklet_disconnect(unsigned long par)
1629 {
1630 	struct udc *dev = (struct udc *)(*((struct udc **) par));
1631 	u32 tmp;
1632 
1633 	DBG(dev, "Tasklet disconnect\n");
1634 	spin_lock_irq(&dev->lock);
1635 
1636 	if (dev->driver) {
1637 		spin_unlock(&dev->lock);
1638 		dev->driver->disconnect(&dev->gadget);
1639 		spin_lock(&dev->lock);
1640 
1641 		/* empty queues */
1642 		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
1643 			empty_req_queue(&dev->ep[tmp]);
1644 
1645 	}
1646 
1647 	/* disable ep0 */
1648 	ep_init(dev->regs,
1649 			&dev->ep[UDC_EP0IN_IX]);
1650 
1651 
1652 	if (!soft_reset_occured) {
1653 		/* init controller by soft reset */
1654 		udc_soft_reset(dev);
1655 		soft_reset_occured++;
1656 	}
1657 
1658 	/* re-enable dev interrupts */
1659 	udc_enable_dev_setup_interrupts(dev);
1660 	/* back to full speed ? */
1661 	if (use_fullspeed) {
1662 		tmp = readl(&dev->regs->cfg);
1663 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1664 		writel(tmp, &dev->regs->cfg);
1665 	}
1666 
1667 	spin_unlock_irq(&dev->lock);
1668 }
1669 
1670 /* Reset the UDC core */
udc_soft_reset(struct udc * dev)1671 static void udc_soft_reset(struct udc *dev)
1672 {
1673 	unsigned long	flags;
1674 
1675 	DBG(dev, "Soft reset\n");
1676 	/*
1677 	 * reset possible waiting interrupts, because int.
1678 	 * status is lost after soft reset,
1679 	 * ep int. status reset
1680 	 */
1681 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1682 	/* device int. status reset */
1683 	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1684 
1685 	spin_lock_irqsave(&udc_irq_spinlock, flags);
1686 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
1687 	readl(&dev->regs->cfg);
1688 	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1689 
1690 }
1691 
1692 /* RDE timer callback to set RDE bit */
udc_timer_function(unsigned long v)1693 static void udc_timer_function(unsigned long v)
1694 {
1695 	u32 tmp;
1696 
1697 	spin_lock_irq(&udc_irq_spinlock);
1698 
1699 	if (set_rde > 0) {
1700 		/*
1701 		 * open the fifo if fifo was filled on last timer call
1702 		 * conditionally
1703 		 */
1704 		if (set_rde > 1) {
1705 			/* set RDE to receive setup data */
1706 			tmp = readl(&udc->regs->ctl);
1707 			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1708 			writel(tmp, &udc->regs->ctl);
1709 			set_rde = -1;
1710 		} else if (readl(&udc->regs->sts)
1711 				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1712 			/*
1713 			 * if fifo empty setup polling, do not just
1714 			 * open the fifo
1715 			 */
1716 			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1717 			if (!stop_timer)
1718 				add_timer(&udc_timer);
1719 		} else {
1720 			/*
1721 			 * fifo contains data now, setup timer for opening
1722 			 * the fifo when timer expires to be able to receive
1723 			 * setup packets, when data packets gets queued by
1724 			 * gadget layer then timer will forced to expire with
1725 			 * set_rde=0 (RDE is set in udc_queue())
1726 			 */
1727 			set_rde++;
1728 			/* debug: lhadmot_timer_start = 221070 */
1729 			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1730 			if (!stop_timer)
1731 				add_timer(&udc_timer);
1732 		}
1733 
1734 	} else
1735 		set_rde = -1; /* RDE was set by udc_queue() */
1736 	spin_unlock_irq(&udc_irq_spinlock);
1737 	if (stop_timer)
1738 		complete(&on_exit);
1739 
1740 }
1741 
1742 /* Handle halt state, used in stall poll timer */
udc_handle_halt_state(struct udc_ep * ep)1743 static void udc_handle_halt_state(struct udc_ep *ep)
1744 {
1745 	u32 tmp;
1746 	/* set stall as long not halted */
1747 	if (ep->halted == 1) {
1748 		tmp = readl(&ep->regs->ctl);
1749 		/* STALL cleared ? */
1750 		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1751 			/*
1752 			 * FIXME: MSC spec requires that stall remains
1753 			 * even on receivng of CLEAR_FEATURE HALT. So
1754 			 * we would set STALL again here to be compliant.
1755 			 * But with current mass storage drivers this does
1756 			 * not work (would produce endless host retries).
1757 			 * So we clear halt on CLEAR_FEATURE.
1758 			 *
1759 			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1760 			tmp |= AMD_BIT(UDC_EPCTL_S);
1761 			writel(tmp, &ep->regs->ctl);*/
1762 
1763 			/* clear NAK by writing CNAK */
1764 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1765 			writel(tmp, &ep->regs->ctl);
1766 			ep->halted = 0;
1767 			UDC_QUEUE_CNAK(ep, ep->num);
1768 		}
1769 	}
1770 }
1771 
1772 /* Stall timer callback to poll S bit and set it again after */
udc_pollstall_timer_function(unsigned long v)1773 static void udc_pollstall_timer_function(unsigned long v)
1774 {
1775 	struct udc_ep *ep;
1776 	int halted = 0;
1777 
1778 	spin_lock_irq(&udc_stall_spinlock);
1779 	/*
1780 	 * only one IN and OUT endpoints are handled
1781 	 * IN poll stall
1782 	 */
1783 	ep = &udc->ep[UDC_EPIN_IX];
1784 	udc_handle_halt_state(ep);
1785 	if (ep->halted)
1786 		halted = 1;
1787 	/* OUT poll stall */
1788 	ep = &udc->ep[UDC_EPOUT_IX];
1789 	udc_handle_halt_state(ep);
1790 	if (ep->halted)
1791 		halted = 1;
1792 
1793 	/* setup timer again when still halted */
1794 	if (!stop_pollstall_timer && halted) {
1795 		udc_pollstall_timer.expires = jiffies +
1796 					HZ * UDC_POLLSTALL_TIMER_USECONDS
1797 					/ (1000 * 1000);
1798 		add_timer(&udc_pollstall_timer);
1799 	}
1800 	spin_unlock_irq(&udc_stall_spinlock);
1801 
1802 	if (stop_pollstall_timer)
1803 		complete(&on_pollstall_exit);
1804 }
1805 
1806 /* Inits endpoint 0 so that SETUP packets are processed */
activate_control_endpoints(struct udc * dev)1807 static void activate_control_endpoints(struct udc *dev)
1808 {
1809 	u32 tmp;
1810 
1811 	DBG(dev, "activate_control_endpoints\n");
1812 
1813 	/* flush fifo */
1814 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1815 	tmp |= AMD_BIT(UDC_EPCTL_F);
1816 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1817 
1818 	/* set ep0 directions */
1819 	dev->ep[UDC_EP0IN_IX].in = 1;
1820 	dev->ep[UDC_EP0OUT_IX].in = 0;
1821 
1822 	/* set buffer size (tx fifo entries) of EP0_IN */
1823 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1824 	if (dev->gadget.speed == USB_SPEED_FULL)
1825 		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1826 					UDC_EPIN_BUFF_SIZE);
1827 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1828 		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1829 					UDC_EPIN_BUFF_SIZE);
1830 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1831 
1832 	/* set max packet size of EP0_IN */
1833 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1834 	if (dev->gadget.speed == USB_SPEED_FULL)
1835 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1836 					UDC_EP_MAX_PKT_SIZE);
1837 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1838 		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1839 				UDC_EP_MAX_PKT_SIZE);
1840 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1841 
1842 	/* set max packet size of EP0_OUT */
1843 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1844 	if (dev->gadget.speed == USB_SPEED_FULL)
1845 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1846 					UDC_EP_MAX_PKT_SIZE);
1847 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1848 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1849 					UDC_EP_MAX_PKT_SIZE);
1850 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1851 
1852 	/* set max packet size of EP0 in UDC CSR */
1853 	tmp = readl(&dev->csr->ne[0]);
1854 	if (dev->gadget.speed == USB_SPEED_FULL)
1855 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1856 					UDC_CSR_NE_MAX_PKT);
1857 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1858 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1859 					UDC_CSR_NE_MAX_PKT);
1860 	writel(tmp, &dev->csr->ne[0]);
1861 
1862 	if (use_dma) {
1863 		dev->ep[UDC_EP0OUT_IX].td->status |=
1864 			AMD_BIT(UDC_DMA_OUT_STS_L);
1865 		/* write dma desc address */
1866 		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1867 			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
1868 		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1869 			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
1870 		/* stop RDE timer */
1871 		if (timer_pending(&udc_timer)) {
1872 			set_rde = 0;
1873 			mod_timer(&udc_timer, jiffies - 1);
1874 		}
1875 		/* stop pollstall timer */
1876 		if (timer_pending(&udc_pollstall_timer))
1877 			mod_timer(&udc_pollstall_timer, jiffies - 1);
1878 		/* enable DMA */
1879 		tmp = readl(&dev->regs->ctl);
1880 		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1881 				| AMD_BIT(UDC_DEVCTL_RDE)
1882 				| AMD_BIT(UDC_DEVCTL_TDE);
1883 		if (use_dma_bufferfill_mode)
1884 			tmp |= AMD_BIT(UDC_DEVCTL_BF);
1885 		else if (use_dma_ppb_du)
1886 			tmp |= AMD_BIT(UDC_DEVCTL_DU);
1887 		writel(tmp, &dev->regs->ctl);
1888 	}
1889 
1890 	/* clear NAK by writing CNAK for EP0IN */
1891 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1892 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1893 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1894 	dev->ep[UDC_EP0IN_IX].naking = 0;
1895 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1896 
1897 	/* clear NAK by writing CNAK for EP0OUT */
1898 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1899 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1900 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1901 	dev->ep[UDC_EP0OUT_IX].naking = 0;
1902 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1903 }
1904 
1905 /* Make endpoint 0 ready for control traffic */
setup_ep0(struct udc * dev)1906 static int setup_ep0(struct udc *dev)
1907 {
1908 	activate_control_endpoints(dev);
1909 	/* enable ep0 interrupts */
1910 	udc_enable_ep0_interrupts(dev);
1911 	/* enable device setup interrupts */
1912 	udc_enable_dev_setup_interrupts(dev);
1913 
1914 	return 0;
1915 }
1916 
1917 /* Called by gadget driver to register itself */
amd5536_udc_start(struct usb_gadget * g,struct usb_gadget_driver * driver)1918 static int amd5536_udc_start(struct usb_gadget *g,
1919 		struct usb_gadget_driver *driver)
1920 {
1921 	struct udc *dev = to_amd5536_udc(g);
1922 	u32 tmp;
1923 
1924 	driver->driver.bus = NULL;
1925 	dev->driver = driver;
1926 
1927 	/* Some gadget drivers use both ep0 directions.
1928 	 * NOTE: to gadget driver, ep0 is just one endpoint...
1929 	 */
1930 	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1931 		dev->ep[UDC_EP0IN_IX].ep.driver_data;
1932 
1933 	/* get ready for ep0 traffic */
1934 	setup_ep0(dev);
1935 
1936 	/* clear SD */
1937 	tmp = readl(&dev->regs->ctl);
1938 	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1939 	writel(tmp, &dev->regs->ctl);
1940 
1941 	usb_connect(dev);
1942 
1943 	return 0;
1944 }
1945 
1946 /* shutdown requests and disconnect from gadget */
1947 static void
shutdown(struct udc * dev,struct usb_gadget_driver * driver)1948 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
1949 __releases(dev->lock)
1950 __acquires(dev->lock)
1951 {
1952 	int tmp;
1953 
1954 	/* empty queues and init hardware */
1955 	udc_basic_init(dev);
1956 
1957 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
1958 		empty_req_queue(&dev->ep[tmp]);
1959 
1960 	udc_setup_endpoints(dev);
1961 }
1962 
1963 /* Called by gadget driver to unregister itself */
amd5536_udc_stop(struct usb_gadget * g)1964 static int amd5536_udc_stop(struct usb_gadget *g)
1965 {
1966 	struct udc *dev = to_amd5536_udc(g);
1967 	unsigned long flags;
1968 	u32 tmp;
1969 
1970 	spin_lock_irqsave(&dev->lock, flags);
1971 	udc_mask_unused_interrupts(dev);
1972 	shutdown(dev, NULL);
1973 	spin_unlock_irqrestore(&dev->lock, flags);
1974 
1975 	dev->driver = NULL;
1976 
1977 	/* set SD */
1978 	tmp = readl(&dev->regs->ctl);
1979 	tmp |= AMD_BIT(UDC_DEVCTL_SD);
1980 	writel(tmp, &dev->regs->ctl);
1981 
1982 	return 0;
1983 }
1984 
1985 /* Clear pending NAK bits */
udc_process_cnak_queue(struct udc * dev)1986 static void udc_process_cnak_queue(struct udc *dev)
1987 {
1988 	u32 tmp;
1989 	u32 reg;
1990 
1991 	/* check epin's */
1992 	DBG(dev, "CNAK pending queue processing\n");
1993 	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
1994 		if (cnak_pending & (1 << tmp)) {
1995 			DBG(dev, "CNAK pending for ep%d\n", tmp);
1996 			/* clear NAK by writing CNAK */
1997 			reg = readl(&dev->ep[tmp].regs->ctl);
1998 			reg |= AMD_BIT(UDC_EPCTL_CNAK);
1999 			writel(reg, &dev->ep[tmp].regs->ctl);
2000 			dev->ep[tmp].naking = 0;
2001 			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2002 		}
2003 	}
2004 	/* ...	and ep0out */
2005 	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2006 		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2007 		/* clear NAK by writing CNAK */
2008 		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2009 		reg |= AMD_BIT(UDC_EPCTL_CNAK);
2010 		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2011 		dev->ep[UDC_EP0OUT_IX].naking = 0;
2012 		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2013 				dev->ep[UDC_EP0OUT_IX].num);
2014 	}
2015 }
2016 
2017 /* Enabling RX DMA after setup packet */
udc_ep0_set_rde(struct udc * dev)2018 static void udc_ep0_set_rde(struct udc *dev)
2019 {
2020 	if (use_dma) {
2021 		/*
2022 		 * only enable RXDMA when no data endpoint enabled
2023 		 * or data is queued
2024 		 */
2025 		if (!dev->data_ep_enabled || dev->data_ep_queued) {
2026 			udc_set_rde(dev);
2027 		} else {
2028 			/*
2029 			 * setup timer for enabling RDE (to not enable
2030 			 * RXFIFO DMA for data endpoints to early)
2031 			 */
2032 			if (set_rde != 0 && !timer_pending(&udc_timer)) {
2033 				udc_timer.expires =
2034 					jiffies + HZ/UDC_RDE_TIMER_DIV;
2035 				set_rde = 1;
2036 				if (!stop_timer)
2037 					add_timer(&udc_timer);
2038 			}
2039 		}
2040 	}
2041 }
2042 
2043 
2044 /* Interrupt handler for data OUT traffic */
udc_data_out_isr(struct udc * dev,int ep_ix)2045 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2046 {
2047 	irqreturn_t		ret_val = IRQ_NONE;
2048 	u32			tmp;
2049 	struct udc_ep		*ep;
2050 	struct udc_request	*req;
2051 	unsigned int		count;
2052 	struct udc_data_dma	*td = NULL;
2053 	unsigned		dma_done;
2054 
2055 	VDBG(dev, "ep%d irq\n", ep_ix);
2056 	ep = &dev->ep[ep_ix];
2057 
2058 	tmp = readl(&ep->regs->sts);
2059 	if (use_dma) {
2060 		/* BNA event ? */
2061 		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2062 			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2063 					ep->num, readl(&ep->regs->desptr));
2064 			/* clear BNA */
2065 			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2066 			if (!ep->cancel_transfer)
2067 				ep->bna_occurred = 1;
2068 			else
2069 				ep->cancel_transfer = 0;
2070 			ret_val = IRQ_HANDLED;
2071 			goto finished;
2072 		}
2073 	}
2074 	/* HE event ? */
2075 	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2076 		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2077 
2078 		/* clear HE */
2079 		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2080 		ret_val = IRQ_HANDLED;
2081 		goto finished;
2082 	}
2083 
2084 	if (!list_empty(&ep->queue)) {
2085 
2086 		/* next request */
2087 		req = list_entry(ep->queue.next,
2088 			struct udc_request, queue);
2089 	} else {
2090 		req = NULL;
2091 		udc_rxfifo_pending = 1;
2092 	}
2093 	VDBG(dev, "req = %p\n", req);
2094 	/* fifo mode */
2095 	if (!use_dma) {
2096 
2097 		/* read fifo */
2098 		if (req && udc_rxfifo_read(ep, req)) {
2099 			ret_val = IRQ_HANDLED;
2100 
2101 			/* finish */
2102 			complete_req(ep, req, 0);
2103 			/* next request */
2104 			if (!list_empty(&ep->queue) && !ep->halted) {
2105 				req = list_entry(ep->queue.next,
2106 					struct udc_request, queue);
2107 			} else
2108 				req = NULL;
2109 		}
2110 
2111 	/* DMA */
2112 	} else if (!ep->cancel_transfer && req != NULL) {
2113 		ret_val = IRQ_HANDLED;
2114 
2115 		/* check for DMA done */
2116 		if (!use_dma_ppb) {
2117 			dma_done = AMD_GETBITS(req->td_data->status,
2118 						UDC_DMA_OUT_STS_BS);
2119 		/* packet per buffer mode - rx bytes */
2120 		} else {
2121 			/*
2122 			 * if BNA occurred then recover desc. from
2123 			 * BNA dummy desc.
2124 			 */
2125 			if (ep->bna_occurred) {
2126 				VDBG(dev, "Recover desc. from BNA dummy\n");
2127 				memcpy(req->td_data, ep->bna_dummy_req->td_data,
2128 						sizeof(struct udc_data_dma));
2129 				ep->bna_occurred = 0;
2130 				udc_init_bna_dummy(ep->req);
2131 			}
2132 			td = udc_get_last_dma_desc(req);
2133 			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2134 		}
2135 		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2136 			/* buffer fill mode - rx bytes */
2137 			if (!use_dma_ppb) {
2138 				/* received number bytes */
2139 				count = AMD_GETBITS(req->td_data->status,
2140 						UDC_DMA_OUT_STS_RXBYTES);
2141 				VDBG(dev, "rx bytes=%u\n", count);
2142 			/* packet per buffer mode - rx bytes */
2143 			} else {
2144 				VDBG(dev, "req->td_data=%p\n", req->td_data);
2145 				VDBG(dev, "last desc = %p\n", td);
2146 				/* received number bytes */
2147 				if (use_dma_ppb_du) {
2148 					/* every desc. counts bytes */
2149 					count = udc_get_ppbdu_rxbytes(req);
2150 				} else {
2151 					/* last desc. counts bytes */
2152 					count = AMD_GETBITS(td->status,
2153 						UDC_DMA_OUT_STS_RXBYTES);
2154 					if (!count && req->req.length
2155 						== UDC_DMA_MAXPACKET) {
2156 						/*
2157 						 * on 64k packets the RXBYTES
2158 						 * field is zero
2159 						 */
2160 						count = UDC_DMA_MAXPACKET;
2161 					}
2162 				}
2163 				VDBG(dev, "last desc rx bytes=%u\n", count);
2164 			}
2165 
2166 			tmp = req->req.length - req->req.actual;
2167 			if (count > tmp) {
2168 				if ((tmp % ep->ep.maxpacket) != 0) {
2169 					DBG(dev, "%s: rx %db, space=%db\n",
2170 						ep->ep.name, count, tmp);
2171 					req->req.status = -EOVERFLOW;
2172 				}
2173 				count = tmp;
2174 			}
2175 			req->req.actual += count;
2176 			req->dma_going = 0;
2177 			/* complete request */
2178 			complete_req(ep, req, 0);
2179 
2180 			/* next request */
2181 			if (!list_empty(&ep->queue) && !ep->halted) {
2182 				req = list_entry(ep->queue.next,
2183 					struct udc_request,
2184 					queue);
2185 				/*
2186 				 * DMA may be already started by udc_queue()
2187 				 * called by gadget drivers completion
2188 				 * routine. This happens when queue
2189 				 * holds one request only.
2190 				 */
2191 				if (req->dma_going == 0) {
2192 					/* next dma */
2193 					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2194 						goto finished;
2195 					/* write desc pointer */
2196 					writel(req->td_phys,
2197 						&ep->regs->desptr);
2198 					req->dma_going = 1;
2199 					/* enable DMA */
2200 					udc_set_rde(dev);
2201 				}
2202 			} else {
2203 				/*
2204 				 * implant BNA dummy descriptor to allow
2205 				 * RXFIFO opening by RDE
2206 				 */
2207 				if (ep->bna_dummy_req) {
2208 					/* write desc pointer */
2209 					writel(ep->bna_dummy_req->td_phys,
2210 						&ep->regs->desptr);
2211 					ep->bna_occurred = 0;
2212 				}
2213 
2214 				/*
2215 				 * schedule timer for setting RDE if queue
2216 				 * remains empty to allow ep0 packets pass
2217 				 * through
2218 				 */
2219 				if (set_rde != 0
2220 						&& !timer_pending(&udc_timer)) {
2221 					udc_timer.expires =
2222 						jiffies
2223 						+ HZ*UDC_RDE_TIMER_SECONDS;
2224 					set_rde = 1;
2225 					if (!stop_timer)
2226 						add_timer(&udc_timer);
2227 				}
2228 				if (ep->num != UDC_EP0OUT_IX)
2229 					dev->data_ep_queued = 0;
2230 			}
2231 
2232 		} else {
2233 			/*
2234 			* RX DMA must be reenabled for each desc in PPBDU mode
2235 			* and must be enabled for PPBNDU mode in case of BNA
2236 			*/
2237 			udc_set_rde(dev);
2238 		}
2239 
2240 	} else if (ep->cancel_transfer) {
2241 		ret_val = IRQ_HANDLED;
2242 		ep->cancel_transfer = 0;
2243 	}
2244 
2245 	/* check pending CNAKS */
2246 	if (cnak_pending) {
2247 		/* CNAk processing when rxfifo empty only */
2248 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2249 			udc_process_cnak_queue(dev);
2250 	}
2251 
2252 	/* clear OUT bits in ep status */
2253 	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2254 finished:
2255 	return ret_val;
2256 }
2257 
2258 /* Interrupt handler for data IN traffic */
udc_data_in_isr(struct udc * dev,int ep_ix)2259 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2260 {
2261 	irqreturn_t ret_val = IRQ_NONE;
2262 	u32 tmp;
2263 	u32 epsts;
2264 	struct udc_ep *ep;
2265 	struct udc_request *req;
2266 	struct udc_data_dma *td;
2267 	unsigned dma_done;
2268 	unsigned len;
2269 
2270 	ep = &dev->ep[ep_ix];
2271 
2272 	epsts = readl(&ep->regs->sts);
2273 	if (use_dma) {
2274 		/* BNA ? */
2275 		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2276 			dev_err(&dev->pdev->dev,
2277 				"BNA ep%din occurred - DESPTR = %08lx\n",
2278 				ep->num,
2279 				(unsigned long) readl(&ep->regs->desptr));
2280 
2281 			/* clear BNA */
2282 			writel(epsts, &ep->regs->sts);
2283 			ret_val = IRQ_HANDLED;
2284 			goto finished;
2285 		}
2286 	}
2287 	/* HE event ? */
2288 	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2289 		dev_err(&dev->pdev->dev,
2290 			"HE ep%dn occurred - DESPTR = %08lx\n",
2291 			ep->num, (unsigned long) readl(&ep->regs->desptr));
2292 
2293 		/* clear HE */
2294 		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2295 		ret_val = IRQ_HANDLED;
2296 		goto finished;
2297 	}
2298 
2299 	/* DMA completion */
2300 	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2301 		VDBG(dev, "TDC set- completion\n");
2302 		ret_val = IRQ_HANDLED;
2303 		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2304 			req = list_entry(ep->queue.next,
2305 					struct udc_request, queue);
2306 			/*
2307 			 * length bytes transferred
2308 			 * check dma done of last desc. in PPBDU mode
2309 			 */
2310 			if (use_dma_ppb_du) {
2311 				td = udc_get_last_dma_desc(req);
2312 				if (td) {
2313 					dma_done =
2314 						AMD_GETBITS(td->status,
2315 						UDC_DMA_IN_STS_BS);
2316 					/* don't care DMA done */
2317 					req->req.actual = req->req.length;
2318 				}
2319 			} else {
2320 				/* assume all bytes transferred */
2321 				req->req.actual = req->req.length;
2322 			}
2323 
2324 			if (req->req.actual == req->req.length) {
2325 				/* complete req */
2326 				complete_req(ep, req, 0);
2327 				req->dma_going = 0;
2328 				/* further request available ? */
2329 				if (list_empty(&ep->queue)) {
2330 					/* disable interrupt */
2331 					tmp = readl(&dev->regs->ep_irqmsk);
2332 					tmp |= AMD_BIT(ep->num);
2333 					writel(tmp, &dev->regs->ep_irqmsk);
2334 				}
2335 			}
2336 		}
2337 		ep->cancel_transfer = 0;
2338 
2339 	}
2340 	/*
2341 	 * status reg has IN bit set and TDC not set (if TDC was handled,
2342 	 * IN must not be handled (UDC defect) ?
2343 	 */
2344 	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2345 			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2346 		ret_val = IRQ_HANDLED;
2347 		if (!list_empty(&ep->queue)) {
2348 			/* next request */
2349 			req = list_entry(ep->queue.next,
2350 					struct udc_request, queue);
2351 			/* FIFO mode */
2352 			if (!use_dma) {
2353 				/* write fifo */
2354 				udc_txfifo_write(ep, &req->req);
2355 				len = req->req.length - req->req.actual;
2356 				if (len > ep->ep.maxpacket)
2357 					len = ep->ep.maxpacket;
2358 				req->req.actual += len;
2359 				if (req->req.actual == req->req.length
2360 					|| (len != ep->ep.maxpacket)) {
2361 					/* complete req */
2362 					complete_req(ep, req, 0);
2363 				}
2364 			/* DMA */
2365 			} else if (req && !req->dma_going) {
2366 				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2367 					req, req->td_data);
2368 				if (req->td_data) {
2369 
2370 					req->dma_going = 1;
2371 
2372 					/*
2373 					 * unset L bit of first desc.
2374 					 * for chain
2375 					 */
2376 					if (use_dma_ppb && req->req.length >
2377 							ep->ep.maxpacket) {
2378 						req->td_data->status &=
2379 							AMD_CLEAR_BIT(
2380 							UDC_DMA_IN_STS_L);
2381 					}
2382 
2383 					/* write desc pointer */
2384 					writel(req->td_phys, &ep->regs->desptr);
2385 
2386 					/* set HOST READY */
2387 					req->td_data->status =
2388 						AMD_ADDBITS(
2389 						req->td_data->status,
2390 						UDC_DMA_IN_STS_BS_HOST_READY,
2391 						UDC_DMA_IN_STS_BS);
2392 
2393 					/* set poll demand bit */
2394 					tmp = readl(&ep->regs->ctl);
2395 					tmp |= AMD_BIT(UDC_EPCTL_P);
2396 					writel(tmp, &ep->regs->ctl);
2397 				}
2398 			}
2399 
2400 		} else if (!use_dma && ep->in) {
2401 			/* disable interrupt */
2402 			tmp = readl(
2403 				&dev->regs->ep_irqmsk);
2404 			tmp |= AMD_BIT(ep->num);
2405 			writel(tmp,
2406 				&dev->regs->ep_irqmsk);
2407 		}
2408 	}
2409 	/* clear status bits */
2410 	writel(epsts, &ep->regs->sts);
2411 
2412 finished:
2413 	return ret_val;
2414 
2415 }
2416 
2417 /* Interrupt handler for Control OUT traffic */
udc_control_out_isr(struct udc * dev)2418 static irqreturn_t udc_control_out_isr(struct udc *dev)
2419 __releases(dev->lock)
2420 __acquires(dev->lock)
2421 {
2422 	irqreturn_t ret_val = IRQ_NONE;
2423 	u32 tmp;
2424 	int setup_supported;
2425 	u32 count;
2426 	int set = 0;
2427 	struct udc_ep	*ep;
2428 	struct udc_ep	*ep_tmp;
2429 
2430 	ep = &dev->ep[UDC_EP0OUT_IX];
2431 
2432 	/* clear irq */
2433 	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2434 
2435 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2436 	/* check BNA and clear if set */
2437 	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2438 		VDBG(dev, "ep0: BNA set\n");
2439 		writel(AMD_BIT(UDC_EPSTS_BNA),
2440 			&dev->ep[UDC_EP0OUT_IX].regs->sts);
2441 		ep->bna_occurred = 1;
2442 		ret_val = IRQ_HANDLED;
2443 		goto finished;
2444 	}
2445 
2446 	/* type of data: SETUP or DATA 0 bytes */
2447 	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2448 	VDBG(dev, "data_typ = %x\n", tmp);
2449 
2450 	/* setup data */
2451 	if (tmp == UDC_EPSTS_OUT_SETUP) {
2452 		ret_val = IRQ_HANDLED;
2453 
2454 		ep->dev->stall_ep0in = 0;
2455 		dev->waiting_zlp_ack_ep0in = 0;
2456 
2457 		/* set NAK for EP0_IN */
2458 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2459 		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2460 		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2461 		dev->ep[UDC_EP0IN_IX].naking = 1;
2462 		/* get setup data */
2463 		if (use_dma) {
2464 
2465 			/* clear OUT bits in ep status */
2466 			writel(UDC_EPSTS_OUT_CLEAR,
2467 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
2468 
2469 			setup_data.data[0] =
2470 				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2471 			setup_data.data[1] =
2472 				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2473 			/* set HOST READY */
2474 			dev->ep[UDC_EP0OUT_IX].td_stp->status =
2475 					UDC_DMA_STP_STS_BS_HOST_READY;
2476 		} else {
2477 			/* read fifo */
2478 			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2479 		}
2480 
2481 		/* determine direction of control data */
2482 		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2483 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2484 			/* enable RDE */
2485 			udc_ep0_set_rde(dev);
2486 			set = 0;
2487 		} else {
2488 			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2489 			/*
2490 			 * implant BNA dummy descriptor to allow RXFIFO opening
2491 			 * by RDE
2492 			 */
2493 			if (ep->bna_dummy_req) {
2494 				/* write desc pointer */
2495 				writel(ep->bna_dummy_req->td_phys,
2496 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
2497 				ep->bna_occurred = 0;
2498 			}
2499 
2500 			set = 1;
2501 			dev->ep[UDC_EP0OUT_IX].naking = 1;
2502 			/*
2503 			 * setup timer for enabling RDE (to not enable
2504 			 * RXFIFO DMA for data to early)
2505 			 */
2506 			set_rde = 1;
2507 			if (!timer_pending(&udc_timer)) {
2508 				udc_timer.expires = jiffies +
2509 							HZ/UDC_RDE_TIMER_DIV;
2510 				if (!stop_timer)
2511 					add_timer(&udc_timer);
2512 			}
2513 		}
2514 
2515 		/*
2516 		 * mass storage reset must be processed here because
2517 		 * next packet may be a CLEAR_FEATURE HALT which would not
2518 		 * clear the stall bit when no STALL handshake was received
2519 		 * before (autostall can cause this)
2520 		 */
2521 		if (setup_data.data[0] == UDC_MSCRES_DWORD0
2522 				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
2523 			DBG(dev, "MSC Reset\n");
2524 			/*
2525 			 * clear stall bits
2526 			 * only one IN and OUT endpoints are handled
2527 			 */
2528 			ep_tmp = &udc->ep[UDC_EPIN_IX];
2529 			udc_set_halt(&ep_tmp->ep, 0);
2530 			ep_tmp = &udc->ep[UDC_EPOUT_IX];
2531 			udc_set_halt(&ep_tmp->ep, 0);
2532 		}
2533 
2534 		/* call gadget with setup data received */
2535 		spin_unlock(&dev->lock);
2536 		setup_supported = dev->driver->setup(&dev->gadget,
2537 						&setup_data.request);
2538 		spin_lock(&dev->lock);
2539 
2540 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2541 		/* ep0 in returns data (not zlp) on IN phase */
2542 		if (setup_supported >= 0 && setup_supported <
2543 				UDC_EP0IN_MAXPACKET) {
2544 			/* clear NAK by writing CNAK in EP0_IN */
2545 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2546 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2547 			dev->ep[UDC_EP0IN_IX].naking = 0;
2548 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2549 
2550 		/* if unsupported request then stall */
2551 		} else if (setup_supported < 0) {
2552 			tmp |= AMD_BIT(UDC_EPCTL_S);
2553 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2554 		} else
2555 			dev->waiting_zlp_ack_ep0in = 1;
2556 
2557 
2558 		/* clear NAK by writing CNAK in EP0_OUT */
2559 		if (!set) {
2560 			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2561 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2562 			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2563 			dev->ep[UDC_EP0OUT_IX].naking = 0;
2564 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2565 		}
2566 
2567 		if (!use_dma) {
2568 			/* clear OUT bits in ep status */
2569 			writel(UDC_EPSTS_OUT_CLEAR,
2570 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
2571 		}
2572 
2573 	/* data packet 0 bytes */
2574 	} else if (tmp == UDC_EPSTS_OUT_DATA) {
2575 		/* clear OUT bits in ep status */
2576 		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2577 
2578 		/* get setup data: only 0 packet */
2579 		if (use_dma) {
2580 			/* no req if 0 packet, just reactivate */
2581 			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2582 				VDBG(dev, "ZLP\n");
2583 
2584 				/* set HOST READY */
2585 				dev->ep[UDC_EP0OUT_IX].td->status =
2586 					AMD_ADDBITS(
2587 					dev->ep[UDC_EP0OUT_IX].td->status,
2588 					UDC_DMA_OUT_STS_BS_HOST_READY,
2589 					UDC_DMA_OUT_STS_BS);
2590 				/* enable RDE */
2591 				udc_ep0_set_rde(dev);
2592 				ret_val = IRQ_HANDLED;
2593 
2594 			} else {
2595 				/* control write */
2596 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2597 				/* re-program desc. pointer for possible ZLPs */
2598 				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2599 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
2600 				/* enable RDE */
2601 				udc_ep0_set_rde(dev);
2602 			}
2603 		} else {
2604 
2605 			/* received number bytes */
2606 			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2607 			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2608 			/* out data for fifo mode not working */
2609 			count = 0;
2610 
2611 			/* 0 packet or real data ? */
2612 			if (count != 0) {
2613 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2614 			} else {
2615 				/* dummy read confirm */
2616 				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2617 				ret_val = IRQ_HANDLED;
2618 			}
2619 		}
2620 	}
2621 
2622 	/* check pending CNAKS */
2623 	if (cnak_pending) {
2624 		/* CNAk processing when rxfifo empty only */
2625 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2626 			udc_process_cnak_queue(dev);
2627 	}
2628 
2629 finished:
2630 	return ret_val;
2631 }
2632 
2633 /* Interrupt handler for Control IN traffic */
udc_control_in_isr(struct udc * dev)2634 static irqreturn_t udc_control_in_isr(struct udc *dev)
2635 {
2636 	irqreturn_t ret_val = IRQ_NONE;
2637 	u32 tmp;
2638 	struct udc_ep *ep;
2639 	struct udc_request *req;
2640 	unsigned len;
2641 
2642 	ep = &dev->ep[UDC_EP0IN_IX];
2643 
2644 	/* clear irq */
2645 	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2646 
2647 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2648 	/* DMA completion */
2649 	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2650 		VDBG(dev, "isr: TDC clear\n");
2651 		ret_val = IRQ_HANDLED;
2652 
2653 		/* clear TDC bit */
2654 		writel(AMD_BIT(UDC_EPSTS_TDC),
2655 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2656 
2657 	/* status reg has IN bit set ? */
2658 	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2659 		ret_val = IRQ_HANDLED;
2660 
2661 		if (ep->dma) {
2662 			/* clear IN bit */
2663 			writel(AMD_BIT(UDC_EPSTS_IN),
2664 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2665 		}
2666 		if (dev->stall_ep0in) {
2667 			DBG(dev, "stall ep0in\n");
2668 			/* halt ep0in */
2669 			tmp = readl(&ep->regs->ctl);
2670 			tmp |= AMD_BIT(UDC_EPCTL_S);
2671 			writel(tmp, &ep->regs->ctl);
2672 		} else {
2673 			if (!list_empty(&ep->queue)) {
2674 				/* next request */
2675 				req = list_entry(ep->queue.next,
2676 						struct udc_request, queue);
2677 
2678 				if (ep->dma) {
2679 					/* write desc pointer */
2680 					writel(req->td_phys, &ep->regs->desptr);
2681 					/* set HOST READY */
2682 					req->td_data->status =
2683 						AMD_ADDBITS(
2684 						req->td_data->status,
2685 						UDC_DMA_STP_STS_BS_HOST_READY,
2686 						UDC_DMA_STP_STS_BS);
2687 
2688 					/* set poll demand bit */
2689 					tmp =
2690 					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2691 					tmp |= AMD_BIT(UDC_EPCTL_P);
2692 					writel(tmp,
2693 					&dev->ep[UDC_EP0IN_IX].regs->ctl);
2694 
2695 					/* all bytes will be transferred */
2696 					req->req.actual = req->req.length;
2697 
2698 					/* complete req */
2699 					complete_req(ep, req, 0);
2700 
2701 				} else {
2702 					/* write fifo */
2703 					udc_txfifo_write(ep, &req->req);
2704 
2705 					/* lengh bytes transferred */
2706 					len = req->req.length - req->req.actual;
2707 					if (len > ep->ep.maxpacket)
2708 						len = ep->ep.maxpacket;
2709 
2710 					req->req.actual += len;
2711 					if (req->req.actual == req->req.length
2712 						|| (len != ep->ep.maxpacket)) {
2713 						/* complete req */
2714 						complete_req(ep, req, 0);
2715 					}
2716 				}
2717 
2718 			}
2719 		}
2720 		ep->halted = 0;
2721 		dev->stall_ep0in = 0;
2722 		if (!ep->dma) {
2723 			/* clear IN bit */
2724 			writel(AMD_BIT(UDC_EPSTS_IN),
2725 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2726 		}
2727 	}
2728 
2729 	return ret_val;
2730 }
2731 
2732 
2733 /* Interrupt handler for global device events */
udc_dev_isr(struct udc * dev,u32 dev_irq)2734 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2735 __releases(dev->lock)
2736 __acquires(dev->lock)
2737 {
2738 	irqreturn_t ret_val = IRQ_NONE;
2739 	u32 tmp;
2740 	u32 cfg;
2741 	struct udc_ep *ep;
2742 	u16 i;
2743 	u8 udc_csr_epix;
2744 
2745 	/* SET_CONFIG irq ? */
2746 	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2747 		ret_val = IRQ_HANDLED;
2748 
2749 		/* read config value */
2750 		tmp = readl(&dev->regs->sts);
2751 		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2752 		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2753 		dev->cur_config = cfg;
2754 		dev->set_cfg_not_acked = 1;
2755 
2756 		/* make usb request for gadget driver */
2757 		memset(&setup_data, 0 , sizeof(union udc_setup_data));
2758 		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2759 		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2760 
2761 		/* programm the NE registers */
2762 		for (i = 0; i < UDC_EP_NUM; i++) {
2763 			ep = &dev->ep[i];
2764 			if (ep->in) {
2765 
2766 				/* ep ix in UDC CSR register space */
2767 				udc_csr_epix = ep->num;
2768 
2769 
2770 			/* OUT ep */
2771 			} else {
2772 				/* ep ix in UDC CSR register space */
2773 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2774 			}
2775 
2776 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
2777 			/* ep cfg */
2778 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2779 						UDC_CSR_NE_CFG);
2780 			/* write reg */
2781 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
2782 
2783 			/* clear stall bits */
2784 			ep->halted = 0;
2785 			tmp = readl(&ep->regs->ctl);
2786 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2787 			writel(tmp, &ep->regs->ctl);
2788 		}
2789 		/* call gadget zero with setup data received */
2790 		spin_unlock(&dev->lock);
2791 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2792 		spin_lock(&dev->lock);
2793 
2794 	} /* SET_INTERFACE ? */
2795 	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2796 		ret_val = IRQ_HANDLED;
2797 
2798 		dev->set_cfg_not_acked = 1;
2799 		/* read interface and alt setting values */
2800 		tmp = readl(&dev->regs->sts);
2801 		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2802 		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2803 
2804 		/* make usb request for gadget driver */
2805 		memset(&setup_data, 0 , sizeof(union udc_setup_data));
2806 		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2807 		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2808 		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2809 		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2810 
2811 		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2812 				dev->cur_alt, dev->cur_intf);
2813 
2814 		/* programm the NE registers */
2815 		for (i = 0; i < UDC_EP_NUM; i++) {
2816 			ep = &dev->ep[i];
2817 			if (ep->in) {
2818 
2819 				/* ep ix in UDC CSR register space */
2820 				udc_csr_epix = ep->num;
2821 
2822 
2823 			/* OUT ep */
2824 			} else {
2825 				/* ep ix in UDC CSR register space */
2826 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2827 			}
2828 
2829 			/* UDC CSR reg */
2830 			/* set ep values */
2831 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
2832 			/* ep interface */
2833 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2834 						UDC_CSR_NE_INTF);
2835 			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2836 			/* ep alt */
2837 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2838 						UDC_CSR_NE_ALT);
2839 			/* write reg */
2840 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
2841 
2842 			/* clear stall bits */
2843 			ep->halted = 0;
2844 			tmp = readl(&ep->regs->ctl);
2845 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2846 			writel(tmp, &ep->regs->ctl);
2847 		}
2848 
2849 		/* call gadget zero with setup data received */
2850 		spin_unlock(&dev->lock);
2851 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2852 		spin_lock(&dev->lock);
2853 
2854 	} /* USB reset */
2855 	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2856 		DBG(dev, "USB Reset interrupt\n");
2857 		ret_val = IRQ_HANDLED;
2858 
2859 		/* allow soft reset when suspend occurs */
2860 		soft_reset_occured = 0;
2861 
2862 		dev->waiting_zlp_ack_ep0in = 0;
2863 		dev->set_cfg_not_acked = 0;
2864 
2865 		/* mask not needed interrupts */
2866 		udc_mask_unused_interrupts(dev);
2867 
2868 		/* call gadget to resume and reset configs etc. */
2869 		spin_unlock(&dev->lock);
2870 		if (dev->sys_suspended && dev->driver->resume) {
2871 			dev->driver->resume(&dev->gadget);
2872 			dev->sys_suspended = 0;
2873 		}
2874 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2875 		spin_lock(&dev->lock);
2876 
2877 		/* disable ep0 to empty req queue */
2878 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2879 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2880 
2881 		/* soft reset when rxfifo not empty */
2882 		tmp = readl(&dev->regs->sts);
2883 		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2884 				&& !soft_reset_after_usbreset_occured) {
2885 			udc_soft_reset(dev);
2886 			soft_reset_after_usbreset_occured++;
2887 		}
2888 
2889 		/*
2890 		 * DMA reset to kill potential old DMA hw hang,
2891 		 * POLL bit is already reset by ep_init() through
2892 		 * disconnect()
2893 		 */
2894 		DBG(dev, "DMA machine reset\n");
2895 		tmp = readl(&dev->regs->cfg);
2896 		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2897 		writel(tmp, &dev->regs->cfg);
2898 
2899 		/* put into initial config */
2900 		udc_basic_init(dev);
2901 
2902 		/* enable device setup interrupts */
2903 		udc_enable_dev_setup_interrupts(dev);
2904 
2905 		/* enable suspend interrupt */
2906 		tmp = readl(&dev->regs->irqmsk);
2907 		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2908 		writel(tmp, &dev->regs->irqmsk);
2909 
2910 	} /* USB suspend */
2911 	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2912 		DBG(dev, "USB Suspend interrupt\n");
2913 		ret_val = IRQ_HANDLED;
2914 		if (dev->driver->suspend) {
2915 			spin_unlock(&dev->lock);
2916 			dev->sys_suspended = 1;
2917 			dev->driver->suspend(&dev->gadget);
2918 			spin_lock(&dev->lock);
2919 		}
2920 	} /* new speed ? */
2921 	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2922 		DBG(dev, "ENUM interrupt\n");
2923 		ret_val = IRQ_HANDLED;
2924 		soft_reset_after_usbreset_occured = 0;
2925 
2926 		/* disable ep0 to empty req queue */
2927 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2928 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2929 
2930 		/* link up all endpoints */
2931 		udc_setup_endpoints(dev);
2932 		dev_info(&dev->pdev->dev, "Connect: %s\n",
2933 			 usb_speed_string(dev->gadget.speed));
2934 
2935 		/* init ep 0 */
2936 		activate_control_endpoints(dev);
2937 
2938 		/* enable ep0 interrupts */
2939 		udc_enable_ep0_interrupts(dev);
2940 	}
2941 	/* session valid change interrupt */
2942 	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
2943 		DBG(dev, "USB SVC interrupt\n");
2944 		ret_val = IRQ_HANDLED;
2945 
2946 		/* check that session is not valid to detect disconnect */
2947 		tmp = readl(&dev->regs->sts);
2948 		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
2949 			/* disable suspend interrupt */
2950 			tmp = readl(&dev->regs->irqmsk);
2951 			tmp |= AMD_BIT(UDC_DEVINT_US);
2952 			writel(tmp, &dev->regs->irqmsk);
2953 			DBG(dev, "USB Disconnect (session valid low)\n");
2954 			/* cleanup on disconnect */
2955 			usb_disconnect(udc);
2956 		}
2957 
2958 	}
2959 
2960 	return ret_val;
2961 }
2962 
2963 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
udc_irq(int irq,void * pdev)2964 static irqreturn_t udc_irq(int irq, void *pdev)
2965 {
2966 	struct udc *dev = pdev;
2967 	u32 reg;
2968 	u16 i;
2969 	u32 ep_irq;
2970 	irqreturn_t ret_val = IRQ_NONE;
2971 
2972 	spin_lock(&dev->lock);
2973 
2974 	/* check for ep irq */
2975 	reg = readl(&dev->regs->ep_irqsts);
2976 	if (reg) {
2977 		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
2978 			ret_val |= udc_control_out_isr(dev);
2979 		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
2980 			ret_val |= udc_control_in_isr(dev);
2981 
2982 		/*
2983 		 * data endpoint
2984 		 * iterate ep's
2985 		 */
2986 		for (i = 1; i < UDC_EP_NUM; i++) {
2987 			ep_irq = 1 << i;
2988 			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
2989 				continue;
2990 
2991 			/* clear irq status */
2992 			writel(ep_irq, &dev->regs->ep_irqsts);
2993 
2994 			/* irq for out ep ? */
2995 			if (i > UDC_EPIN_NUM)
2996 				ret_val |= udc_data_out_isr(dev, i);
2997 			else
2998 				ret_val |= udc_data_in_isr(dev, i);
2999 		}
3000 
3001 	}
3002 
3003 
3004 	/* check for dev irq */
3005 	reg = readl(&dev->regs->irqsts);
3006 	if (reg) {
3007 		/* clear irq */
3008 		writel(reg, &dev->regs->irqsts);
3009 		ret_val |= udc_dev_isr(dev, reg);
3010 	}
3011 
3012 
3013 	spin_unlock(&dev->lock);
3014 	return ret_val;
3015 }
3016 
3017 /* Tears down device */
gadget_release(struct device * pdev)3018 static void gadget_release(struct device *pdev)
3019 {
3020 	struct amd5536udc *dev = dev_get_drvdata(pdev);
3021 	kfree(dev);
3022 }
3023 
3024 /* Cleanup on device remove */
udc_remove(struct udc * dev)3025 static void udc_remove(struct udc *dev)
3026 {
3027 	/* remove timer */
3028 	stop_timer++;
3029 	if (timer_pending(&udc_timer))
3030 		wait_for_completion(&on_exit);
3031 	if (udc_timer.data)
3032 		del_timer_sync(&udc_timer);
3033 	/* remove pollstall timer */
3034 	stop_pollstall_timer++;
3035 	if (timer_pending(&udc_pollstall_timer))
3036 		wait_for_completion(&on_pollstall_exit);
3037 	if (udc_pollstall_timer.data)
3038 		del_timer_sync(&udc_pollstall_timer);
3039 	udc = NULL;
3040 }
3041 
3042 /* Reset all pci context */
udc_pci_remove(struct pci_dev * pdev)3043 static void udc_pci_remove(struct pci_dev *pdev)
3044 {
3045 	struct udc		*dev;
3046 
3047 	dev = pci_get_drvdata(pdev);
3048 
3049 	usb_del_gadget_udc(&udc->gadget);
3050 	/* gadget driver must not be registered */
3051 	BUG_ON(dev->driver != NULL);
3052 
3053 	/* dma pool cleanup */
3054 	if (dev->data_requests)
3055 		pci_pool_destroy(dev->data_requests);
3056 
3057 	if (dev->stp_requests) {
3058 		/* cleanup DMA desc's for ep0in */
3059 		pci_pool_free(dev->stp_requests,
3060 			dev->ep[UDC_EP0OUT_IX].td_stp,
3061 			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3062 		pci_pool_free(dev->stp_requests,
3063 			dev->ep[UDC_EP0OUT_IX].td,
3064 			dev->ep[UDC_EP0OUT_IX].td_phys);
3065 
3066 		pci_pool_destroy(dev->stp_requests);
3067 	}
3068 
3069 	/* reset controller */
3070 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3071 	if (dev->irq_registered)
3072 		free_irq(pdev->irq, dev);
3073 	if (dev->regs)
3074 		iounmap(dev->regs);
3075 	if (dev->mem_region)
3076 		release_mem_region(pci_resource_start(pdev, 0),
3077 				pci_resource_len(pdev, 0));
3078 	if (dev->active)
3079 		pci_disable_device(pdev);
3080 
3081 	udc_remove(dev);
3082 }
3083 
3084 /* create dma pools on init */
init_dma_pools(struct udc * dev)3085 static int init_dma_pools(struct udc *dev)
3086 {
3087 	struct udc_stp_dma	*td_stp;
3088 	struct udc_data_dma	*td_data;
3089 	int retval;
3090 
3091 	/* consistent DMA mode setting ? */
3092 	if (use_dma_ppb) {
3093 		use_dma_bufferfill_mode = 0;
3094 	} else {
3095 		use_dma_ppb_du = 0;
3096 		use_dma_bufferfill_mode = 1;
3097 	}
3098 
3099 	/* DMA setup */
3100 	dev->data_requests = dma_pool_create("data_requests", NULL,
3101 		sizeof(struct udc_data_dma), 0, 0);
3102 	if (!dev->data_requests) {
3103 		DBG(dev, "can't get request data pool\n");
3104 		retval = -ENOMEM;
3105 		goto finished;
3106 	}
3107 
3108 	/* EP0 in dma regs = dev control regs */
3109 	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3110 
3111 	/* dma desc for setup data */
3112 	dev->stp_requests = dma_pool_create("setup requests", NULL,
3113 		sizeof(struct udc_stp_dma), 0, 0);
3114 	if (!dev->stp_requests) {
3115 		DBG(dev, "can't get stp request pool\n");
3116 		retval = -ENOMEM;
3117 		goto finished;
3118 	}
3119 	/* setup */
3120 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3121 				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3122 	if (td_stp == NULL) {
3123 		retval = -ENOMEM;
3124 		goto finished;
3125 	}
3126 	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3127 
3128 	/* data: 0 packets !? */
3129 	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3130 				&dev->ep[UDC_EP0OUT_IX].td_phys);
3131 	if (td_data == NULL) {
3132 		retval = -ENOMEM;
3133 		goto finished;
3134 	}
3135 	dev->ep[UDC_EP0OUT_IX].td = td_data;
3136 	return 0;
3137 
3138 finished:
3139 	return retval;
3140 }
3141 
3142 /* Called by pci bus driver to init pci context */
udc_pci_probe(struct pci_dev * pdev,const struct pci_device_id * id)3143 static int udc_pci_probe(
3144 	struct pci_dev *pdev,
3145 	const struct pci_device_id *id
3146 )
3147 {
3148 	struct udc		*dev;
3149 	unsigned long		resource;
3150 	unsigned long		len;
3151 	int			retval = 0;
3152 
3153 	/* one udc only */
3154 	if (udc) {
3155 		dev_dbg(&pdev->dev, "already probed\n");
3156 		return -EBUSY;
3157 	}
3158 
3159 	/* init */
3160 	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3161 	if (!dev) {
3162 		retval = -ENOMEM;
3163 		goto finished;
3164 	}
3165 
3166 	/* pci setup */
3167 	if (pci_enable_device(pdev) < 0) {
3168 		kfree(dev);
3169 		dev = NULL;
3170 		retval = -ENODEV;
3171 		goto finished;
3172 	}
3173 	dev->active = 1;
3174 
3175 	/* PCI resource allocation */
3176 	resource = pci_resource_start(pdev, 0);
3177 	len = pci_resource_len(pdev, 0);
3178 
3179 	if (!request_mem_region(resource, len, name)) {
3180 		dev_dbg(&pdev->dev, "pci device used already\n");
3181 		kfree(dev);
3182 		dev = NULL;
3183 		retval = -EBUSY;
3184 		goto finished;
3185 	}
3186 	dev->mem_region = 1;
3187 
3188 	dev->virt_addr = ioremap_nocache(resource, len);
3189 	if (dev->virt_addr == NULL) {
3190 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3191 		kfree(dev);
3192 		dev = NULL;
3193 		retval = -EFAULT;
3194 		goto finished;
3195 	}
3196 
3197 	if (!pdev->irq) {
3198 		dev_err(&pdev->dev, "irq not set\n");
3199 		kfree(dev);
3200 		dev = NULL;
3201 		retval = -ENODEV;
3202 		goto finished;
3203 	}
3204 
3205 	spin_lock_init(&dev->lock);
3206 	/* udc csr registers base */
3207 	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3208 	/* dev registers base */
3209 	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3210 	/* ep registers base */
3211 	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3212 	/* fifo's base */
3213 	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3214 	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3215 
3216 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3217 		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3218 		kfree(dev);
3219 		dev = NULL;
3220 		retval = -EBUSY;
3221 		goto finished;
3222 	}
3223 	dev->irq_registered = 1;
3224 
3225 	pci_set_drvdata(pdev, dev);
3226 
3227 	/* chip revision for Hs AMD5536 */
3228 	dev->chiprev = pdev->revision;
3229 
3230 	pci_set_master(pdev);
3231 	pci_try_set_mwi(pdev);
3232 
3233 	/* init dma pools */
3234 	if (use_dma) {
3235 		retval = init_dma_pools(dev);
3236 		if (retval != 0)
3237 			goto finished;
3238 	}
3239 
3240 	dev->phys_addr = resource;
3241 	dev->irq = pdev->irq;
3242 	dev->pdev = pdev;
3243 
3244 	/* general probing */
3245 	if (udc_probe(dev) == 0)
3246 		return 0;
3247 
3248 finished:
3249 	if (dev)
3250 		udc_pci_remove(pdev);
3251 	return retval;
3252 }
3253 
3254 /* general probe */
udc_probe(struct udc * dev)3255 static int udc_probe(struct udc *dev)
3256 {
3257 	char		tmp[128];
3258 	u32		reg;
3259 	int		retval;
3260 
3261 	/* mark timer as not initialized */
3262 	udc_timer.data = 0;
3263 	udc_pollstall_timer.data = 0;
3264 
3265 	/* device struct setup */
3266 	dev->gadget.ops = &udc_ops;
3267 
3268 	dev_set_name(&dev->gadget.dev, "gadget");
3269 	dev->gadget.name = name;
3270 	dev->gadget.max_speed = USB_SPEED_HIGH;
3271 
3272 	/* init registers, interrupts, ... */
3273 	startup_registers(dev);
3274 
3275 	dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3276 
3277 	snprintf(tmp, sizeof tmp, "%d", dev->irq);
3278 	dev_info(&dev->pdev->dev,
3279 		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3280 		tmp, dev->phys_addr, dev->chiprev,
3281 		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3282 	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3283 	if (dev->chiprev == UDC_HSA0_REV) {
3284 		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3285 		retval = -ENODEV;
3286 		goto finished;
3287 	}
3288 	dev_info(&dev->pdev->dev,
3289 		"driver version: %s(for Geode5536 B1)\n", tmp);
3290 	udc = dev;
3291 
3292 	retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
3293 			gadget_release);
3294 	if (retval)
3295 		goto finished;
3296 
3297 	/* timer init */
3298 	init_timer(&udc_timer);
3299 	udc_timer.function = udc_timer_function;
3300 	udc_timer.data = 1;
3301 	/* timer pollstall init */
3302 	init_timer(&udc_pollstall_timer);
3303 	udc_pollstall_timer.function = udc_pollstall_timer_function;
3304 	udc_pollstall_timer.data = 1;
3305 
3306 	/* set SD */
3307 	reg = readl(&dev->regs->ctl);
3308 	reg |= AMD_BIT(UDC_DEVCTL_SD);
3309 	writel(reg, &dev->regs->ctl);
3310 
3311 	/* print dev register info */
3312 	print_regs(dev);
3313 
3314 	return 0;
3315 
3316 finished:
3317 	return retval;
3318 }
3319 
3320 /* Initiates a remote wakeup */
udc_remote_wakeup(struct udc * dev)3321 static int udc_remote_wakeup(struct udc *dev)
3322 {
3323 	unsigned long flags;
3324 	u32 tmp;
3325 
3326 	DBG(dev, "UDC initiates remote wakeup\n");
3327 
3328 	spin_lock_irqsave(&dev->lock, flags);
3329 
3330 	tmp = readl(&dev->regs->ctl);
3331 	tmp |= AMD_BIT(UDC_DEVCTL_RES);
3332 	writel(tmp, &dev->regs->ctl);
3333 	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
3334 	writel(tmp, &dev->regs->ctl);
3335 
3336 	spin_unlock_irqrestore(&dev->lock, flags);
3337 	return 0;
3338 }
3339 
3340 /* PCI device parameters */
3341 static const struct pci_device_id pci_id[] = {
3342 	{
3343 		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3344 		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3345 		.class_mask =	0xffffffff,
3346 	},
3347 	{},
3348 };
3349 MODULE_DEVICE_TABLE(pci, pci_id);
3350 
3351 /* PCI functions */
3352 static struct pci_driver udc_pci_driver = {
3353 	.name =		(char *) name,
3354 	.id_table =	pci_id,
3355 	.probe =	udc_pci_probe,
3356 	.remove =	udc_pci_remove,
3357 };
3358 
3359 module_pci_driver(udc_pci_driver);
3360 
3361 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3362 MODULE_AUTHOR("Thomas Dahlmann");
3363 MODULE_LICENSE("GPL");
3364 
3365