1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
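/*
 * Note: the ch9 test selectors (TEST_J = 1 ... TEST_FORCE_EN = 5) map
 * directly onto the DCTL.TstCtl field, which is why the plain
 * "mode << 1" below is enough to program the selector into the register.
 */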
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the link state on success (>= 0) or -ETIMEDOUT.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 /**
149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
150 * @dwc: pointer to our context structure
151 *
152 * This function will do a best-effort FIFO allocation in order
153 * to improve FIFO usage and throughput, while still allowing
154 * us to enable as many endpoints as possible.
155 *
156 * Keep in mind that this operation will be highly dependent
157 * on the configured size for RAM1 - which contains TxFifo -,
158 * the amount of endpoints enabled on coreConsultant tool, and
159 * the width of the Master Bus.
160 *
161 * In the ideal world, we would always be able to satisfy the
162 * following equation:
163 *
164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
166 *
167 * Unfortunately, due to many variables that's not always the case.
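 *
 * As a purely illustrative example (not a databook figure): with a
 * 64-bit (8-byte) master bus and 4 IN endpoints, the expression above
 * evaluates to ((512 + 2 * 8) + 3 * (3 * (1024 + 8) + 8)) / 8 = 1230
 * mdwidth-wide RAM1 words.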
168 */
169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
170 {
171 int last_fifo_depth = 0;
172 int ram1_depth;
173 int fifo_size;
174 int mdwidth;
175 int num;
176
177 if (!dwc->needs_fifo_resize)
178 return 0;
179
180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
182
183 /* MDWIDTH is represented in bits, we need it in bytes */
184 mdwidth >>= 3;
185
186 /*
187 * FIXME For now we will only allocate 1 wMaxPacketSize space
188 * for each enabled endpoint, later patches will come to
189 * improve this algorithm so that we better use the internal
190 * FIFO space
191 */
192 for (num = 0; num < dwc->num_in_eps; num++) {
193 /* bit0 indicates direction; 1 means IN ep */
194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
195 int mult = 1;
196 int tmp;
197
198 if (!(dep->flags & DWC3_EP_ENABLED))
199 continue;
200
201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
202 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
203 mult = 3;
204
205 /*
206 * REVISIT: the following assumes we will always have enough
207 * space available on the FIFO RAM for all possible use cases.
208 * Make sure that's true somehow and change FIFO allocation
209 * accordingly.
210 *
211 * If we have Bulk or Isochronous endpoints, we want
212 * them to be able to be very, very fast. So we're giving
213 * those endpoints a fifo_size which is enough for 3 full
214 * packets
215 */
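/*
 * Illustrative numbers (assuming an 8-byte wide master bus): a bulk or
 * isoc endpoint with maxpacket = 1024 needs
 * DIV_ROUND_UP(3 * (1024 + 8) + 8, 8) = 388 mdwidth words; each
 * endpoint's FIFO start address (last_fifo_depth) is then packed into
 * the upper 16 bits of its GTXFIFOSIZ register below.
 */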
216 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
217 tmp += mdwidth;
218
219 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
220
221 fifo_size |= (last_fifo_depth << 16);
222
223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
224 dep->name, last_fifo_depth, fifo_size & 0xffff);
225
226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
227
228 last_fifo_depth += (fifo_size & 0xffff);
229 }
230
231 return 0;
232 }
233
234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
235 int status)
236 {
237 struct dwc3 *dwc = dep->dwc;
238 int i;
239
240 if (req->queued) {
241 i = 0;
242 do {
243 dep->busy_slot++;
244 /*
245 * Skip LINK TRB. We can't use req->trb and check for
246 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
247 * just completed (not the LINK TRB).
248 */
249 if (((dep->busy_slot & DWC3_TRB_MASK) ==
250 DWC3_TRB_NUM - 1) &&
251 usb_endpoint_xfer_isoc(dep->endpoint.desc))
252 dep->busy_slot++;
253 } while (++i < req->request.num_mapped_sgs);
254 req->queued = false;
255 }
256 list_del(&req->list);
257 req->trb = NULL;
258
259 if (req->request.status == -EINPROGRESS)
260 req->request.status = status;
261
262 if (dwc->ep0_bounced && dep->number == 0)
263 dwc->ep0_bounced = false;
264 else
265 usb_gadget_unmap_request(&dwc->gadget, &req->request,
266 req->direction);
267
268 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
269 req, dep->name, req->request.actual,
270 req->request.length, status);
271 trace_dwc3_gadget_giveback(req);
272
273 spin_unlock(&dwc->lock);
274 usb_gadget_giveback_request(&dep->endpoint, &req->request);
275 spin_lock(&dwc->lock);
276 }
277
278 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
279 {
280 u32 timeout = 500;
281 u32 reg;
282
283 trace_dwc3_gadget_generic_cmd(cmd, param);
284
285 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
286 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
287
288 do {
289 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
290 if (!(reg & DWC3_DGCMD_CMDACT)) {
291 dwc3_trace(trace_dwc3_gadget,
292 "Command Complete --> %d",
293 DWC3_DGCMD_STATUS(reg));
294 if (DWC3_DGCMD_STATUS(reg))
295 return -EINVAL;
296 return 0;
297 }
298
299 /*
300 * We can't sleep here, because it's also called from
301 * interrupt context.
302 */
303 timeout--;
304 if (!timeout) {
305 dwc3_trace(trace_dwc3_gadget,
306 "Command Timed Out");
307 return -ETIMEDOUT;
308 }
309 udelay(1);
310 } while (1);
311 }
312
313 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
314 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
315 {
316 struct dwc3_ep *dep = dwc->eps[ep];
317 u32 timeout = 500;
318 u32 reg;
319
320 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
321
322 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
323 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
324 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
325
326 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
327 do {
328 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
329 if (!(reg & DWC3_DEPCMD_CMDACT)) {
330 dwc3_trace(trace_dwc3_gadget,
331 "Command Complete --> %d",
332 DWC3_DEPCMD_STATUS(reg));
333 if (DWC3_DEPCMD_STATUS(reg))
334 return -EINVAL;
335 return 0;
336 }
337
338 /*
339 * We can't sleep here, because it is also called from
340 * interrupt context.
341 */
342 timeout--;
343 if (!timeout) {
344 dwc3_trace(trace_dwc3_gadget,
345 "Command Timed Out");
346 return -ETIMEDOUT;
347 }
348
349 udelay(1);
350 } while (1);
351 }
352
353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
354 struct dwc3_trb *trb)
355 {
356 u32 offset = (char *) trb - (char *) dep->trb_pool;
357
358 return dep->trb_pool_dma + offset;
359 }
360
361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
362 {
363 struct dwc3 *dwc = dep->dwc;
364
365 if (dep->trb_pool)
366 return 0;
367
368 dep->trb_pool = dma_alloc_coherent(dwc->dev,
369 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
370 &dep->trb_pool_dma, GFP_KERNEL);
371 if (!dep->trb_pool) {
372 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
373 dep->name);
374 return -ENOMEM;
375 }
376
377 return 0;
378 }
379
380 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
381 {
382 struct dwc3 *dwc = dep->dwc;
383
384 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
385 dep->trb_pool, dep->trb_pool_dma);
386
387 dep->trb_pool = NULL;
388 dep->trb_pool_dma = 0;
389 }
390
391 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
392
393 /**
394 * dwc3_gadget_start_config - Configure EP resources
395 * @dwc: pointer to our controller context structure
396 * @dep: endpoint that is being enabled
397 *
398 * The assignment of transfer resources cannot perfectly follow the
399 * data book due to the fact that the controller driver does not have
400 * all knowledge of the configuration in advance. It is given this
401 * information piecemeal by the composite gadget framework after every
402 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
403 * programming model in this scenario can cause errors. For two
404 * reasons:
405 *
406 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
407 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
408 * multiple interfaces.
409 *
410 * 2) The databook does not mention doing more DEPXFERCFG for new
411 * endpoint on alt setting (8.1.6).
412 *
413 * The following simplified method is used instead:
414 *
415 * All hardware endpoints can be assigned a transfer resource and this
416 * setting will stay persistent until either a core reset or
417 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
418 * do DEPXFERCFG for every hardware endpoint as well. We are
419 * guaranteed that there are as many transfer resources as endpoints.
420 *
421 * This function is called for each endpoint when it is being enabled
422 * but is triggered only when called for EP0-out, which always happens
423 * first, and which should only happen in one of the above conditions.
424 */
425 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
426 {
427 struct dwc3_gadget_ep_cmd_params params;
428 u32 cmd;
429 int i;
430 int ret;
431
432 if (dep->number)
433 return 0;
434
435 memset(&params, 0x00, sizeof(params));
436 cmd = DWC3_DEPCMD_DEPSTARTCFG;
437
438 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
439 if (ret)
440 return ret;
441
442 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
443 struct dwc3_ep *dep = dwc->eps[i];
444
445 if (!dep)
446 continue;
447
448 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
449 if (ret)
450 return ret;
451 }
452
453 return 0;
454 }
455
456 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
457 const struct usb_endpoint_descriptor *desc,
458 const struct usb_ss_ep_comp_descriptor *comp_desc,
459 bool ignore, bool restore)
460 {
461 struct dwc3_gadget_ep_cmd_params params;
462
463 memset(&params, 0x00, sizeof(params));
464
465 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
466 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
467
468 /* Burst size is only needed in SuperSpeed mode */
469 if (dwc->gadget.speed == USB_SPEED_SUPER) {
470 u32 burst = dep->endpoint.maxburst - 1;
471
472 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
473 }
474
475 if (ignore)
476 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
477
478 if (restore) {
479 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
480 params.param2 |= dep->saved_state;
481 }
482
483 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
484 | DWC3_DEPCFG_XFER_NOT_READY_EN;
485
486 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
487 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
488 | DWC3_DEPCFG_STREAM_EVENT_EN;
489 dep->stream_capable = true;
490 }
491
492 if (!usb_endpoint_xfer_control(desc))
493 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
494
495 /*
496 * We are doing 1:1 mapping for endpoints, meaning
497 * Physical Endpoint 2 maps to Logical Endpoint 2 and
498 * so on. We consider the direction bit as part of the physical
499 * endpoint number. So USB endpoint 0x81 is 0x03.
500 */
501 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
502
503 /*
504 * We must use the lower 16 TX FIFOs even though
505 * HW might have more
506 */
507 if (dep->direction)
508 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
509
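/*
 * Example of the bInterval handling below: a high-/super-speed periodic
 * endpoint with bInterval = 4 is programmed with BINTERVAL_M1 = 3 and
 * cached as dep->interval = 1 << (4 - 1) = 8 (micro)frames.
 */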
510 if (desc->bInterval) {
511 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
512 dep->interval = 1 << (desc->bInterval - 1);
513 }
514
515 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
516 DWC3_DEPCMD_SETEPCONFIG, &params);
517 }
518
519 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
520 {
521 struct dwc3_gadget_ep_cmd_params params;
522
523 memset(&params, 0x00, sizeof(params));
524
525 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
526
527 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
528 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
529 }
530
531 /**
532 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
533 * @dep: endpoint to be initialized
534 * @desc: USB Endpoint Descriptor
535 *
536 * Caller should take care of locking
537 */
538 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
539 const struct usb_endpoint_descriptor *desc,
540 const struct usb_ss_ep_comp_descriptor *comp_desc,
541 bool ignore, bool restore)
542 {
543 struct dwc3 *dwc = dep->dwc;
544 u32 reg;
545 int ret;
546
547 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
548
549 if (!(dep->flags & DWC3_EP_ENABLED)) {
550 ret = dwc3_gadget_start_config(dwc, dep);
551 if (ret)
552 return ret;
553 }
554
555 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
556 restore);
557 if (ret)
558 return ret;
559
560 if (!(dep->flags & DWC3_EP_ENABLED)) {
561 struct dwc3_trb *trb_st_hw;
562 struct dwc3_trb *trb_link;
563
564 dep->endpoint.desc = desc;
565 dep->comp_desc = comp_desc;
566 dep->type = usb_endpoint_type(desc);
567 dep->flags |= DWC3_EP_ENABLED;
568
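/*
 * DALEPENA holds one "active endpoint" enable bit per physical
 * endpoint; setting bit dep->number below activates this endpoint in
 * the controller.
 */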
569 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
570 reg |= DWC3_DALEPENA_EP(dep->number);
571 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
572
573 if (!usb_endpoint_xfer_isoc(desc))
574 return 0;
575
576 /* Link TRB for ISOC. The HWO bit is never reset */
577 trb_st_hw = &dep->trb_pool[0];
578
579 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
580 memset(trb_link, 0, sizeof(*trb_link));
581
582 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
583 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
584 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
585 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
586 }
587
588 switch (usb_endpoint_type(desc)) {
589 case USB_ENDPOINT_XFER_CONTROL:
590 strlcat(dep->name, "-control", sizeof(dep->name));
591 break;
592 case USB_ENDPOINT_XFER_ISOC:
593 strlcat(dep->name, "-isoc", sizeof(dep->name));
594 break;
595 case USB_ENDPOINT_XFER_BULK:
596 strlcat(dep->name, "-bulk", sizeof(dep->name));
597 break;
598 case USB_ENDPOINT_XFER_INT:
599 strlcat(dep->name, "-int", sizeof(dep->name));
600 break;
601 default:
602 dev_err(dwc->dev, "invalid endpoint transfer type\n");
603 }
604
605 return 0;
606 }
607
608 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
609 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
610 {
611 struct dwc3_request *req;
612
613 if (!list_empty(&dep->req_queued)) {
614 dwc3_stop_active_transfer(dwc, dep->number, true);
615
616 /* giveback all requests to the gadget driver */
617 while (!list_empty(&dep->req_queued)) {
618 req = next_request(&dep->req_queued);
619
620 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
621 }
622 }
623
624 while (!list_empty(&dep->request_list)) {
625 req = next_request(&dep->request_list);
626
627 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
628 }
629 }
630
631 /**
632 * __dwc3_gadget_ep_disable - Disables a HW endpoint
633 * @dep: the endpoint to disable
634 *
635 * This function also removes requests which are currently processed by the
636 * hardware and those which are not yet scheduled.
637 * Caller should take care of locking.
638 */
639 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
640 {
641 struct dwc3 *dwc = dep->dwc;
642 u32 reg;
643
644 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
645
646 dwc3_remove_requests(dwc, dep);
647
648 /* make sure HW endpoint isn't stalled */
649 if (dep->flags & DWC3_EP_STALL)
650 __dwc3_gadget_ep_set_halt(dep, 0, false);
651
652 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
653 reg &= ~DWC3_DALEPENA_EP(dep->number);
654 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
655
656 dep->stream_capable = false;
657 dep->endpoint.desc = NULL;
658 dep->comp_desc = NULL;
659 dep->type = 0;
660 dep->flags = 0;
661
662 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
663 dep->number >> 1,
664 (dep->number & 1) ? "in" : "out");
665
666 return 0;
667 }
668
669 /* -------------------------------------------------------------------------- */
670
671 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
672 const struct usb_endpoint_descriptor *desc)
673 {
674 return -EINVAL;
675 }
676
677 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
678 {
679 return -EINVAL;
680 }
681
682 /* -------------------------------------------------------------------------- */
683
684 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
685 const struct usb_endpoint_descriptor *desc)
686 {
687 struct dwc3_ep *dep;
688 struct dwc3 *dwc;
689 unsigned long flags;
690 int ret;
691
692 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
693 pr_debug("dwc3: invalid parameters\n");
694 return -EINVAL;
695 }
696
697 if (!desc->wMaxPacketSize) {
698 pr_debug("dwc3: missing wMaxPacketSize\n");
699 return -EINVAL;
700 }
701
702 dep = to_dwc3_ep(ep);
703 dwc = dep->dwc;
704
705 if (dep->flags & DWC3_EP_ENABLED) {
706 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
707 dep->name);
708 return 0;
709 }
710
711 spin_lock_irqsave(&dwc->lock, flags);
712 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
713 spin_unlock_irqrestore(&dwc->lock, flags);
714
715 return ret;
716 }
717
718 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
719 {
720 struct dwc3_ep *dep;
721 struct dwc3 *dwc;
722 unsigned long flags;
723 int ret;
724
725 if (!ep) {
726 pr_debug("dwc3: invalid parameters\n");
727 return -EINVAL;
728 }
729
730 dep = to_dwc3_ep(ep);
731 dwc = dep->dwc;
732
733 if (!(dep->flags & DWC3_EP_ENABLED)) {
734 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
735 dep->name);
736 return 0;
737 }
738
739 spin_lock_irqsave(&dwc->lock, flags);
740 ret = __dwc3_gadget_ep_disable(dep);
741 spin_unlock_irqrestore(&dwc->lock, flags);
742
743 return ret;
744 }
745
746 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
747 gfp_t gfp_flags)
748 {
749 struct dwc3_request *req;
750 struct dwc3_ep *dep = to_dwc3_ep(ep);
751
752 req = kzalloc(sizeof(*req), gfp_flags);
753 if (!req)
754 return NULL;
755
756 req->epnum = dep->number;
757 req->dep = dep;
758
759 trace_dwc3_alloc_request(req);
760
761 return &req->request;
762 }
763
764 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
765 struct usb_request *request)
766 {
767 struct dwc3_request *req = to_dwc3_request(request);
768
769 trace_dwc3_free_request(req);
770 kfree(req);
771 }
772
773 /**
774 * dwc3_prepare_one_trb - setup one TRB from one request
775 * @dep: endpoint for which this request is prepared
776 * @req: dwc3_request pointer
777 */
778 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
779 struct dwc3_request *req, dma_addr_t dma,
780 unsigned length, unsigned last, unsigned chain, unsigned node)
781 {
782 struct dwc3_trb *trb;
783
784 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
785 dep->name, req, (unsigned long long) dma,
786 length, last ? " last" : "",
787 chain ? " chain" : "");
788
789
790 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
791
792 if (!req->trb) {
793 dwc3_gadget_move_request_queued(req);
794 req->trb = trb;
795 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
796 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
797 }
798
799 dep->free_slot++;
800 /* Skip the LINK-TRB on ISOC */
801 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
802 usb_endpoint_xfer_isoc(dep->endpoint.desc))
803 dep->free_slot++;
804
805 trb->size = DWC3_TRB_SIZE_LENGTH(length);
806 trb->bpl = lower_32_bits(dma);
807 trb->bph = upper_32_bits(dma);
808
809 switch (usb_endpoint_type(dep->endpoint.desc)) {
810 case USB_ENDPOINT_XFER_CONTROL:
811 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
812 break;
813
814 case USB_ENDPOINT_XFER_ISOC:
815 if (!node)
816 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
817 else
818 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
819 break;
820
821 case USB_ENDPOINT_XFER_BULK:
822 case USB_ENDPOINT_XFER_INT:
823 trb->ctrl = DWC3_TRBCTL_NORMAL;
824 break;
825 default:
826 /*
827 * This is only possible with faulty memory because we
828 * checked it already :)
829 */
830 BUG();
831 }
832
833 if (!req->request.no_interrupt && !chain)
834 trb->ctrl |= DWC3_TRB_CTRL_IOC;
835
836 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
837 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
838 trb->ctrl |= DWC3_TRB_CTRL_CSP;
839 } else if (last) {
840 trb->ctrl |= DWC3_TRB_CTRL_LST;
841 }
842
843 if (chain)
844 trb->ctrl |= DWC3_TRB_CTRL_CHN;
845
846 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
847 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
848
849 trb->ctrl |= DWC3_TRB_CTRL_HWO;
850
851 trace_dwc3_prepare_trb(dep, trb);
852 }
853
854 /*
855 * dwc3_prepare_trbs - setup TRBs from requests
856 * @dep: endpoint for which requests are being prepared
857 * @starting: true if the endpoint is idle and no requests are queued.
858 *
859 * The function goes through the requests list and sets up TRBs for the
860 * transfers. The function returns once there are no more TRBs available or
861 * it runs out of requests.
862 */
863 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
864 {
865 struct dwc3_request *req, *n;
866 u32 trbs_left;
867 u32 max;
868 unsigned int last_one = 0;
869
870 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
871
872 /* the first request must not be queued */
873 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
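/*
 * Example of the modular arithmetic above (DWC3_TRB_NUM is a power of
 * two, e.g. 32): busy_slot = 5 and free_slot = 7 give
 * (5 - 7) & 0x1f = 30 free TRBs; equal indices give 0, which is
 * disambiguated by 'starting' further down.
 */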
874
875 /* Can't wrap around on a non-isoc EP since there's no link TRB */
876 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
877 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
878 if (trbs_left > max)
879 trbs_left = max;
880 }
881
882 /*
883 * If busy_slot and free_slot are equal then it is either full or empty. If we are
884 * starting to process requests then we are empty. Otherwise we are
885 * full and don't do anything
886 */
887 if (!trbs_left) {
888 if (!starting)
889 return;
890 trbs_left = DWC3_TRB_NUM;
891 /*
892 * In case we start from scratch, we queue the ISOC requests
893 * starting from slot 1. This is done because we use ring
894 * buffer and have no LST bit to stop us. Instead, we place
895 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
896 * after the first request so we start at slot 1 and have
897 * 7 requests proceed before we hit the first IOC.
898 * Other transfer types don't use the ring buffer and are
899 * processed from the first TRB until the last one. Since we
900 * don't wrap around we have to start at the beginning.
901 */
902 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
903 dep->busy_slot = 1;
904 dep->free_slot = 1;
905 } else {
906 dep->busy_slot = 0;
907 dep->free_slot = 0;
908 }
909 }
910
911 /* The last TRB is a link TRB, not used for xfer */
912 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
913 return;
914
915 list_for_each_entry_safe(req, n, &dep->request_list, list) {
916 unsigned length;
917 dma_addr_t dma;
918 last_one = false;
919
920 if (req->request.num_mapped_sgs > 0) {
921 struct usb_request *request = &req->request;
922 struct scatterlist *sg = request->sg;
923 struct scatterlist *s;
924 int i;
925
926 for_each_sg(sg, s, request->num_mapped_sgs, i) {
927 unsigned chain = true;
928
929 length = sg_dma_len(s);
930 dma = sg_dma_address(s);
931
932 if (i == (request->num_mapped_sgs - 1) ||
933 sg_is_last(s)) {
934 if (list_empty(&dep->request_list))
935 last_one = true;
936 chain = false;
937 }
938
939 trbs_left--;
940 if (!trbs_left)
941 last_one = true;
942
943 if (last_one)
944 chain = false;
945
946 dwc3_prepare_one_trb(dep, req, dma, length,
947 last_one, chain, i);
948
949 if (last_one)
950 break;
951 }
952
953 if (last_one)
954 break;
955 } else {
956 dma = req->request.dma;
957 length = req->request.length;
958 trbs_left--;
959
960 if (!trbs_left)
961 last_one = 1;
962
963 /* Is this the last request? */
964 if (list_is_last(&req->list, &dep->request_list))
965 last_one = 1;
966
967 dwc3_prepare_one_trb(dep, req, dma, length,
968 last_one, false, 0);
969
970 if (last_one)
971 break;
972 }
973 }
974 }
975
976 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
977 int start_new)
978 {
979 struct dwc3_gadget_ep_cmd_params params;
980 struct dwc3_request *req;
981 struct dwc3 *dwc = dep->dwc;
982 int ret;
983 u32 cmd;
984
985 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
986 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
987 return -EBUSY;
988 }
989
990 /*
991 * If we are getting here after a short-out-packet we don't enqueue any
992 * new requests as we try to set the IOC bit only on the last request.
993 */
994 if (start_new) {
995 if (list_empty(&dep->req_queued))
996 dwc3_prepare_trbs(dep, start_new);
997
998 /* req points to the first request which will be sent */
999 req = next_request(&dep->req_queued);
1000 } else {
1001 dwc3_prepare_trbs(dep, start_new);
1002
1003 /*
1004 * req points to the first request where HWO changed from 0 to 1
1005 */
1006 req = next_request(&dep->req_queued);
1007 }
1008 if (!req) {
1009 dep->flags |= DWC3_EP_PENDING_REQUEST;
1010 return 0;
1011 }
1012
1013 memset(&params, 0, sizeof(params));
1014
1015 if (start_new) {
1016 params.param0 = upper_32_bits(req->trb_dma);
1017 params.param1 = lower_32_bits(req->trb_dma);
1018 cmd = DWC3_DEPCMD_STARTTRANSFER;
1019 } else {
1020 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1021 }
1022
1023 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
1024 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1025 if (ret < 0) {
1026 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
1027
1028 /*
1029 * FIXME we need to iterate over the list of requests
1030 * here and stop, unmap, free and del each of the linked
1031 * requests instead of what we do now.
1032 */
1033 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1034 req->direction);
1035 list_del(&req->list);
1036 return ret;
1037 }
1038
1039 dep->flags |= DWC3_EP_BUSY;
1040
1041 if (start_new) {
1042 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1043 dep->number);
1044 WARN_ON_ONCE(!dep->resource_index);
1045 }
1046
1047 return 0;
1048 }
1049
1050 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1051 struct dwc3_ep *dep, u32 cur_uf)
1052 {
1053 u32 uf;
1054
1055 if (list_empty(&dep->request_list)) {
1056 dwc3_trace(trace_dwc3_gadget,
1057 "ISOC ep %s ran out of requests",
1058 dep->name);
1059 dep->flags |= DWC3_EP_PENDING_REQUEST;
1060 return;
1061 }
1062
1063 /* 4 micro frames in the future */
1064 uf = cur_uf + dep->interval * 4;
1065
1066 __dwc3_gadget_kick_transfer(dep, uf, 1);
1067 }
1068
1069 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1070 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1071 {
1072 u32 cur_uf, mask;
1073
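/*
 * event->parameters carries the microframe number in which the
 * XferNotReady event was received; masking it down to a multiple of
 * dep->interval (e.g. 0x1234 & ~(8 - 1) = 0x1230 for an interval of 8)
 * gives the reference frame passed to __dwc3_gadget_start_isoc().
 */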
1074 mask = ~(dep->interval - 1);
1075 cur_uf = event->parameters & mask;
1076
1077 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1078 }
1079
1080 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1081 {
1082 struct dwc3 *dwc = dep->dwc;
1083 int ret;
1084
1085 req->request.actual = 0;
1086 req->request.status = -EINPROGRESS;
1087 req->direction = dep->direction;
1088 req->epnum = dep->number;
1089
1090 trace_dwc3_ep_queue(req);
1091
1092 /*
1093 * We only add to our list of requests now and
1094 * start consuming the list once we get XferNotReady
1095 * IRQ.
1096 *
1097 * That way, we avoid doing anything that we don't need
1098 * to do now and defer it until the point we receive a
1099 * particular token from the Host side.
1100 *
1101 * This will also avoid Host cancelling URBs due to too
1102 * many NAKs.
1103 */
1104 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1105 dep->direction);
1106 if (ret)
1107 return ret;
1108
1109 list_add_tail(&req->list, &dep->request_list);
1110
1111 /*
1112 * If there are no pending requests and the endpoint isn't already
1113 * busy, we will just start the request straight away.
1114 *
1115 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1116 * little bit faster.
1117 */
1118 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1119 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1120 !(dep->flags & DWC3_EP_BUSY)) {
1121 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1122 goto out;
1123 }
1124
1125 /*
1126 * There are a few special cases:
1127 *
1128 * 1. XferNotReady with empty list of requests. We need to kick the
1129 * transfer here in that situation, otherwise we will be NAKing
1130 * forever. If we get XferNotReady before gadget driver has a
1131 * chance to queue a request, we will ACK the IRQ but won't be
1132 * able to receive the data until the next request is queued.
1133 * The following code is handling exactly that.
1134 *
1135 */
1136 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1137 /*
1138 * If xfernotready is already elapsed and it is a case
1139 * of isoc transfer, then issue END TRANSFER, so that
1140 * you can receive xfernotready again and can have
1141 * notion of current microframe.
1142 */
1143 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1144 if (list_empty(&dep->req_queued)) {
1145 dwc3_stop_active_transfer(dwc, dep->number, true);
1146 dep->flags = DWC3_EP_ENABLED;
1147 }
1148 return 0;
1149 }
1150
1151 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1152 if (!ret)
1153 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1154
1155 goto out;
1156 }
1157
1158 /*
1159 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1160 * kick the transfer here after queuing a request, otherwise the
1161 * core may not see the modified TRB(s).
1162 */
1163 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1164 (dep->flags & DWC3_EP_BUSY) &&
1165 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1166 WARN_ON_ONCE(!dep->resource_index);
1167 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1168 false);
1169 goto out;
1170 }
1171
1172 /*
1173 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1174 * right away, otherwise host will not know we have streams to be
1175 * handled.
1176 */
1177 if (dep->stream_capable)
1178 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1179
1180 out:
1181 if (ret && ret != -EBUSY)
1182 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1183 dep->name);
1184 if (ret == -EBUSY)
1185 ret = 0;
1186
1187 return ret;
1188 }
1189
1190 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1191 gfp_t gfp_flags)
1192 {
1193 struct dwc3_request *req = to_dwc3_request(request);
1194 struct dwc3_ep *dep = to_dwc3_ep(ep);
1195 struct dwc3 *dwc = dep->dwc;
1196
1197 unsigned long flags;
1198
1199 int ret;
1200
1201 spin_lock_irqsave(&dwc->lock, flags);
1202 if (!dep->endpoint.desc) {
1203 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1204 request, ep->name);
1205 ret = -ESHUTDOWN;
1206 goto out;
1207 }
1208
1209 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1210 request, req->dep->name)) {
1211 ret = -EINVAL;
1212 goto out;
1213 }
1214
1215 ret = __dwc3_gadget_ep_queue(dep, req);
1216
1217 out:
1218 spin_unlock_irqrestore(&dwc->lock, flags);
1219
1220 return ret;
1221 }
1222
1223 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1224 struct usb_request *request)
1225 {
1226 struct dwc3_request *req = to_dwc3_request(request);
1227 struct dwc3_request *r = NULL;
1228
1229 struct dwc3_ep *dep = to_dwc3_ep(ep);
1230 struct dwc3 *dwc = dep->dwc;
1231
1232 unsigned long flags;
1233 int ret = 0;
1234
1235 trace_dwc3_ep_dequeue(req);
1236
1237 spin_lock_irqsave(&dwc->lock, flags);
1238
1239 list_for_each_entry(r, &dep->request_list, list) {
1240 if (r == req)
1241 break;
1242 }
1243
1244 if (r != req) {
1245 list_for_each_entry(r, &dep->req_queued, list) {
1246 if (r == req)
1247 break;
1248 }
1249 if (r == req) {
1250 /* wait until it is processed */
1251 dwc3_stop_active_transfer(dwc, dep->number, true);
1252 goto out1;
1253 }
1254 dev_err(dwc->dev, "request %p was not queued to %s\n",
1255 request, ep->name);
1256 ret = -EINVAL;
1257 goto out0;
1258 }
1259
1260 out1:
1261 /* giveback the request */
1262 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1263
1264 out0:
1265 spin_unlock_irqrestore(&dwc->lock, flags);
1266
1267 return ret;
1268 }
1269
1270 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1271 {
1272 struct dwc3_gadget_ep_cmd_params params;
1273 struct dwc3 *dwc = dep->dwc;
1274 int ret;
1275
1276 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1277 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1278 return -EINVAL;
1279 }
1280
1281 memset(&params, 0x00, sizeof(params));
1282
1283 if (value) {
1284 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1285 (!list_empty(&dep->req_queued) ||
1286 !list_empty(&dep->request_list)))) {
1287 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1288 dep->name);
1289 return -EAGAIN;
1290 }
1291
1292 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1293 DWC3_DEPCMD_SETSTALL, &params);
1294 if (ret)
1295 dev_err(dwc->dev, "failed to set STALL on %s\n",
1296 dep->name);
1297 else
1298 dep->flags |= DWC3_EP_STALL;
1299 } else {
1300 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1301 DWC3_DEPCMD_CLEARSTALL, &params);
1302 if (ret)
1303 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1304 dep->name);
1305 else
1306 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1307 }
1308
1309 return ret;
1310 }
1311
1312 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1313 {
1314 struct dwc3_ep *dep = to_dwc3_ep(ep);
1315 struct dwc3 *dwc = dep->dwc;
1316
1317 unsigned long flags;
1318
1319 int ret;
1320
1321 spin_lock_irqsave(&dwc->lock, flags);
1322 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1323 spin_unlock_irqrestore(&dwc->lock, flags);
1324
1325 return ret;
1326 }
1327
1328 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1329 {
1330 struct dwc3_ep *dep = to_dwc3_ep(ep);
1331 struct dwc3 *dwc = dep->dwc;
1332 unsigned long flags;
1333 int ret;
1334
1335 spin_lock_irqsave(&dwc->lock, flags);
1336 dep->flags |= DWC3_EP_WEDGE;
1337
1338 if (dep->number == 0 || dep->number == 1)
1339 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1340 else
1341 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1342 spin_unlock_irqrestore(&dwc->lock, flags);
1343
1344 return ret;
1345 }
1346
1347 /* -------------------------------------------------------------------------- */
1348
1349 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1350 .bLength = USB_DT_ENDPOINT_SIZE,
1351 .bDescriptorType = USB_DT_ENDPOINT,
1352 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1353 };
1354
1355 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1356 .enable = dwc3_gadget_ep0_enable,
1357 .disable = dwc3_gadget_ep0_disable,
1358 .alloc_request = dwc3_gadget_ep_alloc_request,
1359 .free_request = dwc3_gadget_ep_free_request,
1360 .queue = dwc3_gadget_ep0_queue,
1361 .dequeue = dwc3_gadget_ep_dequeue,
1362 .set_halt = dwc3_gadget_ep0_set_halt,
1363 .set_wedge = dwc3_gadget_ep_set_wedge,
1364 };
1365
1366 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1367 .enable = dwc3_gadget_ep_enable,
1368 .disable = dwc3_gadget_ep_disable,
1369 .alloc_request = dwc3_gadget_ep_alloc_request,
1370 .free_request = dwc3_gadget_ep_free_request,
1371 .queue = dwc3_gadget_ep_queue,
1372 .dequeue = dwc3_gadget_ep_dequeue,
1373 .set_halt = dwc3_gadget_ep_set_halt,
1374 .set_wedge = dwc3_gadget_ep_set_wedge,
1375 };
1376
1377 /* -------------------------------------------------------------------------- */
1378
1379 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1380 {
1381 struct dwc3 *dwc = gadget_to_dwc(g);
1382 u32 reg;
1383
1384 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1385 return DWC3_DSTS_SOFFN(reg);
1386 }
1387
1388 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1389 {
1390 struct dwc3 *dwc = gadget_to_dwc(g);
1391
1392 unsigned long timeout;
1393 unsigned long flags;
1394
1395 u32 reg;
1396
1397 int ret = 0;
1398
1399 u8 link_state;
1400 u8 speed;
1401
1402 spin_lock_irqsave(&dwc->lock, flags);
1403
1404 /*
1405 * According to the Databook, a Remote Wakeup request should
1406 * be issued only when the device is in early suspend state.
1407 *
1408 * We can check that via USB Link State bits in DSTS register.
1409 */
1410 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1411
1412 speed = reg & DWC3_DSTS_CONNECTSPD;
1413 if (speed == DWC3_DSTS_SUPERSPEED) {
1414 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1415 ret = -EINVAL;
1416 goto out;
1417 }
1418
1419 link_state = DWC3_DSTS_USBLNKST(reg);
1420
1421 switch (link_state) {
1422 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1423 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1424 break;
1425 default:
1426 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1427 link_state);
1428 ret = -EINVAL;
1429 goto out;
1430 }
1431
1432 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1433 if (ret < 0) {
1434 dev_err(dwc->dev, "failed to put link in Recovery\n");
1435 goto out;
1436 }
1437
1438 /* Recent versions do this automatically */
1439 if (dwc->revision < DWC3_REVISION_194A) {
1440 /* write zeroes to Link Change Request */
1441 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1442 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1443 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1444 }
1445
1446 /* poll until Link State changes to ON */
1447 timeout = jiffies + msecs_to_jiffies(100);
1448
1449 while (!time_after(jiffies, timeout)) {
1450 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1451
1452 /* in HS, means ON */
1453 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1454 break;
1455 }
1456
1457 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1458 dev_err(dwc->dev, "failed to send remote wakeup\n");
1459 ret = -EINVAL;
1460 }
1461
1462 out:
1463 spin_unlock_irqrestore(&dwc->lock, flags);
1464
1465 return ret;
1466 }
1467
1468 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1469 int is_selfpowered)
1470 {
1471 struct dwc3 *dwc = gadget_to_dwc(g);
1472 unsigned long flags;
1473
1474 spin_lock_irqsave(&dwc->lock, flags);
1475 g->is_selfpowered = !!is_selfpowered;
1476 spin_unlock_irqrestore(&dwc->lock, flags);
1477
1478 return 0;
1479 }
1480
1481 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1482 {
1483 u32 reg;
1484 u32 timeout = 500;
1485
1486 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1487 if (is_on) {
1488 if (dwc->revision <= DWC3_REVISION_187A) {
1489 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1490 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1491 }
1492
1493 if (dwc->revision >= DWC3_REVISION_194A)
1494 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1495 reg |= DWC3_DCTL_RUN_STOP;
1496
1497 if (dwc->has_hibernation)
1498 reg |= DWC3_DCTL_KEEP_CONNECT;
1499
1500 dwc->pullups_connected = true;
1501 } else {
1502 reg &= ~DWC3_DCTL_RUN_STOP;
1503
1504 if (dwc->has_hibernation && !suspend)
1505 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1506
1507 dwc->pullups_connected = false;
1508 }
1509
1510 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1511
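/*
 * DSTS.DEVCTRLHLT reads 1 once the controller has actually halted and
 * 0 while it is running, so the loop below simply waits (up to ~500us)
 * for the bit to reflect the requested Run/Stop state.
 */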
1512 do {
1513 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1514 if (is_on) {
1515 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1516 break;
1517 } else {
1518 if (reg & DWC3_DSTS_DEVCTRLHLT)
1519 break;
1520 }
1521 timeout--;
1522 if (!timeout)
1523 return -ETIMEDOUT;
1524 udelay(1);
1525 } while (1);
1526
1527 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1528 dwc->gadget_driver
1529 ? dwc->gadget_driver->function : "no-function",
1530 is_on ? "connect" : "disconnect");
1531
1532 return 0;
1533 }
1534
1535 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1536 {
1537 struct dwc3 *dwc = gadget_to_dwc(g);
1538 unsigned long flags;
1539 int ret;
1540
1541 is_on = !!is_on;
1542
1543 spin_lock_irqsave(&dwc->lock, flags);
1544 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1545 spin_unlock_irqrestore(&dwc->lock, flags);
1546
1547 return ret;
1548 }
1549
1550 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1551 {
1552 u32 reg;
1553
1554 /* Enable all but Start and End of Frame IRQs */
1555 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1556 DWC3_DEVTEN_EVNTOVERFLOWEN |
1557 DWC3_DEVTEN_CMDCMPLTEN |
1558 DWC3_DEVTEN_ERRTICERREN |
1559 DWC3_DEVTEN_WKUPEVTEN |
1560 DWC3_DEVTEN_ULSTCNGEN |
1561 DWC3_DEVTEN_CONNECTDONEEN |
1562 DWC3_DEVTEN_USBRSTEN |
1563 DWC3_DEVTEN_DISCONNEVTEN);
1564
1565 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1566 }
1567
1568 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1569 {
1570 /* mask all interrupts */
1571 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1572 }
1573
1574 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1575 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1576
1577 static int dwc3_gadget_start(struct usb_gadget *g,
1578 struct usb_gadget_driver *driver)
1579 {
1580 struct dwc3 *dwc = gadget_to_dwc(g);
1581 struct dwc3_ep *dep;
1582 unsigned long flags;
1583 int ret = 0;
1584 int irq;
1585 u32 reg;
1586
1587 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1588 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1589 IRQF_SHARED, "dwc3", dwc);
1590 if (ret) {
1591 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1592 irq, ret);
1593 goto err0;
1594 }
1595
1596 spin_lock_irqsave(&dwc->lock, flags);
1597
1598 if (dwc->gadget_driver) {
1599 dev_err(dwc->dev, "%s is already bound to %s\n",
1600 dwc->gadget.name,
1601 dwc->gadget_driver->driver.name);
1602 ret = -EBUSY;
1603 goto err1;
1604 }
1605
1606 dwc->gadget_driver = driver;
1607
1608 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1609 reg &= ~(DWC3_DCFG_SPEED_MASK);
1610
1611 /**
1612 * WORKAROUND: DWC3 revision < 2.20a have an issue
1613 * which would cause metastability state on Run/Stop
1614 * bit if we try to force the IP to USB2-only mode.
1615 *
1616 * Because of that, we cannot configure the IP to any
1617 * speed other than the SuperSpeed
1618 *
1619 * Refers to:
1620 *
1621 * STAR#9000525659: Clock Domain Crossing on DCTL in
1622 * USB 2.0 Mode
1623 */
1624 if (dwc->revision < DWC3_REVISION_220A) {
1625 reg |= DWC3_DCFG_SUPERSPEED;
1626 } else {
1627 switch (dwc->maximum_speed) {
1628 case USB_SPEED_LOW:
1629 reg |= DWC3_DSTS_LOWSPEED;
1630 break;
1631 case USB_SPEED_FULL:
1632 reg |= DWC3_DSTS_FULLSPEED1;
1633 break;
1634 case USB_SPEED_HIGH:
1635 reg |= DWC3_DSTS_HIGHSPEED;
1636 break;
1637 case USB_SPEED_SUPER: /* FALLTHROUGH */
1638 case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
1639 default:
1640 reg |= DWC3_DSTS_SUPERSPEED;
1641 }
1642 }
1643 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1644
1645 /* Start with SuperSpeed Default */
1646 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1647
1648 dep = dwc->eps[0];
1649 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1650 false);
1651 if (ret) {
1652 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1653 goto err2;
1654 }
1655
1656 dep = dwc->eps[1];
1657 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1658 false);
1659 if (ret) {
1660 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1661 goto err3;
1662 }
1663
1664 /* begin to receive SETUP packets */
1665 dwc->ep0state = EP0_SETUP_PHASE;
1666 dwc3_ep0_out_start(dwc);
1667
1668 dwc3_gadget_enable_irq(dwc);
1669
1670 spin_unlock_irqrestore(&dwc->lock, flags);
1671
1672 return 0;
1673
1674 err3:
1675 __dwc3_gadget_ep_disable(dwc->eps[0]);
1676
1677 err2:
1678 dwc->gadget_driver = NULL;
1679
1680 err1:
1681 spin_unlock_irqrestore(&dwc->lock, flags);
1682
1683 free_irq(irq, dwc);
1684
1685 err0:
1686 return ret;
1687 }
1688
1689 static int dwc3_gadget_stop(struct usb_gadget *g)
1690 {
1691 struct dwc3 *dwc = gadget_to_dwc(g);
1692 unsigned long flags;
1693 int irq;
1694
1695 spin_lock_irqsave(&dwc->lock, flags);
1696
1697 dwc3_gadget_disable_irq(dwc);
1698 __dwc3_gadget_ep_disable(dwc->eps[0]);
1699 __dwc3_gadget_ep_disable(dwc->eps[1]);
1700
1701 dwc->gadget_driver = NULL;
1702
1703 spin_unlock_irqrestore(&dwc->lock, flags);
1704
1705 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1706 free_irq(irq, dwc);
1707
1708 return 0;
1709 }
1710
1711 static const struct usb_gadget_ops dwc3_gadget_ops = {
1712 .get_frame = dwc3_gadget_get_frame,
1713 .wakeup = dwc3_gadget_wakeup,
1714 .set_selfpowered = dwc3_gadget_set_selfpowered,
1715 .pullup = dwc3_gadget_pullup,
1716 .udc_start = dwc3_gadget_start,
1717 .udc_stop = dwc3_gadget_stop,
1718 };
1719
1720 /* -------------------------------------------------------------------------- */
1721
1722 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1723 u8 num, u32 direction)
1724 {
1725 struct dwc3_ep *dep;
1726 u8 i;
1727
1728 for (i = 0; i < num; i++) {
1729 u8 epnum = (i << 1) | (!!direction);
1730
1731 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1732 if (!dep)
1733 return -ENOMEM;
1734
1735 dep->dwc = dwc;
1736 dep->number = epnum;
1737 dep->direction = !!direction;
1738 dwc->eps[epnum] = dep;
1739
1740 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1741 (epnum & 1) ? "in" : "out");
1742
1743 dep->endpoint.name = dep->name;
1744
1745 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1746
1747 if (epnum == 0 || epnum == 1) {
1748 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1749 dep->endpoint.maxburst = 1;
1750 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1751 if (!epnum)
1752 dwc->gadget.ep0 = &dep->endpoint;
1753 } else {
1754 int ret;
1755
1756 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1757 dep->endpoint.max_streams = 15;
1758 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1759 list_add_tail(&dep->endpoint.ep_list,
1760 &dwc->gadget.ep_list);
1761
1762 ret = dwc3_alloc_trb_pool(dep);
1763 if (ret)
1764 return ret;
1765 }
1766
1767 if (epnum == 0 || epnum == 1) {
1768 dep->endpoint.caps.type_control = true;
1769 } else {
1770 dep->endpoint.caps.type_iso = true;
1771 dep->endpoint.caps.type_bulk = true;
1772 dep->endpoint.caps.type_int = true;
1773 }
1774
1775 dep->endpoint.caps.dir_in = !!direction;
1776 dep->endpoint.caps.dir_out = !direction;
1777
1778 INIT_LIST_HEAD(&dep->request_list);
1779 INIT_LIST_HEAD(&dep->req_queued);
1780 }
1781
1782 return 0;
1783 }
1784
1785 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1786 {
1787 int ret;
1788
1789 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1790
1791 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1792 if (ret < 0) {
1793 dwc3_trace(trace_dwc3_gadget,
1794 "failed to allocate OUT endpoints");
1795 return ret;
1796 }
1797
1798 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1799 if (ret < 0) {
1800 dwc3_trace(trace_dwc3_gadget,
1801 "failed to allocate IN endpoints");
1802 return ret;
1803 }
1804
1805 return 0;
1806 }
1807
1808 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1809 {
1810 struct dwc3_ep *dep;
1811 u8 epnum;
1812
1813 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1814 dep = dwc->eps[epnum];
1815 if (!dep)
1816 continue;
1817 /*
1818 * Physical endpoints 0 and 1 are special; they form the
1819 * bi-directional USB endpoint 0.
1820 *
1821 * For those two physical endpoints, we don't allocate a TRB
1822 * pool nor do we add them to the endpoints list. Due to that, we
1823 * shouldn't do these two operations otherwise we would end up
1824 * with all sorts of bugs when removing dwc3.ko.
1825 */
1826 if (epnum != 0 && epnum != 1) {
1827 dwc3_free_trb_pool(dep);
1828 list_del(&dep->endpoint.ep_list);
1829 }
1830
1831 kfree(dep);
1832 }
1833 }
1834
1835 /* -------------------------------------------------------------------------- */
1836
1837 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1838 struct dwc3_request *req, struct dwc3_trb *trb,
1839 const struct dwc3_event_depevt *event, int status)
1840 {
1841 unsigned int count;
1842 unsigned int s_pkt = 0;
1843 unsigned int trb_status;
1844
1845 trace_dwc3_complete_trb(dep, trb);
1846
1847 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1848 /*
1849 * We continue despite the error. There is not much we
1850 * can do. If we don't clean it up we loop forever. If
1851 * we skip the TRB then it gets overwritten after a
1852 * while since we use them in a ring buffer. A BUG()
1853 * would help. Let's hope that if this occurs, someone
1854 * fixes the root cause instead of looking away :)
1855 */
1856 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1857 dep->name, trb);
1858 count = trb->size & DWC3_TRB_SIZE_MASK;
1859
1860 if (dep->direction) {
1861 if (count) {
1862 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1863 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1864 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1865 dep->name);
1866 /*
1867 * If missed isoc occurred and there is
1868 * no request queued then issue END
1869 * TRANSFER, so that core generates
1870 * next xfernotready and we will issue
1871 * a fresh START TRANSFER.
1872 * If there are still queued request
1873 * then wait, do not issue either END
1874 * or UPDATE TRANSFER, just attach next
1875 * request in request_list during
1876 * giveback. If any future queued request
1877 * is successfully transferred then we
1878 * will issue UPDATE TRANSFER for all
1879 * request in the request_list.
1880 */
1881 dep->flags |= DWC3_EP_MISSED_ISOC;
1882 } else {
1883 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1884 dep->name);
1885 status = -ECONNRESET;
1886 }
1887 } else {
1888 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1889 }
1890 } else {
1891 if (count && (event->status & DEPEVT_STATUS_SHORT))
1892 s_pkt = 1;
1893 }
1894
1895 /*
1896 * We assume here we will always receive the entire data block
1897 * which we should receive. Meaning, if we program RX to
1898 * receive 4K but we receive only 2K, we assume that's all we
1899 * should receive and we simply bounce the request back to the
1900 * gadget driver for further processing.
1901 */
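/*
 * 'count' holds the bytes left over in the TRB, so e.g. a 512-byte
 * request that completes with count = 12 reports actual += 500.
 */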
1902 req->request.actual += req->request.length - count;
1903 if (s_pkt)
1904 return 1;
1905 if ((event->status & DEPEVT_STATUS_LST) &&
1906 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1907 DWC3_TRB_CTRL_HWO)))
1908 return 1;
1909 if ((event->status & DEPEVT_STATUS_IOC) &&
1910 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1911 return 1;
1912 return 0;
1913 }
1914
1915 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1916 const struct dwc3_event_depevt *event, int status)
1917 {
1918 struct dwc3_request *req;
1919 struct dwc3_trb *trb;
1920 unsigned int slot;
1921 unsigned int i;
1922 int ret;
1923
1924 do {
1925 req = next_request(&dep->req_queued);
1926 if (!req) {
1927 WARN_ON_ONCE(1);
1928 return 1;
1929 }
1930 i = 0;
1931 do {
1932 slot = req->start_slot + i;
1933 if ((slot == DWC3_TRB_NUM - 1) &&
1934 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1935 slot++;
1936 slot %= DWC3_TRB_NUM;
1937 trb = &dep->trb_pool[slot];
1938
1939 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1940 event, status);
1941 if (ret)
1942 break;
1943 } while (++i < req->request.num_mapped_sgs);
1944
1945 dwc3_gadget_giveback(dep, req, status);
1946
1947 if (ret)
1948 break;
1949 } while (1);
1950
1951 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1952 list_empty(&dep->req_queued)) {
1953 if (list_empty(&dep->request_list)) {
1954 /*
1955 * If there is no entry in request list then do
1956 * not issue END TRANSFER now. Just set PENDING
1957 * flag, so that END TRANSFER is issued when an
1958 * entry is added into request list.
1959 */
1960 dep->flags = DWC3_EP_PENDING_REQUEST;
1961 } else {
1962 dwc3_stop_active_transfer(dwc, dep->number, true);
1963 dep->flags = DWC3_EP_ENABLED;
1964 }
1965 return 1;
1966 }
1967
1968 return 1;
1969 }
1970
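/**
 * dwc3_endpoint_transfer_complete - handle XferComplete and XferInProgress
 * @dwc: pointer to our controller context structure
 * @dep: the endpoint which generated the event
 * @event: the endpoint event being serviced
 *
 * Reclaims completed requests and clears the BUSY flag when appropriate.
 * On revisions older than 1.83a it also restores the U1/U2 enable bits
 * saved by the link-state workaround once no enabled endpoint has requests
 * in flight, and finally kicks the next transfer on non-isoc endpoints.
 */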
1971 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1972 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1973 {
1974 unsigned status = 0;
1975 int clean_busy;
1976 u32 is_xfer_complete;
1977
1978 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
1979
1980 if (event->status & DEPEVT_STATUS_BUSERR)
1981 status = -ECONNRESET;
1982
1983 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1984 if (clean_busy && (is_xfer_complete ||
1985 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
1986 dep->flags &= ~DWC3_EP_BUSY;
1987
1988 /*
1989 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1990 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1991 */
1992 if (dwc->revision < DWC3_REVISION_183A) {
1993 u32 reg;
1994 int i;
1995
1996 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1997 dep = dwc->eps[i];
1998
1999 if (!(dep->flags & DWC3_EP_ENABLED))
2000 continue;
2001
2002 if (!list_empty(&dep->req_queued))
2003 return;
2004 }
2005
2006 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2007 reg |= dwc->u1u2;
2008 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2009
2010 dwc->u1u2 = 0;
2011 }
2012
2013 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2014 int ret;
2015
2016 ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
2017 if (!ret || ret == -EBUSY)
2018 return;
2019 }
2020 }
2021
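/**
 * dwc3_endpoint_interrupt - demultiplex endpoint-specific events
 * @dwc: pointer to our controller context structure
 * @event: the endpoint event to dispatch
 *
 * Events for physical endpoints 0 and 1 are routed to the ep0 handler;
 * everything else is dispatched here based on the endpoint event type.
 */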
2022 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2023 const struct dwc3_event_depevt *event)
2024 {
2025 struct dwc3_ep *dep;
2026 u8 epnum = event->endpoint_number;
2027
2028 dep = dwc->eps[epnum];
2029
2030 if (!(dep->flags & DWC3_EP_ENABLED))
2031 return;
2032
2033 if (epnum == 0 || epnum == 1) {
2034 dwc3_ep0_interrupt(dwc, event);
2035 return;
2036 }
2037
2038 switch (event->endpoint_event) {
2039 case DWC3_DEPEVT_XFERCOMPLETE:
2040 dep->resource_index = 0;
2041
2042 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2043 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
2044 dep->name);
2045 return;
2046 }
2047
2048 dwc3_endpoint_transfer_complete(dwc, dep, event);
2049 break;
2050 case DWC3_DEPEVT_XFERINPROGRESS:
2051 dwc3_endpoint_transfer_complete(dwc, dep, event);
2052 break;
2053 case DWC3_DEPEVT_XFERNOTREADY:
2054 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2055 dwc3_gadget_start_isoc(dwc, dep, event);
2056 } else {
2057 int active;
2058 int ret;
2059
2060 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2061
2062 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2063 dep->name, active ? "Transfer Active"
2064 : "Transfer Not Active");
2065
2066 ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
2067 if (!ret || ret == -EBUSY)
2068 return;
2069
2070 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
2071 dep->name);
2072 }
2073
2074 break;
2075 case DWC3_DEPEVT_STREAMEVT:
2076 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2077 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2078 dep->name);
2079 return;
2080 }
2081
2082 switch (event->status) {
2083 case DEPEVT_STREAMEVT_FOUND:
2084 dwc3_trace(trace_dwc3_gadget,
2085 "Stream %d found and started",
2086 event->parameters);
2087
2088 break;
2089 case DEPEVT_STREAMEVT_NOTFOUND:
2090 /* FALLTHROUGH */
2091 default:
2092 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2093 }
2094 break;
2095 case DWC3_DEPEVT_RXTXFIFOEVT:
2096 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2097 break;
2098 case DWC3_DEPEVT_EPCMDCMPLT:
2099 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2100 break;
2101 }
2102 }
2103
2104 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2105 {
2106 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2107 spin_unlock(&dwc->lock);
2108 dwc->gadget_driver->disconnect(&dwc->gadget);
2109 spin_lock(&dwc->lock);
2110 }
2111 }
2112
2113 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2114 {
2115 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2116 spin_unlock(&dwc->lock);
2117 dwc->gadget_driver->suspend(&dwc->gadget);
2118 spin_lock(&dwc->lock);
2119 }
2120 }
2121
2122 static void dwc3_resume_gadget(struct dwc3 *dwc)
2123 {
2124 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2125 spin_unlock(&dwc->lock);
2126 dwc->gadget_driver->resume(&dwc->gadget);
2127 spin_lock(&dwc->lock);
2128 }
2129 }
2130
2131 static void dwc3_reset_gadget(struct dwc3 *dwc)
2132 {
2133 if (!dwc->gadget_driver)
2134 return;
2135
2136 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2137 spin_unlock(&dwc->lock);
2138 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2139 spin_lock(&dwc->lock);
2140 }
2141 }
2142
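/**
 * dwc3_stop_active_transfer - issue End Transfer on an endpoint
 * @dwc: pointer to our controller context structure
 * @epnum: physical endpoint number
 * @force: when true, set DWC3_DEPCMD_HIPRI_FORCERM in the command
 *
 * Does nothing if the endpoint has no active transfer resource
 * (resource_index is zero). See the comment below for why the command
 * completion IRQ is not waited for.
 */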
2143 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2144 {
2145 struct dwc3_ep *dep;
2146 struct dwc3_gadget_ep_cmd_params params;
2147 u32 cmd;
2148 int ret;
2149
2150 dep = dwc->eps[epnum];
2151
2152 if (!dep->resource_index)
2153 return;
2154
2155 /*
2156 * NOTICE: We are violating what the Databook says about the
2157 * EndTransfer command. Ideally we would _always_ wait for the
2158 * EndTransfer Command Completion IRQ, but that's causing too
2159 * much trouble synchronizing between us and gadget driver.
2160 *
2161 * We have discussed this with the IP Provider and it was
2162 * suggested to giveback all requests here, but give HW some
2163 * extra time to synchronize with the interconnect. We're using
2164 * an arbitrary 100us delay for that.
2165 *
2166 * Note also that a similar handling was tested by Synopsys
2167 * (thanks a lot Paul) and nothing bad has come out of it.
2168 * In short, what we're doing is:
2169 *
2170 * - Issue EndTransfer WITH CMDIOC bit set
2171 * - Wait 100us
2172 */
2173
2174 cmd = DWC3_DEPCMD_ENDTRANSFER;
2175 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2176 cmd |= DWC3_DEPCMD_CMDIOC;
2177 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2178 memset(&params, 0, sizeof(params));
2179 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2180 WARN_ON_ONCE(ret);
2181 dep->resource_index = 0;
2182 dep->flags &= ~DWC3_EP_BUSY;
2183 udelay(100);
2184 }
2185
2186 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2187 {
2188 u32 epnum;
2189
2190 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2191 struct dwc3_ep *dep;
2192
2193 dep = dwc->eps[epnum];
2194 if (!dep)
2195 continue;
2196
2197 if (!(dep->flags & DWC3_EP_ENABLED))
2198 continue;
2199
2200 dwc3_remove_requests(dwc, dep);
2201 }
2202 }
2203
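/**
 * dwc3_clear_stall_all_ep - remove STALL from every halted endpoint
 * @dwc: pointer to our controller context structure
 *
 * Issues the Clear Stall endpoint command for each endpoint which has
 * DWC3_EP_STALL set, clearing the flag in the process.
 */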
2204 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2205 {
2206 u32 epnum;
2207
2208 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2209 struct dwc3_ep *dep;
2210 struct dwc3_gadget_ep_cmd_params params;
2211 int ret;
2212
2213 dep = dwc->eps[epnum];
2214 if (!dep)
2215 continue;
2216
2217 if (!(dep->flags & DWC3_EP_STALL))
2218 continue;
2219
2220 dep->flags &= ~DWC3_EP_STALL;
2221
2222 memset(&params, 0, sizeof(params));
2223 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2224 DWC3_DEPCMD_CLEARSTALL, &params);
2225 WARN_ON_ONCE(ret);
2226 }
2227 }
2228
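/**
 * dwc3_gadget_disconnect_interrupt - handle the Disconnect device event
 * @dwc: pointer to our controller context structure
 *
 * Disables U1/U2 initiation, notifies the gadget driver of the
 * disconnection and drops the gadget back to the NOTATTACHED state.
 */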
2229 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2230 {
2231 int reg;
2232
2233 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2234 reg &= ~DWC3_DCTL_INITU1ENA;
2235 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2236
2237 reg &= ~DWC3_DCTL_INITU2ENA;
2238 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2239
2240 dwc3_disconnect_gadget(dwc);
2241
2242 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2243 dwc->setup_packet_pending = false;
2244 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2245 }
2246
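/**
 * dwc3_gadget_reset_interrupt - handle the USB Reset device event
 * @dwc: pointer to our controller context structure
 *
 * Notifies the gadget driver, leaves any active test mode, stops all
 * active transfers, clears stalled endpoints and resets the device
 * address to zero.
 */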
2247 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2248 {
2249 u32 reg;
2250
2251 /*
2252 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2253 * would cause a missing Disconnect Event if there's a
2254 * pending Setup Packet in the FIFO.
2255 *
2256 * There's no suggested workaround on the official Bug
2257 * report, which states that "unless the driver/application
2258 * is doing any special handling of a disconnect event,
2259 * there is no functional issue".
2260 *
2261 * Unfortunately, it turns out that we _do_ some special
2262 * handling of a disconnect event, namely complete all
2263 * pending transfers, notify gadget driver of the
2264 * disconnection, and so on.
2265 *
2266 * Our suggested workaround is to follow the Disconnect
2267 * Event steps here, instead, based on a setup_packet_pending
2268 * flag. Such flag gets set whenever we have a XferNotReady
2269 * event on EP0 and gets cleared on XferComplete for the
2270 * same endpoint.
2271 *
2272 * Refers to:
2273 *
2274 * STAR#9000466709: RTL: Device : Disconnect event not
2275 * generated if setup packet pending in FIFO
2276 */
2277 if (dwc->revision < DWC3_REVISION_188A) {
2278 if (dwc->setup_packet_pending)
2279 dwc3_gadget_disconnect_interrupt(dwc);
2280 }
2281
2282 dwc3_reset_gadget(dwc);
2283
2284 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2285 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2286 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2287 dwc->test_mode = false;
2288
2289 dwc3_stop_active_transfers(dwc);
2290 dwc3_clear_stall_all_ep(dwc);
2291
2292 /* Reset device address to zero */
2293 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2294 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2295 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2296 }
2297
2298 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2299 {
2300 u32 reg;
2301 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2302
2303 /*
2304 * We change the clock only at SS, though it is not entirely clear why
2305 * this is needed. It may become part of the power saving plan.
2306 */
2307
2308 if (speed != DWC3_DSTS_SUPERSPEED)
2309 return;
2310
2311 /*
2312 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2313 * each time on Connect Done.
2314 */
2315 if (!usb30_clock)
2316 return;
2317
2318 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2319 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2320 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2321 }
2322
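/**
 * dwc3_gadget_conndone_interrupt - handle the Connection Done device event
 * @dwc: pointer to our controller context structure
 *
 * Reads the negotiated speed from DSTS, programs ep0's maximum packet size
 * accordingly, applies the USB2 LPM settings on revisions newer than 1.94a
 * when not connected at SuperSpeed, and re-enables both directions of
 * physical endpoint 0.
 */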
2323 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2324 {
2325 struct dwc3_ep *dep;
2326 int ret;
2327 u32 reg;
2328 u8 speed;
2329
2330 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2331 speed = reg & DWC3_DSTS_CONNECTSPD;
2332 dwc->speed = speed;
2333
2334 dwc3_update_ram_clk_sel(dwc, speed);
2335
2336 switch (speed) {
2337 case DWC3_DCFG_SUPERSPEED:
2338 /*
2339 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2340 * would cause a missing USB3 Reset event.
2341 *
2342 * In such situations, we should force a USB3 Reset
2343 * event by calling our dwc3_gadget_reset_interrupt()
2344 * routine.
2345 *
2346 * Refers to:
2347 *
2348 * STAR#9000483510: RTL: SS : USB3 reset event may
2349 * not be generated always when the link enters poll
2350 */
2351 if (dwc->revision < DWC3_REVISION_190A)
2352 dwc3_gadget_reset_interrupt(dwc);
2353
2354 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2355 dwc->gadget.ep0->maxpacket = 512;
2356 dwc->gadget.speed = USB_SPEED_SUPER;
2357 break;
2358 case DWC3_DCFG_HIGHSPEED:
2359 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2360 dwc->gadget.ep0->maxpacket = 64;
2361 dwc->gadget.speed = USB_SPEED_HIGH;
2362 break;
2363 case DWC3_DCFG_FULLSPEED2:
2364 case DWC3_DCFG_FULLSPEED1:
2365 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2366 dwc->gadget.ep0->maxpacket = 64;
2367 dwc->gadget.speed = USB_SPEED_FULL;
2368 break;
2369 case DWC3_DCFG_LOWSPEED:
2370 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2371 dwc->gadget.ep0->maxpacket = 8;
2372 dwc->gadget.speed = USB_SPEED_LOW;
2373 break;
2374 }
2375
2376 /* Enable USB2 LPM Capability */
2377
2378 if ((dwc->revision > DWC3_REVISION_194A)
2379 && (speed != DWC3_DCFG_SUPERSPEED)) {
2380 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2381 reg |= DWC3_DCFG_LPM_CAP;
2382 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2383
2384 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2385 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2386
2387 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2388
2389 /*
2390 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled and
2391 * DCFG.LPMCap is set, the core responds with an ACK if the
2392 * BESL value in the LPM token is less than or equal to the LPM
2393 * NYET threshold.
2394 */
2395 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2396 && dwc->has_lpm_erratum,
2397 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2398
2399 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2400 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2401
2402 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2403 } else {
2404 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2405 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2406 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2407 }
2408
2409 dep = dwc->eps[0];
2410 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2411 false);
2412 if (ret) {
2413 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2414 return;
2415 }
2416
2417 dep = dwc->eps[1];
2418 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2419 false);
2420 if (ret) {
2421 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2422 return;
2423 }
2424
2425 /*
2426 * Configure PHY via GUSB3PIPECTLn if required.
2427 *
2428 * Update GTXFIFOSIZn
2429 *
2430 * In both cases reset values should be sufficient.
2431 */
2432 }
2433
2434 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2435 {
2436 /*
2437 * TODO take core out of low power mode when that's
2438 * implemented.
2439 */
2440
2441 dwc->gadget_driver->resume(&dwc->gadget);
2442 }
2443
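/**
 * dwc3_gadget_linksts_change_interrupt - handle Link State Change events
 * @dwc: pointer to our controller context structure
 * @evtinfo: event information carrying the new link state
 *
 * Applies the pre-2.50a spurious-resume and pre-1.83a U1/U2 workarounds
 * described below, then suspends or resumes the gadget driver to match
 * the new link state.
 */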
2444 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2445 unsigned int evtinfo)
2446 {
2447 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2448 unsigned int pwropt;
2449
2450 /*
2451 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2452 * Hibernation mode enabled which would show up when device detects
2453 * host-initiated U3 exit.
2454 *
2455 * In that case, device will generate a Link State Change Interrupt
2456 * from U3 to RESUME which is only necessary if Hibernation is
2457 * configured in.
2458 *
2459 * There are no functional changes due to such a spurious event and we
2460 * just need to ignore it.
2461 *
2462 * Refers to:
2463 *
2464 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2465 * operational mode
2466 */
2467 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2468 if ((dwc->revision < DWC3_REVISION_250A) &&
2469 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2470 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2471 (next == DWC3_LINK_STATE_RESUME)) {
2472 dwc3_trace(trace_dwc3_gadget,
2473 "ignoring transition U3 -> Resume");
2474 return;
2475 }
2476 }
2477
2478 /*
2479 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2480 * on the link partner, the USB session might do multiple entries/exits
2481 * of low power states before a transfer takes place.
2482 *
2483 * Due to this problem, we might experience lower throughput. The
2484 * suggested workaround is to disable DCTL[12:9] bits if we're
2485 * transitioning from U1/U2 to U0 and enable those bits again
2486 * after a transfer completes and there are no pending transfers
2487 * on any of the enabled endpoints.
2488 *
2489 * This is the first half of that workaround.
2490 *
2491 * Refers to:
2492 *
2493 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2494 * core send LGO_Ux entering U0
2495 */
2496 if (dwc->revision < DWC3_REVISION_183A) {
2497 if (next == DWC3_LINK_STATE_U0) {
2498 u32 u1u2;
2499 u32 reg;
2500
2501 switch (dwc->link_state) {
2502 case DWC3_LINK_STATE_U1:
2503 case DWC3_LINK_STATE_U2:
2504 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2505 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2506 | DWC3_DCTL_ACCEPTU2ENA
2507 | DWC3_DCTL_INITU1ENA
2508 | DWC3_DCTL_ACCEPTU1ENA);
2509
2510 if (!dwc->u1u2)
2511 dwc->u1u2 = reg & u1u2;
2512
2513 reg &= ~u1u2;
2514
2515 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2516 break;
2517 default:
2518 /* do nothing */
2519 break;
2520 }
2521 }
2522 }
2523
2524 switch (next) {
2525 case DWC3_LINK_STATE_U1:
2526 if (dwc->speed == USB_SPEED_SUPER)
2527 dwc3_suspend_gadget(dwc);
2528 break;
2529 case DWC3_LINK_STATE_U2:
2530 case DWC3_LINK_STATE_U3:
2531 dwc3_suspend_gadget(dwc);
2532 break;
2533 case DWC3_LINK_STATE_RESUME:
2534 dwc3_resume_gadget(dwc);
2535 break;
2536 default:
2537 /* do nothing */
2538 break;
2539 }
2540
2541 dwc->link_state = next;
2542 }
2543
2544 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2545 unsigned int evtinfo)
2546 {
2547 unsigned int is_ss = evtinfo & BIT(4);
2548
2549 /**
2550 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2551 * has a known issue which can cause USB CV TD.9.23 to fail
2552 * randomly.
2553 *
2554 * Because of this issue, core could generate bogus hibernation
2555 * events which SW needs to ignore.
2556 *
2557 * Refers to:
2558 *
2559 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2560 * Device Fallback from SuperSpeed
2561 */
2562 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2563 return;
2564
2565 /* enter hibernation here */
2566 }
2567
2568 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2569 const struct dwc3_event_devt *event)
2570 {
2571 switch (event->type) {
2572 case DWC3_DEVICE_EVENT_DISCONNECT:
2573 dwc3_gadget_disconnect_interrupt(dwc);
2574 break;
2575 case DWC3_DEVICE_EVENT_RESET:
2576 dwc3_gadget_reset_interrupt(dwc);
2577 break;
2578 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2579 dwc3_gadget_conndone_interrupt(dwc);
2580 break;
2581 case DWC3_DEVICE_EVENT_WAKEUP:
2582 dwc3_gadget_wakeup_interrupt(dwc);
2583 break;
2584 case DWC3_DEVICE_EVENT_HIBER_REQ:
2585 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2586 "unexpected hibernation event\n"))
2587 break;
2588
2589 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2590 break;
2591 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2592 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2593 break;
2594 case DWC3_DEVICE_EVENT_EOPF:
2595 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2596 break;
2597 case DWC3_DEVICE_EVENT_SOF:
2598 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2599 break;
2600 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2601 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2602 break;
2603 case DWC3_DEVICE_EVENT_CMD_CMPL:
2604 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2605 break;
2606 case DWC3_DEVICE_EVENT_OVERFLOW:
2607 dwc3_trace(trace_dwc3_gadget, "Overflow");
2608 break;
2609 default:
2610 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2611 }
2612 }
2613
2614 static void dwc3_process_event_entry(struct dwc3 *dwc,
2615 const union dwc3_event *event)
2616 {
2617 trace_dwc3_event(event->raw);
2618
2619 /* Endpoint IRQ, handle it and return early */
2620 if (event->type.is_devspec == 0) {
2621 /* depevt */
2622 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2623 }
2624
2625 switch (event->type.type) {
2626 case DWC3_EVENT_TYPE_DEV:
2627 dwc3_gadget_interrupt(dwc, &event->devt);
2628 break;
2629 /* REVISIT what to do with Carkit and I2C events ? */
2630 default:
2631 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2632 }
2633 }
2634
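/**
 * dwc3_process_event_buf - drain one event buffer
 * @dwc: pointer to our controller context structure
 * @buf: index of the event buffer to process
 *
 * Runs from the threaded interrupt handler. Consumes every pending 4-byte
 * event, acknowledges it in GEVNTCOUNT and finally unmasks the interrupt
 * which was masked by dwc3_check_event_buf().
 */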
2635 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2636 {
2637 struct dwc3_event_buffer *evt;
2638 irqreturn_t ret = IRQ_NONE;
2639 int left;
2640 u32 reg;
2641
2642 evt = dwc->ev_buffs[buf];
2643 left = evt->count;
2644
2645 if (!(evt->flags & DWC3_EVENT_PENDING))
2646 return IRQ_NONE;
2647
2648 while (left > 0) {
2649 union dwc3_event event;
2650
2651 event.raw = *(u32 *) (evt->buf + evt->lpos);
2652
2653 dwc3_process_event_entry(dwc, &event);
2654
2655 /*
2656 * FIXME we wrap around correctly to the next entry as
2657 * almost all entries are 4 bytes in size. There is one
2658 * entry which has 12 bytes which is a regular entry
2659 * followed by 8 bytes of data. ATM I don't know how
2660 * things are organized if we get next to a
2661 * boundary so I worry about that once we try to handle
2662 * that case.
2663 */
2664 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2665 left -= 4;
2666
2667 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2668 }
2669
2670 evt->count = 0;
2671 evt->flags &= ~DWC3_EVENT_PENDING;
2672 ret = IRQ_HANDLED;
2673
2674 /* Unmask interrupt */
2675 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2676 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2677 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2678
2679 return ret;
2680 }
2681
2682 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2683 {
2684 struct dwc3 *dwc = _dwc;
2685 unsigned long flags;
2686 irqreturn_t ret = IRQ_NONE;
2687 int i;
2688
2689 spin_lock_irqsave(&dwc->lock, flags);
2690
2691 for (i = 0; i < dwc->num_event_buffers; i++)
2692 ret |= dwc3_process_event_buf(dwc, i);
2693
2694 spin_unlock_irqrestore(&dwc->lock, flags);
2695
2696 return ret;
2697 }
2698
2699 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2700 {
2701 struct dwc3_event_buffer *evt;
2702 u32 count;
2703 u32 reg;
2704
2705 evt = dwc->ev_buffs[buf];
2706
2707 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2708 count &= DWC3_GEVNTCOUNT_MASK;
2709 if (!count)
2710 return IRQ_NONE;
2711
2712 evt->count = count;
2713 evt->flags |= DWC3_EVENT_PENDING;
2714
2715 /* Mask interrupt */
2716 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2717 reg |= DWC3_GEVNTSIZ_INTMASK;
2718 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2719
2720 return IRQ_WAKE_THREAD;
2721 }
2722
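/**
 * dwc3_interrupt - hard interrupt handler
 * @irq: the IRQ number being serviced
 * @_dwc: pointer to our controller context structure
 *
 * Checks every event buffer; whenever one has pending events its interrupt
 * is masked and IRQ_WAKE_THREAD is returned so that dwc3_thread_interrupt()
 * does the actual event processing.
 */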
2723 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2724 {
2725 struct dwc3 *dwc = _dwc;
2726 int i;
2727 irqreturn_t ret = IRQ_NONE;
2728
2729 for (i = 0; i < dwc->num_event_buffers; i++) {
2730 irqreturn_t status;
2731
2732 status = dwc3_check_event_buf(dwc, i);
2733 if (status == IRQ_WAKE_THREAD)
2734 ret = status;
2735 }
2736
2737 return ret;
2738 }
2739
2740 /**
2741 * dwc3_gadget_init - Initializes gadget related registers
2742 * @dwc: pointer to our controller context structure
2743 *
2744 * Returns 0 on success otherwise negative errno.
2745 */
2746 int dwc3_gadget_init(struct dwc3 *dwc)
2747 {
2748 int ret;
2749
2750 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2751 &dwc->ctrl_req_addr, GFP_KERNEL);
2752 if (!dwc->ctrl_req) {
2753 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2754 ret = -ENOMEM;
2755 goto err0;
2756 }
2757
2758 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2759 &dwc->ep0_trb_addr, GFP_KERNEL);
2760 if (!dwc->ep0_trb) {
2761 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2762 ret = -ENOMEM;
2763 goto err1;
2764 }
2765
2766 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2767 if (!dwc->setup_buf) {
2768 ret = -ENOMEM;
2769 goto err2;
2770 }
2771
2772 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2773 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2774 GFP_KERNEL);
2775 if (!dwc->ep0_bounce) {
2776 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2777 ret = -ENOMEM;
2778 goto err3;
2779 }
2780
2781 dwc->gadget.ops = &dwc3_gadget_ops;
2782 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2783 dwc->gadget.sg_supported = true;
2784 dwc->gadget.name = "dwc3-gadget";
2785
2786 /*
2787 * FIXME We might be setting max_speed to <SUPER, however versions
2788 * <2.20a of dwc3 have an issue with metastability (documented
2789 * elsewhere in this driver) which tells us we can't set max speed to
2790 * anything lower than SUPER.
2791 *
2792 * Because gadget.max_speed is only used by composite.c and function
2793 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2794 * to happen so we avoid sending SuperSpeed Capability descriptor
2795 * together with our BOS descriptor as that could confuse host into
2796 * thinking we can handle super speed.
2797 *
2798 * Note that, in fact, we won't even support GetBOS requests when speed
2799 * is less than super speed because we don't have means, yet, to tell
2800 * composite.c that we are USB 2.0 + LPM ECN.
2801 */
2802 if (dwc->revision < DWC3_REVISION_220A)
2803 dwc3_trace(trace_dwc3_gadget,
2804 "Changing max_speed on rev %08x\n",
2805 dwc->revision);
2806
2807 dwc->gadget.max_speed = dwc->maximum_speed;
2808
2809 /*
2810 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2811 * on ep out.
2812 */
2813 dwc->gadget.quirk_ep_out_aligned_size = true;
2814
2815 /*
2816 * REVISIT: Here we should clear all pending IRQs to be
2817 * sure we're starting from a well known location.
2818 */
2819
2820 ret = dwc3_gadget_init_endpoints(dwc);
2821 if (ret)
2822 goto err4;
2823
2824 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2825 if (ret) {
2826 dev_err(dwc->dev, "failed to register udc\n");
2827 goto err4;
2828 }
2829
2830 return 0;
2831
2832 err4:
2833 dwc3_gadget_free_endpoints(dwc);
2834 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2835 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2836
2837 err3:
2838 kfree(dwc->setup_buf);
2839
2840 err2:
2841 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2842 dwc->ep0_trb, dwc->ep0_trb_addr);
2843
2844 err1:
2845 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2846 dwc->ctrl_req, dwc->ctrl_req_addr);
2847
2848 err0:
2849 return ret;
2850 }
2851
2852 /* -------------------------------------------------------------------------- */
2853
2854 void dwc3_gadget_exit(struct dwc3 *dwc)
2855 {
2856 usb_del_gadget_udc(&dwc->gadget);
2857
2858 dwc3_gadget_free_endpoints(dwc);
2859
2860 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2861 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2862
2863 kfree(dwc->setup_buf);
2864
2865 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2866 dwc->ep0_trb, dwc->ep0_trb_addr);
2867
2868 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2869 dwc->ctrl_req, dwc->ctrl_req_addr);
2870 }
2871
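/**
 * dwc3_gadget_suspend - quiesce the gadget for suspend
 * @dwc: pointer to our controller context structure
 *
 * If the pullup is connected, disables device interrupts and updates the
 * run/stop state for suspend; then disables both directions of physical
 * endpoint 0 and saves DCFG so that dwc3_gadget_resume() can restore it.
 */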
2872 int dwc3_gadget_suspend(struct dwc3 *dwc)
2873 {
2874 if (dwc->pullups_connected) {
2875 dwc3_gadget_disable_irq(dwc);
2876 dwc3_gadget_run_stop(dwc, true, true);
2877 }
2878
2879 __dwc3_gadget_ep_disable(dwc->eps[0]);
2880 __dwc3_gadget_ep_disable(dwc->eps[1]);
2881
2882 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2883
2884 return 0;
2885 }
2886
2887 int dwc3_gadget_resume(struct dwc3 *dwc)
2888 {
2889 struct dwc3_ep *dep;
2890 int ret;
2891
2892 /* Start with SuperSpeed Default */
2893 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2894
2895 dep = dwc->eps[0];
2896 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2897 false);
2898 if (ret)
2899 goto err0;
2900
2901 dep = dwc->eps[1];
2902 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2903 false);
2904 if (ret)
2905 goto err1;
2906
2907 /* begin to receive SETUP packets */
2908 dwc->ep0state = EP0_SETUP_PHASE;
2909 dwc3_ep0_out_start(dwc);
2910
2911 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2912
2913 if (dwc->pullups_connected) {
2914 dwc3_gadget_enable_irq(dwc);
2915 dwc3_gadget_run_stop(dwc, true, false);
2916 }
2917
2918 return 0;
2919
2920 err1:
2921 __dwc3_gadget_ep_disable(dwc->eps[0]);
2922
2923 err0:
2924 return ret;
2925 }
2926