1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41 * @mode: the test mode to set (J, K, SE0 NAK, Packet, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
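/*
 * Illustrative sketch only, not part of the original driver: how a caller
 * handling SET_FEATURE(TEST_MODE) might use the helper above. The function
 * name below is hypothetical; the TEST_* selectors come from
 * <linux/usb/ch9.h>.
 */
#if 0
static int dwc3_example_enter_test_mode(struct dwc3 *dwc, int selector)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_set_test_mode(dwc, selector);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
#endif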
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function
76 * returns the current link state.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
147
148 /**
149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
150 * @dwc: pointer to our context structure
151 *
152 * This function will do a best-effort FIFO allocation in order
153 * to improve FIFO usage and throughput, while still allowing
154 * us to enable as many endpoints as possible.
155 *
156 * Keep in mind that this operation is highly dependent on the
157 * configured size for RAM1 - which contains TxFifo -, the number
158 * of endpoints enabled in coreConsultant, and the width of the
159 * Master Bus.
160 *
161 * In the ideal world, we would always be able to satisfy the
162 * following equation:
163 *
164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
166 *
167 * Unfortunately, due to many variables that's not always the case.
168 */
169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
170 {
171 int last_fifo_depth = 0;
172 int ram1_depth;
173 int fifo_size;
174 int mdwidth;
175 int num;
176
177 if (!dwc->needs_fifo_resize)
178 return 0;
179
180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
182
183 /* MDWIDTH is represented in bits, we need it in bytes */
184 mdwidth >>= 3;
185
186 /*
187 * FIXME For now we will only allocate 1 wMaxPacketSize space
188 * for each enabled endpoint, later patches will come to
189 * improve this algorithm so that we better use the internal
190 * FIFO space
191 */
192 for (num = 0; num < dwc->num_in_eps; num++) {
193 /* bit0 indicates direction; 1 means IN ep */
194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
195 int mult = 1;
196 int tmp;
197
198 if (!(dep->flags & DWC3_EP_ENABLED))
199 continue;
200
201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
202 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
203 mult = 3;
204
205 /*
206 * REVISIT: the following assumes we will always have enough
207 * space available on the FIFO RAM for all possible use cases.
208 * Make sure that's true somehow and change FIFO allocation
209 * accordingly.
210 *
211 * If we have Bulk or Isochronous endpoints, we want
212 * them to be able to be very, very fast. So we're giving
213 * those endpoints a fifo_size which is enough for 3 full
214 * packets
215 */
216 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
217 tmp += mdwidth;
218
219 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
220
221 fifo_size |= (last_fifo_depth << 16);
222
223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
224 dep->name, last_fifo_depth, fifo_size & 0xffff);
225
226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
227
228 last_fifo_depth += (fifo_size & 0xffff);
229 }
230
231 return 0;
232 }
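/*
 * Worked example for the sizing above (illustrative, assuming a 64-bit
 * master bus, i.e. mdwidth = 8 bytes): a SuperSpeed bulk IN endpoint with
 * wMaxPacketSize = 1024 gets mult = 3, so
 *
 *	tmp       = 3 * (1024 + 8) + 8 = 3104 bytes
 *	fifo_size = DIV_ROUND_UP(3104, 8) = 388 RAM1 words
 *
 * which is what gets programmed into the lower half of GTXFIFOSIZ, with the
 * accumulated depth of the previous FIFOs in the upper half.
 */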
233
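/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with the controller's lock held and interrupts disabled.
 * This function will unmap @req and call its ->complete() callback to notify
 * upper layers that it has completed.
 */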
234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
235 int status)
236 {
237 struct dwc3 *dwc = dep->dwc;
238 int i;
239
240 if (req->queued) {
241 i = 0;
242 do {
243 dep->busy_slot++;
244 /*
245 * Skip LINK TRB. We can't use req->trb and check for
246 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
247 * just completed (not the LINK TRB).
248 */
249 if (((dep->busy_slot & DWC3_TRB_MASK) ==
250 DWC3_TRB_NUM - 1) &&
251 usb_endpoint_xfer_isoc(dep->endpoint.desc))
252 dep->busy_slot++;
253 } while (++i < req->request.num_mapped_sgs);
254 req->queued = false;
255 }
256 list_del(&req->list);
257 req->trb = NULL;
258
259 if (req->request.status == -EINPROGRESS)
260 req->request.status = status;
261
262 if (dwc->ep0_bounced && dep->number == 0)
263 dwc->ep0_bounced = false;
264 else
265 usb_gadget_unmap_request(&dwc->gadget, &req->request,
266 req->direction);
267
268 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
269 req, dep->name, req->request.actual,
270 req->request.length, status);
271 trace_dwc3_gadget_giveback(req);
272
273 spin_unlock(&dwc->lock);
274 usb_gadget_giveback_request(&dep->endpoint, &req->request);
275 spin_lock(&dwc->lock);
276 }
277
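/**
 * dwc3_send_gadget_generic_command - issue a generic device command
 * @dwc: pointer to our controller context structure
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Writes DGCMDPAR/DGCMD and busy-waits (we may be running in hard IRQ
 * context, so we cannot sleep) for up to roughly 500us until the CMDACT bit
 * clears. Returns 0 on success, -EINVAL if the controller reports a non-zero
 * command status, or -ETIMEDOUT.
 */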
278 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
279 {
280 u32 timeout = 500;
281 u32 reg;
282
283 trace_dwc3_gadget_generic_cmd(cmd, param);
284
285 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
286 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
287
288 do {
289 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
290 if (!(reg & DWC3_DGCMD_CMDACT)) {
291 dwc3_trace(trace_dwc3_gadget,
292 "Command Complete --> %d",
293 DWC3_DGCMD_STATUS(reg));
294 if (DWC3_DGCMD_STATUS(reg))
295 return -EINVAL;
296 return 0;
297 }
298
299 /*
300 * We can't sleep here, because it's also called from
301 * interrupt context.
302 */
303 timeout--;
304 if (!timeout) {
305 dwc3_trace(trace_dwc3_gadget,
306 "Command Timed Out");
307 return -ETIMEDOUT;
308 }
309 udelay(1);
310 } while (1);
311 }
312
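/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dwc: pointer to our controller context structure
 * @ep: physical endpoint number the command is addressed to
 * @cmd: the command to be issued
 * @params: the three DEPCMDPAR parameter registers
 *
 * Same busy-wait contract as dwc3_send_gadget_generic_command(): returns 0
 * on success, -EINVAL on a failed command status, or -ETIMEDOUT after
 * roughly 500us.
 */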
313 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
314 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
315 {
316 struct dwc3_ep *dep = dwc->eps[ep];
317 u32 timeout = 500;
318 u32 reg;
319
320 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
321
322 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
323 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
324 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
325
326 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
327 do {
328 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
329 if (!(reg & DWC3_DEPCMD_CMDACT)) {
330 dwc3_trace(trace_dwc3_gadget,
331 "Command Complete --> %d",
332 DWC3_DEPCMD_STATUS(reg));
333 if (DWC3_DEPCMD_STATUS(reg))
334 return -EINVAL;
335 return 0;
336 }
337
338 /*
339 * We can't sleep here, because it is also called from
340 * interrupt context.
341 */
342 timeout--;
343 if (!timeout) {
344 dwc3_trace(trace_dwc3_gadget,
345 "Command Timed Out");
346 return -ETIMEDOUT;
347 }
348
349 udelay(1);
350 } while (1);
351 }
352
353 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
354 struct dwc3_trb *trb)
355 {
356 u32 offset = (char *) trb - (char *) dep->trb_pool;
357
358 return dep->trb_pool_dma + offset;
359 }
360
361 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
362 {
363 struct dwc3 *dwc = dep->dwc;
364
365 if (dep->trb_pool)
366 return 0;
367
368 dep->trb_pool = dma_alloc_coherent(dwc->dev,
369 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
370 &dep->trb_pool_dma, GFP_KERNEL);
371 if (!dep->trb_pool) {
372 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
373 dep->name);
374 return -ENOMEM;
375 }
376
377 return 0;
378 }
379
380 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
381 {
382 struct dwc3 *dwc = dep->dwc;
383
384 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
385 dep->trb_pool, dep->trb_pool_dma);
386
387 dep->trb_pool = NULL;
388 dep->trb_pool_dma = 0;
389 }
390
391 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
392
393 /**
394 * dwc3_gadget_start_config - Configure EP resources
395 * @dwc: pointer to our controller context structure
396 * @dep: endpoint that is being enabled
397 *
398 * The assignment of transfer resources cannot perfectly follow the
399 * data book due to the fact that the controller driver does not have
400 * all knowledge of the configuration in advance. It is given this
401 * information piecemeal by the composite gadget framework after every
402 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
403 * programming model in this scenario can cause errors. For two
404 * reasons:
405 *
406 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
407 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
408 * multiple interfaces.
409 *
410 * 2) The databook does not mention doing more DEPXFERCFG for new
411 * endpoint on alt setting (8.1.6).
412 *
413 * The following simplified method is used instead:
414 *
415 * All hardware endpoints can be assigned a transfer resource and this
416 * setting will stay persistent until either a core reset or
417 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
418 * do DEPXFERCFG for every hardware endpoint as well. We are
419 * guaranteed that there are as many transfer resources as endpoints.
420 *
421 * This function is called for each endpoint when it is being enabled
422 * but is triggered only when called for EP0-out, which always happens
423 * first, and which should only happen in one of the above conditions.
424 */
425 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
426 {
427 struct dwc3_gadget_ep_cmd_params params;
428 u32 cmd;
429 int i;
430 int ret;
431
432 if (dep->number)
433 return 0;
434
435 memset(&params, 0x00, sizeof(params));
436 cmd = DWC3_DEPCMD_DEPSTARTCFG;
437
438 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
439 if (ret)
440 return ret;
441
442 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
443 struct dwc3_ep *dep = dwc->eps[i];
444
445 if (!dep)
446 continue;
447
448 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
449 if (ret)
450 return ret;
451 }
452
453 return 0;
454 }
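/*
 * Illustrative sketch of the resulting command sequence (not part of the
 * driver): when the gadget enables its first endpoint after a reset, the
 * EP0-out call into this function expands to something like
 *
 *	DEPCMD(ep0, DEPSTARTCFG, XferRscIdx = 0);
 *	for each allocated hardware endpoint:
 *		DEPCMD(epN, DEPXFERCFG, NumXferRes = 1);
 *
 * so every endpoint ends up owning exactly one transfer resource for the
 * lifetime of the configuration (until core reset or hibernation).
 */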
455
456 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
457 const struct usb_endpoint_descriptor *desc,
458 const struct usb_ss_ep_comp_descriptor *comp_desc,
459 bool ignore, bool restore)
460 {
461 struct dwc3_gadget_ep_cmd_params params;
462
463 memset(&params, 0x00, sizeof(params));
464
465 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
466 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
467
468 /* Burst size is only needed in SuperSpeed mode */
469 if (dwc->gadget.speed == USB_SPEED_SUPER) {
470 u32 burst = dep->endpoint.maxburst - 1;
471
472 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
473 }
474
475 if (ignore)
476 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
477
478 if (restore) {
479 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
480 params.param2 |= dep->saved_state;
481 }
482
483 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
484 | DWC3_DEPCFG_XFER_NOT_READY_EN;
485
486 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
487 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
488 | DWC3_DEPCFG_STREAM_EVENT_EN;
489 dep->stream_capable = true;
490 }
491
492 if (!usb_endpoint_xfer_control(desc))
493 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
494
495 /*
496 * We are doing 1:1 mapping for endpoints, meaning
497 * Physical Endpoint 2 maps to Logical Endpoint 2 and
498 * so on. We consider the direction bit as part of the physical
499 * endpoint number. So USB endpoint 0x81 is 0x03.
500 */
501 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
502
503 /*
504 * We must use the lower 16 TX FIFOs even though
505 * HW might have more
506 */
507 if (dep->direction)
508 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
509
510 if (desc->bInterval) {
511 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
512 dep->interval = 1 << (desc->bInterval - 1);
513 }
514
515 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
516 DWC3_DEPCMD_SETEPCONFIG, &params);
517 }
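/*
 * Illustrative example of the resulting DEPCFG parameters (not taken from a
 * real trace): for a SuperSpeed bulk IN endpoint 0x81 (physical endpoint 3)
 * with wMaxPacketSize = 1024 and bMaxBurst = 0, the code above builds
 * roughly
 *
 *	param0 = EP_TYPE(BULK) | MAX_PACKET_SIZE(1024) | BURST_SIZE(0)
 *		 | FIFO_NUMBER(1)
 *	param1 = XFER_COMPLETE_EN | XFER_NOT_READY_EN | XFER_IN_PROGRESS_EN
 *		 | EP_NUMBER(3)
 *
 * before handing them to DEPCMD_SETEPCONFIG.
 */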
518
519 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
520 {
521 struct dwc3_gadget_ep_cmd_params params;
522
523 memset(&params, 0x00, sizeof(params));
524
525 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
526
527 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
528 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
529 }
530
531 /**
532 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
533 * @dep: endpoint to be initialized
534 * @desc: USB Endpoint Descriptor
535 *
536 * Caller should take care of locking
537 */
538 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
539 const struct usb_endpoint_descriptor *desc,
540 const struct usb_ss_ep_comp_descriptor *comp_desc,
541 bool ignore, bool restore)
542 {
543 struct dwc3 *dwc = dep->dwc;
544 u32 reg;
545 int ret;
546
547 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
548
549 if (!(dep->flags & DWC3_EP_ENABLED)) {
550 ret = dwc3_gadget_start_config(dwc, dep);
551 if (ret)
552 return ret;
553 }
554
555 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
556 restore);
557 if (ret)
558 return ret;
559
560 if (!(dep->flags & DWC3_EP_ENABLED)) {
561 struct dwc3_trb *trb_st_hw;
562 struct dwc3_trb *trb_link;
563
564 dep->endpoint.desc = desc;
565 dep->comp_desc = comp_desc;
566 dep->type = usb_endpoint_type(desc);
567 dep->flags |= DWC3_EP_ENABLED;
568
569 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
570 reg |= DWC3_DALEPENA_EP(dep->number);
571 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
572
573 if (!usb_endpoint_xfer_isoc(desc))
574 return 0;
575
576 /* Link TRB for ISOC. The HWO bit is never reset */
577 trb_st_hw = &dep->trb_pool[0];
578
579 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
580 memset(trb_link, 0, sizeof(*trb_link));
581
582 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
583 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
584 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
585 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
586 }
587
588 return 0;
589 }
590
591 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
592 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
593 {
594 struct dwc3_request *req;
595
596 if (!list_empty(&dep->req_queued)) {
597 dwc3_stop_active_transfer(dwc, dep->number, true);
598
599 /* giveback all requests to the gadget driver */
600 while (!list_empty(&dep->req_queued)) {
601 req = next_request(&dep->req_queued);
602
603 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
604 }
605 }
606
607 while (!list_empty(&dep->request_list)) {
608 req = next_request(&dep->request_list);
609
610 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
611 }
612 }
613
614 /**
615 * __dwc3_gadget_ep_disable - Disables a HW endpoint
616 * @dep: the endpoint to disable
617 *
618 * This function also removes requests which are currently processed by the
619 * hardware and those which are not yet scheduled.
620 * Caller should take care of locking.
621 */
622 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
623 {
624 struct dwc3 *dwc = dep->dwc;
625 u32 reg;
626
627 dwc3_remove_requests(dwc, dep);
628
629 /* make sure HW endpoint isn't stalled */
630 if (dep->flags & DWC3_EP_STALL)
631 __dwc3_gadget_ep_set_halt(dep, 0, false);
632
633 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
634 reg &= ~DWC3_DALEPENA_EP(dep->number);
635 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
636
637 dep->stream_capable = false;
638 dep->endpoint.desc = NULL;
639 dep->comp_desc = NULL;
640 dep->type = 0;
641 dep->flags = 0;
642
643 return 0;
644 }
645
646 /* -------------------------------------------------------------------------- */
647
648 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
649 const struct usb_endpoint_descriptor *desc)
650 {
651 return -EINVAL;
652 }
653
654 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
655 {
656 return -EINVAL;
657 }
658
659 /* -------------------------------------------------------------------------- */
660
661 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
662 const struct usb_endpoint_descriptor *desc)
663 {
664 struct dwc3_ep *dep;
665 struct dwc3 *dwc;
666 unsigned long flags;
667 int ret;
668
669 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
670 pr_debug("dwc3: invalid parameters\n");
671 return -EINVAL;
672 }
673
674 if (!desc->wMaxPacketSize) {
675 pr_debug("dwc3: missing wMaxPacketSize\n");
676 return -EINVAL;
677 }
678
679 dep = to_dwc3_ep(ep);
680 dwc = dep->dwc;
681
682 if (dep->flags & DWC3_EP_ENABLED) {
683 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
684 dep->name);
685 return 0;
686 }
687
688 switch (usb_endpoint_type(desc)) {
689 case USB_ENDPOINT_XFER_CONTROL:
690 strlcat(dep->name, "-control", sizeof(dep->name));
691 break;
692 case USB_ENDPOINT_XFER_ISOC:
693 strlcat(dep->name, "-isoc", sizeof(dep->name));
694 break;
695 case USB_ENDPOINT_XFER_BULK:
696 strlcat(dep->name, "-bulk", sizeof(dep->name));
697 break;
698 case USB_ENDPOINT_XFER_INT:
699 strlcat(dep->name, "-int", sizeof(dep->name));
700 break;
701 default:
702 dev_err(dwc->dev, "invalid endpoint transfer type\n");
703 }
704
705 spin_lock_irqsave(&dwc->lock, flags);
706 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
707 spin_unlock_irqrestore(&dwc->lock, flags);
708
709 return ret;
710 }
711
712 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
713 {
714 struct dwc3_ep *dep;
715 struct dwc3 *dwc;
716 unsigned long flags;
717 int ret;
718
719 if (!ep) {
720 pr_debug("dwc3: invalid parameters\n");
721 return -EINVAL;
722 }
723
724 dep = to_dwc3_ep(ep);
725 dwc = dep->dwc;
726
727 if (!(dep->flags & DWC3_EP_ENABLED)) {
728 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
729 dep->name);
730 return 0;
731 }
732
733 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
734 dep->number >> 1,
735 (dep->number & 1) ? "in" : "out");
736
737 spin_lock_irqsave(&dwc->lock, flags);
738 ret = __dwc3_gadget_ep_disable(dep);
739 spin_unlock_irqrestore(&dwc->lock, flags);
740
741 return ret;
742 }
743
744 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
745 gfp_t gfp_flags)
746 {
747 struct dwc3_request *req;
748 struct dwc3_ep *dep = to_dwc3_ep(ep);
749
750 req = kzalloc(sizeof(*req), gfp_flags);
751 if (!req)
752 return NULL;
753
754 req->epnum = dep->number;
755 req->dep = dep;
756
757 trace_dwc3_alloc_request(req);
758
759 return &req->request;
760 }
761
762 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
763 struct usb_request *request)
764 {
765 struct dwc3_request *req = to_dwc3_request(request);
766
767 trace_dwc3_free_request(req);
768 kfree(req);
769 }
770
771 /**
772 * dwc3_prepare_one_trb - setup one TRB from one request
773 * @dep: endpoint for which this request is prepared
774 * @req: dwc3_request pointer
775 */
776 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
777 struct dwc3_request *req, dma_addr_t dma,
778 unsigned length, unsigned last, unsigned chain, unsigned node)
779 {
780 struct dwc3_trb *trb;
781
782 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
783 dep->name, req, (unsigned long long) dma,
784 length, last ? " last" : "",
785 chain ? " chain" : "");
786
787
788 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
789
790 if (!req->trb) {
791 dwc3_gadget_move_request_queued(req);
792 req->trb = trb;
793 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
794 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
795 }
796
797 dep->free_slot++;
798 /* Skip the LINK-TRB on ISOC */
799 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
800 usb_endpoint_xfer_isoc(dep->endpoint.desc))
801 dep->free_slot++;
802
803 trb->size = DWC3_TRB_SIZE_LENGTH(length);
804 trb->bpl = lower_32_bits(dma);
805 trb->bph = upper_32_bits(dma);
806
807 switch (usb_endpoint_type(dep->endpoint.desc)) {
808 case USB_ENDPOINT_XFER_CONTROL:
809 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
810 break;
811
812 case USB_ENDPOINT_XFER_ISOC:
813 if (!node)
814 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
815 else
816 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
817 break;
818
819 case USB_ENDPOINT_XFER_BULK:
820 case USB_ENDPOINT_XFER_INT:
821 trb->ctrl = DWC3_TRBCTL_NORMAL;
822 break;
823 default:
824 /*
825 * This is only possible with faulty memory because we
826 * checked it already :)
827 */
828 BUG();
829 }
830
831 if (!req->request.no_interrupt && !chain)
832 trb->ctrl |= DWC3_TRB_CTRL_IOC;
833
834 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
835 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
836 trb->ctrl |= DWC3_TRB_CTRL_CSP;
837 } else if (last) {
838 trb->ctrl |= DWC3_TRB_CTRL_LST;
839 }
840
841 if (chain)
842 trb->ctrl |= DWC3_TRB_CTRL_CHN;
843
844 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
845 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
846
847 trb->ctrl |= DWC3_TRB_CTRL_HWO;
848
849 trace_dwc3_prepare_trb(dep, trb);
850 }
851
852 /*
853 * dwc3_prepare_trbs - setup TRBs from requests
854 * @dep: endpoint for which requests are being prepared
855 * @starting: true if the endpoint is idle and no requests are queued.
856 *
857 * The function goes through the requests list and sets up TRBs for the
858 * transfers. The function returns once there are no more TRBs available or
859 * it runs out of requests.
860 */
861 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
862 {
863 struct dwc3_request *req, *n;
864 u32 trbs_left;
865 u32 max;
866 unsigned int last_one = 0;
867
868 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
869
870 /* the first request must not be queued */
871 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
872
873 /* Can't wrap around on a non-isoc EP since there's no link TRB */
874 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
875 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
876 if (trbs_left > max)
877 trbs_left = max;
878 }
879
880 /*
881 * If the busy and free slots are equal, the ring is either full or
882 * empty. If we are starting to process requests then it is empty.
883 * Otherwise it is full and we don't do anything.
884 */
885 if (!trbs_left) {
886 if (!starting)
887 return;
888 trbs_left = DWC3_TRB_NUM;
889 /*
890 * In case we start from scratch, we queue the ISOC requests
891 * starting from slot 1. This is done because we use ring
892 * buffer and have no LST bit to stop us. Instead, we place
893 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
894 * after the first request so we start at slot 1 and have
895 * 7 requests proceed before we hit the first IOC.
896 * Other transfer types don't use the ring buffer and are
897 * processed from the first TRB until the last one. Since we
898 * don't wrap around we have to start at the beginning.
899 */
900 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
901 dep->busy_slot = 1;
902 dep->free_slot = 1;
903 } else {
904 dep->busy_slot = 0;
905 dep->free_slot = 0;
906 }
907 }
908
909 /* The last TRB is a link TRB, not used for xfer */
910 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
911 return;
912
913 list_for_each_entry_safe(req, n, &dep->request_list, list) {
914 unsigned length;
915 dma_addr_t dma;
916 last_one = false;
917
918 if (req->request.num_mapped_sgs > 0) {
919 struct usb_request *request = &req->request;
920 struct scatterlist *sg = request->sg;
921 struct scatterlist *s;
922 int i;
923
924 for_each_sg(sg, s, request->num_mapped_sgs, i) {
925 unsigned chain = true;
926
927 length = sg_dma_len(s);
928 dma = sg_dma_address(s);
929
930 if (i == (request->num_mapped_sgs - 1) ||
931 sg_is_last(s)) {
932 if (list_empty(&dep->request_list))
933 last_one = true;
934 chain = false;
935 }
936
937 trbs_left--;
938 if (!trbs_left)
939 last_one = true;
940
941 if (last_one)
942 chain = false;
943
944 dwc3_prepare_one_trb(dep, req, dma, length,
945 last_one, chain, i);
946
947 if (last_one)
948 break;
949 }
950
951 if (last_one)
952 break;
953 } else {
954 dma = req->request.dma;
955 length = req->request.length;
956 trbs_left--;
957
958 if (!trbs_left)
959 last_one = 1;
960
961 /* Is this the last request? */
962 if (list_is_last(&req->list, &dep->request_list))
963 last_one = 1;
964
965 dwc3_prepare_one_trb(dep, req, dma, length,
966 last_one, false, 0);
967
968 if (last_one)
969 break;
970 }
971 }
972 }
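/*
 * Illustrative numbers for the slot accounting above (not from a trace, and
 * taking DWC3_TRB_NUM = 32 for the sake of the example): an isoc endpoint
 * that has queued 5 TRBs and completed none sits at busy_slot = 1 and
 * free_slot = 6, so
 *
 *	trbs_left = (1 - 6) & DWC3_TRB_MASK = 27
 *
 * i.e. 27 slots (minus the link TRB in the last slot) are still available
 * before the ring wraps back onto un-completed TRBs.
 */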
973
974 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
975 int start_new)
976 {
977 struct dwc3_gadget_ep_cmd_params params;
978 struct dwc3_request *req;
979 struct dwc3 *dwc = dep->dwc;
980 int ret;
981 u32 cmd;
982
983 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
984 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
985 return -EBUSY;
986 }
987 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
988
989 /*
990 * If we are getting here after a short-out-packet we don't enqueue any
991 * new requests as we try to set the IOC bit only on the last request.
992 */
993 if (start_new) {
994 if (list_empty(&dep->req_queued))
995 dwc3_prepare_trbs(dep, start_new);
996
997 /* req points to the first request which will be sent */
998 req = next_request(&dep->req_queued);
999 } else {
1000 dwc3_prepare_trbs(dep, start_new);
1001
1002 /*
1003 * req points to the first request where HWO changed from 0 to 1
1004 */
1005 req = next_request(&dep->req_queued);
1006 }
1007 if (!req) {
1008 dep->flags |= DWC3_EP_PENDING_REQUEST;
1009 return 0;
1010 }
1011
1012 memset(&params, 0, sizeof(params));
1013
1014 if (start_new) {
1015 params.param0 = upper_32_bits(req->trb_dma);
1016 params.param1 = lower_32_bits(req->trb_dma);
1017 cmd = DWC3_DEPCMD_STARTTRANSFER;
1018 } else {
1019 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1020 }
1021
1022 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
1023 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1024 if (ret < 0) {
1025 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
1026
1027 /*
1028 * FIXME we need to iterate over the list of requests
1029 * here and stop, unmap, free and del each of the linked
1030 * requests instead of what we do now.
1031 */
1032 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1033 req->direction);
1034 list_del(&req->list);
1035 return ret;
1036 }
1037
1038 dep->flags |= DWC3_EP_BUSY;
1039
1040 if (start_new) {
1041 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1042 dep->number);
1043 WARN_ON_ONCE(!dep->resource_index);
1044 }
1045
1046 return 0;
1047 }
1048
1049 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1050 struct dwc3_ep *dep, u32 cur_uf)
1051 {
1052 u32 uf;
1053
1054 if (list_empty(&dep->request_list)) {
1055 dwc3_trace(trace_dwc3_gadget,
1056 "ISOC ep %s run out for requests",
1057 dep->name);
1058 dep->flags |= DWC3_EP_PENDING_REQUEST;
1059 return;
1060 }
1061
1062 /* 4 micro frames in the future */
1063 uf = cur_uf + dep->interval * 4;
1064
1065 __dwc3_gadget_kick_transfer(dep, uf, 1);
1066 }
1067
1068 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1069 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1070 {
1071 u32 cur_uf, mask;
1072
1073 mask = ~(dep->interval - 1);
1074 cur_uf = event->parameters & mask;
1075
1076 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1077 }
1078
1079 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1080 {
1081 struct dwc3 *dwc = dep->dwc;
1082 int ret;
1083
1084 req->request.actual = 0;
1085 req->request.status = -EINPROGRESS;
1086 req->direction = dep->direction;
1087 req->epnum = dep->number;
1088
1089 /*
1090 * We only add to our list of requests now and
1091 * start consuming the list once we get XferNotReady
1092 * IRQ.
1093 *
1094 * That way, we avoid doing anything that we don't need
1095 * to do now and defer it until the point we receive a
1096 * particular token from the Host side.
1097 *
1098 * This will also avoid Host cancelling URBs due to too
1099 * many NAKs.
1100 */
1101 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1102 dep->direction);
1103 if (ret)
1104 return ret;
1105
1106 list_add_tail(&req->list, &dep->request_list);
1107
1108 /*
1109 * There are a few special cases:
1110 *
1111 * 1. XferNotReady with empty list of requests. We need to kick the
1112 * transfer here in that situation, otherwise we will be NAKing
1113 * forever. If we get XferNotReady before gadget driver has a
1114 * chance to queue a request, we will ACK the IRQ but won't be
1115 * able to receive the data until the next request is queued.
1116 * The following code is handling exactly that.
1117 *
1118 */
1119 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1120 /*
1121 * If XferNotReady has already elapsed and this is an
1122 * isoc transfer, issue END TRANSFER so that the core
1123 * generates XferNotReady again and we regain the
1124 * notion of the current microframe.
1125 */
1126 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1127 if (list_empty(&dep->req_queued)) {
1128 dwc3_stop_active_transfer(dwc, dep->number, true);
1129 dep->flags = DWC3_EP_ENABLED;
1130 }
1131 return 0;
1132 }
1133
1134 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1135 if (ret && ret != -EBUSY)
1136 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1137 dep->name);
1138 return ret;
1139 }
1140
1141 /*
1142 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1143 * kick the transfer here after queuing a request, otherwise the
1144 * core may not see the modified TRB(s).
1145 */
1146 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1147 (dep->flags & DWC3_EP_BUSY) &&
1148 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1149 WARN_ON_ONCE(!dep->resource_index);
1150 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1151 false);
1152 if (ret && ret != -EBUSY)
1153 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1154 dep->name);
1155 return ret;
1156 }
1157
1158 /*
1159 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
1160 * right away, otherwise host will not know we have streams to be
1161 * handled.
1162 */
1163 if (dep->stream_capable) {
1164 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1165 if (ret && ret != -EBUSY)
1166 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1167 dep->name);
1168 }
1169
1170 return 0;
1171 }
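/*
 * Illustrative flow for the common (non-isoc) case described above, not an
 * exact trace:
 *
 *	usb_ep_queue()
 *	  -> __dwc3_gadget_ep_queue()        request sits on request_list
 *	XferNotReady IRQ
 *	  -> __dwc3_gadget_kick_transfer()   TRBs prepared, STARTTRANSFER issued
 *	XferComplete/XferInProgress IRQ
 *	  -> dwc3_gadget_giveback()          request's ->complete() called
 */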
1172
1173 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1174 gfp_t gfp_flags)
1175 {
1176 struct dwc3_request *req = to_dwc3_request(request);
1177 struct dwc3_ep *dep = to_dwc3_ep(ep);
1178 struct dwc3 *dwc = dep->dwc;
1179
1180 unsigned long flags;
1181
1182 int ret;
1183
1184 spin_lock_irqsave(&dwc->lock, flags);
1185 if (!dep->endpoint.desc) {
1186 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1187 request, ep->name);
1188 ret = -ESHUTDOWN;
1189 goto out;
1190 }
1191
1192 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1193 request, req->dep->name)) {
1194 ret = -EINVAL;
1195 goto out;
1196 }
1197
1198 trace_dwc3_ep_queue(req);
1199
1200 ret = __dwc3_gadget_ep_queue(dep, req);
1201
1202 out:
1203 spin_unlock_irqrestore(&dwc->lock, flags);
1204
1205 return ret;
1206 }
1207
1208 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1209 struct usb_request *request)
1210 {
1211 struct dwc3_request *req = to_dwc3_request(request);
1212 struct dwc3_request *r = NULL;
1213
1214 struct dwc3_ep *dep = to_dwc3_ep(ep);
1215 struct dwc3 *dwc = dep->dwc;
1216
1217 unsigned long flags;
1218 int ret = 0;
1219
1220 trace_dwc3_ep_dequeue(req);
1221
1222 spin_lock_irqsave(&dwc->lock, flags);
1223
1224 list_for_each_entry(r, &dep->request_list, list) {
1225 if (r == req)
1226 break;
1227 }
1228
1229 if (r != req) {
1230 list_for_each_entry(r, &dep->req_queued, list) {
1231 if (r == req)
1232 break;
1233 }
1234 if (r == req) {
1235 /* wait until it is processed */
1236 dwc3_stop_active_transfer(dwc, dep->number, true);
1237 goto out1;
1238 }
1239 dev_err(dwc->dev, "request %p was not queued to %s\n",
1240 request, ep->name);
1241 ret = -EINVAL;
1242 goto out0;
1243 }
1244
1245 out1:
1246 /* giveback the request */
1247 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1248
1249 out0:
1250 spin_unlock_irqrestore(&dwc->lock, flags);
1251
1252 return ret;
1253 }
1254
1255 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1256 {
1257 struct dwc3_gadget_ep_cmd_params params;
1258 struct dwc3 *dwc = dep->dwc;
1259 int ret;
1260
1261 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1262 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1263 return -EINVAL;
1264 }
1265
1266 memset(&params, 0x00, sizeof(params));
1267
1268 if (value) {
1269 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1270 (!list_empty(&dep->req_queued) ||
1271 !list_empty(&dep->request_list)))) {
1272 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1273 dep->name);
1274 return -EAGAIN;
1275 }
1276
1277 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1278 DWC3_DEPCMD_SETSTALL, &params);
1279 if (ret)
1280 dev_err(dwc->dev, "failed to set STALL on %s\n",
1281 dep->name);
1282 else
1283 dep->flags |= DWC3_EP_STALL;
1284 } else {
1285 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1286 DWC3_DEPCMD_CLEARSTALL, &params);
1287 if (ret)
1288 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1289 dep->name);
1290 else
1291 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1292 }
1293
1294 return ret;
1295 }
1296
1297 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1298 {
1299 struct dwc3_ep *dep = to_dwc3_ep(ep);
1300 struct dwc3 *dwc = dep->dwc;
1301
1302 unsigned long flags;
1303
1304 int ret;
1305
1306 spin_lock_irqsave(&dwc->lock, flags);
1307 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1308 spin_unlock_irqrestore(&dwc->lock, flags);
1309
1310 return ret;
1311 }
1312
1313 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1314 {
1315 struct dwc3_ep *dep = to_dwc3_ep(ep);
1316 struct dwc3 *dwc = dep->dwc;
1317 unsigned long flags;
1318 int ret;
1319
1320 spin_lock_irqsave(&dwc->lock, flags);
1321 dep->flags |= DWC3_EP_WEDGE;
1322
1323 if (dep->number == 0 || dep->number == 1)
1324 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1325 else
1326 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1327 spin_unlock_irqrestore(&dwc->lock, flags);
1328
1329 return ret;
1330 }
1331
1332 /* -------------------------------------------------------------------------- */
1333
1334 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1335 .bLength = USB_DT_ENDPOINT_SIZE,
1336 .bDescriptorType = USB_DT_ENDPOINT,
1337 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1338 };
1339
1340 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1341 .enable = dwc3_gadget_ep0_enable,
1342 .disable = dwc3_gadget_ep0_disable,
1343 .alloc_request = dwc3_gadget_ep_alloc_request,
1344 .free_request = dwc3_gadget_ep_free_request,
1345 .queue = dwc3_gadget_ep0_queue,
1346 .dequeue = dwc3_gadget_ep_dequeue,
1347 .set_halt = dwc3_gadget_ep0_set_halt,
1348 .set_wedge = dwc3_gadget_ep_set_wedge,
1349 };
1350
1351 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1352 .enable = dwc3_gadget_ep_enable,
1353 .disable = dwc3_gadget_ep_disable,
1354 .alloc_request = dwc3_gadget_ep_alloc_request,
1355 .free_request = dwc3_gadget_ep_free_request,
1356 .queue = dwc3_gadget_ep_queue,
1357 .dequeue = dwc3_gadget_ep_dequeue,
1358 .set_halt = dwc3_gadget_ep_set_halt,
1359 .set_wedge = dwc3_gadget_ep_set_wedge,
1360 };
1361
1362 /* -------------------------------------------------------------------------- */
1363
1364 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1365 {
1366 struct dwc3 *dwc = gadget_to_dwc(g);
1367 u32 reg;
1368
1369 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1370 return DWC3_DSTS_SOFFN(reg);
1371 }
1372
1373 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1374 {
1375 struct dwc3 *dwc = gadget_to_dwc(g);
1376
1377 unsigned long timeout;
1378 unsigned long flags;
1379
1380 u32 reg;
1381
1382 int ret = 0;
1383
1384 u8 link_state;
1385 u8 speed;
1386
1387 spin_lock_irqsave(&dwc->lock, flags);
1388
1389 /*
1390 * According to the Databook, a Remote Wakeup request should
1391 * be issued only when the device is in the early suspend state.
1392 *
1393 * We can check that via USB Link State bits in DSTS register.
1394 */
1395 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1396
1397 speed = reg & DWC3_DSTS_CONNECTSPD;
1398 if (speed == DWC3_DSTS_SUPERSPEED) {
1399 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1400 ret = -EINVAL;
1401 goto out;
1402 }
1403
1404 link_state = DWC3_DSTS_USBLNKST(reg);
1405
1406 switch (link_state) {
1407 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1408 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1409 break;
1410 default:
1411 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1412 link_state);
1413 ret = -EINVAL;
1414 goto out;
1415 }
1416
1417 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1418 if (ret < 0) {
1419 dev_err(dwc->dev, "failed to put link in Recovery\n");
1420 goto out;
1421 }
1422
1423 /* Recent versions do this automatically */
1424 if (dwc->revision < DWC3_REVISION_194A) {
1425 /* write zeroes to Link Change Request */
1426 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1427 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1428 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1429 }
1430
1431 /* poll until Link State changes to ON */
1432 timeout = jiffies + msecs_to_jiffies(100);
1433
1434 while (!time_after(jiffies, timeout)) {
1435 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1436
1437 /* in HS, means ON */
1438 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1439 break;
1440 }
1441
1442 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1443 dev_err(dwc->dev, "failed to send remote wakeup\n");
1444 ret = -EINVAL;
1445 }
1446
1447 out:
1448 spin_unlock_irqrestore(&dwc->lock, flags);
1449
1450 return ret;
1451 }
1452
1453 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1454 int is_selfpowered)
1455 {
1456 struct dwc3 *dwc = gadget_to_dwc(g);
1457 unsigned long flags;
1458
1459 spin_lock_irqsave(&dwc->lock, flags);
1460 g->is_selfpowered = !!is_selfpowered;
1461 spin_unlock_irqrestore(&dwc->lock, flags);
1462
1463 return 0;
1464 }
1465
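/**
 * dwc3_gadget_run_stop - toggle the controller's Run/Stop bit
 * @dwc: pointer to our context structure
 * @is_on: true to start the controller, false to stop it
 * @suspend: true when called on the hibernation/suspend path
 *
 * Sets or clears DCTL.RUN_STOP and then polls DSTS.DEVCTRLHLT (for roughly
 * 500us) until the core acknowledges the new state. Returns 0 on success or
 * -ETIMEDOUT.
 */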
1466 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1467 {
1468 u32 reg;
1469 u32 timeout = 500;
1470
1471 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1472 if (is_on) {
1473 if (dwc->revision <= DWC3_REVISION_187A) {
1474 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1475 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1476 }
1477
1478 if (dwc->revision >= DWC3_REVISION_194A)
1479 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1480 reg |= DWC3_DCTL_RUN_STOP;
1481
1482 if (dwc->has_hibernation)
1483 reg |= DWC3_DCTL_KEEP_CONNECT;
1484
1485 dwc->pullups_connected = true;
1486 } else {
1487 reg &= ~DWC3_DCTL_RUN_STOP;
1488
1489 if (dwc->has_hibernation && !suspend)
1490 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1491
1492 dwc->pullups_connected = false;
1493 }
1494
1495 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1496
1497 do {
1498 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1499 if (is_on) {
1500 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1501 break;
1502 } else {
1503 if (reg & DWC3_DSTS_DEVCTRLHLT)
1504 break;
1505 }
1506 timeout--;
1507 if (!timeout)
1508 return -ETIMEDOUT;
1509 udelay(1);
1510 } while (1);
1511
1512 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1513 dwc->gadget_driver
1514 ? dwc->gadget_driver->function : "no-function",
1515 is_on ? "connect" : "disconnect");
1516
1517 return 0;
1518 }
1519
1520 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1521 {
1522 struct dwc3 *dwc = gadget_to_dwc(g);
1523 unsigned long flags;
1524 int ret;
1525
1526 is_on = !!is_on;
1527
1528 spin_lock_irqsave(&dwc->lock, flags);
1529 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1530 spin_unlock_irqrestore(&dwc->lock, flags);
1531
1532 return ret;
1533 }
1534
1535 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1536 {
1537 u32 reg;
1538
1539 /* Enable all but Start and End of Frame IRQs */
1540 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1541 DWC3_DEVTEN_EVNTOVERFLOWEN |
1542 DWC3_DEVTEN_CMDCMPLTEN |
1543 DWC3_DEVTEN_ERRTICERREN |
1544 DWC3_DEVTEN_WKUPEVTEN |
1545 DWC3_DEVTEN_ULSTCNGEN |
1546 DWC3_DEVTEN_CONNECTDONEEN |
1547 DWC3_DEVTEN_USBRSTEN |
1548 DWC3_DEVTEN_DISCONNEVTEN);
1549
1550 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1551 }
1552
1553 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1554 {
1555 /* mask all interrupts */
1556 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1557 }
1558
1559 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1560 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1561
1562 static int dwc3_gadget_start(struct usb_gadget *g,
1563 struct usb_gadget_driver *driver)
1564 {
1565 struct dwc3 *dwc = gadget_to_dwc(g);
1566 struct dwc3_ep *dep;
1567 unsigned long flags;
1568 int ret = 0;
1569 int irq;
1570 u32 reg;
1571
1572 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1573 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1574 IRQF_SHARED, "dwc3", dwc);
1575 if (ret) {
1576 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1577 irq, ret);
1578 goto err0;
1579 }
1580
1581 spin_lock_irqsave(&dwc->lock, flags);
1582
1583 if (dwc->gadget_driver) {
1584 dev_err(dwc->dev, "%s is already bound to %s\n",
1585 dwc->gadget.name,
1586 dwc->gadget_driver->driver.name);
1587 ret = -EBUSY;
1588 goto err1;
1589 }
1590
1591 dwc->gadget_driver = driver;
1592
1593 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1594 reg &= ~(DWC3_DCFG_SPEED_MASK);
1595
1596 /**
1597 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1598 * which would cause a metastability state on the Run/Stop
1599 * bit if we try to force the IP to USB2-only mode.
1600 *
1601 * Because of that, we cannot configure the IP to any
1602 * speed other than SuperSpeed.
1603 *
1604 * Refers to:
1605 *
1606 * STAR#9000525659: Clock Domain Crossing on DCTL in
1607 * USB 2.0 Mode
1608 */
1609 if (dwc->revision < DWC3_REVISION_220A) {
1610 reg |= DWC3_DCFG_SUPERSPEED;
1611 } else {
1612 switch (dwc->maximum_speed) {
1613 case USB_SPEED_LOW:
1614 reg |= DWC3_DSTS_LOWSPEED;
1615 break;
1616 case USB_SPEED_FULL:
1617 reg |= DWC3_DSTS_FULLSPEED1;
1618 break;
1619 case USB_SPEED_HIGH:
1620 reg |= DWC3_DSTS_HIGHSPEED;
1621 break;
1622 case USB_SPEED_SUPER: /* FALLTHROUGH */
1623 case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
1624 default:
1625 reg |= DWC3_DSTS_SUPERSPEED;
1626 }
1627 }
1628 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1629
1630 /* Start with SuperSpeed Default */
1631 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1632
1633 dep = dwc->eps[0];
1634 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1635 false);
1636 if (ret) {
1637 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1638 goto err2;
1639 }
1640
1641 dep = dwc->eps[1];
1642 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1643 false);
1644 if (ret) {
1645 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1646 goto err3;
1647 }
1648
1649 /* begin to receive SETUP packets */
1650 dwc->ep0state = EP0_SETUP_PHASE;
1651 dwc3_ep0_out_start(dwc);
1652
1653 dwc3_gadget_enable_irq(dwc);
1654
1655 spin_unlock_irqrestore(&dwc->lock, flags);
1656
1657 return 0;
1658
1659 err3:
1660 __dwc3_gadget_ep_disable(dwc->eps[0]);
1661
1662 err2:
1663 dwc->gadget_driver = NULL;
1664
1665 err1:
1666 spin_unlock_irqrestore(&dwc->lock, flags);
1667
1668 free_irq(irq, dwc);
1669
1670 err0:
1671 return ret;
1672 }
1673
1674 static int dwc3_gadget_stop(struct usb_gadget *g)
1675 {
1676 struct dwc3 *dwc = gadget_to_dwc(g);
1677 unsigned long flags;
1678 int irq;
1679
1680 spin_lock_irqsave(&dwc->lock, flags);
1681
1682 dwc3_gadget_disable_irq(dwc);
1683 __dwc3_gadget_ep_disable(dwc->eps[0]);
1684 __dwc3_gadget_ep_disable(dwc->eps[1]);
1685
1686 dwc->gadget_driver = NULL;
1687
1688 spin_unlock_irqrestore(&dwc->lock, flags);
1689
1690 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1691 free_irq(irq, dwc);
1692
1693 return 0;
1694 }
1695
1696 static const struct usb_gadget_ops dwc3_gadget_ops = {
1697 .get_frame = dwc3_gadget_get_frame,
1698 .wakeup = dwc3_gadget_wakeup,
1699 .set_selfpowered = dwc3_gadget_set_selfpowered,
1700 .pullup = dwc3_gadget_pullup,
1701 .udc_start = dwc3_gadget_start,
1702 .udc_stop = dwc3_gadget_stop,
1703 };
1704
1705 /* -------------------------------------------------------------------------- */
1706
1707 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1708 u8 num, u32 direction)
1709 {
1710 struct dwc3_ep *dep;
1711 u8 i;
1712
1713 for (i = 0; i < num; i++) {
1714 u8 epnum = (i << 1) | (!!direction);
1715
1716 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1717 if (!dep)
1718 return -ENOMEM;
1719
1720 dep->dwc = dwc;
1721 dep->number = epnum;
1722 dep->direction = !!direction;
1723 dwc->eps[epnum] = dep;
1724
1725 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1726 (epnum & 1) ? "in" : "out");
1727
1728 dep->endpoint.name = dep->name;
1729
1730 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1731
1732 if (epnum == 0 || epnum == 1) {
1733 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1734 dep->endpoint.maxburst = 1;
1735 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1736 if (!epnum)
1737 dwc->gadget.ep0 = &dep->endpoint;
1738 } else {
1739 int ret;
1740
1741 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1742 dep->endpoint.max_streams = 15;
1743 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1744 list_add_tail(&dep->endpoint.ep_list,
1745 &dwc->gadget.ep_list);
1746
1747 ret = dwc3_alloc_trb_pool(dep);
1748 if (ret)
1749 return ret;
1750 }
1751
1752 INIT_LIST_HEAD(&dep->request_list);
1753 INIT_LIST_HEAD(&dep->req_queued);
1754 }
1755
1756 return 0;
1757 }
1758
1759 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1760 {
1761 int ret;
1762
1763 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1764
1765 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1766 if (ret < 0) {
1767 dwc3_trace(trace_dwc3_gadget,
1768 "failed to allocate OUT endpoints");
1769 return ret;
1770 }
1771
1772 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1773 if (ret < 0) {
1774 dwc3_trace(trace_dwc3_gadget,
1775 "failed to allocate IN endpoints");
1776 return ret;
1777 }
1778
1779 return 0;
1780 }
1781
1782 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1783 {
1784 struct dwc3_ep *dep;
1785 u8 epnum;
1786
1787 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1788 dep = dwc->eps[epnum];
1789 if (!dep)
1790 continue;
1791 /*
1792 * Physical endpoints 0 and 1 are special; they form the
1793 * bi-directional USB endpoint 0.
1794 *
1795 * For those two physical endpoints, we don't allocate a TRB
1796 * pool nor do we add them to the endpoints list. Due to that, we
1797 * shouldn't do these two operations otherwise we would end up
1798 * with all sorts of bugs when removing dwc3.ko.
1799 */
1800 if (epnum != 0 && epnum != 1) {
1801 dwc3_free_trb_pool(dep);
1802 list_del(&dep->endpoint.ep_list);
1803 }
1804
1805 kfree(dep);
1806 }
1807 }
1808
1809 /* -------------------------------------------------------------------------- */
1810
1811 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1812 struct dwc3_request *req, struct dwc3_trb *trb,
1813 const struct dwc3_event_depevt *event, int status)
1814 {
1815 unsigned int count;
1816 unsigned int s_pkt = 0;
1817 unsigned int trb_status;
1818
1819 trace_dwc3_complete_trb(dep, trb);
1820
1821 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1822 /*
1823 * We continue despite the error. There is not much we
1824 * can do. If we don't clean it up we loop forever. If
1825 * we skip the TRB then it gets overwritten after a
1826 * while since we use them in a ring buffer. A BUG()
1827 * would help. Let's hope that if this occurs, someone
1828 * fixes the root cause instead of looking away :)
1829 */
1830 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1831 dep->name, trb);
1832 count = trb->size & DWC3_TRB_SIZE_MASK;
1833
1834 if (dep->direction) {
1835 if (count) {
1836 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1837 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1838 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1839 dep->name);
1840 /*
1841 * If missed isoc occurred and there is
1842 * no request queued then issue END
1843 * TRANSFER, so that core generates
1844 * next xfernotready and we will issue
1845 * a fresh START TRANSFER.
1846 * If there are still queued request
1847 * then wait, do not issue either END
1848 * or UPDATE TRANSFER, just attach next
1849 * request in request_list during
1850 * giveback. If any future queued request
1851 * is successfully transferred then we
1852 * will issue UPDATE TRANSFER for all
1853 * request in the request_list.
1854 */
1855 dep->flags |= DWC3_EP_MISSED_ISOC;
1856 } else {
1857 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1858 dep->name);
1859 status = -ECONNRESET;
1860 }
1861 } else {
1862 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1863 }
1864 } else {
1865 if (count && (event->status & DEPEVT_STATUS_SHORT))
1866 s_pkt = 1;
1867 }
1868
1869 /*
1870 * We assume here we will always receive the entire data block
1871 * which we should receive. Meaning, if we program RX to
1872 * receive 4K but we receive only 2K, we assume that's all we
1873 * should receive and we simply bounce the request back to the
1874 * gadget driver for further processing.
1875 */
1876 req->request.actual += req->request.length - count;
1877 if (s_pkt)
1878 return 1;
1879 if ((event->status & DEPEVT_STATUS_LST) &&
1880 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1881 DWC3_TRB_CTRL_HWO)))
1882 return 1;
1883 if ((event->status & DEPEVT_STATUS_IOC) &&
1884 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1885 return 1;
1886 return 0;
1887 }
1888
1889 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1890 const struct dwc3_event_depevt *event, int status)
1891 {
1892 struct dwc3_request *req;
1893 struct dwc3_trb *trb;
1894 unsigned int slot;
1895 unsigned int i;
1896 int ret;
1897
1898 do {
1899 req = next_request(&dep->req_queued);
1900 if (!req) {
1901 WARN_ON_ONCE(1);
1902 return 1;
1903 }
1904 i = 0;
1905 do {
1906 slot = req->start_slot + i;
1907 if ((slot == DWC3_TRB_NUM - 1) &&
1908 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1909 slot++;
1910 slot %= DWC3_TRB_NUM;
1911 trb = &dep->trb_pool[slot];
1912
1913 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1914 event, status);
1915 if (ret)
1916 break;
1917 } while (++i < req->request.num_mapped_sgs);
1918
1919 dwc3_gadget_giveback(dep, req, status);
1920
1921 if (ret)
1922 break;
1923 } while (1);
1924
1925 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1926 list_empty(&dep->req_queued)) {
1927 if (list_empty(&dep->request_list)) {
1928 /*
1929 * If there is no entry in request list then do
1930 * not issue END TRANSFER now. Just set PENDING
1931 * flag, so that END TRANSFER is issued when an
1932 * entry is added into request list.
1933 */
1934 dep->flags = DWC3_EP_PENDING_REQUEST;
1935 } else {
1936 dwc3_stop_active_transfer(dwc, dep->number, true);
1937 dep->flags = DWC3_EP_ENABLED;
1938 }
1939 return 1;
1940 }
1941
1942 return 1;
1943 }
1944
1945 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1946 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1947 {
1948 unsigned status = 0;
1949 int clean_busy;
1950 u32 is_xfer_complete;
1951
1952 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
1953
1954 if (event->status & DEPEVT_STATUS_BUSERR)
1955 status = -ECONNRESET;
1956
1957 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
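	/*
	 * dwc3_cleanup_done_reqs() currently always returns 1, so EP_BUSY is
	 * cleared whenever this event finishes the transfer: an XferComplete,
	 * or any completion event on an isochronous endpoint.
	 */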
1958 if (clean_busy && (is_xfer_complete ||
1959 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
1960 dep->flags &= ~DWC3_EP_BUSY;
1961
1962 /*
1963 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1964 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1965 */
1966 if (dwc->revision < DWC3_REVISION_183A) {
1967 u32 reg;
1968 int i;
1969
1970 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1971 dep = dwc->eps[i];
1972
1973 if (!(dep->flags & DWC3_EP_ENABLED))
1974 continue;
1975
1976 if (!list_empty(&dep->req_queued))
1977 return;
1978 }
1979
1980 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1981 reg |= dwc->u1u2;
1982 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1983
1984 dwc->u1u2 = 0;
1985 }
1986 }
1987
1988 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1989 const struct dwc3_event_depevt *event)
1990 {
1991 struct dwc3_ep *dep;
1992 u8 epnum = event->endpoint_number;
1993
1994 dep = dwc->eps[epnum];
1995
1996 if (!(dep->flags & DWC3_EP_ENABLED))
1997 return;
1998
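	/* Physical endpoints 0 and 1 are ep0 OUT/IN; route them to the ep0 handler. */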
1999 if (epnum == 0 || epnum == 1) {
2000 dwc3_ep0_interrupt(dwc, event);
2001 return;
2002 }
2003
2004 switch (event->endpoint_event) {
2005 case DWC3_DEPEVT_XFERCOMPLETE:
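		/*
		 * XferComplete means the transfer resource has been released,
		 * so drop the cached resource index before handling it.
		 */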
2006 dep->resource_index = 0;
2007
2008 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2009 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
2010 dep->name);
2011 return;
2012 }
2013
2014 dwc3_endpoint_transfer_complete(dwc, dep, event);
2015 break;
2016 case DWC3_DEPEVT_XFERINPROGRESS:
2017 dwc3_endpoint_transfer_complete(dwc, dep, event);
2018 break;
2019 case DWC3_DEPEVT_XFERNOTREADY:
2020 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2021 dwc3_gadget_start_isoc(dwc, dep, event);
2022 } else {
2023 int ret;
2024
2025 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2026 dep->name, event->status &
2027 DEPEVT_STATUS_TRANSFER_ACTIVE
2028 ? "Transfer Active"
2029 : "Transfer Not Active");
2030
2031 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
2032 if (!ret || ret == -EBUSY)
2033 return;
2034
2035 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
2036 dep->name);
2037 }
2038
2039 break;
2040 case DWC3_DEPEVT_STREAMEVT:
2041 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2042 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2043 dep->name);
2044 return;
2045 }
2046
2047 switch (event->status) {
2048 case DEPEVT_STREAMEVT_FOUND:
2049 dwc3_trace(trace_dwc3_gadget,
2050 "Stream %d found and started",
2051 event->parameters);
2052
2053 break;
2054 case DEPEVT_STREAMEVT_NOTFOUND:
2055 /* FALLTHROUGH */
2056 default:
2057 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2058 }
2059 break;
2060 case DWC3_DEPEVT_RXTXFIFOEVT:
2061 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2062 break;
2063 case DWC3_DEPEVT_EPCMDCMPLT:
2064 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2065 break;
2066 }
2067 }
2068
2069 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2070 {
2071 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2072 spin_unlock(&dwc->lock);
2073 dwc->gadget_driver->disconnect(&dwc->gadget);
2074 spin_lock(&dwc->lock);
2075 }
2076 }
2077
2078 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2079 {
2080 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2081 spin_unlock(&dwc->lock);
2082 dwc->gadget_driver->suspend(&dwc->gadget);
2083 spin_lock(&dwc->lock);
2084 }
2085 }
2086
2087 static void dwc3_resume_gadget(struct dwc3 *dwc)
2088 {
2089 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2090 spin_unlock(&dwc->lock);
2091 dwc->gadget_driver->resume(&dwc->gadget);
2092 spin_lock(&dwc->lock);
2093 }
2094 }
2095
2096 static void dwc3_reset_gadget(struct dwc3 *dwc)
2097 {
2098 if (!dwc->gadget_driver)
2099 return;
2100
2101 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2102 spin_unlock(&dwc->lock);
2103 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2104 spin_lock(&dwc->lock);
2105 }
2106 }
2107
2108 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2109 {
2110 struct dwc3_ep *dep;
2111 struct dwc3_gadget_ep_cmd_params params;
2112 u32 cmd;
2113 int ret;
2114
2115 dep = dwc->eps[epnum];
2116
2117 if (!dep->resource_index)
2118 return;
2119
2120 /*
2121 * NOTICE: We are violating what the Databook says about the
2122 * EndTransfer command. Ideally we would _always_ wait for the
2123 * EndTransfer Command Completion IRQ, but that's causing too
2124 * much trouble synchronizing between us and gadget driver.
2125 *
2126 * We have discussed this with the IP Provider and it was
2127 * suggested to giveback all requests here, but give HW some
2128 * extra time to synchronize with the interconnect. We're using
2129 * an arbitrary 100us delay for that.
2130 *
2131 * Note also that a similar handling was tested by Synopsys
2132 * (thanks a lot Paul) and nothing bad has come out of it.
2133 * In short, what we're doing is:
2134 *
2135 * - Issue EndTransfer WITH CMDIOC bit set
2136 * - Wait 100us
2137 */
2138
2139 cmd = DWC3_DEPCMD_ENDTRANSFER;
2140 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2141 cmd |= DWC3_DEPCMD_CMDIOC;
2142 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2143 memset(&params, 0, sizeof(params));
2144 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2145 WARN_ON_ONCE(ret);
2146 dep->resource_index = 0;
2147 dep->flags &= ~DWC3_EP_BUSY;
2148 udelay(100);
2149 }
2150
2151 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2152 {
2153 u32 epnum;
2154
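	/* Physical endpoints 0 and 1 (ep0 OUT/IN) are handled separately. */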
2155 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2156 struct dwc3_ep *dep;
2157
2158 dep = dwc->eps[epnum];
2159 if (!dep)
2160 continue;
2161
2162 if (!(dep->flags & DWC3_EP_ENABLED))
2163 continue;
2164
2165 dwc3_remove_requests(dwc, dep);
2166 }
2167 }
2168
2169 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2170 {
2171 u32 epnum;
2172
2173 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2174 struct dwc3_ep *dep;
2175 struct dwc3_gadget_ep_cmd_params params;
2176 int ret;
2177
2178 dep = dwc->eps[epnum];
2179 if (!dep)
2180 continue;
2181
2182 if (!(dep->flags & DWC3_EP_STALL))
2183 continue;
2184
2185 dep->flags &= ~DWC3_EP_STALL;
2186
2187 memset(&params, 0, sizeof(params));
2188 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2189 DWC3_DEPCMD_CLEARSTALL, &params);
2190 WARN_ON_ONCE(ret);
2191 }
2192 }
2193
2194 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2195 {
2196 int reg;
2197
2198 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2199 reg &= ~DWC3_DCTL_INITU1ENA;
2200 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2201
2202 reg &= ~DWC3_DCTL_INITU2ENA;
2203 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2204
2205 dwc3_disconnect_gadget(dwc);
2206
2207 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2208 dwc->setup_packet_pending = false;
2209 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2210 }
2211
2212 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2213 {
2214 u32 reg;
2215
2216 /*
2217 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2218 * would cause a missing Disconnect Event if there's a
2219 * pending Setup Packet in the FIFO.
2220 *
2221 * There's no suggested workaround on the official Bug
2222 * report, which states that "unless the driver/application
2223 * is doing any special handling of a disconnect event,
2224 * there is no functional issue".
2225 *
2226 * Unfortunately, it turns out that we _do_ some special
2227 * handling of a disconnect event, namely complete all
2228 * pending transfers, notify gadget driver of the
2229 * disconnection, and so on.
2230 *
2231 * Our suggested workaround is to follow the Disconnect
2232 * Event steps here, instead, based on a setup_packet_pending
2233 * flag. Such flag gets set whenever we have a XferNotReady
2234 * event on EP0 and gets cleared on XferComplete for the
2235 * same endpoint.
2236 *
2237 * Refers to:
2238 *
2239 * STAR#9000466709: RTL: Device : Disconnect event not
2240 * generated if setup packet pending in FIFO
2241 */
2242 if (dwc->revision < DWC3_REVISION_188A) {
2243 if (dwc->setup_packet_pending)
2244 dwc3_gadget_disconnect_interrupt(dwc);
2245 }
2246
2247 dwc3_reset_gadget(dwc);
2248
2249 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2250 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2251 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2252 dwc->test_mode = false;
2253
2254 dwc3_stop_active_transfers(dwc);
2255 dwc3_clear_stall_all_ep(dwc);
2256
2257 /* Reset device address to zero */
2258 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2259 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2260 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2261 }
2262
2263 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2264 {
2265 u32 reg;
2266 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2267
2268 /*
2269 * We change the clock only at SS, though it is not clear why we would
2270 * want to do this. Maybe it becomes part of the power saving plan.
2271 */
2272
2273 if (speed != DWC3_DSTS_SUPERSPEED)
2274 return;
2275
2276 /*
2277 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2278 * each time on Connect Done.
2279 */
2280 if (!usb30_clock)
2281 return;
2282
2283 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2284 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2285 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2286 }
2287
2288 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2289 {
2290 struct dwc3_ep *dep;
2291 int ret;
2292 u32 reg;
2293 u8 speed;
2294
2295 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2296 speed = reg & DWC3_DSTS_CONNECTSPD;
2297 dwc->speed = speed;
2298
2299 dwc3_update_ram_clk_sel(dwc, speed);
2300
2301 switch (speed) {
2302 case DWC3_DCFG_SUPERSPEED:
2303 /*
2304 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2305 * would cause a missing USB3 Reset event.
2306 *
2307 * In such situations, we should force a USB3 Reset
2308 * event by calling our dwc3_gadget_reset_interrupt()
2309 * routine.
2310 *
2311 * Refers to:
2312 *
2313 * STAR#9000483510: RTL: SS : USB3 reset event may
2314 * not be generated always when the link enters poll
2315 */
2316 if (dwc->revision < DWC3_REVISION_190A)
2317 dwc3_gadget_reset_interrupt(dwc);
2318
2319 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2320 dwc->gadget.ep0->maxpacket = 512;
2321 dwc->gadget.speed = USB_SPEED_SUPER;
2322 break;
2323 case DWC3_DCFG_HIGHSPEED:
2324 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2325 dwc->gadget.ep0->maxpacket = 64;
2326 dwc->gadget.speed = USB_SPEED_HIGH;
2327 break;
2328 case DWC3_DCFG_FULLSPEED2:
2329 case DWC3_DCFG_FULLSPEED1:
2330 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2331 dwc->gadget.ep0->maxpacket = 64;
2332 dwc->gadget.speed = USB_SPEED_FULL;
2333 break;
2334 case DWC3_DCFG_LOWSPEED:
2335 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2336 dwc->gadget.ep0->maxpacket = 8;
2337 dwc->gadget.speed = USB_SPEED_LOW;
2338 break;
2339 }
2340
2341 /* Enable USB2 LPM Capability */
2342
2343 if ((dwc->revision > DWC3_REVISION_194A)
2344 && (speed != DWC3_DCFG_SUPERSPEED)) {
2345 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2346 reg |= DWC3_DCFG_LPM_CAP;
2347 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2348
2349 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2350 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2351
2352 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2353
2354 /*
2355 * On dwc3 revisions >= 2.40a, when the LPM Erratum is enabled and
2356 * DCFG.LPMCap is set, the core responds with an ACK if the
2357 * BESL value in the LPM token is less than or equal to the LPM
2358 * NYET threshold.
2359 */
2360 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2361 && dwc->has_lpm_erratum,
2362 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2363
2364 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2365 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2366
2367 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2368 } else {
2369 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2370 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2371 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2372 }
2373
2374 dep = dwc->eps[0];
2375 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2376 false);
2377 if (ret) {
2378 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2379 return;
2380 }
2381
2382 dep = dwc->eps[1];
2383 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2384 false);
2385 if (ret) {
2386 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2387 return;
2388 }
2389
2390 /*
2391 * Configure PHY via GUSB3PIPECTLn if required.
2392 *
2393 * Update GTXFIFOSIZn
2394 *
2395 * In both cases reset values should be sufficient.
2396 */
2397 }
2398
2399 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2400 {
2401 /*
2402 * TODO take core out of low power mode when that's
2403 * implemented.
2404 */
2405
2406 dwc->gadget_driver->resume(&dwc->gadget);
2407 }
2408
2409 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2410 unsigned int evtinfo)
2411 {
2412 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2413 unsigned int pwropt;
2414
2415 /*
2416 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2417 * Hibernation mode enabled which would show up when device detects
2418 * host-initiated U3 exit.
2419 *
2420 * In that case, device will generate a Link State Change Interrupt
2421 * from U3 to RESUME which is only necessary if Hibernation is
2422 * configured in.
2423 *
2424 * There are no functional changes due to such spurious event and we
2425 * just need to ignore it.
2426 *
2427 * Refers to:
2428 *
2429 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2430 * operational mode
2431 */
2432 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2433 if ((dwc->revision < DWC3_REVISION_250A) &&
2434 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2435 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2436 (next == DWC3_LINK_STATE_RESUME)) {
2437 dwc3_trace(trace_dwc3_gadget,
2438 "ignoring transition U3 -> Resume");
2439 return;
2440 }
2441 }
2442
2443 /*
2444 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2445 * on the link partner, the USB session might go through multiple
2446 * entries and exits of low power states before a transfer takes place.
2447 *
2448 * Due to this problem, we might experience lower throughput. The
2449 * suggested workaround is to disable DCTL[12:9] bits if we're
2450 * transitioning from U1/U2 to U0 and enable those bits again
2451 * after a transfer completes and there are no pending transfers
2452 * on any of the enabled endpoints.
2453 *
2454 * This is the first half of that workaround.
2455 *
2456 * Refers to:
2457 *
2458 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2459 * core send LGO_Ux entering U0
2460 */
2461 if (dwc->revision < DWC3_REVISION_183A) {
2462 if (next == DWC3_LINK_STATE_U0) {
2463 u32 u1u2;
2464 u32 reg;
2465
2466 switch (dwc->link_state) {
2467 case DWC3_LINK_STATE_U1:
2468 case DWC3_LINK_STATE_U2:
2469 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2470 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2471 | DWC3_DCTL_ACCEPTU2ENA
2472 | DWC3_DCTL_INITU1ENA
2473 | DWC3_DCTL_ACCEPTU1ENA);
2474
2475 if (!dwc->u1u2)
2476 dwc->u1u2 = reg & u1u2;
2477
2478 reg &= ~u1u2;
2479
2480 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2481 break;
2482 default:
2483 /* do nothing */
2484 break;
2485 }
2486 }
2487 }
2488
2489 switch (next) {
2490 case DWC3_LINK_STATE_U1:
2491 if (dwc->speed == USB_SPEED_SUPER)
2492 dwc3_suspend_gadget(dwc);
2493 break;
2494 case DWC3_LINK_STATE_U2:
2495 case DWC3_LINK_STATE_U3:
2496 dwc3_suspend_gadget(dwc);
2497 break;
2498 case DWC3_LINK_STATE_RESUME:
2499 dwc3_resume_gadget(dwc);
2500 break;
2501 default:
2502 /* do nothing */
2503 break;
2504 }
2505
2506 dwc->link_state = next;
2507 }
2508
2509 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2510 unsigned int evtinfo)
2511 {
2512 unsigned int is_ss = evtinfo & BIT(4);
2513
2514 /*
2515 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2516 * has a known issue which can cause USB CV TD.9.23 to fail
2517 * randomly.
2518 *
2519 * Because of this issue, core could generate bogus hibernation
2520 * events which SW needs to ignore.
2521 *
2522 * Refers to:
2523 *
2524 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2525 * Device Fallback from SuperSpeed
2526 */
2527 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2528 return;
2529
2530 /* enter hibernation here */
2531 }
2532
2533 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2534 const struct dwc3_event_devt *event)
2535 {
2536 switch (event->type) {
2537 case DWC3_DEVICE_EVENT_DISCONNECT:
2538 dwc3_gadget_disconnect_interrupt(dwc);
2539 break;
2540 case DWC3_DEVICE_EVENT_RESET:
2541 dwc3_gadget_reset_interrupt(dwc);
2542 break;
2543 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2544 dwc3_gadget_conndone_interrupt(dwc);
2545 break;
2546 case DWC3_DEVICE_EVENT_WAKEUP:
2547 dwc3_gadget_wakeup_interrupt(dwc);
2548 break;
2549 case DWC3_DEVICE_EVENT_HIBER_REQ:
2550 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2551 "unexpected hibernation event\n"))
2552 break;
2553
2554 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2555 break;
2556 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2557 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2558 break;
2559 case DWC3_DEVICE_EVENT_EOPF:
2560 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2561 break;
2562 case DWC3_DEVICE_EVENT_SOF:
2563 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2564 break;
2565 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2566 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2567 break;
2568 case DWC3_DEVICE_EVENT_CMD_CMPL:
2569 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2570 break;
2571 case DWC3_DEVICE_EVENT_OVERFLOW:
2572 dwc3_trace(trace_dwc3_gadget, "Overflow");
2573 break;
2574 default:
2575 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2576 }
2577 }
2578
2579 static void dwc3_process_event_entry(struct dwc3 *dwc,
2580 const union dwc3_event *event)
2581 {
2582 trace_dwc3_event(event->raw);
2583
2584 /* Endpoint IRQ, handle it and return early */
2585 if (event->type.is_devspec == 0) {
2586 /* depevt */
2587 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2588 }
2589
2590 switch (event->type.type) {
2591 case DWC3_EVENT_TYPE_DEV:
2592 dwc3_gadget_interrupt(dwc, &event->devt);
2593 break;
2594 /* REVISIT what to do with Carkit and I2C events ? */
2595 default:
2596 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2597 }
2598 }
2599
2600 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2601 {
2602 struct dwc3_event_buffer *evt;
2603 irqreturn_t ret = IRQ_NONE;
2604 int left;
2605 u32 reg;
2606
2607 evt = dwc->ev_buffs[buf];
2608 left = evt->count;
2609
2610 if (!(evt->flags & DWC3_EVENT_PENDING))
2611 return IRQ_NONE;
2612
2613 while (left > 0) {
2614 union dwc3_event event;
2615
2616 event.raw = *(u32 *) (evt->buf + evt->lpos);
2617
2618 dwc3_process_event_entry(dwc, &event);
2619
2620 /*
2621 * FIXME we wrap around correctly to the next entry as
2622 * almost all entries are 4 bytes in size. There is one
2623 * entry which has 12 bytes which is a regular entry
2624 * followed by 8 bytes of data. ATM I don't know how
2625 * things are organized if we get close to the buffer
2626 * boundary, so I'll worry about that once we try to
2627 * handle that case.
2628 */
2629 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2630 left -= 4;
2631
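		/*
		 * Acknowledge the 4 bytes just consumed so the controller can
		 * reuse that space in the event buffer.
		 */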
2632 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2633 }
2634
2635 evt->count = 0;
2636 evt->flags &= ~DWC3_EVENT_PENDING;
2637 ret = IRQ_HANDLED;
2638
2639 /* Unmask interrupt */
2640 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2641 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2642 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2643
2644 return ret;
2645 }
2646
2647 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2648 {
2649 struct dwc3 *dwc = _dwc;
2650 unsigned long flags;
2651 irqreturn_t ret = IRQ_NONE;
2652 int i;
2653
2654 spin_lock_irqsave(&dwc->lock, flags);
2655
2656 for (i = 0; i < dwc->num_event_buffers; i++)
2657 ret |= dwc3_process_event_buf(dwc, i);
2658
2659 spin_unlock_irqrestore(&dwc->lock, flags);
2660
2661 return ret;
2662 }
2663
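/*
 * Hard IRQ half: latch the pending event count and mask this event buffer's
 * interrupt; the events themselves are drained later in
 * dwc3_thread_interrupt().
 */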
2664 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2665 {
2666 struct dwc3_event_buffer *evt;
2667 u32 count;
2668 u32 reg;
2669
2670 evt = dwc->ev_buffs[buf];
2671
2672 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2673 count &= DWC3_GEVNTCOUNT_MASK;
2674 if (!count)
2675 return IRQ_NONE;
2676
2677 evt->count = count;
2678 evt->flags |= DWC3_EVENT_PENDING;
2679
2680 /* Mask interrupt */
2681 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2682 reg |= DWC3_GEVNTSIZ_INTMASK;
2683 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2684
2685 return IRQ_WAKE_THREAD;
2686 }
2687
2688 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2689 {
2690 struct dwc3 *dwc = _dwc;
2691 int i;
2692 irqreturn_t ret = IRQ_NONE;
2693
2694 spin_lock(&dwc->lock);
2695
2696 for (i = 0; i < dwc->num_event_buffers; i++) {
2697 irqreturn_t status;
2698
2699 status = dwc3_check_event_buf(dwc, i);
2700 if (status == IRQ_WAKE_THREAD)
2701 ret = status;
2702 }
2703
2704 spin_unlock(&dwc->lock);
2705
2706 return ret;
2707 }
2708
2709 /**
2710 * dwc3_gadget_init - Initializes gadget related registers
2711 * @dwc: pointer to our controller context structure
2712 *
2713 * Returns 0 on success otherwise negative errno.
2714 */
2715 int dwc3_gadget_init(struct dwc3 *dwc)
2716 {
2717 int ret;
2718
2719 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2720 &dwc->ctrl_req_addr, GFP_KERNEL);
2721 if (!dwc->ctrl_req) {
2722 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2723 ret = -ENOMEM;
2724 goto err0;
2725 }
2726
2727 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2728 &dwc->ep0_trb_addr, GFP_KERNEL);
2729 if (!dwc->ep0_trb) {
2730 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2731 ret = -ENOMEM;
2732 goto err1;
2733 }
2734
2735 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2736 if (!dwc->setup_buf) {
2737 ret = -ENOMEM;
2738 goto err2;
2739 }
2740
2741 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2742 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2743 GFP_KERNEL);
2744 if (!dwc->ep0_bounce) {
2745 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2746 ret = -ENOMEM;
2747 goto err3;
2748 }
2749
2750 dwc->gadget.ops = &dwc3_gadget_ops;
2751 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2752 dwc->gadget.sg_supported = true;
2753 dwc->gadget.name = "dwc3-gadget";
2754
2755 /*
2756 * FIXME We might be setting max_speed to <SUPER, however versions
2757 * <2.20a of dwc3 have an issue with metastability (documented
2758 * elsewhere in this driver) which tells us we can't set max speed to
2759 * anything lower than SUPER.
2760 *
2761 * Because gadget.max_speed is only used by composite.c and function
2762 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2763 * to happen so we avoid sending SuperSpeed Capability descriptor
2764 * together with our BOS descriptor as that could confuse host into
2765 * thinking we can handle super speed.
2766 *
2767 * Note that, in fact, we won't even support GetBOS requests when speed
2768 * is less than super speed because we don't have means, yet, to tell
2769 * composite.c that we are USB 2.0 + LPM ECN.
2770 */
2771 if (dwc->revision < DWC3_REVISION_220A)
2772 dwc3_trace(trace_dwc3_gadget,
2773 "Changing max_speed on rev %08x\n",
2774 dwc->revision);
2775
2776 dwc->gadget.max_speed = dwc->maximum_speed;
2777
2778 /*
2779 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2780 * on ep out.
2781 */
2782 dwc->gadget.quirk_ep_out_aligned_size = true;
2783
2784 /*
2785 * REVISIT: Here we should clear all pending IRQs to be
2786 * sure we're starting from a well known location.
2787 */
2788
2789 ret = dwc3_gadget_init_endpoints(dwc);
2790 if (ret)
2791 goto err4;
2792
2793 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2794 if (ret) {
2795 dev_err(dwc->dev, "failed to register udc\n");
2796 goto err4;
2797 }
2798
2799 return 0;
2800
2801 err4:
2802 dwc3_gadget_free_endpoints(dwc);
2803 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2804 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2805
2806 err3:
2807 kfree(dwc->setup_buf);
2808
2809 err2:
2810 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2811 dwc->ep0_trb, dwc->ep0_trb_addr);
2812
2813 err1:
2814 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2815 dwc->ctrl_req, dwc->ctrl_req_addr);
2816
2817 err0:
2818 return ret;
2819 }
2820
2821 /* -------------------------------------------------------------------------- */
2822
2823 void dwc3_gadget_exit(struct dwc3 *dwc)
2824 {
2825 usb_del_gadget_udc(&dwc->gadget);
2826
2827 dwc3_gadget_free_endpoints(dwc);
2828
2829 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2830 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2831
2832 kfree(dwc->setup_buf);
2833
2834 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2835 dwc->ep0_trb, dwc->ep0_trb_addr);
2836
2837 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2838 dwc->ctrl_req, dwc->ctrl_req_addr);
2839 }
2840
2841 int dwc3_gadget_suspend(struct dwc3 *dwc)
2842 {
2843 if (dwc->pullups_connected) {
2844 dwc3_gadget_disable_irq(dwc);
2845 dwc3_gadget_run_stop(dwc, true, true);
2846 }
2847
2848 __dwc3_gadget_ep_disable(dwc->eps[0]);
2849 __dwc3_gadget_ep_disable(dwc->eps[1]);
2850
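	/* Save DCFG (device address and speed settings) so resume can restore it. */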
2851 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2852
2853 return 0;
2854 }
2855
2856 int dwc3_gadget_resume(struct dwc3 *dwc)
2857 {
2858 struct dwc3_ep *dep;
2859 int ret;
2860
2861 /* Start with SuperSpeed Default */
2862 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2863
2864 dep = dwc->eps[0];
2865 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2866 false);
2867 if (ret)
2868 goto err0;
2869
2870 dep = dwc->eps[1];
2871 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2872 false);
2873 if (ret)
2874 goto err1;
2875
2876 /* begin to receive SETUP packets */
2877 dwc->ep0state = EP0_SETUP_PHASE;
2878 dwc3_ep0_out_start(dwc);
2879
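	/* Restore the DCFG value saved in dwc3_gadget_suspend(). */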
2880 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2881
2882 if (dwc->pullups_connected) {
2883 dwc3_gadget_enable_irq(dwc);
2884 dwc3_gadget_run_stop(dwc, true, false);
2885 }
2886
2887 return 0;
2888
2889 err1:
2890 __dwc3_gadget_ep_disable(dwc->eps[0]);
2891
2892 err0:
2893 return ret;
2894 }
2895