This source file includes the following definitions:
- dwc2_enable_common_interrupts
- dwc2_gahbcfg_init
- dwc2_gusbcfg_init
- dwc2_vbus_supply_init
- dwc2_vbus_supply_exit
- dwc2_enable_host_interrupts
- dwc2_disable_host_interrupts
- dwc2_calculate_dynamic_fifo
- dwc2_config_fifos
- dwc2_calc_frame_interval
- dwc2_read_packet
- dwc2_dump_channel_info
- dwc2_host_start
- dwc2_host_disconnect
- dwc2_host_hub_info
- dwc2_hc_enable_slave_ints
- dwc2_hc_enable_dma_ints
- dwc2_hc_enable_ints
- dwc2_hc_init
- dwc2_hc_halt
- dwc2_hc_cleanup
- dwc2_hc_set_even_odd_frame
- dwc2_set_pid_isoc
- dwc2_hc_write_packet
- dwc2_hc_do_ping
- dwc2_hc_start_transfer
- dwc2_hc_start_transfer_ddma
- dwc2_hc_continue_transfer
- dwc2_kill_urbs_in_qh_list
- dwc2_qh_list_free
- dwc2_kill_all_urbs
- dwc2_hcd_start
- dwc2_hcd_cleanup_channels
- dwc2_hcd_connect
- dwc2_hcd_disconnect
- dwc2_hcd_rem_wakeup
- dwc2_hcd_stop
- dwc2_hcd_urb_enqueue
- dwc2_hcd_urb_dequeue
- dwc2_hcd_endpoint_disable
- dwc2_hcd_endpoint_reset
- dwc2_core_init
- dwc2_core_host_init
- dwc2_hcd_reinit
- dwc2_hc_init_split
- dwc2_hc_init_xfer
- dwc2_alloc_split_dma_aligned_buf
- dwc2_free_dma_aligned_buffer
- dwc2_alloc_dma_aligned_buffer
- dwc2_map_urb_for_dma
- dwc2_unmap_urb_for_dma
- dwc2_assign_and_init_hc
- dwc2_hcd_select_transactions
- dwc2_queue_transaction
- dwc2_process_periodic_channels
- dwc2_process_non_periodic_channels
- dwc2_hcd_queue_transactions
- dwc2_conn_id_status_change
- dwc2_wakeup_detected
- dwc2_host_is_b_hnp_enabled
- dwc2_port_suspend
- dwc2_port_resume
- dwc2_hcd_hub_control
- dwc2_hcd_is_status_changed
- dwc2_hcd_get_frame_number
- dwc2_hcd_get_future_frame_number
- dwc2_hcd_is_b_host
- dwc2_hcd_urb_alloc
- dwc2_hcd_urb_set_pipeinfo
- dwc2_hcd_dump_state
- dwc2_hcd_to_hsotg
- dwc2_host_get_tt_info
- dwc2_host_put_tt_info
- dwc2_host_get_speed
- dwc2_allocate_bus_bandwidth
- dwc2_free_bus_bandwidth
- dwc2_host_complete
- dwc2_hcd_start_func
- dwc2_hcd_reset_func
- dwc2_hcd_phy_reset_func
- _dwc2_hcd_start
- _dwc2_hcd_stop
- _dwc2_hcd_suspend
- _dwc2_hcd_resume
- _dwc2_hcd_get_frame_number
- dwc2_dump_urb_info
- _dwc2_hcd_urb_enqueue
- _dwc2_hcd_urb_dequeue
- _dwc2_hcd_endpoint_disable
- _dwc2_hcd_endpoint_reset
- _dwc2_hcd_irq
- _dwc2_hcd_hub_status_data
- _dwc2_hcd_hub_control
- _dwc2_hcd_clear_tt_buffer_complete
- dwc2_change_bus_speed
- dwc2_free_dev
- dwc2_reset_device
- dwc2_hcd_free
- dwc2_hcd_release
- dwc2_hcd_init
- dwc2_hcd_remove
- dwc2_backup_host_registers
- dwc2_restore_host_registers
- dwc2_host_enter_hibernation
- dwc2_host_exit_hibernation
- dwc2_host_can_poweroff_phy
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/spinlock.h>
45 #include <linux/interrupt.h>
46 #include <linux/platform_device.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/delay.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/usb.h>
52
53 #include <linux/usb/hcd.h>
54 #include <linux/usb/ch11.h>
55
56 #include "core.h"
57 #include "hcd.h"
58
59 static void dwc2_port_resume(struct dwc2_hsotg *hsotg);
60
/*
 * dwc2_enable_common_interrupts() - Enables the interrupts that are common
 * to host and device mode. Pending OTG and core interrupts are cleared
 * first, then GINTMSK is programmed according to the core parameters
 * (slave vs. DMA mode, external ID pin control, LPM support).
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
73 static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
74 {
75 u32 intmsk;
76
77
78 dwc2_writel(hsotg, 0xffffffff, GOTGINT);
79
80
81 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
82
83
84 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
85
86 if (!hsotg->params.host_dma)
87 intmsk |= GINTSTS_RXFLVL;
88 if (!hsotg->params.external_id_pin_ctl)
89 intmsk |= GINTSTS_CONIDSTSCHNG;
90
91 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
92 GINTSTS_SESSREQINT;
93
94 if (dwc2_is_device_mode(hsotg) && hsotg->params.lpm)
95 intmsk |= GINTSTS_LPMTRANRCVD;
96
97 dwc2_writel(hsotg, intmsk, GINTMSK);
98 }
99
100 static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
101 {
102 u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
103
104 switch (hsotg->hw_params.arch) {
105 case GHWCFG2_EXT_DMA_ARCH:
106 dev_err(hsotg->dev, "External DMA Mode not supported\n");
107 return -EINVAL;
108
109 case GHWCFG2_INT_DMA_ARCH:
110 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
111 if (hsotg->params.ahbcfg != -1) {
112 ahbcfg &= GAHBCFG_CTRL_MASK;
113 ahbcfg |= hsotg->params.ahbcfg &
114 ~GAHBCFG_CTRL_MASK;
115 }
116 break;
117
118 case GHWCFG2_SLAVE_ONLY_ARCH:
119 default:
120 dev_dbg(hsotg->dev, "Slave Only Mode\n");
121 break;
122 }
123
124 if (hsotg->params.host_dma)
125 ahbcfg |= GAHBCFG_DMA_EN;
126 else
127 hsotg->params.dma_desc_enable = false;
128
129 dwc2_writel(hsotg, ahbcfg, GAHBCFG);
130
131 return 0;
132 }
133
134 static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
135 {
136 u32 usbcfg;
137
138 usbcfg = dwc2_readl(hsotg, GUSBCFG);
139 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
140
141 switch (hsotg->hw_params.op_mode) {
142 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
143 if (hsotg->params.otg_cap ==
144 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
145 usbcfg |= GUSBCFG_HNPCAP;
146 if (hsotg->params.otg_cap !=
147 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
148 usbcfg |= GUSBCFG_SRPCAP;
149 break;
150
151 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
152 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
153 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
154 if (hsotg->params.otg_cap !=
155 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
156 usbcfg |= GUSBCFG_SRPCAP;
157 break;
158
159 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
160 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
161 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
162 default:
163 break;
164 }
165
166 dwc2_writel(hsotg, usbcfg, GUSBCFG);
167 }
168
169 static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
170 {
171 if (hsotg->vbus_supply)
172 return regulator_enable(hsotg->vbus_supply);
173
174 return 0;
175 }
176
177 static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
178 {
179 if (hsotg->vbus_supply)
180 return regulator_disable(hsotg->vbus_supply);
181
182 return 0;
183 }
184
/*
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts. All
 * interrupts are masked first, the common interrupts are re-enabled, and
 * then the disconnect, port and host channel interrupts are unmasked in
 * GINTMSK.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
190 static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
191 {
192 u32 intmsk;
193
194 dev_dbg(hsotg->dev, "%s()\n", __func__);
195
196
197 dwc2_writel(hsotg, 0, GINTMSK);
198 dwc2_writel(hsotg, 0, HAINTMSK);
199
200
201 dwc2_enable_common_interrupts(hsotg);
202
203
204 intmsk = dwc2_readl(hsotg, GINTMSK);
205 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
206 dwc2_writel(hsotg, intmsk, GINTMSK);
207 }
208
/*
 * dwc2_disable_host_interrupts() - Disables the Host mode interrupts by
 * masking the host-specific bits in GINTMSK.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
214 static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
215 {
216 u32 intmsk = dwc2_readl(hsotg, GINTMSK);
217
218
219 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
220 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
221 dwc2_writel(hsotg, intmsk, GINTMSK);
222 }
223
/*
 * dwc2_calculate_dynamic_fifo() - Validates the configured Rx, non-periodic
 * Tx and periodic Tx FIFO sizes against the total FIFO depth reported by
 * the hardware and, if they do not fit, falls back to conservative defaults.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
231 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
232 {
233 struct dwc2_core_params *params = &hsotg->params;
234 struct dwc2_hw_params *hw = &hsotg->hw_params;
235 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
236
237 total_fifo_size = hw->total_fifo_size;
238 rxfsiz = params->host_rx_fifo_size;
239 nptxfsiz = params->host_nperio_tx_fifo_size;
240 ptxfsiz = params->host_perio_tx_fifo_size;
241
/*
 * If the configured sizes do not fit in the total FIFO depth, fall back
 * to default values that are known to fit.
 */
248 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {

/* Rx FIFO fallback: 516 words plus one location per host channel */
255 rxfsiz = 516 + hw->host_channels;

/* Non-periodic Tx FIFO fallback: 256 words (one 1024-byte packet) */
262 nptxfsiz = 256;
263
/* Periodic Tx FIFO fallback: 768 words */
269 ptxfsiz = 768;
270
271 params->host_rx_fifo_size = rxfsiz;
272 params->host_nperio_tx_fifo_size = nptxfsiz;
273 params->host_perio_tx_fifo_size = ptxfsiz;
274 }

/*
 * Sanity check: the sizes now in use must still fit in the total FIFO
 * depth; otherwise report invalid FIFO sizes.
 */
286 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
287 dev_err(hsotg->dev, "invalid fifo sizes\n");
288 }
289
290 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
291 {
292 struct dwc2_core_params *params = &hsotg->params;
293 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
294
295 if (!params->enable_dynamic_fifo)
296 return;
297
298 dwc2_calculate_dynamic_fifo(hsotg);
299
300
301 grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
302 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
303 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
304 grxfsiz |= params->host_rx_fifo_size <<
305 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
306 dwc2_writel(hsotg, grxfsiz, GRXFSIZ);
307 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
308 dwc2_readl(hsotg, GRXFSIZ));
309
310
311 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
312 dwc2_readl(hsotg, GNPTXFSIZ));
313 nptxfsiz = params->host_nperio_tx_fifo_size <<
314 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
315 nptxfsiz |= params->host_rx_fifo_size <<
316 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
317 dwc2_writel(hsotg, nptxfsiz, GNPTXFSIZ);
318 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
319 dwc2_readl(hsotg, GNPTXFSIZ));
320
321
322 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
323 dwc2_readl(hsotg, HPTXFSIZ));
324 hptxfsiz = params->host_perio_tx_fifo_size <<
325 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
326 hptxfsiz |= (params->host_rx_fifo_size +
327 params->host_nperio_tx_fifo_size) <<
328 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
329 dwc2_writel(hsotg, hptxfsiz, HPTXFSIZ);
330 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
331 dwc2_readl(hsotg, HPTXFSIZ));
332
333 if (hsotg->params.en_multiple_tx_fifo &&
334 hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
335
336
337
338
339
340 dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
341 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
342 dfifocfg |= (params->host_rx_fifo_size +
343 params->host_nperio_tx_fifo_size +
344 params->host_perio_tx_fifo_size) <<
345 GDFIFOCFG_EPINFOBASE_SHIFT &
346 GDFIFOCFG_EPINFOBASE_MASK;
347 dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
348 }
349 }
350
/*
 * dwc2_calc_frame_interval() - Calculates the frame interval value for the
 * current PHY clock selection (GUSBCFG) and port speed (HPRT0). Returns the
 * number of PHY clocks in one (micro)frame minus one: 125 us worth of
 * clocks for a high-speed port, 1 ms worth otherwise.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
361 u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
362 {
363 u32 usbcfg;
364 u32 hprt0;
365 int clock = 60;
366
367 usbcfg = dwc2_readl(hsotg, GUSBCFG);
368 hprt0 = dwc2_readl(hsotg, HPRT0);
369
370 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
371 !(usbcfg & GUSBCFG_PHYIF16))
372 clock = 60;
373 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
374 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
375 clock = 48;
376 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
377 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
378 clock = 30;
379 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
380 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
381 clock = 60;
382 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
383 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
384 clock = 48;
385 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
386 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
387 clock = 48;
388 if ((usbcfg & GUSBCFG_PHYSEL) &&
389 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
390 clock = 48;
391
392 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
393
394 return 125 * clock - 1;
395
396
397 return 1000 * clock - 1;
398 }
399
/*
 * dwc2_read_packet() - Reads @bytes of packet data from the Rx FIFO into
 * @dest, one 32-bit word at a time. There is one shared Rx FIFO, accessed
 * here through the channel 0 FIFO address.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @dest:  Destination buffer for the packet
 * @bytes: Number of bytes to copy
 */
408 void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
409 {
410 u32 *data_buf = (u32 *)dest;
411 int word_count = (bytes + 3) / 4;
412 int i;
413
414
415
416
417
418
419
420 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
421
422 for (i = 0; i < word_count; i++, data_buf++)
423 *data_buf = dwc2_readl(hsotg, HCFIFO(0));
424 }
425
/*
 * dwc2_dump_channel_info() - Dumps the state of a host channel and of the
 * non-periodic schedules for debugging. Only compiled in when VERBOSE_DEBUG
 * is defined; otherwise it is a no-op.
 */
437 static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
438 struct dwc2_host_chan *chan)
439 {
440 #ifdef VERBOSE_DEBUG
441 int num_channels = hsotg->params.host_channels;
442 struct dwc2_qh *qh;
443 u32 hcchar;
444 u32 hcsplt;
445 u32 hctsiz;
446 u32 hc_dma;
447 int i;
448
449 if (!chan)
450 return;
451
452 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
453 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
454 hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
455 hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));
456
457 dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
458 dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
459 hcchar, hcsplt);
460 dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n",
461 hctsiz, hc_dma);
462 dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
463 chan->dev_addr, chan->ep_num, chan->ep_is_in);
464 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
465 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
466 dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
467 dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
468 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
469 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
470 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
471 (unsigned long)chan->xfer_dma);
472 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
473 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
474 dev_dbg(hsotg->dev, " NP inactive sched:\n");
475 list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
476 qh_list_entry)
477 dev_dbg(hsotg->dev, " %p\n", qh);
478 dev_dbg(hsotg->dev, " NP waiting sched:\n");
479 list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
480 qh_list_entry)
481 dev_dbg(hsotg->dev, " %p\n", qh);
482 dev_dbg(hsotg->dev, " NP active sched:\n");
483 list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
484 qh_list_entry)
485 dev_dbg(hsotg->dev, " %p\n", qh);
486 dev_dbg(hsotg->dev, " Channels:\n");
487 for (i = 0; i < num_channels; i++) {
488 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
489
490 dev_dbg(hsotg->dev, " %2d: %p\n", i, chan);
491 }
492 #endif
493 }
494
495 static int _dwc2_hcd_start(struct usb_hcd *hcd);
496
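/*
 * Starts host-mode operation: records whether the controller is acting as
 * the B-host and kicks off the HCD start routine.
 */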
497 static void dwc2_host_start(struct dwc2_hsotg *hsotg)
498 {
499 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
500
501 hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
502 _dwc2_hcd_start(hcd);
503 }
504
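/* Clears the B-host flag on the bus when the host side is disconnected */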
505 static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
506 {
507 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
508
509 hcd->self.is_b_host = 0;
510 }
511
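/*
 * Returns the address and port number of the transaction translator hub
 * for the device that issued @context (an URB), or hub address 0 if the
 * device is not below a TT.
 */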
512 static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
513 int *hub_addr, int *hub_port)
514 {
515 struct urb *urb = context;
516
517 if (urb->dev->tt)
518 *hub_addr = urb->dev->tt->hub->devnum;
519 else
520 *hub_addr = 0;
521 *hub_port = urb->dev->ttport;
522 }
523
/*
 * Enables the host channel interrupts needed in Slave (non-DMA) mode for
 * the given channel, based on endpoint type, direction, split state and
 * error state.
 */
530 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
531 struct dwc2_host_chan *chan)
532 {
533 u32 hcintmsk = HCINTMSK_CHHLTD;
534
535 switch (chan->ep_type) {
536 case USB_ENDPOINT_XFER_CONTROL:
537 case USB_ENDPOINT_XFER_BULK:
538 dev_vdbg(hsotg->dev, "control/bulk\n");
539 hcintmsk |= HCINTMSK_XFERCOMPL;
540 hcintmsk |= HCINTMSK_STALL;
541 hcintmsk |= HCINTMSK_XACTERR;
542 hcintmsk |= HCINTMSK_DATATGLERR;
543 if (chan->ep_is_in) {
544 hcintmsk |= HCINTMSK_BBLERR;
545 } else {
546 hcintmsk |= HCINTMSK_NAK;
547 hcintmsk |= HCINTMSK_NYET;
548 if (chan->do_ping)
549 hcintmsk |= HCINTMSK_ACK;
550 }
551
552 if (chan->do_split) {
553 hcintmsk |= HCINTMSK_NAK;
554 if (chan->complete_split)
555 hcintmsk |= HCINTMSK_NYET;
556 else
557 hcintmsk |= HCINTMSK_ACK;
558 }
559
560 if (chan->error_state)
561 hcintmsk |= HCINTMSK_ACK;
562 break;
563
564 case USB_ENDPOINT_XFER_INT:
565 if (dbg_perio())
566 dev_vdbg(hsotg->dev, "intr\n");
567 hcintmsk |= HCINTMSK_XFERCOMPL;
568 hcintmsk |= HCINTMSK_NAK;
569 hcintmsk |= HCINTMSK_STALL;
570 hcintmsk |= HCINTMSK_XACTERR;
571 hcintmsk |= HCINTMSK_DATATGLERR;
572 hcintmsk |= HCINTMSK_FRMOVRUN;
573
574 if (chan->ep_is_in)
575 hcintmsk |= HCINTMSK_BBLERR;
576 if (chan->error_state)
577 hcintmsk |= HCINTMSK_ACK;
578 if (chan->do_split) {
579 if (chan->complete_split)
580 hcintmsk |= HCINTMSK_NYET;
581 else
582 hcintmsk |= HCINTMSK_ACK;
583 }
584 break;
585
586 case USB_ENDPOINT_XFER_ISOC:
587 if (dbg_perio())
588 dev_vdbg(hsotg->dev, "isoc\n");
589 hcintmsk |= HCINTMSK_XFERCOMPL;
590 hcintmsk |= HCINTMSK_FRMOVRUN;
591 hcintmsk |= HCINTMSK_ACK;
592
593 if (chan->ep_is_in) {
594 hcintmsk |= HCINTMSK_XACTERR;
595 hcintmsk |= HCINTMSK_BBLERR;
596 }
597 break;
598 default:
599 dev_err(hsotg->dev, "## Unknown EP type ##\n");
600 break;
601 }
602
603 dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
604 if (dbg_hc(chan))
605 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
606 }
607
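/*
 * Enables the host channel interrupts needed in DMA mode: AHB error in
 * buffer DMA mode, transfer complete for isochronous descriptor DMA, and
 * ACK/NAK/data-toggle-error when recovering from an error state.
 */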
608 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
609 struct dwc2_host_chan *chan)
610 {
611 u32 hcintmsk = HCINTMSK_CHHLTD;
612
613
614
615
616
617 if (!hsotg->params.dma_desc_enable) {
618 if (dbg_hc(chan))
619 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
620 hcintmsk |= HCINTMSK_AHBERR;
621 } else {
622 if (dbg_hc(chan))
623 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
624 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
625 hcintmsk |= HCINTMSK_XFERCOMPL;
626 }
627
628 if (chan->error_state && !chan->do_split &&
629 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
630 if (dbg_hc(chan))
631 dev_vdbg(hsotg->dev, "setting ACK\n");
632 hcintmsk |= HCINTMSK_ACK;
633 if (chan->ep_is_in) {
634 hcintmsk |= HCINTMSK_DATATGLERR;
635 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
636 hcintmsk |= HCINTMSK_NAK;
637 }
638 }
639
640 dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
641 if (dbg_hc(chan))
642 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
643 }
644
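/*
 * Enables the per-channel interrupts appropriate for the current mode
 * (Slave or DMA), then unmasks this channel in HAINTMSK and the host
 * channel interrupt in GINTMSK.
 */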
645 static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
646 struct dwc2_host_chan *chan)
647 {
648 u32 intmsk;
649
650 if (hsotg->params.host_dma) {
651 if (dbg_hc(chan))
652 dev_vdbg(hsotg->dev, "DMA enabled\n");
653 dwc2_hc_enable_dma_ints(hsotg, chan);
654 } else {
655 if (dbg_hc(chan))
656 dev_vdbg(hsotg->dev, "DMA disabled\n");
657 dwc2_hc_enable_slave_ints(hsotg, chan);
658 }
659
660
661 intmsk = dwc2_readl(hsotg, HAINTMSK);
662 intmsk |= 1 << chan->hc_num;
663 dwc2_writel(hsotg, intmsk, HAINTMSK);
664 if (dbg_hc(chan))
665 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
666
667
668 intmsk = dwc2_readl(hsotg, GINTMSK);
669 intmsk |= GINTSTS_HCHINT;
670 dwc2_writel(hsotg, intmsk, GINTMSK);
671 if (dbg_hc(chan))
672 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
673 }
674
/*
 * dwc2_hc_init() - Prepares a host channel for transferring packets to or
 * from a specific endpoint. Clears and enables the channel interrupts, then
 * programs HCCHAR and HCSPLT from the fields of @chan, which must have been
 * set up by the caller.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Information needed to initialize the host channel
 */
686 static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
687 {
688 u8 hc_num = chan->hc_num;
689 u32 hcintmsk;
690 u32 hcchar;
691 u32 hcsplt = 0;
692
693 if (dbg_hc(chan))
694 dev_vdbg(hsotg->dev, "%s()\n", __func__);
695
696
697 hcintmsk = 0xffffffff;
698 hcintmsk &= ~HCINTMSK_RESERVED14_31;
699 dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));
700
701
702 dwc2_hc_enable_ints(hsotg, chan);
703
704
705
706
707
708 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
709 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
710 if (chan->ep_is_in)
711 hcchar |= HCCHAR_EPDIR;
712 if (chan->speed == USB_SPEED_LOW)
713 hcchar |= HCCHAR_LSPDDEV;
714 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
715 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
716 dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
717 if (dbg_hc(chan)) {
718 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
719 hc_num, hcchar);
720
721 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
722 __func__, hc_num);
723 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
724 chan->dev_addr);
725 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
726 chan->ep_num);
727 dev_vdbg(hsotg->dev, " Is In: %d\n",
728 chan->ep_is_in);
729 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
730 chan->speed == USB_SPEED_LOW);
731 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
732 chan->ep_type);
733 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
734 chan->max_packet);
735 }
736
737
738 if (chan->do_split) {
739 if (dbg_hc(chan))
740 dev_vdbg(hsotg->dev,
741 "Programming HC %d with split --> %s\n",
742 hc_num,
743 chan->complete_split ? "CSPLIT" : "SSPLIT");
744 if (chan->complete_split)
745 hcsplt |= HCSPLT_COMPSPLT;
746 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
747 HCSPLT_XACTPOS_MASK;
748 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
749 HCSPLT_HUBADDR_MASK;
750 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
751 HCSPLT_PRTADDR_MASK;
752 if (dbg_hc(chan)) {
753 dev_vdbg(hsotg->dev, " comp split %d\n",
754 chan->complete_split);
755 dev_vdbg(hsotg->dev, " xact pos %d\n",
756 chan->xact_pos);
757 dev_vdbg(hsotg->dev, " hub addr %d\n",
758 chan->hub_addr);
759 dev_vdbg(hsotg->dev, " hub port %d\n",
760 chan->hub_port);
761 dev_vdbg(hsotg->dev, " is_in %d\n",
762 chan->ep_is_in);
763 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
764 chan->max_packet);
765 dev_vdbg(hsotg->dev, " xferlen %d\n",
766 chan->xfer_len);
767 }
768 }
769
770 dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
771 }
772
/*
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg:       Controller register interface
 * @chan:        Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer
 * in either Slave mode or DMA mode. Under normal circumstances in DMA mode,
 * the controller halts the channel itself when the transfer is complete or
 * a condition occurs that requires application intervention.
 *
 * In Slave mode, checks for a free request queue entry, then sets the
 * Channel Enable and Channel Disable bits of HCCHARn to initiate the halt.
 * If there is no free request queue entry, sets only the Channel Disable
 * bit to flush requests for this channel, and flags the channel so it is
 * halted when a request queue slot opens up.
 *
 * In DMA mode, always sets both the Channel Enable and Channel Disable
 * bits of HCCHARn.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and actually halts; the Channel Halted interrupt handler
 * completes the deactivation of the host channel.
 */
801 void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
802 enum dwc2_halt_status halt_status)
803 {
804 u32 nptxsts, hptxsts, hcchar;
805
806 if (dbg_hc(chan))
807 dev_vdbg(hsotg->dev, "%s()\n", __func__);
808
/*
 * In buffer DMA or external DMA mode, a channel carrying a non-split
 * periodic transfer cannot be halted by the driver; report an error and
 * bail out.
 */
815 if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
816 hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
817 if (!chan->do_split &&
818 (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
819 chan->ep_type == USB_ENDPOINT_XFER_INT)) {
820 dev_err(hsotg->dev, "%s() Channel can't be halted\n",
821 __func__);
822 return;
823 }
824 }
825
826 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
827 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
828
829 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
830 halt_status == DWC2_HC_XFER_AHB_ERR) {
831
832
833
834
835
836
837 u32 hcintmsk = HCINTMSK_CHHLTD;
838
839 dev_vdbg(hsotg->dev, "dequeue/error\n");
840 dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
841
842
843
844
845
846
847 dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));
848
849
850
851
852
853
854 chan->halt_status = halt_status;
855
856 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
857 if (!(hcchar & HCCHAR_CHENA)) {
858
859
860
861
862
863
864
865
866
867
868
869 return;
870 }
871 }
872 if (chan->halt_pending) {
873
874
875
876
877
878 dev_vdbg(hsotg->dev,
879 "*** %s: Channel %d, chan->halt_pending already set ***\n",
880 __func__, chan->hc_num);
881 return;
882 }
883
884 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
885
886
887
888 if (!hsotg->params.dma_desc_enable) {
889 if (dbg_hc(chan))
890 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
891 hcchar |= HCCHAR_CHENA;
892 } else {
893 if (dbg_hc(chan))
894 dev_dbg(hsotg->dev, "desc DMA enabled\n");
895 }
896 hcchar |= HCCHAR_CHDIS;
897
898 if (!hsotg->params.host_dma) {
899 if (dbg_hc(chan))
900 dev_vdbg(hsotg->dev, "DMA not enabled\n");
901 hcchar |= HCCHAR_CHENA;
902
903
904 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
905 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
906 dev_vdbg(hsotg->dev, "control/bulk\n");
907 nptxsts = dwc2_readl(hsotg, GNPTXSTS);
908 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
909 dev_vdbg(hsotg->dev, "Disabling channel\n");
910 hcchar &= ~HCCHAR_CHENA;
911 }
912 } else {
913 if (dbg_perio())
914 dev_vdbg(hsotg->dev, "isoc/intr\n");
915 hptxsts = dwc2_readl(hsotg, HPTXSTS);
916 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
917 hsotg->queuing_high_bandwidth) {
918 if (dbg_perio())
919 dev_vdbg(hsotg->dev, "Disabling channel\n");
920 hcchar &= ~HCCHAR_CHENA;
921 }
922 }
923 } else {
924 if (dbg_hc(chan))
925 dev_vdbg(hsotg->dev, "DMA enabled\n");
926 }
927
928 dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
929 chan->halt_status = halt_status;
930
931 if (hcchar & HCCHAR_CHENA) {
932 if (dbg_hc(chan))
933 dev_vdbg(hsotg->dev, "Channel enabled\n");
934 chan->halt_pending = 1;
935 chan->halt_on_queue = 0;
936 } else {
937 if (dbg_hc(chan))
938 dev_vdbg(hsotg->dev, "Channel disabled\n");
939 chan->halt_on_queue = 1;
940 }
941
942 if (dbg_hc(chan)) {
943 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
944 chan->hc_num);
945 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
946 hcchar);
947 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
948 chan->halt_pending);
949 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
950 chan->halt_on_queue);
951 dev_vdbg(hsotg->dev, " halt_status: %d\n",
952 chan->halt_status);
953 }
954 }
955
/*
 * dwc2_hc_cleanup() - Clears the transfer state of a host channel so it can
 * be reused: marks it not started, removes it from the split order list and
 * masks/clears its channel interrupts.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Identifies the host channel to clean up
 */
965 void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
966 {
967 u32 hcintmsk;
968
969 chan->xfer_started = 0;
970
971 list_del_init(&chan->split_order_list_entry);
972
973
974
975
976
977 dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
978 hcintmsk = 0xffffffff;
979 hcintmsk &= ~HCINTMSK_RESERVED14_31;
980 dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
981 }
982
/*
 * Sets the channel's even/odd frame bit (HCCHAR_ODDFRM) for periodic
 * transfers so the transfer goes out in the intended (micro)frame, and
 * bumps the QH's next_active_frame if the originally scheduled frame has
 * already been missed.
 */
993 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
994 struct dwc2_host_chan *chan, u32 *hcchar)
995 {
996 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
997 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
998 int host_speed;
999 int xfer_ns;
1000 int xfer_us;
1001 int bytes_in_fifo;
1002 u16 fifo_space;
1003 u16 frame_number;
1004 u16 wire_frame;
1005
/*
 * Work out whether the transfer will actually go out on the bus in an
 * even or an odd (micro)frame. Estimate how long it will take to move
 * this transfer, plus whatever is already queued in the periodic Tx FIFO,
 * out onto the wire; if that pushes us past the frame we originally
 * scheduled, advance to the frame the data will really appear in.
 */
1030 host_speed = (chan->speed != USB_SPEED_HIGH &&
1031 !chan->do_split) ? chan->speed : USB_SPEED_HIGH;
1032
1033
1034 fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
1035 TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
1036 bytes_in_fifo = sizeof(u32) *
1037 (hsotg->params.host_perio_tx_fifo_size -
1038 fifo_space);
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048 xfer_ns = usb_calc_bus_time(host_speed, false, false,
1049 chan->xfer_len + bytes_in_fifo);
1050 xfer_us = NS_TO_US(xfer_ns);
1051
1052
1053 frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);
1054
1055
1056 wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);
1057
1058
1059
1060
1061
1062
1063
1064
1065 if (dwc2_frame_num_gt(frame_number, wire_frame)) {
1066 dwc2_sch_vdbg(hsotg,
1067 "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
1068 chan->qh, wire_frame, frame_number,
1069 dwc2_frame_num_dec(frame_number,
1070 wire_frame));
1071 wire_frame = frame_number;
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081 chan->qh->next_active_frame =
1082 dwc2_frame_num_dec(frame_number, 1);
1083 }
1084
1085 if (wire_frame & 1)
1086 *hcchar |= HCCHAR_ODDFRM;
1087 else
1088 *hcchar &= ~HCCHAR_ODDFRM;
1089 }
1090 }
1091
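/*
 * Sets the starting data PID for an isochronous transfer: DATA0 for
 * full-speed endpoints, and DATA0/DATA1/DATA2/MDATA for high-speed
 * endpoints depending on direction and the number of packets per
 * microframe (multi_count).
 */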
1092 static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1093 {
1094
1095 if (chan->speed == USB_SPEED_HIGH) {
1096 if (chan->ep_is_in) {
1097 if (chan->multi_count == 1)
1098 chan->data_pid_start = DWC2_HC_PID_DATA0;
1099 else if (chan->multi_count == 2)
1100 chan->data_pid_start = DWC2_HC_PID_DATA1;
1101 else
1102 chan->data_pid_start = DWC2_HC_PID_DATA2;
1103 } else {
1104 if (chan->multi_count == 1)
1105 chan->data_pid_start = DWC2_HC_PID_DATA0;
1106 else
1107 chan->data_pid_start = DWC2_HC_PID_MDATA;
1108 }
1109 } else {
1110 chan->data_pid_start = DWC2_HC_PID_DATA0;
1111 }
1112 }
1113
/*
 * dwc2_hc_write_packet() - Writes one packet (or the remainder of the
 * transfer, if that is less than the maximum packet size) into the
 * channel's Tx FIFO. Relevant in Slave mode only.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Information needed to write the packet
 */
1128 static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1129 struct dwc2_host_chan *chan)
1130 {
1131 u32 i;
1132 u32 remaining_count;
1133 u32 byte_count;
1134 u32 dword_count;
1135 u32 *data_buf = (u32 *)chan->xfer_buf;
1136
1137 if (dbg_hc(chan))
1138 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1139
1140 remaining_count = chan->xfer_len - chan->xfer_count;
1141 if (remaining_count > chan->max_packet)
1142 byte_count = chan->max_packet;
1143 else
1144 byte_count = remaining_count;
1145
1146 dword_count = (byte_count + 3) / 4;
1147
1148 if (((unsigned long)data_buf & 0x3) == 0) {
1149
1150 for (i = 0; i < dword_count; i++, data_buf++)
1151 dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num));
1152 } else {
1153
1154 for (i = 0; i < dword_count; i++, data_buf++) {
1155 u32 data = data_buf[0] | data_buf[1] << 8 |
1156 data_buf[2] << 16 | data_buf[3] << 24;
1157 dwc2_writel(hsotg, data, HCFIFO(chan->hc_num));
1158 }
1159 }
1160
1161 chan->xfer_count += byte_count;
1162 chan->xfer_buf += byte_count;
1163 }
1164
/*
 * dwc2_hc_do_ping() - Starts a PING transaction on the channel, used for
 * high-speed Control and Bulk OUT transfers when the device has previously
 * responded with NYET.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Information needed to start the PING
 */
1174 static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
1175 struct dwc2_host_chan *chan)
1176 {
1177 u32 hcchar;
1178 u32 hctsiz;
1179
1180 if (dbg_hc(chan))
1181 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1182 chan->hc_num);
1183
1184 hctsiz = TSIZ_DOPNG;
1185 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1186 dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
1187
1188 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
1189 hcchar |= HCCHAR_CHENA;
1190 hcchar &= ~HCCHAR_CHDIS;
1191 dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
1192 }
1193
/*
 * dwc2_hc_start_transfer() - Does the setup for a data transfer on a host
 * channel and starts the transfer.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Information needed to initialize the transfer
 *
 * Programs HCTSIZ with the transfer size, packet count and initial PID,
 * handles the PING and split-transaction special cases, programs HCDMA in
 * DMA mode, and finally enables the channel via HCCHAR. In Slave mode the
 * first packet of an OUT transfer is also written into the Tx FIFO here.
 */
1227 static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1228 struct dwc2_host_chan *chan)
1229 {
1230 u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
1231 u16 max_hc_pkt_count = hsotg->params.max_packet_count;
1232 u32 hcchar;
1233 u32 hctsiz = 0;
1234 u16 num_packets;
1235 u32 ec_mc;
1236
1237 if (dbg_hc(chan))
1238 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1239
1240 if (chan->do_ping) {
1241 if (!hsotg->params.host_dma) {
1242 if (dbg_hc(chan))
1243 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1244 dwc2_hc_do_ping(hsotg, chan);
1245 chan->xfer_started = 1;
1246 return;
1247 }
1248
1249 if (dbg_hc(chan))
1250 dev_vdbg(hsotg->dev, "ping, DMA\n");
1251
1252 hctsiz |= TSIZ_DOPNG;
1253 }
1254
1255 if (chan->do_split) {
1256 if (dbg_hc(chan))
1257 dev_vdbg(hsotg->dev, "split\n");
1258 num_packets = 1;
1259
1260 if (chan->complete_split && !chan->ep_is_in)
1261
1262
1263
1264
1265 chan->xfer_len = 0;
1266 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1267 chan->xfer_len = chan->max_packet;
1268 else if (!chan->ep_is_in && chan->xfer_len > 188)
1269 chan->xfer_len = 188;
1270
1271 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1272 TSIZ_XFERSIZE_MASK;
1273
1274
1275 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1276 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1277 ec_mc = 3;
1278 else
1279 ec_mc = 1;
1280 } else {
1281 if (dbg_hc(chan))
1282 dev_vdbg(hsotg->dev, "no split\n");
1283
1284
1285
1286
1287 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1288 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1289
1290
1291
1292
1293
1294
1295
1296 u32 max_periodic_len =
1297 chan->multi_count * chan->max_packet;
1298
1299 if (chan->xfer_len > max_periodic_len)
1300 chan->xfer_len = max_periodic_len;
1301 } else if (chan->xfer_len > max_hc_xfer_size) {
1302
1303
1304
1305
1306 chan->xfer_len =
1307 max_hc_xfer_size - chan->max_packet + 1;
1308 }
1309
1310 if (chan->xfer_len > 0) {
1311 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1312 chan->max_packet;
1313 if (num_packets > max_hc_pkt_count) {
1314 num_packets = max_hc_pkt_count;
1315 chan->xfer_len = num_packets * chan->max_packet;
1316 }
1317 } else {
1318
1319 num_packets = 1;
1320 }
1321
1322 if (chan->ep_is_in)
1323
1324
1325
1326
1327 chan->xfer_len = num_packets * chan->max_packet;
1328
1329 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1330 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1331
1332
1333
1334
1335 chan->multi_count = num_packets;
1336
1337 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1338 dwc2_set_pid_isoc(chan);
1339
1340 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1341 TSIZ_XFERSIZE_MASK;
1342
1343
1344 ec_mc = chan->multi_count;
1345 }
1346
1347 chan->start_pkt_count = num_packets;
1348 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1349 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1350 TSIZ_SC_MC_PID_MASK;
1351 dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
1352 if (dbg_hc(chan)) {
1353 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1354 hctsiz, chan->hc_num);
1355
1356 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1357 chan->hc_num);
1358 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1359 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1360 TSIZ_XFERSIZE_SHIFT);
1361 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1362 (hctsiz & TSIZ_PKTCNT_MASK) >>
1363 TSIZ_PKTCNT_SHIFT);
1364 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1365 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1366 TSIZ_SC_MC_PID_SHIFT);
1367 }
1368
1369 if (hsotg->params.host_dma) {
1370 dma_addr_t dma_addr;
1371
1372 if (chan->align_buf) {
1373 if (dbg_hc(chan))
1374 dev_vdbg(hsotg->dev, "align_buf\n");
1375 dma_addr = chan->align_buf;
1376 } else {
1377 dma_addr = chan->xfer_dma;
1378 }
1379 dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num));
1380
1381 if (dbg_hc(chan))
1382 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1383 (unsigned long)dma_addr, chan->hc_num);
1384 }
1385
1386
1387 if (chan->do_split) {
1388 u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
1389
1390 hcsplt |= HCSPLT_SPLTENA;
1391 dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num));
1392 }
1393
1394 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
1395 hcchar &= ~HCCHAR_MULTICNT_MASK;
1396 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
1397 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1398
1399 if (hcchar & HCCHAR_CHDIS)
1400 dev_warn(hsotg->dev,
1401 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1402 __func__, chan->hc_num, hcchar);
1403
1404
1405 hcchar |= HCCHAR_CHENA;
1406 hcchar &= ~HCCHAR_CHDIS;
1407
1408 if (dbg_hc(chan))
1409 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1410 (hcchar & HCCHAR_MULTICNT_MASK) >>
1411 HCCHAR_MULTICNT_SHIFT);
1412
1413 dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
1414 if (dbg_hc(chan))
1415 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1416 chan->hc_num);
1417
1418 chan->xfer_started = 1;
1419 chan->requests++;
1420
1421 if (!hsotg->params.host_dma &&
1422 !chan->ep_is_in && chan->xfer_len > 0)
1423
1424 dwc2_hc_write_packet(hsotg, chan);
1425 }
1426
/*
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer on a
 * host channel in Descriptor DMA mode and starts the transfer: programs
 * HCTSIZ with the PID, NTD and scheduling info, points HCDMA at the
 * descriptor list and enables the channel.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @chan:  Information needed to initialize the transfer
 */
1441 void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1442 struct dwc2_host_chan *chan)
1443 {
1444 u32 hcchar;
1445 u32 hctsiz = 0;
1446
1447 if (chan->do_ping)
1448 hctsiz |= TSIZ_DOPNG;
1449
1450 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1451 dwc2_set_pid_isoc(chan);
1452
1453
1454 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1455 TSIZ_SC_MC_PID_MASK;
1456
1457
1458 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1459
1460
1461 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1462
1463 if (dbg_hc(chan)) {
1464 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1465 chan->hc_num);
1466 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1467 chan->data_pid_start);
1468 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1469 }
1470
1471 dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));
1472
1473 dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
1474 chan->desc_list_sz, DMA_TO_DEVICE);
1475
1476 dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));
1477
1478 if (dbg_hc(chan))
1479 dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
1480 &chan->desc_list_addr, chan->hc_num);
1481
1482 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
1483 hcchar &= ~HCCHAR_MULTICNT_MASK;
1484 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1485 HCCHAR_MULTICNT_MASK;
1486
1487 if (hcchar & HCCHAR_CHDIS)
1488 dev_warn(hsotg->dev,
1489 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1490 __func__, chan->hc_num, hcchar);
1491
1492
1493 hcchar |= HCCHAR_CHENA;
1494 hcchar &= ~HCCHAR_CHDIS;
1495
1496 if (dbg_hc(chan))
1497 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1498 (hcchar & HCCHAR_MULTICNT_MASK) >>
1499 HCCHAR_MULTICNT_SHIFT);
1500
1501 dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
1502 if (dbg_hc(chan))
1503 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1504 chan->hc_num);
1505
1506 chan->xfer_started = 1;
1507 chan->requests++;
1508 }
1509
/*
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started
 * by a previous call to dwc2_hc_start_transfer() in Slave mode.
 *
 * For an IN transfer the channel is simply re-enabled to request another
 * data packet. For an OUT transfer the next packet is written into the Tx
 * FIFO if there is still data to send. Splits and SETUP transactions never
 * have more than one transaction per transfer, so nothing is done for them.
 *
 * Return: 1 if a new request was issued for the channel, 0 otherwise
 */
1530 static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1531 struct dwc2_host_chan *chan)
1532 {
1533 if (dbg_hc(chan))
1534 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1535 chan->hc_num);
1536
1537 if (chan->do_split)
1538
1539 return 0;
1540
1541 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1542
1543 return 0;
1544
1545 if (chan->ep_is_in) {
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558 u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
1559
1560 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1561 hcchar |= HCCHAR_CHENA;
1562 hcchar &= ~HCCHAR_CHDIS;
1563 if (dbg_hc(chan))
1564 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1565 hcchar);
1566 dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
1567 chan->requests++;
1568 return 1;
1569 }
1570
1571
1572
1573 if (chan->xfer_count < chan->xfer_len) {
1574 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1575 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1576 u32 hcchar = dwc2_readl(hsotg,
1577 HCCHAR(chan->hc_num));
1578
1579 dwc2_hc_set_even_odd_frame(hsotg, chan,
1580 &hcchar);
1581 }
1582
1583
1584 dwc2_hc_write_packet(hsotg, chan);
1585 chan->requests++;
1586 return 1;
1587 }
1588
1589 return 0;
1590 }
1591
/*
 * Iterates over all QTDs on every QH in the given schedule list, giving
 * each URB back with an -ECONNRESET status and freeing the QTD. Called
 * with the HCD spinlock held.
 */
1604 static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
1605 struct list_head *qh_list)
1606 {
1607 struct dwc2_qh *qh, *qh_tmp;
1608 struct dwc2_qtd *qtd, *qtd_tmp;
1609
1610 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
1611 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
1612 qtd_list_entry) {
1613 dwc2_host_complete(hsotg, qtd, -ECONNRESET);
1614 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1615 }
1616 }
1617 }
1618
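/*
 * Empties the given QH list: kills the URBs on every QH, then unlinks and
 * frees each QH and its QTDs, dropping the HCD lock around the final
 * dwc2_hcd_qh_free() call.
 */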
1619 static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
1620 struct list_head *qh_list)
1621 {
1622 struct dwc2_qtd *qtd, *qtd_tmp;
1623 struct dwc2_qh *qh, *qh_tmp;
1624 unsigned long flags;
1625
1626 if (!qh_list->next)
1627
1628 return;
1629
1630 spin_lock_irqsave(&hsotg->lock, flags);
1631
1632
1633 dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
1634
1635 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
1636 dwc2_hcd_qh_unlink(hsotg, qh);
1637
1638
1639 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
1640 qtd_list_entry)
1641 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1642
1643 if (qh->channel && qh->channel->qh == qh)
1644 qh->channel->qh = NULL;
1645
1646 spin_unlock_irqrestore(&hsotg->lock, flags);
1647 dwc2_hcd_qh_free(hsotg, qh);
1648 spin_lock_irqsave(&hsotg->lock, flags);
1649 }
1650
1651 spin_unlock_irqrestore(&hsotg->lock, flags);
1652 }
1653
/*
 * Responds with an error status of -ECONNRESET to all URBs currently in
 * the non-periodic and periodic schedules.
 */
1662 static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
1663 {
1664 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
1665 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
1666 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
1667 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
1668 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
1669 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
1670 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
1671 }
1672
/*
 * dwc2_hcd_start() - Starts the HCD when switching to Host mode. If the
 * core is operating as B-host the port is reset first, then the deferred
 * start work is scheduled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
1678 void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
1679 {
1680 u32 hprt0;
1681
1682 if (hsotg->op_state == OTG_STATE_B_HOST) {
1683
1684
1685
1686
1687
1688 hprt0 = dwc2_read_hprt0(hsotg);
1689 hprt0 |= HPRT0_RST;
1690 dwc2_writel(hsotg, hprt0, HPRT0);
1691 }
1692
1693 queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
1694 msecs_to_jiffies(50));
1695 }
1696
1697
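/*
 * Cleans up all host channels after a disconnect or stop: disables any
 * channels that are still enabled, resets their state and returns them to
 * the free channel list.
 */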
1698 static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
1699 {
1700 int num_channels = hsotg->params.host_channels;
1701 struct dwc2_host_chan *channel;
1702 u32 hcchar;
1703 int i;
1704
1705 if (!hsotg->params.host_dma) {
1706
1707 for (i = 0; i < num_channels; i++) {
1708 channel = hsotg->hc_ptr_array[i];
1709 if (!list_empty(&channel->hc_list_entry))
1710 continue;
1711 hcchar = dwc2_readl(hsotg, HCCHAR(i));
1712 if (hcchar & HCCHAR_CHENA) {
1713 hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
1714 hcchar |= HCCHAR_CHDIS;
1715 dwc2_writel(hsotg, hcchar, HCCHAR(i));
1716 }
1717 }
1718 }
1719
1720 for (i = 0; i < num_channels; i++) {
1721 channel = hsotg->hc_ptr_array[i];
1722 if (!list_empty(&channel->hc_list_entry))
1723 continue;
1724 hcchar = dwc2_readl(hsotg, HCCHAR(i));
1725 if (hcchar & HCCHAR_CHENA) {
1726
1727 hcchar |= HCCHAR_CHDIS;
1728 dwc2_writel(hsotg, hcchar, HCCHAR(i));
1729 }
1730
1731 dwc2_hc_cleanup(hsotg, channel);
1732 list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
1733
1734
1735
1736
1737
1738 channel->qh = NULL;
1739 }
1740
1741 if (hsotg->params.uframe_sched) {
1742 hsotg->available_host_channels =
1743 hsotg->params.host_channels;
1744 } else {
1745 hsotg->non_periodic_channels = 0;
1746 hsotg->periodic_channels = 0;
1747 }
1748 }
1749
/*
 * dwc2_hcd_connect() - Handles a device connect: resumes the root hub if
 * the controller is not in the L0 state and flags a port connect status
 * change for the hub driver.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
1757 void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
1758 {
1759 if (hsotg->lx_state != DWC2_L0)
1760 usb_hcd_resume_root_hub(hsotg->priv);
1761
1762 hsotg->flags.b.port_connect_status_change = 1;
1763 hsotg->flags.b.port_connect_status = 1;
1764 }
1765
/*
 * dwc2_hcd_disconnect() - Handles a device disconnect: flags the connect
 * status change, masks the Tx FIFO empty and host channel interrupts,
 * kills all pending URBs and cleans up the host channels. If @force is
 * false and the port turns out to still be connected, a new connect event
 * is reported instead.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @force: If true, do not report a reconnect even if the port is connected
 */
1774 void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
1775 {
1776 u32 intr;
1777 u32 hprt0;
1778
1779
1780 hsotg->flags.b.port_connect_status_change = 1;
1781 hsotg->flags.b.port_connect_status = 0;
1782
1783
1784
1785
1786
1787
1788 intr = dwc2_readl(hsotg, GINTMSK);
1789 intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
1790 dwc2_writel(hsotg, intr, GINTMSK);
1791 intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
1792 dwc2_writel(hsotg, intr, GINTSTS);
1793
1794
1795
1796
1797
1798
1799 if (dwc2_is_device_mode(hsotg)) {
1800 if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
1801 dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
1802 dwc2_writel(hsotg, 0, HPRT0);
1803 }
1804
1805 dwc2_disable_host_interrupts(hsotg);
1806 }
1807
1808
1809 dwc2_kill_all_urbs(hsotg);
1810
1811 if (dwc2_is_host_mode(hsotg))
1812
1813 dwc2_hcd_cleanup_channels(hsotg);
1814
1815 dwc2_host_disconnect(hsotg);
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829 if (!force) {
1830 hprt0 = dwc2_readl(hsotg, HPRT0);
1831 if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
1832 dwc2_hcd_connect(hsotg);
1833 }
1834 }
1835
/*
 * Handles a remote wakeup: flags a port suspend (or L1) status change and
 * resumes the root hub if the bus was suspended.
 */
1841 static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
1842 {
1843 if (hsotg->bus_suspended) {
1844 hsotg->flags.b.port_suspend_change = 1;
1845 usb_hcd_resume_root_hub(hsotg->priv);
1846 }
1847
1848 if (hsotg->lx_state == DWC2_L1)
1849 hsotg->flags.b.port_l1_change = 1;
1850 }
1851
/*
 * dwc2_hcd_stop() - Halts host mode operation: disables the host
 * interrupts and turns off port power.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
1859 void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
1860 {
1861 dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
1862
1863
1864
1865
1866
1867
1868
1869
1870 dwc2_disable_host_interrupts(hsotg);
1871
1872
1873 dev_dbg(hsotg->dev, "PortPower off\n");
1874 dwc2_writel(hsotg, 0, HPRT0);
1875 }
1876
1877
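/*
 * Adds the URB's QTD to its QH and, if SOF interrupts are not already
 * driving the schedule, kicks off transaction processing right away.
 * Returns -ENODEV when no device is connected or when the PHY
 * configuration cannot support a low-speed device on a full-speed port.
 */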
1878 static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
1879 struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
1880 struct dwc2_qtd *qtd)
1881 {
1882 u32 intr_mask;
1883 int retval;
1884 int dev_speed;
1885
1886 if (!hsotg->flags.b.port_connect_status) {
1887
1888 dev_err(hsotg->dev, "Not connected\n");
1889 return -ENODEV;
1890 }
1891
1892 dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
1893
1894
1895 if ((dev_speed == USB_SPEED_LOW) &&
1896 (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
1897 (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
1898 u32 hprt0 = dwc2_readl(hsotg, HPRT0);
1899 u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
1900
1901 if (prtspd == HPRT0_SPD_FULL_SPEED)
1902 return -ENODEV;
1903 }
1904
1905 if (!qtd)
1906 return -EINVAL;
1907
1908 dwc2_hcd_qtd_init(qtd, urb);
1909 retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
1910 if (retval) {
1911 dev_err(hsotg->dev,
1912 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
1913 retval);
1914 return retval;
1915 }
1916
1917 intr_mask = dwc2_readl(hsotg, GINTMSK);
1918 if (!(intr_mask & GINTSTS_SOF)) {
1919 enum dwc2_transaction_type tr_type;
1920
1921 if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
1922 !(qtd->urb->flags & URB_GIVEBACK_ASAP))
1923
1924
1925
1926
1927 return 0;
1928
1929 tr_type = dwc2_hcd_select_transactions(hsotg);
1930 if (tr_type != DWC2_TRANSACTION_NONE)
1931 dwc2_hcd_queue_transactions(hsotg, tr_type);
1932 }
1933
1934 return 0;
1935 }
1936
1937
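/*
 * Removes an URB from its QH. If the URB is currently being processed on a
 * host channel, the channel is halted with DWC2_HC_XFER_URB_DEQUEUE; in
 * non-descriptor-DMA mode the QH is also deactivated or unlinked as needed.
 */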
1938 static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
1939 struct dwc2_hcd_urb *urb)
1940 {
1941 struct dwc2_qh *qh;
1942 struct dwc2_qtd *urb_qtd;
1943
1944 urb_qtd = urb->qtd;
1945 if (!urb_qtd) {
1946 dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
1947 return -EINVAL;
1948 }
1949
1950 qh = urb_qtd->qh;
1951 if (!qh) {
1952 dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
1953 return -EINVAL;
1954 }
1955
1956 urb->priv = NULL;
1957
1958 if (urb_qtd->in_process && qh->channel) {
1959 dwc2_dump_channel_info(hsotg, qh->channel);
1960
1961
1962 if (hsotg->flags.b.port_connect_status)
1963
1964
1965
1966
1967
1968
1969
1970 dwc2_hc_halt(hsotg, qh->channel,
1971 DWC2_HC_XFER_URB_DEQUEUE);
1972 }
1973
1974
1975
1976
1977
1978 if (!hsotg->params.dma_desc_enable) {
1979 u8 in_process = urb_qtd->in_process;
1980
1981 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
1982 if (in_process) {
1983 dwc2_hcd_qh_deactivate(hsotg, qh, 0);
1984 qh->channel = NULL;
1985 } else if (list_empty(&qh->qtd_list)) {
1986 dwc2_hcd_qh_unlink(hsotg, qh);
1987 }
1988 } else {
1989 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
1990 }
1991
1992 return 0;
1993 }
1994
1995
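/*
 * Waits (up to @retry * 20 ms) for the endpoint's QH to drain, then unlinks
 * and frees the QH and any remaining QTDs. Returns -EBUSY on timeout and
 * -EINVAL if the endpoint has no QH.
 */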
1996 static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
1997 struct usb_host_endpoint *ep, int retry)
1998 {
1999 struct dwc2_qtd *qtd, *qtd_tmp;
2000 struct dwc2_qh *qh;
2001 unsigned long flags;
2002 int rc;
2003
2004 spin_lock_irqsave(&hsotg->lock, flags);
2005
2006 qh = ep->hcpriv;
2007 if (!qh) {
2008 rc = -EINVAL;
2009 goto err;
2010 }
2011
2012 while (!list_empty(&qh->qtd_list) && retry--) {
2013 if (retry == 0) {
2014 dev_err(hsotg->dev,
2015 "## timeout in dwc2_hcd_endpoint_disable() ##\n");
2016 rc = -EBUSY;
2017 goto err;
2018 }
2019
2020 spin_unlock_irqrestore(&hsotg->lock, flags);
2021 msleep(20);
2022 spin_lock_irqsave(&hsotg->lock, flags);
2023 qh = ep->hcpriv;
2024 if (!qh) {
2025 rc = -EINVAL;
2026 goto err;
2027 }
2028 }
2029
2030 dwc2_hcd_qh_unlink(hsotg, qh);
2031
2032
2033 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
2034 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
2035
2036 ep->hcpriv = NULL;
2037
2038 if (qh->channel && qh->channel->qh == qh)
2039 qh->channel->qh = NULL;
2040
2041 spin_unlock_irqrestore(&hsotg->lock, flags);
2042
2043 dwc2_hcd_qh_free(hsotg, qh);
2044
2045 return 0;
2046
2047 err:
2048 ep->hcpriv = NULL;
2049 spin_unlock_irqrestore(&hsotg->lock, flags);
2050
2051 return rc;
2052 }
2053
2054
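/* Resets the data toggle of the endpoint's QH back to DATA0 */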
2055 static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
2056 struct usb_host_endpoint *ep)
2057 {
2058 struct dwc2_qh *qh = ep->hcpriv;
2059
2060 if (!qh)
2061 return -EINVAL;
2062
2063 qh->data_toggle = DWC2_HC_PID_DATA0;
2064
2065 return 0;
2066 }
2067
/*
 * dwc2_core_init() - Initializes the DWC_otg controller registers for host
 * or device mode operation: programs the ULPI/term-select bits of GUSBCFG,
 * resets the core on re-initialization, sets up the PHY and AHB
 * configuration, clears the OTG version selection in GOTGCTL, enables the
 * common interrupts and records the current operating mode.
 *
 * @hsotg:         Programming view of the DWC_otg controller
 * @initial_setup: If true, this is the first-time core setup and the core
 *                 reset is skipped because it has already been performed
 */
2075 int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
2076 {
2077 u32 usbcfg, otgctl;
2078 int retval;
2079
2080 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
2081
2082 usbcfg = dwc2_readl(hsotg, GUSBCFG);
2083
2084
2085 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
2086 if (hsotg->params.phy_ulpi_ext_vbus)
2087 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
2088
2089
2090 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
2091 if (hsotg->params.ts_dline)
2092 usbcfg |= GUSBCFG_TERMSELDLPULSE;
2093
2094 dwc2_writel(hsotg, usbcfg, GUSBCFG);
2095
2096
2097
2098
2099
2100
2101
2102
2103 if (!initial_setup) {
2104 retval = dwc2_core_reset(hsotg, false);
2105 if (retval) {
2106 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
2107 __func__);
2108 return retval;
2109 }
2110 }
2111
2112
2113
2114
2115 retval = dwc2_phy_init(hsotg, initial_setup);
2116 if (retval)
2117 return retval;
2118
2119
2120 retval = dwc2_gahbcfg_init(hsotg);
2121 if (retval)
2122 return retval;
2123
2124
2125 dwc2_gusbcfg_init(hsotg);
2126
2127
2128 otgctl = dwc2_readl(hsotg, GOTGCTL);
2129 otgctl &= ~GOTGCTL_OTGVER;
2130 dwc2_writel(hsotg, otgctl, GOTGCTL);
2131
2132
2133 hsotg->srp_success = 0;
2134
2135
2136 dwc2_enable_common_interrupts(hsotg);
2137
2138
2139
2140
2141
2142 if (dwc2_is_host_mode(hsotg)) {
2143 dev_dbg(hsotg->dev, "Host Mode\n");
2144 hsotg->op_state = OTG_STATE_A_HOST;
2145 } else {
2146 dev_dbg(hsotg->dev, "Device Mode\n");
2147 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
2148 }
2149
2150 return 0;
2151 }
2152
/*
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode: restarts the PHY clock, selects the FS/LS support mode,
 * enables descriptor DMA if supported, configures and flushes the FIFOs,
 * halts any channels left enabled in buffer DMA mode, powers the port when
 * acting as A-host and enables the host interrupts.
 *
 * This function flushes the Tx FIFOs but does not perform a core reset.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
2163 static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
2164 {
2165 u32 hcfg, hfir, otgctl, usbcfg;
2166
2167 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177 usbcfg = dwc2_readl(hsotg, GUSBCFG);
2178 usbcfg |= GUSBCFG_TOUTCAL(7);
2179 dwc2_writel(hsotg, usbcfg, GUSBCFG);
2180
2181
2182 dwc2_writel(hsotg, 0, PCGCTL);
2183
2184
2185 dwc2_init_fs_ls_pclk_sel(hsotg);
2186 if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
2187 hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
2188 hcfg = dwc2_readl(hsotg, HCFG);
2189 hcfg |= HCFG_FSLSSUPP;
2190 dwc2_writel(hsotg, hcfg, HCFG);
2191 }
2192
2193
2194
2195
2196
2197
2198 if (hsotg->params.reload_ctl) {
2199 hfir = dwc2_readl(hsotg, HFIR);
2200 hfir |= HFIR_RLDCTRL;
2201 dwc2_writel(hsotg, hfir, HFIR);
2202 }
2203
2204 if (hsotg->params.dma_desc_enable) {
2205 u32 op_mode = hsotg->hw_params.op_mode;
2206
2207 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
2208 !hsotg->hw_params.dma_desc_enable ||
2209 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
2210 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
2211 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
2212 dev_err(hsotg->dev,
2213 "Hardware does not support descriptor DMA mode -\n");
2214 dev_err(hsotg->dev,
2215 "falling back to buffer DMA mode.\n");
2216 hsotg->params.dma_desc_enable = false;
2217 } else {
2218 hcfg = dwc2_readl(hsotg, HCFG);
2219 hcfg |= HCFG_DESCDMA;
2220 dwc2_writel(hsotg, hcfg, HCFG);
2221 }
2222 }
2223
2224
2225 dwc2_config_fifos(hsotg);
2226
2227
2228
2229 otgctl = dwc2_readl(hsotg, GOTGCTL);
2230 otgctl &= ~GOTGCTL_HSTSETHNPEN;
2231 dwc2_writel(hsotg, otgctl, GOTGCTL);
2232
2233
2234 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
2235 dwc2_flush_rx_fifo(hsotg);
2236
2237
2238 otgctl = dwc2_readl(hsotg, GOTGCTL);
2239 otgctl &= ~GOTGCTL_HSTSETHNPEN;
2240 dwc2_writel(hsotg, otgctl, GOTGCTL);
2241
2242 if (!hsotg->params.dma_desc_enable) {
2243 int num_channels, i;
2244 u32 hcchar;
2245
2246
2247 num_channels = hsotg->params.host_channels;
2248 for (i = 0; i < num_channels; i++) {
2249 hcchar = dwc2_readl(hsotg, HCCHAR(i));
2250 if (hcchar & HCCHAR_CHENA) {
2251 hcchar &= ~HCCHAR_CHENA;
2252 hcchar |= HCCHAR_CHDIS;
2253 hcchar &= ~HCCHAR_EPDIR;
2254 dwc2_writel(hsotg, hcchar, HCCHAR(i));
2255 }
2256 }
2257
2258
2259 for (i = 0; i < num_channels; i++) {
2260 hcchar = dwc2_readl(hsotg, HCCHAR(i));
2261 if (hcchar & HCCHAR_CHENA) {
2262 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
2263 hcchar &= ~HCCHAR_EPDIR;
2264 dwc2_writel(hsotg, hcchar, HCCHAR(i));
2265 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
2266 __func__, i);
2267
2268 if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
2269 HCCHAR_CHENA,
2270 1000)) {
2271 dev_warn(hsotg->dev,
2272 "Unable to clear enable on channel %d\n",
2273 i);
2274 }
2275 }
2276 }
2277 }
2278
2279
2280 dwc2_enable_acg(hsotg);
2281
2282
2283 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
2284 if (hsotg->op_state == OTG_STATE_A_HOST) {
2285 u32 hprt0 = dwc2_read_hprt0(hsotg);
2286
2287 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
2288 !!(hprt0 & HPRT0_PWR));
2289 if (!(hprt0 & HPRT0_PWR)) {
2290 hprt0 |= HPRT0_PWR;
2291 dwc2_writel(hsotg, hprt0, HPRT0);
2292 }
2293 }
2294
2295 dwc2_enable_host_interrupts(hsotg);
2296 }
2297
/*
 * Resets the HCD state: clears the status flags, rebuilds the free host
 * channel list from the channel pointer array and re-runs the host core
 * initialization.
 */
2303 static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
2304 {
2305 struct dwc2_host_chan *chan, *chan_tmp;
2306 int num_channels;
2307 int i;
2308
2309 hsotg->flags.d32 = 0;
2310 hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
2311
2312 if (hsotg->params.uframe_sched) {
2313 hsotg->available_host_channels =
2314 hsotg->params.host_channels;
2315 } else {
2316 hsotg->non_periodic_channels = 0;
2317 hsotg->periodic_channels = 0;
2318 }
2319
2320
2321
2322
2323
2324 list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
2325 hc_list_entry)
2326 list_del_init(&chan->hc_list_entry);
2327
2328 num_channels = hsotg->params.host_channels;
2329 for (i = 0; i < num_channels; i++) {
2330 chan = hsotg->hc_ptr_array[i];
2331 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
2332 dwc2_hc_cleanup(hsotg, chan);
2333 }
2334
2335
2336 dwc2_core_host_init(hsotg);
2337 }
2338
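/*
 * Sets up the split-transaction fields of a host channel (transaction
 * position, complete-split flag, hub address and port) from the QTD and
 * the URB's transaction translator info.
 */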
2339 static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
2340 struct dwc2_host_chan *chan,
2341 struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
2342 {
2343 int hub_addr, hub_port;
2344
2345 chan->do_split = 1;
2346 chan->xact_pos = qtd->isoc_split_pos;
2347 chan->complete_split = qtd->complete_split;
2348 dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
2349 chan->hub_addr = (u8)hub_addr;
2350 chan->hub_port = (u8)hub_port;
2351 }
2352
2353 static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
2354 struct dwc2_host_chan *chan,
2355 struct dwc2_qtd *qtd)
2356 {
2357 struct dwc2_hcd_urb *urb = qtd->urb;
2358 struct dwc2_hcd_iso_packet_desc *frame_desc;
2359
2360 switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
2361 case USB_ENDPOINT_XFER_CONTROL:
2362 chan->ep_type = USB_ENDPOINT_XFER_CONTROL;
2363
2364 switch (qtd->control_phase) {
2365 case DWC2_CONTROL_SETUP:
2366 dev_vdbg(hsotg->dev, " Control setup transaction\n");
2367 chan->do_ping = 0;
2368 chan->ep_is_in = 0;
2369 chan->data_pid_start = DWC2_HC_PID_SETUP;
2370 if (hsotg->params.host_dma)
2371 chan->xfer_dma = urb->setup_dma;
2372 else
2373 chan->xfer_buf = urb->setup_packet;
2374 chan->xfer_len = 8;
2375 break;
2376
2377 case DWC2_CONTROL_DATA:
2378 dev_vdbg(hsotg->dev, " Control data transaction\n");
2379 chan->data_pid_start = qtd->data_toggle;
2380 break;
2381
2382 case DWC2_CONTROL_STATUS:
2383
2384
2385
2386
2387 dev_vdbg(hsotg->dev, " Control status transaction\n");
2388 if (urb->length == 0)
2389 chan->ep_is_in = 1;
2390 else
2391 chan->ep_is_in =
2392 dwc2_hcd_is_pipe_out(&urb->pipe_info);
2393 if (chan->ep_is_in)
2394 chan->do_ping = 0;
2395 chan->data_pid_start = DWC2_HC_PID_DATA1;
2396 chan->xfer_len = 0;
2397 if (hsotg->params.host_dma)
2398 chan->xfer_dma = hsotg->status_buf_dma;
2399 else
2400 chan->xfer_buf = hsotg->status_buf;
2401 break;
2402 }
2403 break;
2404
2405 case USB_ENDPOINT_XFER_BULK:
2406 chan->ep_type = USB_ENDPOINT_XFER_BULK;
2407 break;
2408
2409 case USB_ENDPOINT_XFER_INT:
2410 chan->ep_type = USB_ENDPOINT_XFER_INT;
2411 break;
2412
2413 case USB_ENDPOINT_XFER_ISOC:
2414 chan->ep_type = USB_ENDPOINT_XFER_ISOC;
2415 if (hsotg->params.dma_desc_enable)
2416 break;
2417
2418 frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
2419 frame_desc->status = 0;
2420
2421 if (hsotg->params.host_dma) {
2422 chan->xfer_dma = urb->dma;
2423 chan->xfer_dma += frame_desc->offset +
2424 qtd->isoc_split_offset;
2425 } else {
2426 chan->xfer_buf = urb->buf;
2427 chan->xfer_buf += frame_desc->offset +
2428 qtd->isoc_split_offset;
2429 }
2430
2431 chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
2432
2433 if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
2434 if (chan->xfer_len <= 188)
2435 chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
2436 else
2437 chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
2438 }
2439 break;
2440 }
2441 }
2442
2443 static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
2444 struct dwc2_qh *qh,
2445 struct dwc2_host_chan *chan)
2446 {
2447 if (!hsotg->unaligned_cache ||
2448 chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
2449 return -ENOMEM;
2450
2451 if (!qh->dw_align_buf) {
2452 qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
2453 GFP_ATOMIC | GFP_DMA);
2454 if (!qh->dw_align_buf)
2455 return -ENOMEM;
2456 }
2457
2458 qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
2459 DWC2_KMEM_UNALIGNED_BUF_SIZE,
2460 DMA_FROM_DEVICE);
2461
2462 if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
2463 dev_err(hsotg->dev, "can't map align_buf\n");
2464 chan->align_buf = 0;
2465 return -EINVAL;
2466 }
2467
2468 chan->align_buf = qh->dw_align_buf_dma;
2469 return 0;
2470 }
2471
2472 #define DWC2_USB_DMA_ALIGN 4
2473
2474 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
2475 {
2476 void *stored_xfer_buffer;
2477 size_t length;
2478
2479 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2480 return;
2481
2482
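/*
 * Recover the original transfer_buffer pointer that
 * dwc2_alloc_dma_aligned_buffer() stashed just past the end of the
 * bounce buffer.
 */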
2483 memcpy(&stored_xfer_buffer,
2484 PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
2485 dma_get_cache_alignment()),
2486 sizeof(urb->transfer_buffer));
2487
2488 if (usb_urb_dir_in(urb)) {
2489 if (usb_pipeisoc(urb->pipe))
2490 length = urb->transfer_buffer_length;
2491 else
2492 length = urb->actual_length;
2493
2494 memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
2495 }
2496 kfree(urb->transfer_buffer);
2497 urb->transfer_buffer = stored_xfer_buffer;
2498
2499 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2500 }
2501
2502 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2503 {
2504 void *kmalloc_ptr;
2505 size_t kmalloc_size;
2506
2507 if (urb->num_sgs || urb->sg ||
2508 urb->transfer_buffer_length == 0 ||
2509 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
2510 return 0;
2511
2512
2513
2514
2515
2516
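/*
 * Allocate one buffer large enough for the cache-aligned data area
 * plus room to stash the original transfer_buffer pointer behind it.
 */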
2517 kmalloc_size = urb->transfer_buffer_length +
2518 (dma_get_cache_alignment() - 1) +
2519 sizeof(urb->transfer_buffer);
2520
2521 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2522 if (!kmalloc_ptr)
2523 return -ENOMEM;
2524
2525
2526
2527
2528
2529 memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
2530 dma_get_cache_alignment()),
2531 &urb->transfer_buffer, sizeof(urb->transfer_buffer));
2532
2533 if (usb_urb_dir_out(urb))
2534 memcpy(kmalloc_ptr, urb->transfer_buffer,
2535 urb->transfer_buffer_length);
2536 urb->transfer_buffer = kmalloc_ptr;
2537
2538 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2539
2540 return 0;
2541 }
2542
2543 static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2544 gfp_t mem_flags)
2545 {
2546 int ret;
2547
2548
2549 WARN_ON_ONCE(urb->setup_dma &&
2550 (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));
2551
2552 ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
2553 if (ret)
2554 return ret;
2555
2556 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2557 if (ret)
2558 dwc2_free_dma_aligned_buffer(urb);
2559
2560 return ret;
2561 }
2562
2563 static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2564 {
2565 usb_hcd_unmap_urb_for_dma(hcd, urb);
2566 dwc2_free_dma_aligned_buffer(urb);
2567 }
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
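/*
 * dwc2_assign_and_init_hc() - Take a channel from the free list, bind it
 * to the given QH and initialize it for the QH's next QTD. Returns 0 on
 * success, or -ENOMEM if the QH has no QTDs, no channel is free, or the
 * bounce buffer for a non-aligned split IN transfer cannot be allocated.
 */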
2578 static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
2579 {
2580 struct dwc2_host_chan *chan;
2581 struct dwc2_hcd_urb *urb;
2582 struct dwc2_qtd *qtd;
2583
2584 if (dbg_qh(qh))
2585 dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
2586
2587 if (list_empty(&qh->qtd_list)) {
2588 dev_dbg(hsotg->dev, "No QTDs in QH list\n");
2589 return -ENOMEM;
2590 }
2591
2592 if (list_empty(&hsotg->free_hc_list)) {
2593 dev_dbg(hsotg->dev, "No free channel to assign\n");
2594 return -ENOMEM;
2595 }
2596
2597 chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
2598 hc_list_entry);
2599
2600
2601 list_del_init(&chan->hc_list_entry);
2602
2603 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
2604 urb = qtd->urb;
2605 qh->channel = chan;
2606 qtd->in_process = 1;
2607
2608
2609
2610
2611
2612 chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
2613 chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
2614 chan->speed = qh->dev_speed;
2615 chan->max_packet = qh->maxp;
2616
2617 chan->xfer_started = 0;
2618 chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
2619 chan->error_state = (qtd->error_count > 0);
2620 chan->halt_on_queue = 0;
2621 chan->halt_pending = 0;
2622 chan->requests = 0;
2623
2624
2625
2626
2627
2628
2629
2630
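/*
 * IN endpoints never use PING; OUT endpoints inherit the PING state
 * tracked in the QH (high-speed PING protocol).
 */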
2631 chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
2632 if (chan->ep_is_in)
2633 chan->do_ping = 0;
2634 else
2635 chan->do_ping = qh->ping_state;
2636
2637 chan->data_pid_start = qh->data_toggle;
2638 chan->multi_count = 1;
2639
2640 if (urb->actual_length > urb->length &&
2641 !dwc2_hcd_is_pipe_in(&urb->pipe_info))
2642 urb->actual_length = urb->length;
2643
2644 if (hsotg->params.host_dma)
2645 chan->xfer_dma = urb->dma + urb->actual_length;
2646 else
2647 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
2648
2649 chan->xfer_len = urb->length - urb->actual_length;
2650 chan->xfer_count = 0;
2651
2652
2653 if (qh->do_split)
2654 dwc2_hc_init_split(hsotg, chan, qtd, urb);
2655 else
2656 chan->do_split = 0;
2657
2658
2659 dwc2_hc_init_xfer(hsotg, chan, qtd);
2660
2661
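/*
 * In DMA mode a split IN transfer needs a word-aligned buffer; if the
 * mapped address is not aligned, fall back to the bounce buffer.
 */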
2662 if (hsotg->params.host_dma && qh->do_split &&
2663 chan->ep_is_in && (chan->xfer_dma & 0x3)) {
2664 dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
2665 if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
2666 dev_err(hsotg->dev,
2667 "Failed to allocate memory to handle non-aligned buffer\n");
2668
2669 chan->align_buf = 0;
2670 chan->multi_count = 0;
2671 list_add_tail(&chan->hc_list_entry,
2672 &hsotg->free_hc_list);
2673 qtd->in_process = 0;
2674 qh->channel = NULL;
2675 return -ENOMEM;
2676 }
2677 } else {
2678
2679
2680
2681
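/*
 * All other DMA transfers are expected to be word aligned already;
 * warn if they are not.
 */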
2682 WARN_ON_ONCE(hsotg->params.host_dma &&
2683 (chan->xfer_dma & 0x3));
2684 chan->align_buf = 0;
2685 }
2686
2687 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2688 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
2689
2690
2691
2692
2693 chan->multi_count = qh->maxp_mult;
2694
2695 if (hsotg->params.dma_desc_enable) {
2696 chan->desc_list_addr = qh->desc_list_dma;
2697 chan->desc_list_sz = qh->desc_list_sz;
2698 }
2699
2700 dwc2_hc_init(hsotg, chan);
2701 chan->qh = qh;
2702
2703 return 0;
2704 }
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
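/*
 * dwc2_hcd_select_transactions() - Walk the periodic "ready" and
 * non-periodic "inactive" schedules, assigning free host channels to QHs
 * and moving them to the assigned/active lists. Returns which transaction
 * types (periodic, non-periodic, both, or none) now have channels to
 * service.
 */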
2715 enum dwc2_transaction_type dwc2_hcd_select_transactions(
2716 struct dwc2_hsotg *hsotg)
2717 {
2718 enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
2719 struct list_head *qh_ptr;
2720 struct dwc2_qh *qh;
2721 int num_channels;
2722
2723 #ifdef DWC2_DEBUG_SOF
2724 dev_vdbg(hsotg->dev, " Select Transactions\n");
2725 #endif
2726
2727
2728 qh_ptr = hsotg->periodic_sched_ready.next;
2729 while (qh_ptr != &hsotg->periodic_sched_ready) {
2730 if (list_empty(&hsotg->free_hc_list))
2731 break;
2732 if (hsotg->params.uframe_sched) {
2733 if (hsotg->available_host_channels <= 1)
2734 break;
2735 hsotg->available_host_channels--;
2736 }
2737 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
2738 if (dwc2_assign_and_init_hc(hsotg, qh))
2739 break;
2740
2741
2742
2743
2744
2745 qh_ptr = qh_ptr->next;
2746 list_move_tail(&qh->qh_list_entry,
2747 &hsotg->periodic_sched_assigned);
2748 ret_val = DWC2_TRANSACTION_PERIODIC;
2749 }
2750
2751
2752
2753
2754
2755
2756 num_channels = hsotg->params.host_channels;
2757 qh_ptr = hsotg->non_periodic_sched_inactive.next;
2758 while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
2759 if (!hsotg->params.uframe_sched &&
2760 hsotg->non_periodic_channels >= num_channels -
2761 hsotg->periodic_channels)
2762 break;
2763 if (list_empty(&hsotg->free_hc_list))
2764 break;
2765 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
2766 if (hsotg->params.uframe_sched) {
2767 if (hsotg->available_host_channels < 1)
2768 break;
2769 hsotg->available_host_channels--;
2770 }
2771
2772 if (dwc2_assign_and_init_hc(hsotg, qh))
2773 break;
2774
2775
2776
2777
2778
2779 qh_ptr = qh_ptr->next;
2780 list_move_tail(&qh->qh_list_entry,
2781 &hsotg->non_periodic_sched_active);
2782
2783 if (ret_val == DWC2_TRANSACTION_NONE)
2784 ret_val = DWC2_TRANSACTION_NON_PERIODIC;
2785 else
2786 ret_val = DWC2_TRANSACTION_ALL;
2787
2788 if (!hsotg->params.uframe_sched)
2789 hsotg->non_periodic_channels++;
2790 }
2791
2792 return ret_val;
2793 }
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
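/*
 * dwc2_queue_transaction() - Start or continue a transfer on an assigned
 * host channel. In DMA mode the transfer is simply (re)started. In slave
 * mode the return value reports progress: 1 if a request was queued and
 * more may follow, 0 if no more requests are needed for this channel
 * right now, and a negative value if the Tx FIFO lacks space for a full
 * packet.
 */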
2816 static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
2817 struct dwc2_host_chan *chan,
2818 u16 fifo_dwords_avail)
2819 {
2820 int retval = 0;
2821
2822 if (chan->do_split)
2823
2824 list_move_tail(&chan->split_order_list_entry,
2825 &hsotg->split_order);
2826
2827 if (hsotg->params.host_dma) {
2828 if (hsotg->params.dma_desc_enable) {
2829 if (!chan->xfer_started ||
2830 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
2831 dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
2832 chan->qh->ping_state = 0;
2833 }
2834 } else if (!chan->xfer_started) {
2835 dwc2_hc_start_transfer(hsotg, chan);
2836 chan->qh->ping_state = 0;
2837 }
2838 } else if (chan->halt_pending) {
2839
2840 } else if (chan->halt_on_queue) {
2841 dwc2_hc_halt(hsotg, chan, chan->halt_status);
2842 } else if (chan->do_ping) {
2843 if (!chan->xfer_started)
2844 dwc2_hc_start_transfer(hsotg, chan);
2845 } else if (!chan->ep_is_in ||
2846 chan->data_pid_start == DWC2_HC_PID_SETUP) {
2847 if ((fifo_dwords_avail * 4) >= chan->max_packet) {
2848 if (!chan->xfer_started) {
2849 dwc2_hc_start_transfer(hsotg, chan);
2850 retval = 1;
2851 } else {
2852 retval = dwc2_hc_continue_transfer(hsotg, chan);
2853 }
2854 } else {
2855 retval = -1;
2856 }
2857 } else {
2858 if (!chan->xfer_started) {
2859 dwc2_hc_start_transfer(hsotg, chan);
2860 retval = 1;
2861 } else {
2862 retval = dwc2_hc_continue_transfer(hsotg, chan);
2863 }
2864 }
2865
2866 return retval;
2867 }
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
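/*
 * dwc2_process_periodic_channels() - Queue transactions for QHs on the
 * periodic "assigned" schedule while the periodic request queue and Tx
 * FIFO have room, moving fully queued QHs to the "queued" list. The
 * periodic Tx FIFO empty interrupt is left enabled only while more
 * queuing work remains.
 */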
2878 static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
2879 {
2880 struct list_head *qh_ptr;
2881 struct dwc2_qh *qh;
2882 u32 tx_status;
2883 u32 fspcavail;
2884 u32 gintmsk;
2885 int status;
2886 bool no_queue_space = false;
2887 bool no_fifo_space = false;
2888 u32 qspcavail;
2889
2890
2891 if (list_empty(&hsotg->periodic_sched_assigned))
2892 goto exit;
2893
2894 if (dbg_perio())
2895 dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
2896
2897 tx_status = dwc2_readl(hsotg, HPTXSTS);
2898 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
2899 TXSTS_QSPCAVAIL_SHIFT;
2900 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
2901 TXSTS_FSPCAVAIL_SHIFT;
2902
2903 if (dbg_perio()) {
2904 dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
2905 qspcavail);
2906 dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n",
2907 fspcavail);
2908 }
2909
2910 qh_ptr = hsotg->periodic_sched_assigned.next;
2911 while (qh_ptr != &hsotg->periodic_sched_assigned) {
2912 tx_status = dwc2_readl(hsotg, HPTXSTS);
2913 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
2914 TXSTS_QSPCAVAIL_SHIFT;
2915 if (qspcavail == 0) {
2916 no_queue_space = true;
2917 break;
2918 }
2919
2920 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
2921 if (!qh->channel) {
2922 qh_ptr = qh_ptr->next;
2923 continue;
2924 }
2925
2926
2927 if (qh->tt_buffer_dirty) {
2928 qh_ptr = qh_ptr->next;
2929 continue;
2930 }
2931
2932
2933
2934
2935
2936
2937 if (!hsotg->params.host_dma &&
2938 qh->channel->multi_count > 1)
2939 hsotg->queuing_high_bandwidth = 1;
2940
2941 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
2942 TXSTS_FSPCAVAIL_SHIFT;
2943 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
2944 if (status < 0) {
2945 no_fifo_space = true;
2946 break;
2947 }
2948
2949
2950
2951
2952
2953
2954
2955
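/*
 * Move on to the next QH once this channel needs no further queuing
 * here: always in DMA mode, or in slave mode when the transfer needs
 * no more requests or every request of a multi-packet transfer has
 * been queued.
 */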
2956 if (hsotg->params.host_dma || status == 0 ||
2957 qh->channel->requests == qh->channel->multi_count) {
2958 qh_ptr = qh_ptr->next;
2959
2960
2961
2962
2963 list_move_tail(&qh->qh_list_entry,
2964 &hsotg->periodic_sched_queued);
2965
2966
2967 hsotg->queuing_high_bandwidth = 0;
2968 }
2969 }
2970
2971 exit:
2972 if (no_queue_space || no_fifo_space ||
2973 (!hsotg->params.host_dma &&
2974 !list_empty(&hsotg->periodic_sched_assigned))) {
2975
2976
2977
2978
2979
2980
2981
2982 gintmsk = dwc2_readl(hsotg, GINTMSK);
2983 if (!(gintmsk & GINTSTS_PTXFEMP)) {
2984 gintmsk |= GINTSTS_PTXFEMP;
2985 dwc2_writel(hsotg, gintmsk, GINTMSK);
2986 }
2987 } else {
2988
2989
2990
2991
2992
2993
2994
2995 gintmsk = dwc2_readl(hsotg, GINTMSK);
2996 if (gintmsk & GINTSTS_PTXFEMP) {
2997 gintmsk &= ~GINTSTS_PTXFEMP;
2998 dwc2_writel(hsotg, gintmsk, GINTMSK);
2999 }
3000 }
3001 }
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
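/*
 * dwc2_process_non_periodic_channels() - Queue transactions for QHs on
 * the non-periodic "active" schedule. The scan starts where the previous
 * call left off (hsotg->non_periodic_qh_ptr) so channels are serviced
 * round-robin. In slave mode the non-periodic Tx FIFO empty interrupt is
 * enabled only while more queuing work remains.
 */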
3012 static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
3013 {
3014 struct list_head *orig_qh_ptr;
3015 struct dwc2_qh *qh;
3016 u32 tx_status;
3017 u32 qspcavail;
3018 u32 fspcavail;
3019 u32 gintmsk;
3020 int status;
3021 int no_queue_space = 0;
3022 int no_fifo_space = 0;
3023 int more_to_do = 0;
3024
3025 dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
3026
3027 tx_status = dwc2_readl(hsotg, GNPTXSTS);
3028 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
3029 TXSTS_QSPCAVAIL_SHIFT;
3030 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3031 TXSTS_FSPCAVAIL_SHIFT;
3032 dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
3033 qspcavail);
3034 dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
3035 fspcavail);
3036
3037
3038
3039
3040
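/*
 * Skip the list head and remember where this pass started so the loop
 * below visits each active QH at most once.
 */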
3041 if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
3042 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
3043 orig_qh_ptr = hsotg->non_periodic_qh_ptr;
3044
3045
3046
3047
3048
3049 do {
3050 tx_status = dwc2_readl(hsotg, GNPTXSTS);
3051 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
3052 TXSTS_QSPCAVAIL_SHIFT;
3053 if (!hsotg->params.host_dma && qspcavail == 0) {
3054 no_queue_space = 1;
3055 break;
3056 }
3057
3058 qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
3059 qh_list_entry);
3060 if (!qh->channel)
3061 goto next;
3062
3063
3064 if (qh->tt_buffer_dirty)
3065 goto next;
3066
3067 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3068 TXSTS_FSPCAVAIL_SHIFT;
3069 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
3070
3071 if (status > 0) {
3072 more_to_do = 1;
3073 } else if (status < 0) {
3074 no_fifo_space = 1;
3075 break;
3076 }
3077 next:
3078
3079 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
3080 if (hsotg->non_periodic_qh_ptr ==
3081 &hsotg->non_periodic_sched_active)
3082 hsotg->non_periodic_qh_ptr =
3083 hsotg->non_periodic_qh_ptr->next;
3084 } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
3085
3086 if (!hsotg->params.host_dma) {
3087 tx_status = dwc2_readl(hsotg, GNPTXSTS);
3088 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
3089 TXSTS_QSPCAVAIL_SHIFT;
3090 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3091 TXSTS_FSPCAVAIL_SHIFT;
3092 dev_vdbg(hsotg->dev,
3093 " NP Tx Req Queue Space Avail (after queue): %d\n",
3094 qspcavail);
3095 dev_vdbg(hsotg->dev,
3096 " NP Tx FIFO Space Avail (after queue): %d\n",
3097 fspcavail);
3098
3099 if (more_to_do || no_queue_space || no_fifo_space) {
3100
3101
3102
3103
3104
3105
3106
3107 gintmsk = dwc2_readl(hsotg, GINTMSK);
3108 gintmsk |= GINTSTS_NPTXFEMP;
3109 dwc2_writel(hsotg, gintmsk, GINTMSK);
3110 } else {
3111
3112
3113
3114
3115
3116
3117
3118 gintmsk = dwc2_readl(hsotg, GINTMSK);
3119 gintmsk &= ~GINTSTS_NPTXFEMP;
3120 dwc2_writel(hsotg, gintmsk, GINTMSK);
3121 }
3122 }
3123 }
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
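/*
 * dwc2_hcd_queue_transactions() - Process the periodic and/or
 * non-periodic schedules, as indicated by tr_type, and queue transfers on
 * their assigned host channels. If there are no active non-periodic QHs,
 * the non-periodic Tx FIFO empty interrupt is turned off.
 */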
3136 void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
3137 enum dwc2_transaction_type tr_type)
3138 {
3139 #ifdef DWC2_DEBUG_SOF
3140 dev_vdbg(hsotg->dev, "Queue Transactions\n");
3141 #endif
3142
3143 if (tr_type == DWC2_TRANSACTION_PERIODIC ||
3144 tr_type == DWC2_TRANSACTION_ALL)
3145 dwc2_process_periodic_channels(hsotg);
3146
3147
3148 if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
3149 tr_type == DWC2_TRANSACTION_ALL) {
3150 if (!list_empty(&hsotg->non_periodic_sched_active)) {
3151 dwc2_process_non_periodic_channels(hsotg);
3152 } else {
3153
3154
3155
3156
3157 u32 gintmsk = dwc2_readl(hsotg, GINTMSK);
3158
3159 gintmsk &= ~GINTSTS_NPTXFEMP;
3160 dwc2_writel(hsotg, gintmsk, GINTMSK);
3161 }
3162 }
3163 }
3164
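/*
 * Work function run when the connector ID status changes: waits for the
 * core to settle into the corresponding mode, then reinitializes it as
 * either a B-device (peripheral) or an A-host and (re)starts that role.
 */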
3165 static void dwc2_conn_id_status_change(struct work_struct *work)
3166 {
3167 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
3168 wf_otg);
3169 u32 count = 0;
3170 u32 gotgctl;
3171 unsigned long flags;
3172
3173 dev_dbg(hsotg->dev, "%s()\n", __func__);
3174
3175 gotgctl = dwc2_readl(hsotg, GOTGCTL);
3176 dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
3177 dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
3178 !!(gotgctl & GOTGCTL_CONID_B));
3179
3180
3181 if (gotgctl & GOTGCTL_CONID_B) {
3182 dwc2_vbus_supply_exit(hsotg);
3183
3184 dev_dbg(hsotg->dev, "connId B\n");
3185 if (hsotg->bus_suspended) {
3186 dev_info(hsotg->dev,
3187 "Do port resume before switching to device mode\n");
3188 dwc2_port_resume(hsotg);
3189 }
3190 while (!dwc2_is_device_mode(hsotg)) {
3191 dev_info(hsotg->dev,
3192 "Waiting for Peripheral Mode, Mode=%s\n",
3193 dwc2_is_host_mode(hsotg) ? "Host" :
3194 "Peripheral");
3195 msleep(20);
3196
3197
3198
3199
3200
3201 gotgctl = dwc2_readl(hsotg, GOTGCTL);
3202 if (!(gotgctl & GOTGCTL_CONID_B))
3203 goto host;
3204 if (++count > 250)
3205 break;
3206 }
3207 if (count > 250)
3208 dev_err(hsotg->dev,
3209 "Connection id status change timed out\n");
3210 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
3211 dwc2_core_init(hsotg, false);
3212 dwc2_enable_global_interrupts(hsotg);
3213 spin_lock_irqsave(&hsotg->lock, flags);
3214 dwc2_hsotg_core_init_disconnected(hsotg, false);
3215 spin_unlock_irqrestore(&hsotg->lock, flags);
3216
3217 dwc2_enable_acg(hsotg);
3218 dwc2_hsotg_core_connect(hsotg);
3219 } else {
3220 host:
3221
3222 dev_dbg(hsotg->dev, "connId A\n");
3223 while (!dwc2_is_host_mode(hsotg)) {
3224 dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
3225 dwc2_is_host_mode(hsotg) ?
3226 "Host" : "Peripheral");
3227 msleep(20);
3228 if (++count > 250)
3229 break;
3230 }
3231 if (count > 250)
3232 dev_err(hsotg->dev,
3233 "Connection id status change timed out\n");
3234
3235 spin_lock_irqsave(&hsotg->lock, flags);
3236 dwc2_hsotg_disconnect(hsotg);
3237 spin_unlock_irqrestore(&hsotg->lock, flags);
3238
3239 hsotg->op_state = OTG_STATE_A_HOST;
3240
3241 dwc2_core_init(hsotg, false);
3242 dwc2_enable_global_interrupts(hsotg);
3243 dwc2_hcd_start(hsotg);
3244 }
3245 }
3246
3247 static void dwc2_wakeup_detected(struct timer_list *t)
3248 {
3249 struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
3250 u32 hprt0;
3251
3252 dev_dbg(hsotg->dev, "%s()\n", __func__);
3253
3254
3255
3256
3257
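/*
 * The resume-signalling timer expired: stop driving resume on the
 * port, tell the HCD layer about the remote wakeup and mark the bus
 * as no longer suspended.
 */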
3258 hprt0 = dwc2_read_hprt0(hsotg);
3259 dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
3260 hprt0 &= ~HPRT0_RES;
3261 dwc2_writel(hsotg, hprt0, HPRT0);
3262 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
3263 dwc2_readl(hsotg, HPRT0));
3264
3265 dwc2_hcd_rem_wakeup(hsotg);
3266 hsotg->bus_suspended = false;
3267
3268
3269 hsotg->lx_state = DWC2_L0;
3270 }
3271
3272 static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
3273 {
3274 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
3275
3276 return hcd->self.b_hnp_enable;
3277 }
3278
3279
3280 static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
3281 {
3282 unsigned long flags;
3283 u32 hprt0;
3284 u32 pcgctl;
3285 u32 gotgctl;
3286
3287 dev_dbg(hsotg->dev, "%s()\n", __func__);
3288
3289 spin_lock_irqsave(&hsotg->lock, flags);
3290
3291 if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
3292 gotgctl = dwc2_readl(hsotg, GOTGCTL);
3293 gotgctl |= GOTGCTL_HSTSETHNPEN;
3294 dwc2_writel(hsotg, gotgctl, GOTGCTL);
3295 hsotg->op_state = OTG_STATE_A_SUSPEND;
3296 }
3297
3298 hprt0 = dwc2_read_hprt0(hsotg);
3299 hprt0 |= HPRT0_SUSP;
3300 dwc2_writel(hsotg, hprt0, HPRT0);
3301
3302 hsotg->bus_suspended = true;
3303
3304
3305
3306
3307
3308 if (!hsotg->params.power_down) {
3309
3310 pcgctl = dwc2_readl(hsotg, PCGCTL);
3311 pcgctl |= PCGCTL_STOPPCLK;
3312 dwc2_writel(hsotg, pcgctl, PCGCTL);
3313 udelay(10);
3314 }
3315
3316
3317 if (dwc2_host_is_b_hnp_enabled(hsotg)) {
3318 pcgctl = dwc2_readl(hsotg, PCGCTL);
3319 pcgctl &= ~PCGCTL_STOPPCLK;
3320 dwc2_writel(hsotg, pcgctl, PCGCTL);
3321
3322 spin_unlock_irqrestore(&hsotg->lock, flags);
3323
3324 msleep(200);
3325 } else {
3326 spin_unlock_irqrestore(&hsotg->lock, flags);
3327 }
3328 }
3329
3330
3331 static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
3332 {
3333 unsigned long flags;
3334 u32 hprt0;
3335 u32 pcgctl;
3336
3337 spin_lock_irqsave(&hsotg->lock, flags);
3338
3339
3340
3341
3342
3343 if (!hsotg->params.power_down) {
3344 pcgctl = dwc2_readl(hsotg, PCGCTL);
3345 pcgctl &= ~PCGCTL_STOPPCLK;
3346 dwc2_writel(hsotg, pcgctl, PCGCTL);
3347 spin_unlock_irqrestore(&hsotg->lock, flags);
3348 msleep(20);
3349 spin_lock_irqsave(&hsotg->lock, flags);
3350 }
3351
3352 hprt0 = dwc2_read_hprt0(hsotg);
3353 hprt0 |= HPRT0_RES;
3354 hprt0 &= ~HPRT0_SUSP;
3355 dwc2_writel(hsotg, hprt0, HPRT0);
3356 spin_unlock_irqrestore(&hsotg->lock, flags);
3357
3358 msleep(USB_RESUME_TIMEOUT);
3359
3360 spin_lock_irqsave(&hsotg->lock, flags);
3361 hprt0 = dwc2_read_hprt0(hsotg);
3362 hprt0 &= ~(HPRT0_RES | HPRT0_SUSP);
3363 dwc2_writel(hsotg, hprt0, HPRT0);
3364 hsotg->bus_suspended = false;
3365 spin_unlock_irqrestore(&hsotg->lock, flags);
3366 }
3367
3368
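/*
 * dwc2_hcd_hub_control() - Handle hub class control requests for the
 * emulated single-port root hub by translating them into HPRT0/PCGCTL
 * register operations and driver flag updates.
 */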
3369 static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
3370 u16 wvalue, u16 windex, char *buf, u16 wlength)
3371 {
3372 struct usb_hub_descriptor *hub_desc;
3373 int retval = 0;
3374 u32 hprt0;
3375 u32 port_status;
3376 u32 speed;
3377 u32 pcgctl;
3378 u32 pwr;
3379
3380 switch (typereq) {
3381 case ClearHubFeature:
3382 dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);
3383
3384 switch (wvalue) {
3385 case C_HUB_LOCAL_POWER:
3386 case C_HUB_OVER_CURRENT:
3387
3388 break;
3389
3390 default:
3391 retval = -EINVAL;
3392 dev_err(hsotg->dev,
3393 "ClearHubFeature request %1xh unknown\n",
3394 wvalue);
3395 }
3396 break;
3397
3398 case ClearPortFeature:
3399 if (wvalue != USB_PORT_FEAT_L1)
3400 if (!windex || windex > 1)
3401 goto error;
3402 switch (wvalue) {
3403 case USB_PORT_FEAT_ENABLE:
3404 dev_dbg(hsotg->dev,
3405 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
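/*
 * HPRT0_ENA is a write-1-to-clear bit, so setting it here disables
 * the port.
 */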
3406 hprt0 = dwc2_read_hprt0(hsotg);
3407 hprt0 |= HPRT0_ENA;
3408 dwc2_writel(hsotg, hprt0, HPRT0);
3409 break;
3410
3411 case USB_PORT_FEAT_SUSPEND:
3412 dev_dbg(hsotg->dev,
3413 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
3414
3415 if (hsotg->bus_suspended) {
3416 if (hsotg->hibernated)
3417 dwc2_exit_hibernation(hsotg, 0, 0, 1);
3418 else
3419 dwc2_port_resume(hsotg);
3420 }
3421 break;
3422
3423 case USB_PORT_FEAT_POWER:
3424 dev_dbg(hsotg->dev,
3425 "ClearPortFeature USB_PORT_FEAT_POWER\n");
3426 hprt0 = dwc2_read_hprt0(hsotg);
3427 pwr = hprt0 & HPRT0_PWR;
3428 hprt0 &= ~HPRT0_PWR;
3429 dwc2_writel(hsotg, hprt0, HPRT0);
3430 if (pwr)
3431 dwc2_vbus_supply_exit(hsotg);
3432 break;
3433
3434 case USB_PORT_FEAT_INDICATOR:
3435 dev_dbg(hsotg->dev,
3436 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
3437
3438 break;
3439
3440 case USB_PORT_FEAT_C_CONNECTION:
3441
3442
3443
3444 dev_dbg(hsotg->dev,
3445 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
3446 hsotg->flags.b.port_connect_status_change = 0;
3447 break;
3448
3449 case USB_PORT_FEAT_C_RESET:
3450
3451 dev_dbg(hsotg->dev,
3452 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
3453 hsotg->flags.b.port_reset_change = 0;
3454 break;
3455
3456 case USB_PORT_FEAT_C_ENABLE:
3457
3458
3459
3460
3461 dev_dbg(hsotg->dev,
3462 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
3463 hsotg->flags.b.port_enable_change = 0;
3464 break;
3465
3466 case USB_PORT_FEAT_C_SUSPEND:
3467
3468
3469
3470
3471
3472 dev_dbg(hsotg->dev,
3473 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
3474 hsotg->flags.b.port_suspend_change = 0;
3475 break;
3476
3477 case USB_PORT_FEAT_C_PORT_L1:
3478 dev_dbg(hsotg->dev,
3479 "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
3480 hsotg->flags.b.port_l1_change = 0;
3481 break;
3482
3483 case USB_PORT_FEAT_C_OVER_CURRENT:
3484 dev_dbg(hsotg->dev,
3485 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
3486 hsotg->flags.b.port_over_current_change = 0;
3487 break;
3488
3489 default:
3490 retval = -EINVAL;
3491 dev_err(hsotg->dev,
3492 "ClearPortFeature request %1xh unknown or unsupported\n",
3493 wvalue);
3494 }
3495 break;
3496
3497 case GetHubDescriptor:
3498 dev_dbg(hsotg->dev, "GetHubDescriptor\n");
3499 hub_desc = (struct usb_hub_descriptor *)buf;
3500 hub_desc->bDescLength = 9;
3501 hub_desc->bDescriptorType = USB_DT_HUB;
3502 hub_desc->bNbrPorts = 1;
3503 hub_desc->wHubCharacteristics =
3504 cpu_to_le16(HUB_CHAR_COMMON_LPSM |
3505 HUB_CHAR_INDV_PORT_OCPM);
3506 hub_desc->bPwrOn2PwrGood = 1;
3507 hub_desc->bHubContrCurrent = 0;
3508 hub_desc->u.hs.DeviceRemovable[0] = 0;
3509 hub_desc->u.hs.DeviceRemovable[1] = 0xff;
3510 break;
3511
3512 case GetHubStatus:
3513 dev_dbg(hsotg->dev, "GetHubStatus\n");
3514 memset(buf, 0, 4);
3515 break;
3516
3517 case GetPortStatus:
3518 dev_vdbg(hsotg->dev,
3519 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
3520 hsotg->flags.d32);
3521 if (!windex || windex > 1)
3522 goto error;
3523
3524 port_status = 0;
3525 if (hsotg->flags.b.port_connect_status_change)
3526 port_status |= USB_PORT_STAT_C_CONNECTION << 16;
3527 if (hsotg->flags.b.port_enable_change)
3528 port_status |= USB_PORT_STAT_C_ENABLE << 16;
3529 if (hsotg->flags.b.port_suspend_change)
3530 port_status |= USB_PORT_STAT_C_SUSPEND << 16;
3531 if (hsotg->flags.b.port_l1_change)
3532 port_status |= USB_PORT_STAT_C_L1 << 16;
3533 if (hsotg->flags.b.port_reset_change)
3534 port_status |= USB_PORT_STAT_C_RESET << 16;
3535 if (hsotg->flags.b.port_over_current_change) {
3536 dev_warn(hsotg->dev, "Overcurrent change detected\n");
3537 port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
3538 }
3539
3540 if (!hsotg->flags.b.port_connect_status) {
3541
3542
3543
3544
3545
3546
3547
3548 *(__le32 *)buf = cpu_to_le32(port_status);
3549 break;
3550 }
3551
3552 hprt0 = dwc2_readl(hsotg, HPRT0);
3553 dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
3554
3555 if (hprt0 & HPRT0_CONNSTS)
3556 port_status |= USB_PORT_STAT_CONNECTION;
3557 if (hprt0 & HPRT0_ENA)
3558 port_status |= USB_PORT_STAT_ENABLE;
3559 if (hprt0 & HPRT0_SUSP)
3560 port_status |= USB_PORT_STAT_SUSPEND;
3561 if (hprt0 & HPRT0_OVRCURRACT)
3562 port_status |= USB_PORT_STAT_OVERCURRENT;
3563 if (hprt0 & HPRT0_RST)
3564 port_status |= USB_PORT_STAT_RESET;
3565 if (hprt0 & HPRT0_PWR)
3566 port_status |= USB_PORT_STAT_POWER;
3567
3568 speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
3569 if (speed == HPRT0_SPD_HIGH_SPEED)
3570 port_status |= USB_PORT_STAT_HIGH_SPEED;
3571 else if (speed == HPRT0_SPD_LOW_SPEED)
3572 port_status |= USB_PORT_STAT_LOW_SPEED;
3573
3574 if (hprt0 & HPRT0_TSTCTL_MASK)
3575 port_status |= USB_PORT_STAT_TEST;
3576
3577
3578 if (hsotg->params.dma_desc_fs_enable) {
3579
3580
3581
3582
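/*
 * A new connection that is neither high speed nor low speed is a
 * full-speed device: switch the core into descriptor DMA mode for it.
 */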
3583 if (hsotg->new_connection &&
3584 ((port_status &
3585 (USB_PORT_STAT_CONNECTION |
3586 USB_PORT_STAT_HIGH_SPEED |
3587 USB_PORT_STAT_LOW_SPEED)) ==
3588 USB_PORT_STAT_CONNECTION)) {
3589 u32 hcfg;
3590
3591 dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
3592 hsotg->params.dma_desc_enable = true;
3593 hcfg = dwc2_readl(hsotg, HCFG);
3594 hcfg |= HCFG_DESCDMA;
3595 dwc2_writel(hsotg, hcfg, HCFG);
3596 hsotg->new_connection = false;
3597 }
3598 }
3599
3600 dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
3601 *(__le32 *)buf = cpu_to_le32(port_status);
3602 break;
3603
3604 case SetHubFeature:
3605 dev_dbg(hsotg->dev, "SetHubFeature\n");
3606
3607 break;
3608
3609 case SetPortFeature:
3610 dev_dbg(hsotg->dev, "SetPortFeature\n");
3611 if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
3612 goto error;
3613
3614 if (!hsotg->flags.b.port_connect_status) {
3615
3616
3617
3618
3619
3620
3621
3622 break;
3623 }
3624
3625 switch (wvalue) {
3626 case USB_PORT_FEAT_SUSPEND:
3627 dev_dbg(hsotg->dev,
3628 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
3629 if (windex != hsotg->otg_port)
3630 goto error;
3631 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_HIBERNATION)
3632 dwc2_enter_hibernation(hsotg, 1);
3633 else
3634 dwc2_port_suspend(hsotg, windex);
3635 break;
3636
3637 case USB_PORT_FEAT_POWER:
3638 dev_dbg(hsotg->dev,
3639 "SetPortFeature - USB_PORT_FEAT_POWER\n");
3640 hprt0 = dwc2_read_hprt0(hsotg);
3641 pwr = hprt0 & HPRT0_PWR;
3642 hprt0 |= HPRT0_PWR;
3643 dwc2_writel(hsotg, hprt0, HPRT0);
3644 if (!pwr)
3645 dwc2_vbus_supply_init(hsotg);
3646 break;
3647
3648 case USB_PORT_FEAT_RESET:
3649 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_HIBERNATION &&
3650 hsotg->hibernated)
3651 dwc2_exit_hibernation(hsotg, 0, 1, 1);
3652 hprt0 = dwc2_read_hprt0(hsotg);
3653 dev_dbg(hsotg->dev,
3654 "SetPortFeature - USB_PORT_FEAT_RESET\n");
3655 pcgctl = dwc2_readl(hsotg, PCGCTL);
3656 pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
3657 dwc2_writel(hsotg, pcgctl, PCGCTL);
3658
3659 dwc2_writel(hsotg, 0, PCGCTL);
3660
3661 hprt0 = dwc2_read_hprt0(hsotg);
3662 pwr = hprt0 & HPRT0_PWR;
3663
3664 hprt0 &= ~HPRT0_SUSP;
3665
3666
3667
3668
3669
3670
3671 if (!dwc2_hcd_is_b_host(hsotg)) {
3672 hprt0 |= HPRT0_PWR | HPRT0_RST;
3673 dev_dbg(hsotg->dev,
3674 "In host mode, hprt0=%08x\n", hprt0);
3675 dwc2_writel(hsotg, hprt0, HPRT0);
3676 if (!pwr)
3677 dwc2_vbus_supply_init(hsotg);
3678 }
3679
3680
3681 msleep(50);
3682 hprt0 &= ~HPRT0_RST;
3683 dwc2_writel(hsotg, hprt0, HPRT0);
3684 hsotg->lx_state = DWC2_L0;
3685 break;
3686
3687 case USB_PORT_FEAT_INDICATOR:
3688 dev_dbg(hsotg->dev,
3689 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
3690
3691 break;
3692
3693 case USB_PORT_FEAT_TEST:
3694 hprt0 = dwc2_read_hprt0(hsotg);
3695 dev_dbg(hsotg->dev,
3696 "SetPortFeature - USB_PORT_FEAT_TEST\n");
3697 hprt0 &= ~HPRT0_TSTCTL_MASK;
3698 hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
3699 dwc2_writel(hsotg, hprt0, HPRT0);
3700 break;
3701
3702 default:
3703 retval = -EINVAL;
3704 dev_err(hsotg->dev,
3705 "SetPortFeature %1xh unknown or unsupported\n",
3706 wvalue);
3707 break;
3708 }
3709 break;
3710
3711 default:
3712 error:
3713 retval = -EINVAL;
3714 dev_dbg(hsotg->dev,
3715 "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
3716 typereq, windex, wvalue);
3717 break;
3718 }
3719
3720 return retval;
3721 }
3722
3723 static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
3724 {
3725 int retval;
3726
3727 if (port != 1)
3728 return -EINVAL;
3729
3730 retval = (hsotg->flags.b.port_connect_status_change ||
3731 hsotg->flags.b.port_reset_change ||
3732 hsotg->flags.b.port_enable_change ||
3733 hsotg->flags.b.port_suspend_change ||
3734 hsotg->flags.b.port_over_current_change);
3735
3736 if (retval) {
3737 dev_dbg(hsotg->dev,
3738 "DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
3739 dev_dbg(hsotg->dev, " port_connect_status_change: %d\n",
3740 hsotg->flags.b.port_connect_status_change);
3741 dev_dbg(hsotg->dev, " port_reset_change: %d\n",
3742 hsotg->flags.b.port_reset_change);
3743 dev_dbg(hsotg->dev, " port_enable_change: %d\n",
3744 hsotg->flags.b.port_enable_change);
3745 dev_dbg(hsotg->dev, " port_suspend_change: %d\n",
3746 hsotg->flags.b.port_suspend_change);
3747 dev_dbg(hsotg->dev, " port_over_current_change: %d\n",
3748 hsotg->flags.b.port_over_current_change);
3749 }
3750
3751 return retval;
3752 }
3753
3754 int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
3755 {
3756 u32 hfnum = dwc2_readl(hsotg, HFNUM);
3757
3758 #ifdef DWC2_DEBUG_SOF
3759 dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
3760 (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
3761 #endif
3762 return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
3763 }
3764
3765 int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
3766 {
3767 u32 hprt = dwc2_readl(hsotg, HPRT0);
3768 u32 hfir = dwc2_readl(hsotg, HFIR);
3769 u32 hfnum = dwc2_readl(hsotg, HFNUM);
3770 unsigned int us_per_frame;
3771 unsigned int frame_number;
3772 unsigned int remaining;
3773 unsigned int interval;
3774 unsigned int phy_clks;
3775
3776
3777 us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;
3778
3779
3780 frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
3781 remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
3782 interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;
3783
3784
3785
3786
3787
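/*
 * Convert the requested delay into PHY clocks past the start of the
 * current (micro)frame, then into whole frames. Illustrative example
 * (assumes a typical high-speed setup where HFIR's interval is 7500
 * clocks per 125 us microframe): with 2500 clocks remaining and
 * us = 100, phy_clks = (7500 - 2500) + DIV_ROUND_UP(7500 * 100, 125)
 * = 5000 + 6000 = 11000, so the current frame number is advanced by
 * 11000 / 7500 = 1.
 */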
3788 phy_clks = (interval - remaining) +
3789 DIV_ROUND_UP(interval * us, us_per_frame);
3790
3791 return dwc2_frame_num_inc(frame_number, phy_clks / interval);
3792 }
3793
3794 int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
3795 {
3796 return hsotg->op_state == OTG_STATE_B_HOST;
3797 }
3798
3799 static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
3800 int iso_desc_count,
3801 gfp_t mem_flags)
3802 {
3803 struct dwc2_hcd_urb *urb;
3804
3805 urb = kzalloc(struct_size(urb, iso_descs, iso_desc_count), mem_flags);
3806 if (urb)
3807 urb->packet_count = iso_desc_count;
3808 return urb;
3809 }
3810
3811 static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
3812 struct dwc2_hcd_urb *urb, u8 dev_addr,
3813 u8 ep_num, u8 ep_type, u8 ep_dir,
3814 u16 maxp, u16 maxp_mult)
3815 {
3816 if (dbg_perio() ||
3817 ep_type == USB_ENDPOINT_XFER_BULK ||
3818 ep_type == USB_ENDPOINT_XFER_CONTROL)
3819 dev_vdbg(hsotg->dev,
3820 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
3821 dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult);
3822 urb->pipe_info.dev_addr = dev_addr;
3823 urb->pipe_info.ep_num = ep_num;
3824 urb->pipe_info.pipe_type = ep_type;
3825 urb->pipe_info.pipe_dir = ep_dir;
3826 urb->pipe_info.maxp = maxp;
3827 urb->pipe_info.maxp_mult = maxp_mult;
3828 }
3829
3830
3831
3832
3833
3834 void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
3835 {
3836 #ifdef DEBUG
3837 struct dwc2_host_chan *chan;
3838 struct dwc2_hcd_urb *urb;
3839 struct dwc2_qtd *qtd;
3840 int num_channels;
3841 u32 np_tx_status;
3842 u32 p_tx_status;
3843 int i;
3844
3845 num_channels = hsotg->params.host_channels;
3846 dev_dbg(hsotg->dev, "\n");
3847 dev_dbg(hsotg->dev,
3848 "************************************************************\n");
3849 dev_dbg(hsotg->dev, "HCD State:\n");
3850 dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
3851
3852 for (i = 0; i < num_channels; i++) {
3853 chan = hsotg->hc_ptr_array[i];
3854 dev_dbg(hsotg->dev, " Channel %d:\n", i);
3855 dev_dbg(hsotg->dev,
3856 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
3857 chan->dev_addr, chan->ep_num, chan->ep_is_in);
3858 dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
3859 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
3860 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
3861 dev_dbg(hsotg->dev, " data_pid_start: %d\n",
3862 chan->data_pid_start);
3863 dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
3864 dev_dbg(hsotg->dev, " xfer_started: %d\n",
3865 chan->xfer_started);
3866 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
3867 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
3868 (unsigned long)chan->xfer_dma);
3869 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
3870 dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
3871 dev_dbg(hsotg->dev, " halt_on_queue: %d\n",
3872 chan->halt_on_queue);
3873 dev_dbg(hsotg->dev, " halt_pending: %d\n",
3874 chan->halt_pending);
3875 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
3876 dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
3877 dev_dbg(hsotg->dev, " complete_split: %d\n",
3878 chan->complete_split);
3879 dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
3880 dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
3881 dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
3882 dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
3883 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
3884
3885 if (chan->xfer_started) {
3886 u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
3887
3888 hfnum = dwc2_readl(hsotg, HFNUM);
3889 hcchar = dwc2_readl(hsotg, HCCHAR(i));
3890 hctsiz = dwc2_readl(hsotg, HCTSIZ(i));
3891 hcint = dwc2_readl(hsotg, HCINT(i));
3892 hcintmsk = dwc2_readl(hsotg, HCINTMSK(i));
3893 dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
3894 dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
3895 dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
3896 dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
3897 dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
3898 }
3899
3900 if (!(chan->xfer_started && chan->qh))
3901 continue;
3902
3903 list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
3904 if (!qtd->in_process)
3905 break;
3906 urb = qtd->urb;
3907 dev_dbg(hsotg->dev, " URB Info:\n");
3908 dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n",
3909 qtd, urb);
3910 if (urb) {
3911 dev_dbg(hsotg->dev,
3912 " Dev: %d, EP: %d %s\n",
3913 dwc2_hcd_get_dev_addr(&urb->pipe_info),
3914 dwc2_hcd_get_ep_num(&urb->pipe_info),
3915 dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
3916 "IN" : "OUT");
3917 dev_dbg(hsotg->dev,
3918 " Max packet size: %d (%d mult)\n",
3919 dwc2_hcd_get_maxp(&urb->pipe_info),
3920 dwc2_hcd_get_maxp_mult(&urb->pipe_info));
3921 dev_dbg(hsotg->dev,
3922 " transfer_buffer: %p\n",
3923 urb->buf);
3924 dev_dbg(hsotg->dev,
3925 " transfer_dma: %08lx\n",
3926 (unsigned long)urb->dma);
3927 dev_dbg(hsotg->dev,
3928 " transfer_buffer_length: %d\n",
3929 urb->length);
3930 dev_dbg(hsotg->dev, " actual_length: %d\n",
3931 urb->actual_length);
3932 }
3933 }
3934 }
3935
3936 dev_dbg(hsotg->dev, " non_periodic_channels: %d\n",
3937 hsotg->non_periodic_channels);
3938 dev_dbg(hsotg->dev, " periodic_channels: %d\n",
3939 hsotg->periodic_channels);
3940 dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
3941 np_tx_status = dwc2_readl(hsotg, GNPTXSTS);
3942 dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
3943 (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
3944 dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
3945 (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
3946 p_tx_status = dwc2_readl(hsotg, HPTXSTS);
3947 dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
3948 (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
3949 dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
3950 (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
3951 dwc2_dump_global_registers(hsotg);
3952 dwc2_dump_host_registers(hsotg);
3953 dev_dbg(hsotg->dev,
3954 "************************************************************\n");
3955 dev_dbg(hsotg->dev, "\n");
3956 #endif
3957 }
3958
3959 struct wrapper_priv_data {
3960 struct dwc2_hsotg *hsotg;
3961 };
3962
3963
3964 static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
3965 {
3966 struct wrapper_priv_data *p;
3967
3968 p = (struct wrapper_priv_data *)&hcd->hcd_priv;
3969 return p->hsotg;
3970 }
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
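/*
 * dwc2_host_get_tt_info() - Return the dwc2_tt bookkeeping structure for
 * the transaction translator (TT) in front of the URB's device, creating
 * it on first use and taking a reference. The structure is cached in
 * usb_tt->hcpriv and sized for one low-/full-speed scheduling bitmap per
 * TT (or per hub port for a multi-TT hub). *ttport is set to the hub port
 * the device sits behind. Returns NULL if there is no TT or on allocation
 * failure.
 *
 * Typical pairing (sketch only):
 *	dwc_tt = dwc2_host_get_tt_info(hsotg, urb, mem_flags, &ttport);
 *	...
 *	dwc2_host_put_tt_info(hsotg, dwc_tt);
 */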
3992 struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
3993 gfp_t mem_flags, int *ttport)
3994 {
3995 struct urb *urb = context;
3996 struct dwc2_tt *dwc_tt = NULL;
3997
3998 if (urb->dev->tt) {
3999 *ttport = urb->dev->ttport;
4000
4001 dwc_tt = urb->dev->tt->hcpriv;
4002 if (!dwc_tt) {
4003 size_t bitmap_size;
4004
4005
4006
4007
4008
4009 bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
4010 sizeof(dwc_tt->periodic_bitmaps[0]);
4011 if (urb->dev->tt->multi)
4012 bitmap_size *= urb->dev->tt->hub->maxchild;
4013
4014 dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size,
4015 mem_flags);
4016 if (!dwc_tt)
4017 return NULL;
4018
4019 dwc_tt->usb_tt = urb->dev->tt;
4020 dwc_tt->usb_tt->hcpriv = dwc_tt;
4021 }
4022
4023 dwc_tt->refcount++;
4024 }
4025
4026 return dwc_tt;
4027 }
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
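/*
 * dwc2_host_put_tt_info() - Drop a reference taken by
 * dwc2_host_get_tt_info(); frees the structure (and clears
 * usb_tt->hcpriv) when the last reference goes away. NULL is ignored.
 */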
4040 void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
4041 {
4042
4043 if (!dwc_tt)
4044 return;
4045
4046 WARN_ON(dwc_tt->refcount < 1);
4047
4048 dwc_tt->refcount--;
4049 if (!dwc_tt->refcount) {
4050 dwc_tt->usb_tt->hcpriv = NULL;
4051 kfree(dwc_tt);
4052 }
4053 }
4054
4055 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
4056 {
4057 struct urb *urb = context;
4058
4059 return urb->dev->speed;
4060 }
4061
4062 static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
4063 struct urb *urb)
4064 {
4065 struct usb_bus *bus = hcd_to_bus(hcd);
4066
4067 if (urb->interval)
4068 bus->bandwidth_allocated += bw / urb->interval;
4069 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
4070 bus->bandwidth_isoc_reqs++;
4071 else
4072 bus->bandwidth_int_reqs++;
4073 }
4074
4075 static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
4076 struct urb *urb)
4077 {
4078 struct usb_bus *bus = hcd_to_bus(hcd);
4079
4080 if (urb->interval)
4081 bus->bandwidth_allocated -= bw / urb->interval;
4082 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
4083 bus->bandwidth_isoc_reqs--;
4084 else
4085 bus->bandwidth_int_reqs--;
4086 }
4087
4088
4089
4090
4091
4092
4093
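/*
 * dwc2_host_complete() - Give a finished transfer back to the USB core:
 * copy the actual length, per-packet isochronous results and final
 * status into the Linux URB, release any periodic bus bandwidth, unlink
 * the URB from its endpoint and call usb_hcd_giveback_urb().
 */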
4094 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
4095 int status)
4096 {
4097 struct urb *urb;
4098 int i;
4099
4100 if (!qtd) {
4101 dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
4102 return;
4103 }
4104
4105 if (!qtd->urb) {
4106 dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
4107 return;
4108 }
4109
4110 urb = qtd->urb->priv;
4111 if (!urb) {
4112 dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
4113 return;
4114 }
4115
4116 urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb);
4117
4118 if (dbg_urb(urb))
4119 dev_vdbg(hsotg->dev,
4120 "%s: urb %p device %d ep %d-%s status %d actual %d\n",
4121 __func__, urb, usb_pipedevice(urb->pipe),
4122 usb_pipeendpoint(urb->pipe),
4123 usb_pipein(urb->pipe) ? "IN" : "OUT", status,
4124 urb->actual_length);
4125
4126 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
4127 urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
4128 for (i = 0; i < urb->number_of_packets; ++i) {
4129 urb->iso_frame_desc[i].actual_length =
4130 dwc2_hcd_urb_get_iso_desc_actual_length(
4131 qtd->urb, i);
4132 urb->iso_frame_desc[i].status =
4133 dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i);
4134 }
4135 }
4136
4137 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) {
4138 for (i = 0; i < urb->number_of_packets; i++)
4139 dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
4140 i, urb->iso_frame_desc[i].status);
4141 }
4142
4143 urb->status = status;
4144 if (!status) {
4145 if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
4146 urb->actual_length < urb->transfer_buffer_length)
4147 urb->status = -EREMOTEIO;
4148 }
4149
4150 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
4151 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
4152 struct usb_host_endpoint *ep = urb->ep;
4153
4154 if (ep)
4155 dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg),
4156 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
4157 urb);
4158 }
4159
4160 usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb);
4161 urb->hcpriv = NULL;
4162 kfree(qtd->urb);
4163 qtd->urb = NULL;
4164
4165 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
4166 }
4167
4168
4169
4170
4171 static void dwc2_hcd_start_func(struct work_struct *work)
4172 {
4173 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
4174 start_work.work);
4175
4176 dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
4177 dwc2_host_start(hsotg);
4178 }
4179
4180
4181
4182
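/*
 * Work function that finishes a root-port reset by clearing HPRT0_RST
 * after the reset hold time and flagging the change for the hub code.
 */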
4183 static void dwc2_hcd_reset_func(struct work_struct *work)
4184 {
4185 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
4186 reset_work.work);
4187 unsigned long flags;
4188 u32 hprt0;
4189
4190 dev_dbg(hsotg->dev, "USB RESET function called\n");
4191
4192 spin_lock_irqsave(&hsotg->lock, flags);
4193
4194 hprt0 = dwc2_read_hprt0(hsotg);
4195 hprt0 &= ~HPRT0_RST;
4196 dwc2_writel(hsotg, hprt0, HPRT0);
4197 hsotg->flags.b.port_reset_change = 1;
4198
4199 spin_unlock_irqrestore(&hsotg->lock, flags);
4200 }
4201
4202 static void dwc2_hcd_phy_reset_func(struct work_struct *work)
4203 {
4204 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
4205 phy_reset_work);
4206 int ret;
4207
4208 ret = phy_reset(hsotg->phy);
4209 if (ret)
4210 dev_warn(hsotg->dev, "PHY reset failed\n");
4211 }
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
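/*
 * hc_driver .start callback: marks the HCD as running, reinitializes the
 * host hardware when in host mode, powers VBUS if the port power bit is
 * already set, and resumes the root hub if one is registered.
 */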
4224 static int _dwc2_hcd_start(struct usb_hcd *hcd)
4225 {
4226 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4227 struct usb_bus *bus = hcd_to_bus(hcd);
4228 unsigned long flags;
4229 u32 hprt0;
4230 int ret;
4231
4232 dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
4233
4234 spin_lock_irqsave(&hsotg->lock, flags);
4235 hsotg->lx_state = DWC2_L0;
4236 hcd->state = HC_STATE_RUNNING;
4237 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4238
4239 if (dwc2_is_device_mode(hsotg)) {
4240 spin_unlock_irqrestore(&hsotg->lock, flags);
4241 return 0;
4242 }
4243
4244 dwc2_hcd_reinit(hsotg);
4245
4246 hprt0 = dwc2_read_hprt0(hsotg);
4247
4248 if (hprt0 & HPRT0_PWR) {
4249
4250 spin_unlock_irqrestore(&hsotg->lock, flags);
4251 ret = dwc2_vbus_supply_init(hsotg);
4252 if (ret)
4253 return ret;
4254 spin_lock_irqsave(&hsotg->lock, flags);
4255 }
4256
4257
4258 if (bus->root_hub) {
4259 dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
4260
4261 usb_hcd_resume_root_hub(hcd);
4262 }
4263
4264 spin_unlock_irqrestore(&hsotg->lock, flags);
4265
4266 return 0;
4267 }
4268
4269
4270
4271
4272
4273 static void _dwc2_hcd_stop(struct usb_hcd *hcd)
4274 {
4275 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4276 unsigned long flags;
4277 u32 hprt0;
4278
4279
4280 dwc2_disable_host_interrupts(hsotg);
4281
4282
4283 synchronize_irq(hcd->irq);
4284
4285 spin_lock_irqsave(&hsotg->lock, flags);
4286 hprt0 = dwc2_read_hprt0(hsotg);
4287
4288 dwc2_hcd_disconnect(hsotg, true);
4289 dwc2_hcd_stop(hsotg);
4290 hsotg->lx_state = DWC2_L3;
4291 hcd->state = HC_STATE_HALT;
4292 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4293 spin_unlock_irqrestore(&hsotg->lock, flags);
4294
4295
4296 if (hprt0 & HPRT0_PWR)
4297 dwc2_vbus_supply_exit(hsotg);
4298
4299 usleep_range(1000, 3000);
4300 }
4301
4302 static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4303 {
4304 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4305 unsigned long flags;
4306 int ret = 0;
4307 u32 hprt0;
4308 u32 pcgctl;
4309
4310 spin_lock_irqsave(&hsotg->lock, flags);
4311
4312 if (dwc2_is_device_mode(hsotg))
4313 goto unlock;
4314
4315 if (hsotg->lx_state != DWC2_L0)
4316 goto unlock;
4317
4318 if (!HCD_HW_ACCESSIBLE(hcd))
4319 goto unlock;
4320
4321 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4322 goto unlock;
4323
4324 if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
4325 goto skip_power_saving;
4326
4327
4328
4329
4330
4331 if (!hsotg->bus_suspended) {
4332 hprt0 = dwc2_read_hprt0(hsotg);
4333 if (hprt0 & HPRT0_CONNSTS) {
4334 hprt0 |= HPRT0_SUSP;
4335 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL)
4336 hprt0 &= ~HPRT0_PWR;
4337 dwc2_writel(hsotg, hprt0, HPRT0);
4338 }
4339 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
4340 spin_unlock_irqrestore(&hsotg->lock, flags);
4341 dwc2_vbus_supply_exit(hsotg);
4342 spin_lock_irqsave(&hsotg->lock, flags);
4343 } else {
4344 pcgctl = dwc2_readl(hsotg, PCGCTL);
4345 pcgctl |= PCGCTL_STOPPCLK;
4346 dwc2_writel(hsotg, pcgctl, PCGCTL);
4347 }
4348 }
4349
4350 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
4351
4352 ret = dwc2_enter_partial_power_down(hsotg);
4353 if (ret) {
4354 if (ret != -ENOTSUPP)
4355 dev_err(hsotg->dev,
4356 "enter partial_power_down failed\n");
4357 goto skip_power_saving;
4358 }
4359
4360
4361 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4362 }
4363
4364
4365 if (!IS_ERR_OR_NULL(hsotg->uphy)) {
4366 spin_unlock_irqrestore(&hsotg->lock, flags);
4367 usb_phy_set_suspend(hsotg->uphy, true);
4368 spin_lock_irqsave(&hsotg->lock, flags);
4369 }
4370
4371 skip_power_saving:
4372 hsotg->lx_state = DWC2_L2;
4373 unlock:
4374 spin_unlock_irqrestore(&hsotg->lock, flags);
4375
4376 return ret;
4377 }
4378
4379 static int _dwc2_hcd_resume(struct usb_hcd *hcd)
4380 {
4381 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4382 unsigned long flags;
4383 u32 pcgctl;
4384 int ret = 0;
4385
4386 spin_lock_irqsave(&hsotg->lock, flags);
4387
4388 if (dwc2_is_device_mode(hsotg))
4389 goto unlock;
4390
4391 if (hsotg->lx_state != DWC2_L2)
4392 goto unlock;
4393
4394 if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL) {
4395 hsotg->lx_state = DWC2_L0;
4396 goto unlock;
4397 }
4398
4399
4400
4401
4402
4403
4404 if (!IS_ERR_OR_NULL(hsotg->uphy)) {
4405 spin_unlock_irqrestore(&hsotg->lock, flags);
4406 usb_phy_set_suspend(hsotg->uphy, false);
4407 spin_lock_irqsave(&hsotg->lock, flags);
4408 }
4409
4410 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
4411
4412
4413
4414
4415 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4416
4417
4418
4419 ret = dwc2_exit_partial_power_down(hsotg, true);
4420 if (ret && (ret != -ENOTSUPP))
4421 dev_err(hsotg->dev, "exit partial_power_down failed\n");
4422 } else {
4423 pcgctl = dwc2_readl(hsotg, PCGCTL);
4424 pcgctl &= ~PCGCTL_STOPPCLK;
4425 dwc2_writel(hsotg, pcgctl, PCGCTL);
4426 }
4427
4428 hsotg->lx_state = DWC2_L0;
4429
4430 spin_unlock_irqrestore(&hsotg->lock, flags);
4431
4432 if (hsotg->bus_suspended) {
4433 spin_lock_irqsave(&hsotg->lock, flags);
4434 hsotg->flags.b.port_suspend_change = 1;
4435 spin_unlock_irqrestore(&hsotg->lock, flags);
4436 dwc2_port_resume(hsotg);
4437 } else {
4438 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_PARTIAL) {
4439 dwc2_vbus_supply_init(hsotg);
4440
4441
4442 usleep_range(3000, 5000);
4443 }
4444
4445
4446
4447
4448
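/*
 * Clear any pending port connect-detect and enable-change status
 * (these bits are write-1-to-clear) while keeping port power on.
 */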
4449 dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET |
4450 HPRT0_ENACHG, HPRT0);
4451
4452 usleep_range(5000, 7000);
4453 }
4454
4455 return ret;
4456 unlock:
4457 spin_unlock_irqrestore(&hsotg->lock, flags);
4458
4459 return ret;
4460 }
4461
4462
4463 static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
4464 {
4465 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4466
4467 return dwc2_hcd_get_frame_number(hsotg);
4468 }
4469
4470 static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
4471 char *fn_name)
4472 {
4473 #ifdef VERBOSE_DEBUG
4474 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4475 char *pipetype = NULL;
4476 char *speed = NULL;
4477
4478 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
4479 dev_vdbg(hsotg->dev, " Device address: %d\n",
4480 usb_pipedevice(urb->pipe));
4481 dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n",
4482 usb_pipeendpoint(urb->pipe),
4483 usb_pipein(urb->pipe) ? "IN" : "OUT");
4484
4485 switch (usb_pipetype(urb->pipe)) {
4486 case PIPE_CONTROL:
4487 pipetype = "CONTROL";
4488 break;
4489 case PIPE_BULK:
4490 pipetype = "BULK";
4491 break;
4492 case PIPE_INTERRUPT:
4493 pipetype = "INTERRUPT";
4494 break;
4495 case PIPE_ISOCHRONOUS:
4496 pipetype = "ISOCHRONOUS";
4497 break;
4498 }
4499
4500 dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
4501 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
4502 "IN" : "OUT");
4503
4504 switch (urb->dev->speed) {
4505 case USB_SPEED_HIGH:
4506 speed = "HIGH";
4507 break;
4508 case USB_SPEED_FULL:
4509 speed = "FULL";
4510 break;
4511 case USB_SPEED_LOW:
4512 speed = "LOW";
4513 break;
4514 default:
4515 speed = "UNKNOWN";
4516 break;
4517 }
4518
4519 dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
4520 dev_vdbg(hsotg->dev, " Max packet size: %d (%d mult)\n",
4521 usb_endpoint_maxp(&urb->ep->desc),
4522 usb_endpoint_maxp_mult(&urb->ep->desc));
4523
4524 dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
4525 urb->transfer_buffer_length);
4526 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
4527 urb->transfer_buffer, (unsigned long)urb->transfer_dma);
4528 dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
4529 urb->setup_packet, (unsigned long)urb->setup_dma);
4530 dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval);
4531
4532 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
4533 int i;
4534
4535 for (i = 0; i < urb->number_of_packets; i++) {
4536 dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i);
4537 dev_vdbg(hsotg->dev, " offset: %d, length %d\n",
4538 urb->iso_frame_desc[i].offset,
4539 urb->iso_frame_desc[i].length);
4540 }
4541 }
4542 #endif
4543 }
4544
4545
4546
4547
4548
4549
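/*
 * hc_driver .urb_enqueue callback: wraps the URB in a dwc2_hcd_urb,
 * creates the endpoint's QH and a QTD as needed, links everything into
 * the schedule and reserves periodic bandwidth for interrupt and
 * isochronous endpoints.
 */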
4550 static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
4551 gfp_t mem_flags)
4552 {
4553 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4554 struct usb_host_endpoint *ep = urb->ep;
4555 struct dwc2_hcd_urb *dwc2_urb;
4556 int i;
4557 int retval;
4558 int alloc_bandwidth = 0;
4559 u8 ep_type = 0;
4560 u32 tflags = 0;
4561 void *buf;
4562 unsigned long flags;
4563 struct dwc2_qh *qh;
4564 bool qh_allocated = false;
4565 struct dwc2_qtd *qtd;
4566
4567 if (dbg_urb(urb)) {
4568 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
4569 dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
4570 }
4571
4572 if (!ep)
4573 return -EINVAL;
4574
4575 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
4576 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
4577 spin_lock_irqsave(&hsotg->lock, flags);
4578 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
4579 alloc_bandwidth = 1;
4580 spin_unlock_irqrestore(&hsotg->lock, flags);
4581 }
4582
4583 switch (usb_pipetype(urb->pipe)) {
4584 case PIPE_CONTROL:
4585 ep_type = USB_ENDPOINT_XFER_CONTROL;
4586 break;
4587 case PIPE_ISOCHRONOUS:
4588 ep_type = USB_ENDPOINT_XFER_ISOC;
4589 break;
4590 case PIPE_BULK:
4591 ep_type = USB_ENDPOINT_XFER_BULK;
4592 break;
4593 case PIPE_INTERRUPT:
4594 ep_type = USB_ENDPOINT_XFER_INT;
4595 break;
4596 }
4597
4598 dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
4599 mem_flags);
4600 if (!dwc2_urb)
4601 return -ENOMEM;
4602
4603 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
4604 usb_pipeendpoint(urb->pipe), ep_type,
4605 usb_pipein(urb->pipe),
4606 usb_endpoint_maxp(&ep->desc),
4607 usb_endpoint_maxp_mult(&ep->desc));
4608
4609 buf = urb->transfer_buffer;
4610
4611 if (hcd_uses_dma(hcd)) {
4612 if (!buf && (urb->transfer_dma & 3)) {
4613 dev_err(hsotg->dev,
4614 "%s: unaligned transfer with no transfer_buffer",
4615 __func__);
4616 retval = -EINVAL;
4617 goto fail0;
4618 }
4619 }
4620
4621 if (!(urb->transfer_flags & URB_NO_INTERRUPT))
4622 tflags |= URB_GIVEBACK_ASAP;
4623 if (urb->transfer_flags & URB_ZERO_PACKET)
4624 tflags |= URB_SEND_ZERO_PACKET;
4625
4626 dwc2_urb->priv = urb;
4627 dwc2_urb->buf = buf;
4628 dwc2_urb->dma = urb->transfer_dma;
4629 dwc2_urb->length = urb->transfer_buffer_length;
4630 dwc2_urb->setup_packet = urb->setup_packet;
4631 dwc2_urb->setup_dma = urb->setup_dma;
4632 dwc2_urb->flags = tflags;
4633 dwc2_urb->interval = urb->interval;
4634 dwc2_urb->status = -EINPROGRESS;
4635
4636 for (i = 0; i < urb->number_of_packets; ++i)
4637 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
4638 urb->iso_frame_desc[i].offset,
4639 urb->iso_frame_desc[i].length);
4640
4641 urb->hcpriv = dwc2_urb;
4642 qh = (struct dwc2_qh *)ep->hcpriv;
4643
4644 if (!qh) {
4645 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
4646 if (!qh) {
4647 retval = -ENOMEM;
4648 goto fail0;
4649 }
4650 ep->hcpriv = qh;
4651 qh_allocated = true;
4652 }
4653
4654 qtd = kzalloc(sizeof(*qtd), mem_flags);
4655 if (!qtd) {
4656 retval = -ENOMEM;
4657 goto fail1;
4658 }
4659
4660 spin_lock_irqsave(&hsotg->lock, flags);
4661 retval = usb_hcd_link_urb_to_ep(hcd, urb);
4662 if (retval)
4663 goto fail2;
4664
4665 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
4666 if (retval)
4667 goto fail3;
4668
4669 if (alloc_bandwidth) {
4670 dwc2_allocate_bus_bandwidth(hcd,
4671 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
4672 urb);
4673 }
4674
4675 spin_unlock_irqrestore(&hsotg->lock, flags);
4676
4677 return 0;
4678
4679 fail3:
4680 dwc2_urb->priv = NULL;
4681 usb_hcd_unlink_urb_from_ep(hcd, urb);
4682 if (qh_allocated && qh->channel && qh->channel->qh == qh)
4683 qh->channel->qh = NULL;
4684 fail2:
4685 spin_unlock_irqrestore(&hsotg->lock, flags);
4686 urb->hcpriv = NULL;
4687 kfree(qtd);
4688 fail1:
4689 if (qh_allocated) {
4690 struct dwc2_qtd *qtd2, *qtd2_tmp;
4691
4692 ep->hcpriv = NULL;
4693 dwc2_hcd_qh_unlink(hsotg, qh);
4694
4695 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
4696 qtd_list_entry)
4697 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
4698 dwc2_hcd_qh_free(hsotg, qh);
4699 }
4700 fail0:
4701 kfree(dwc2_urb);
4702
4703 return retval;
4704 }
4705
4706
4707
4708
4709 static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
4710 int status)
4711 {
4712 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4713 int rc;
4714 unsigned long flags;
4715
4716 dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
4717 dwc2_dump_urb_info(hcd, urb, "urb_dequeue");
4718
4719 spin_lock_irqsave(&hsotg->lock, flags);
4720
4721 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
4722 if (rc)
4723 goto out;
4724
4725 if (!urb->hcpriv) {
4726 dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
4727 goto out;
4728 }
4729
4730 rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
4731
4732 usb_hcd_unlink_urb_from_ep(hcd, urb);
4733
4734 kfree(urb->hcpriv);
4735 urb->hcpriv = NULL;
4736
4737
4738 spin_unlock(&hsotg->lock);
4739 usb_hcd_giveback_urb(hcd, urb, status);
4740 spin_lock(&hsotg->lock);
4741
4742 dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
4743 dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
4744 out:
4745 spin_unlock_irqrestore(&hsotg->lock, flags);
4746
4747 return rc;
4748 }
4749
4750
4751
4752
4753
4754
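/*
 * Frees resources in the controller related to a given endpoint and clears
 * the HCD state for it. Any URBs for the endpoint must already be dequeued.
 */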
4755 static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
4756 struct usb_host_endpoint *ep)
4757 {
4758 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4759
4760 dev_dbg(hsotg->dev,
4761 "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
4762 ep->desc.bEndpointAddress, ep->hcpriv);
4763 dwc2_hcd_endpoint_disable(hsotg, ep, 250);
4764 }
4765
4766
4767
4768
4769
4770
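/*
 * Resets endpoint-specific parameters (notably the data toggle); may be
 * reached via the usb_clear_halt() path.
 */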
4771 static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
4772 struct usb_host_endpoint *ep)
4773 {
4774 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4775 unsigned long flags;
4776
4777 dev_dbg(hsotg->dev,
4778 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
4779 ep->desc.bEndpointAddress);
4780
4781 spin_lock_irqsave(&hsotg->lock, flags);
4782 dwc2_hcd_endpoint_reset(hsotg, ep);
4783 spin_unlock_irqrestore(&hsotg->lock, flags);
4784 }
4785
4786
4787
4788
4789
4790
4791
4792
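/*
 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE
 * if there was no interrupt to handle, IRQ_HANDLED otherwise.
 */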
4793 static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
4794 {
4795 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4796
4797 return dwc2_handle_hcd_intr(hsotg);
4798 }
4799
4800
4801
4802
4803
4804
4805
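/*
 * Creates the status change bitmap for the root hub and root port. Bit 1 is
 * the change indicator for the single root port. Returns non-zero if a
 * change is reported.
 */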
4806 static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
4807 {
4808 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4809
4810 buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
4811 return buf[0] != 0;
4812 }
4813
4814
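/* Handles hub class-specific requests */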
4815 static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
4816 u16 windex, char *buf, u16 wlength)
4817 {
4818 int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
4819 wvalue, windex, buf, wlength);
4820 return retval;
4821 }
4822
4823
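/* Handles hub TT buffer clear completions */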
4824 static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
4825 struct usb_host_endpoint *ep)
4826 {
4827 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4828 struct dwc2_qh *qh;
4829 unsigned long flags;
4830
4831 qh = ep->hcpriv;
4832 if (!qh)
4833 return;
4834
4835 spin_lock_irqsave(&hsotg->lock, flags);
4836 qh->tt_buffer_dirty = 0;
4837
4838 if (hsotg->flags.b.port_connect_status)
4839 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
4840
4841 spin_unlock_irqrestore(&hsotg->lock, flags);
4842 }
4843
4844
4845
4846
4847
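/*
 * Updates the forced port speed parameter (HPRT0_SPD_*) and schedules the
 * connection ID status work so the core is re-initialized at the new speed.
 */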
4848 static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed)
4849 {
4850 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4851
4852 if (hsotg->params.speed == speed)
4853 return;
4854
4855 hsotg->params.speed = speed;
4856 queue_work(hsotg->wq_otg, &hsotg->wf_otg);
4857 }
4858
4859 static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
4860 {
4861 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4862
4863 if (!hsotg->params.change_speed_quirk)
4864 return;
4865
4866
4867
4868
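/*
 * When a device attached behind a FS/LS hub goes away, restore the default
 * high-speed setting for the root port.
 */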
4869 if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN &&
4870 udev->parent->speed < USB_SPEED_HIGH) {
4871 dev_info(hsotg->dev, "Set speed to default high-speed\n");
4872 dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
4873 }
4874 }
4875
4876 static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
4877 {
4878 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4879
4880 if (!hsotg->params.change_speed_quirk)
4881 return 0;
4882
4883 if (udev->speed == USB_SPEED_HIGH) {
4884 dev_info(hsotg->dev, "Set speed to high-speed\n");
4885 dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
4886 } else if ((udev->speed == USB_SPEED_FULL ||
4887 udev->speed == USB_SPEED_LOW)) {
4888
4889
4890
4891
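/*
 * Change the port speed setting to full-speed when a full- or low-speed
 * device is attached.
 */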
4892 dev_info(hsotg->dev, "Set speed to full-speed\n");
4893 dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED);
4894 }
4895
4896 return 0;
4897 }
4898
4899 static struct hc_driver dwc2_hc_driver = {
4900 .description = "dwc2_hsotg",
4901 .product_desc = "DWC OTG Controller",
4902 .hcd_priv_size = sizeof(struct wrapper_priv_data),
4903
4904 .irq = _dwc2_hcd_irq,
4905 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
4906
4907 .start = _dwc2_hcd_start,
4908 .stop = _dwc2_hcd_stop,
4909 .urb_enqueue = _dwc2_hcd_urb_enqueue,
4910 .urb_dequeue = _dwc2_hcd_urb_dequeue,
4911 .endpoint_disable = _dwc2_hcd_endpoint_disable,
4912 .endpoint_reset = _dwc2_hcd_endpoint_reset,
4913 .get_frame_number = _dwc2_hcd_get_frame_number,
4914
4915 .hub_status_data = _dwc2_hcd_hub_status_data,
4916 .hub_control = _dwc2_hcd_hub_control,
4917 .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
4918
4919 .bus_suspend = _dwc2_hcd_suspend,
4920 .bus_resume = _dwc2_hcd_resume,
4921
4922 .map_urb_for_dma = dwc2_map_urb_for_dma,
4923 .unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
4924 };
4925
4926
4927
4928
4929
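/*
 * Frees secondary storage associated with the dwc2_hsotg structure contained
 * in the struct usb_hcd field.
 */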
4930 static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
4931 {
4932 u32 ahbcfg;
4933 u32 dctl;
4934 int i;
4935
4936 dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");
4937
4938
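/* Free memory for QH/QTD lists */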
4939 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
4940 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
4941 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
4942 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
4943 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
4944 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
4945 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);
4946
4947
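/* Free memory for the host channels */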
4948 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
4949 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
4950
4951 if (chan) {
4952 dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
4953 i, chan);
4954 hsotg->hc_ptr_array[i] = NULL;
4955 kfree(chan);
4956 }
4957 }
4958
4959 if (hsotg->params.host_dma) {
4960 if (hsotg->status_buf) {
4961 dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
4962 hsotg->status_buf,
4963 hsotg->status_buf_dma);
4964 hsotg->status_buf = NULL;
4965 }
4966 } else {
4967 kfree(hsotg->status_buf);
4968 hsotg->status_buf = NULL;
4969 }
4970
4971 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
4972
4973
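/* Disable all interrupts */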
4974 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
4975 dwc2_writel(hsotg, ahbcfg, GAHBCFG);
4976 dwc2_writel(hsotg, 0, GINTMSK);
4977
4978 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
4979 dctl = dwc2_readl(hsotg, DCTL);
4980 dctl |= DCTL_SFTDISCON;
4981 dwc2_writel(hsotg, dctl, DCTL);
4982 }
4983
4984 if (hsotg->wq_otg) {
4985 if (!cancel_work_sync(&hsotg->wf_otg))
4986 flush_workqueue(hsotg->wq_otg);
4987 destroy_workqueue(hsotg->wq_otg);
4988 }
4989
4990 cancel_work_sync(&hsotg->phy_reset_work);
4991
4992 del_timer(&hsotg->wkp_timer);
4993 }
4994
4995 static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
4996 {
4997
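/* Turn off all host-specific interrupts */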
4998 dwc2_disable_host_interrupts(hsotg);
4999
5000 dwc2_hcd_free(hsotg);
5001 }
5002
5003
5004
5005
5006
5007
5008
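/**
 * dwc2_hcd_init() - Initializes the HCD
 *
 * Allocates memory for and initializes the static parts of the usb_hcd and
 * dwc2_hsotg structures, registers the USB bus with the core and requests
 * the IRQ. Typically called once from the platform glue after the core
 * parameters are set, e.g. "retval = dwc2_hcd_init(hsotg);".
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, a negative error code otherwise
 */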
5009 int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
5010 {
5011 struct platform_device *pdev = to_platform_device(hsotg->dev);
5012 struct resource *res;
5013 struct usb_hcd *hcd;
5014 struct dwc2_host_chan *channel;
5015 u32 hcfg;
5016 int i, num_channels;
5017 int retval;
5018
5019 if (usb_disabled())
5020 return -ENODEV;
5021
5022 dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
5023
5024 retval = -ENOMEM;
5025
5026 hcfg = dwc2_readl(hsotg, HCFG);
5027 dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
5028
5029 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5030 hsotg->frame_num_array = kcalloc(FRAME_NUM_ARRAY_SIZE,
5031 sizeof(*hsotg->frame_num_array),
5032 GFP_KERNEL);
5033 if (!hsotg->frame_num_array)
5034 goto error1;
5035 hsotg->last_frame_num_array =
5036 kcalloc(FRAME_NUM_ARRAY_SIZE,
5037 sizeof(*hsotg->last_frame_num_array), GFP_KERNEL);
5038 if (!hsotg->last_frame_num_array)
5039 goto error1;
5040 #endif
5041 hsotg->last_frame_num = HFNUM_MAX_FRNUM;
5042
5043
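/* Check if the bus driver or platform code has setup a dma_mask */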
5044 if (hsotg->params.host_dma &&
5045 !hsotg->dev->dma_mask) {
5046 dev_warn(hsotg->dev,
5047 "dma_mask not set, disabling DMA\n");
5048 hsotg->params.host_dma = false;
5049 hsotg->params.dma_desc_enable = false;
5050 }
5051
5052
5053 if (hsotg->params.host_dma) {
5054 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
5055 dev_warn(hsotg->dev, "can't set DMA mask\n");
5056 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
5057 dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
5058 }
5059
5060 if (hsotg->params.change_speed_quirk) {
5061 dwc2_hc_driver.free_dev = dwc2_free_dev;
5062 dwc2_hc_driver.reset_device = dwc2_reset_device;
5063 }
5064
5065 if (hsotg->params.host_dma)
5066 dwc2_hc_driver.flags |= HCD_DMA;
5067
5068 hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
5069 if (!hcd)
5070 goto error1;
5071
5072 hcd->has_tt = 1;
5073
5074 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
/* defensive check: bail out if the MMIO resource is missing */
retval = -EINVAL;
goto error2;
}
5075 hcd->rsrc_start = res->start;
5076 hcd->rsrc_len = resource_size(res);
5077
5078 ((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg;
5079 hsotg->priv = hcd;
5080
5081
5082
5083
5084
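/*
 * Disable the global interrupt until all the interrupt handlers are
 * installed
 */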
5085 dwc2_disable_global_interrupts(hsotg);
5086
5087
5088 retval = dwc2_core_init(hsotg, true);
5089 if (retval)
5090 goto error2;
5091
5092
5093 retval = -ENOMEM;
5094 hsotg->wq_otg = alloc_ordered_workqueue("dwc2", 0);
5095 if (!hsotg->wq_otg) {
5096 dev_err(hsotg->dev, "Failed to create workqueue\n");
5097 goto error2;
5098 }
5099 INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
5100
5101 timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
5102
5103
5104 INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
5105 INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
5106 INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
5107
5108
5109 INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
5110 INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
5111 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
5112 INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
5113
5114 INIT_LIST_HEAD(&hsotg->split_order);
5115
5116
5117
5118
5119
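/*
 * Create a host channel descriptor for each host channel implemented in the
 * controller. Initialize the channel descriptor array.
 */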
5120 INIT_LIST_HEAD(&hsotg->free_hc_list);
5121 num_channels = hsotg->params.host_channels;
5122 memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
5123
5124 for (i = 0; i < num_channels; i++) {
5125 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
5126 if (!channel)
5127 goto error3;
5128 channel->hc_num = i;
5129 INIT_LIST_HEAD(&channel->split_order_list_entry);
5130 hsotg->hc_ptr_array[i] = channel;
5131 }
5132
5133
5134 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
5135 INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
5136 INIT_WORK(&hsotg->phy_reset_work, dwc2_hcd_phy_reset_func);
5137
5138
5139
5140
5141
5142
5143
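/*
 * Allocate space for storing data on status transactions. Normally no data
 * is sent, but this space acts as a bit bucket.
 */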
5144 if (hsotg->params.host_dma)
5145 hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
5146 DWC2_HCD_STATUS_BUF_SIZE,
5147 &hsotg->status_buf_dma, GFP_KERNEL);
5148 else
5149 hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
5150 GFP_KERNEL);
5151
5152 if (!hsotg->status_buf)
5153 goto error3;
5154
5155
5156
5157
5158
5159
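/*
 * Create kmem caches to handle descriptor buffers in descriptor DMA mode;
 * the caches below request 512-byte alignment.
 */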
5160 if (hsotg->params.dma_desc_enable ||
5161 hsotg->params.dma_desc_fs_enable) {
5162 hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
5163 sizeof(struct dwc2_dma_desc) *
5164 MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
5165 NULL);
5166 if (!hsotg->desc_gen_cache) {
5167 dev_err(hsotg->dev,
5168 "unable to create dwc2 generic desc cache\n");
5169
5170
5171
5172
5173
5174 hsotg->params.dma_desc_enable = false;
5175 hsotg->params.dma_desc_fs_enable = false;
5176 }
5177
5178 hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
5179 sizeof(struct dwc2_dma_desc) *
5180 MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
5181 if (!hsotg->desc_hsisoc_cache) {
5182 dev_err(hsotg->dev,
5183 "unable to create dwc2 hs isoc desc cache\n");
5184
5185 kmem_cache_destroy(hsotg->desc_gen_cache);
5186
5187
5188
5189
5190
5191 hsotg->params.dma_desc_enable = false;
5192 hsotg->params.dma_desc_fs_enable = false;
5193 }
5194 }
5195
5196 if (hsotg->params.host_dma) {
5197
5198
5199
5200
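/*
 * Create a kmem cache of DMA-able bounce buffers used when a transfer
 * buffer is not suitably aligned for the hardware.
 */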
5201 hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
5202 DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
5203 SLAB_CACHE_DMA, NULL);
5204 if (!hsotg->unaligned_cache)
5205 dev_err(hsotg->dev,
5206 "unable to create dwc2 unaligned cache\n");
5207 }
5208
5209 hsotg->otg_port = 1;
5210 hsotg->frame_list = NULL;
5211 hsotg->frame_list_dma = 0;
5212 hsotg->periodic_qh_count = 0;
5213
5214
5215 hsotg->lx_state = DWC2_L3;
5216
5217 hcd->self.otg_port = hsotg->otg_port;
5218
5219
5220 hcd->self.sg_tablesize = 0;
5221
5222 if (!IS_ERR_OR_NULL(hsotg->uphy))
5223 otg_set_host(hsotg->uphy->otg, &hcd->self);
5224
5225
5226
5227
5228
5229
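/*
 * Finish generic HCD initialization and start the HCD: this registers the
 * USB bus, requests the IRQ line and calls hc_driver->start().
 */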
5230 retval = usb_add_hcd(hcd, hsotg->irq, IRQF_SHARED);
5231 if (retval < 0)
5232 goto error4;
5233
5234 device_wakeup_enable(hcd->self.controller);
5235
5236 dwc2_hcd_dump_state(hsotg);
5237
5238 dwc2_enable_global_interrupts(hsotg);
5239
5240 return 0;
5241
5242 error4:
5243 kmem_cache_destroy(hsotg->unaligned_cache);
5244 kmem_cache_destroy(hsotg->desc_hsisoc_cache);
5245 kmem_cache_destroy(hsotg->desc_gen_cache);
5246 error3:
5247 dwc2_hcd_release(hsotg);
5248 error2:
5249 usb_put_hcd(hcd);
5250 error1:
5251
5252 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5253 kfree(hsotg->last_frame_num_array);
5254 kfree(hsotg->frame_num_array);
5255 #endif
5256
5257 dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval);
5258 return retval;
5259 }
5260
5261
5262
5263
5264
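/*
 * Removes the HCD. Frees memory and resources associated with the HCD and
 * deregisters the bus.
 */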
5265 void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
5266 {
5267 struct usb_hcd *hcd;
5268
5269 dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n");
5270
5271 hcd = dwc2_hsotg_to_hcd(hsotg);
5272 dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd);
5273
5274 if (!hcd) {
5275 dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
5276 __func__);
5277 return;
5278 }
5279
5280 if (!IS_ERR_OR_NULL(hsotg->uphy))
5281 otg_set_host(hsotg->uphy->otg, NULL);
5282
5283 usb_remove_hcd(hcd);
5284 hsotg->priv = NULL;
5285
5286 kmem_cache_destroy(hsotg->unaligned_cache);
5287 kmem_cache_destroy(hsotg->desc_hsisoc_cache);
5288 kmem_cache_destroy(hsotg->desc_gen_cache);
5289
5290 dwc2_hcd_release(hsotg);
5291 usb_put_hcd(hcd);
5292
5293 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5294 kfree(hsotg->last_frame_num_array);
5295 kfree(hsotg->frame_num_array);
5296 #endif
5297 }
5298
5299
5300
5301
5302
5303
5304
5305
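/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, the host registers need to be backed up if
 * controller power is disabled while suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */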
5306 int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
5307 {
5308 struct dwc2_hregs_backup *hr;
5309 int i;
5310
5311 dev_dbg(hsotg->dev, "%s\n", __func__);
5312
5313
5314 hr = &hsotg->hr_backup;
5315 hr->hcfg = dwc2_readl(hsotg, HCFG);
5316 hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
5317 for (i = 0; i < hsotg->params.host_channels; ++i)
5318 hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
5319
5320 hr->hprt0 = dwc2_read_hprt0(hsotg);
5321 hr->hfir = dwc2_readl(hsotg, HFIR);
5322 hr->hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ);
5323 hr->valid = true;
5324
5325 return 0;
5326 }
5327
5328
5329
5330
5331
5332
5333
5334
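/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, the host registers need to be restored if
 * controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */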
5335 int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
5336 {
5337 struct dwc2_hregs_backup *hr;
5338 int i;
5339
5340 dev_dbg(hsotg->dev, "%s\n", __func__);
5341
5342
5343 hr = &hsotg->hr_backup;
5344 if (!hr->valid) {
5345 dev_err(hsotg->dev, "%s: no host registers to restore\n",
5346 __func__);
5347 return -EINVAL;
5348 }
5349 hr->valid = false;
5350
5351 dwc2_writel(hsotg, hr->hcfg, HCFG);
5352 dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
5353
5354 for (i = 0; i < hsotg->params.host_channels; ++i)
5355 dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
5356
5357 dwc2_writel(hsotg, hr->hprt0, HPRT0);
5358 dwc2_writel(hsotg, hr->hfir, HFIR);
5359 dwc2_writel(hsotg, hr->hptxfsiz, HPTXFSIZ);
5360 hsotg->frame_number = 0;
5361
5362 return 0;
5363 }
5364
5365
5366
5367
5368
5369
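/**
 * dwc2_host_enter_hibernation() - Put the controller into Hibernation.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: 0 on success, a negative error code otherwise
 */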
5370 int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
5371 {
5372 unsigned long flags;
5373 int ret = 0;
5374 u32 hprt0;
5375 u32 pcgcctl;
5376 u32 gusbcfg;
5377 u32 gpwrdn;
5378
5379 dev_dbg(hsotg->dev, "Preparing host for hibernation\n");
5380 ret = dwc2_backup_global_registers(hsotg);
5381 if (ret) {
5382 dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5383 __func__);
5384 return ret;
5385 }
5386 ret = dwc2_backup_host_registers(hsotg);
5387 if (ret) {
5388 dev_err(hsotg->dev, "%s: failed to backup host registers\n",
5389 __func__);
5390 return ret;
5391 }
5392
5393
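/* Enter USB Suspend Mode */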
5394 hprt0 = dwc2_readl(hsotg, HPRT0);
5395 hprt0 |= HPRT0_SUSP;
5396 hprt0 &= ~HPRT0_ENA;
5397 dwc2_writel(hsotg, hprt0, HPRT0);
5398
5399
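/* Wait for the HPRT0.PrtSusp register field to be set */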
5400 if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
5401 dev_warn(hsotg->dev, "Suspend wasn't generated\n");
5402
5403
5404
5405
5406
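/*
 * Program the power-down logic with interrupts disabled: stop the PHY
 * clock, enable the PMU, unmask its wakeup interrupts, then clamp and
 * switch off core power.
 */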
5407 spin_lock_irqsave(&hsotg->lock, flags);
5408 hsotg->lx_state = DWC2_L2;
5409
5410 gusbcfg = dwc2_readl(hsotg, GUSBCFG);
5411 if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
5412
5413
5414 pcgcctl = dwc2_readl(hsotg, PCGCTL);
5415 pcgcctl |= PCGCTL_STOPPCLK;
5416 dwc2_writel(hsotg, pcgcctl, PCGCTL);
5417 udelay(10);
5418
5419 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5420 gpwrdn |= GPWRDN_PMUACTV;
5421 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5422 udelay(10);
5423 } else {
5424
5425 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5426 gpwrdn |= GPWRDN_PMUACTV;
5427 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5428 udelay(10);
5429
5430 pcgcctl = dwc2_readl(hsotg, PCGCTL);
5431 pcgcctl |= PCGCTL_STOPPCLK;
5432 dwc2_writel(hsotg, pcgcctl, PCGCTL);
5433 udelay(10);
5434 }
5435
5436
5437 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5438 gpwrdn |= GPWRDN_PMUINTSEL;
5439 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5440 udelay(10);
5441
5442
5443 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5444 gpwrdn |= GPWRDN_DISCONN_DET_MSK;
5445 gpwrdn |= GPWRDN_LNSTSCHG_MSK;
5446 gpwrdn |= GPWRDN_STS_CHGINT_MSK;
5447 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5448 udelay(10);
5449
5450
5451 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5452 gpwrdn |= GPWRDN_PWRDNCLMP;
5453 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5454 udelay(10);
5455
5456
5457 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5458 gpwrdn |= GPWRDN_PWRDNSWTCH;
5459 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5460
5461 hsotg->hibernated = 1;
5462 hsotg->bus_suspended = 1;
5463 dev_dbg(hsotg->dev, "Host hibernation completed\n");
5464 spin_unlock_irqrestore(&hsotg->lock, flags);
5465 return ret;
5466 }
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
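/**
 * dwc2_host_exit_hibernation() - Exit Host mode hibernation.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether resume was initiated by remote wakeup
 * @reset: indicates whether resume was initiated by reset
 *
 * Handles both host-initiated resume/reset and device-initiated remote
 * wakeup.
 *
 * Return: 0 on success, a negative error code otherwise
 */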
5480 int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
5481 int reset)
5482 {
5483 u32 gpwrdn;
5484 u32 hprt0;
5485 int ret = 0;
5486 struct dwc2_gregs_backup *gr;
5487 struct dwc2_hregs_backup *hr;
5488
5489 gr = &hsotg->gr_backup;
5490 hr = &hsotg->hr_backup;
5491
5492 dev_dbg(hsotg->dev,
5493 "%s: called with rem_wakeup = %d reset = %d\n",
5494 __func__, rem_wakeup, reset);
5495
5496 dwc2_hib_restore_common(hsotg, rem_wakeup, 1);
5497 hsotg->hibernated = 0;
5498
5499
5500
5501
5502
5503
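/*
 * Give the core time to settle after the common restore; without this delay
 * it can apparently still report device mode and raise mode mismatch
 * interrupts.
 */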
5504 mdelay(100);
5505
5506
5507 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5508
5509
5510 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5511 gpwrdn &= ~GPWRDN_RESTORE;
5512 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5513 udelay(10);
5514
5515
5516 dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
5517 dwc2_writel(hsotg, hr->hcfg, HCFG);
5518
5519
5520 gpwrdn = dwc2_readl(hsotg, GPWRDN);
5521 gpwrdn &= ~GPWRDN_PMUACTV;
5522 dwc2_writel(hsotg, gpwrdn, GPWRDN);
5523 udelay(10);
5524
5525 hprt0 = hr->hprt0;
5526 hprt0 |= HPRT0_PWR;
5527 hprt0 &= ~HPRT0_ENA;
5528 hprt0 &= ~HPRT0_SUSP;
5529 dwc2_writel(hsotg, hprt0, HPRT0);
5530
5531 hprt0 = hr->hprt0;
5532 hprt0 |= HPRT0_PWR;
5533 hprt0 &= ~HPRT0_ENA;
5534 hprt0 &= ~HPRT0_SUSP;
5535
5536 if (reset) {
5537 hprt0 |= HPRT0_RST;
5538 dwc2_writel(hsotg, hprt0, HPRT0);
5539
5540
5541 mdelay(60);
5542 hprt0 &= ~HPRT0_RST;
5543 dwc2_writel(hsotg, hprt0, HPRT0);
5544 } else {
5545 hprt0 |= HPRT0_RES;
5546 dwc2_writel(hsotg, hprt0, HPRT0);
5547
5548
5549 mdelay(100);
5550 hprt0 &= ~HPRT0_RES;
5551 dwc2_writel(hsotg, hprt0, HPRT0);
5552 }
5553
5554 hprt0 = dwc2_readl(hsotg, HPRT0);
5555 hprt0 |= HPRT0_CONNDET;
5556 hprt0 |= HPRT0_ENACHG;
5557 hprt0 &= ~HPRT0_ENA;
5558 dwc2_writel(hsotg, hprt0, HPRT0);
5559
5560 hprt0 = dwc2_readl(hsotg, HPRT0);
5561
5562
5563 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5564
5565
5566 ret = dwc2_restore_global_registers(hsotg);
5567 if (ret) {
5568 dev_err(hsotg->dev, "%s: failed to restore registers\n",
5569 __func__);
5570 return ret;
5571 }
5572
5573
5574 ret = dwc2_restore_host_registers(hsotg);
5575 if (ret) {
5576 dev_err(hsotg->dev, "%s: failed to restore host registers\n",
5577 __func__);
5578 return ret;
5579 }
5580
5581 dwc2_hcd_rem_wakeup(hsotg);
5582
5583 hsotg->hibernated = 0;
5584 hsotg->bus_suspended = 0;
5585 hsotg->lx_state = DWC2_L0;
5586 dev_dbg(hsotg->dev, "Host hibernation restore complete\n");
5587 return ret;
5588 }
5589
5590 bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
5591 {
5592 struct usb_device *root_hub = dwc2_hsotg_to_hcd(dwc2)->self.root_hub;
5593
5594
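/* If the controller isn't allowed to wake up, the PHY can be powered off */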
5595 if (!device_may_wakeup(dwc2->dev))
5596 return true;
5597
5598
5599
5600
5601
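/*
 * Don't power off the PHY if anything under the root hub has wakeup
 * enabled.
 */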
5602 if (usb_wakeup_enabled_descendants(root_hub))
5603 return false;
5604
5605
5606 return true;
5607 }