This source file includes following definitions.
- pch_udc_readl
- pch_udc_writel
- pch_udc_bit_set
- pch_udc_bit_clr
- pch_udc_ep_readl
- pch_udc_ep_writel
- pch_udc_ep_bit_set
- pch_udc_ep_bit_clr
- pch_udc_csr_busy
- pch_udc_write_csr
- pch_udc_read_csr
- pch_udc_rmt_wakeup
- pch_udc_get_frame
- pch_udc_clear_selfpowered
- pch_udc_set_selfpowered
- pch_udc_set_disconnect
- pch_udc_clear_disconnect
- pch_udc_reconnect
- pch_udc_vbus_session
- pch_udc_ep_set_stall
- pch_udc_ep_clear_stall
- pch_udc_ep_set_trfr_type
- pch_udc_ep_set_bufsz
- pch_udc_ep_set_maxpkt
- pch_udc_ep_set_subptr
- pch_udc_ep_set_ddptr
- pch_udc_ep_set_pd
- pch_udc_ep_set_rrdy
- pch_udc_ep_clear_rrdy
- pch_udc_set_dma
- pch_udc_clear_dma
- pch_udc_set_csr_done
- pch_udc_disable_interrupts
- pch_udc_enable_interrupts
- pch_udc_disable_ep_interrupts
- pch_udc_enable_ep_interrupts
- pch_udc_read_device_interrupts
- pch_udc_write_device_interrupts
- pch_udc_read_ep_interrupts
- pch_udc_write_ep_interrupts
- pch_udc_read_device_status
- pch_udc_read_ep_control
- pch_udc_clear_ep_control
- pch_udc_read_ep_status
- pch_udc_clear_ep_status
- pch_udc_ep_set_nak
- pch_udc_ep_clear_nak
- pch_udc_ep_fifo_flush
- pch_udc_ep_enable
- pch_udc_ep_disable
- pch_udc_wait_ep_stall
- pch_udc_init
- pch_udc_exit
- pch_udc_pcd_get_frame
- pch_udc_pcd_wakeup
- pch_udc_pcd_selfpowered
- pch_udc_pcd_pullup
- pch_udc_pcd_vbus_session
- pch_udc_pcd_vbus_draw
- pch_vbus_gpio_get_value
- pch_vbus_gpio_work_fall
- pch_vbus_gpio_work_rise
- pch_vbus_gpio_irq
- pch_vbus_gpio_init
- pch_vbus_gpio_free
- complete_req
- empty_req_queue
- pch_udc_free_dma_chain
- pch_udc_create_dma_chain
- prepare_dma
- process_zlp
- pch_udc_start_rxrequest
- pch_udc_pcd_ep_enable
- pch_udc_pcd_ep_disable
- pch_udc_alloc_request
- pch_udc_free_request
- pch_udc_pcd_queue
- pch_udc_pcd_dequeue
- pch_udc_pcd_set_halt
- pch_udc_pcd_set_wedge
- pch_udc_pcd_fifo_flush
- pch_udc_init_setup_buff
- pch_udc_start_next_txrequest
- pch_udc_complete_transfer
- pch_udc_complete_receiver
- pch_udc_svc_data_in
- pch_udc_svc_data_out
- pch_udc_svc_control_in
- pch_udc_svc_control_out
- pch_udc_postsvc_epinters
- pch_udc_read_all_epstatus
- pch_udc_activate_control_ep
- pch_udc_svc_ur_interrupt
- pch_udc_svc_enum_interrupt
- pch_udc_svc_intf_interrupt
- pch_udc_svc_cfg_interrupt
- pch_udc_dev_isr
- pch_udc_isr
- pch_udc_setup_ep0
- pch_udc_pcd_reinit
- pch_udc_pcd_init
- init_dma_pools
- pch_udc_start
- pch_udc_stop
- pch_udc_shutdown
- pch_udc_remove
- pch_udc_suspend
- pch_udc_resume
- pch_udc_probe
1
2
3
4
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/list.h>
12 #include <linux/interrupt.h>
13 #include <linux/usb/ch9.h>
14 #include <linux/usb/gadget.h>
15 #include <linux/gpio.h>
16 #include <linux/irq.h>
17
18
static int vbus_gpio_port = -1;	/* GPIO port number; -1 means no VBUS GPIO */
20
21 #define PCH_VBUS_PERIOD 3000
22 #define PCH_VBUS_INTERVAL 10
23
24
25 #define UDC_EP_REG_SHIFT 0x20
26
27 #define UDC_EPCTL_ADDR 0x00
28 #define UDC_EPSTS_ADDR 0x04
29 #define UDC_BUFIN_FRAMENUM_ADDR 0x08
30 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C
31 #define UDC_SUBPTR_ADDR 0x10
32 #define UDC_DESPTR_ADDR 0x14
33 #define UDC_CONFIRM_ADDR 0x18
34
35 #define UDC_DEVCFG_ADDR 0x400
36 #define UDC_DEVCTL_ADDR 0x404
37 #define UDC_DEVSTS_ADDR 0x408
38 #define UDC_DEVIRQSTS_ADDR 0x40C
39 #define UDC_DEVIRQMSK_ADDR 0x410
40 #define UDC_EPIRQSTS_ADDR 0x414
41 #define UDC_EPIRQMSK_ADDR 0x418
42 #define UDC_DEVLPM_ADDR 0x41C
43 #define UDC_CSR_BUSY_ADDR 0x4f0
44 #define UDC_SRST_ADDR 0x4fc
45 #define UDC_CSR_ADDR 0x500
46
47
48
49 #define UDC_EPCTL_MRXFLUSH (1 << 12)
50 #define UDC_EPCTL_RRDY (1 << 9)
51 #define UDC_EPCTL_CNAK (1 << 8)
52 #define UDC_EPCTL_SNAK (1 << 7)
53 #define UDC_EPCTL_NAK (1 << 6)
54 #define UDC_EPCTL_P (1 << 3)
55 #define UDC_EPCTL_F (1 << 1)
56 #define UDC_EPCTL_S (1 << 0)
57 #define UDC_EPCTL_ET_SHIFT 4
58
59 #define UDC_EPCTL_ET_MASK 0x00000030
60
61 #define UDC_EPCTL_ET_CONTROL 0
62 #define UDC_EPCTL_ET_ISO 1
63 #define UDC_EPCTL_ET_BULK 2
64 #define UDC_EPCTL_ET_INTERRUPT 3
65
66
67
68 #define UDC_EPSTS_XFERDONE (1 << 27)
69 #define UDC_EPSTS_RSS (1 << 26)
70 #define UDC_EPSTS_RCS (1 << 25)
71 #define UDC_EPSTS_TXEMPTY (1 << 24)
72 #define UDC_EPSTS_TDC (1 << 10)
73 #define UDC_EPSTS_HE (1 << 9)
74 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
75 #define UDC_EPSTS_BNA (1 << 7)
76 #define UDC_EPSTS_IN (1 << 6)
77 #define UDC_EPSTS_OUT_SHIFT 4
78
79 #define UDC_EPSTS_OUT_MASK 0x00000030
80 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
81
82 #define UDC_EPSTS_OUT_SETUP 2
83 #define UDC_EPSTS_OUT_DATA 1
84
85
86
87 #define UDC_DEVCFG_CSR_PRG (1 << 17)
88 #define UDC_DEVCFG_SP (1 << 3)
89
90 #define UDC_DEVCFG_SPD_HS 0x0
91 #define UDC_DEVCFG_SPD_FS 0x1
92 #define UDC_DEVCFG_SPD_LS 0x2
93
94
95
96 #define UDC_DEVCTL_THLEN_SHIFT 24
97 #define UDC_DEVCTL_BRLEN_SHIFT 16
98 #define UDC_DEVCTL_CSR_DONE (1 << 13)
99 #define UDC_DEVCTL_SD (1 << 10)
100 #define UDC_DEVCTL_MODE (1 << 9)
101 #define UDC_DEVCTL_BREN (1 << 8)
102 #define UDC_DEVCTL_THE (1 << 7)
103 #define UDC_DEVCTL_DU (1 << 4)
104 #define UDC_DEVCTL_TDE (1 << 3)
105 #define UDC_DEVCTL_RDE (1 << 2)
106 #define UDC_DEVCTL_RES (1 << 0)
107
108
109
110 #define UDC_DEVSTS_TS_SHIFT 18
111 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
112 #define UDC_DEVSTS_ALT_SHIFT 8
113 #define UDC_DEVSTS_INTF_SHIFT 4
114 #define UDC_DEVSTS_CFG_SHIFT 0
115
116 #define UDC_DEVSTS_TS_MASK 0xfffc0000
117 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
118 #define UDC_DEVSTS_ALT_MASK 0x00000f00
119 #define UDC_DEVSTS_INTF_MASK 0x000000f0
120 #define UDC_DEVSTS_CFG_MASK 0x0000000f
121
122 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
123 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
124 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
125 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
126
127
128
129 #define UDC_DEVINT_RWKP (1 << 7)
130 #define UDC_DEVINT_ENUM (1 << 6)
131 #define UDC_DEVINT_SOF (1 << 5)
132 #define UDC_DEVINT_US (1 << 4)
133 #define UDC_DEVINT_UR (1 << 3)
134 #define UDC_DEVINT_ES (1 << 2)
135 #define UDC_DEVINT_SI (1 << 1)
136 #define UDC_DEVINT_SC (1 << 0)
137
138 #define UDC_DEVINT_MSK 0x7f
139
140
141
142 #define UDC_EPINT_IN_SHIFT 0
143 #define UDC_EPINT_OUT_SHIFT 16
144 #define UDC_EPINT_IN_EP0 (1 << 0)
145 #define UDC_EPINT_OUT_EP0 (1 << 16)
146
147 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
148
149
150
151 #define UDC_CSR_BUSY (1 << 0)
152
153
154
155 #define UDC_PSRST (1 << 1)
156 #define UDC_SRST (1 << 0)
157
158
159
160 #define UDC_CSR_NE_NUM_SHIFT 0
161 #define UDC_CSR_NE_DIR_SHIFT 4
162 #define UDC_CSR_NE_TYPE_SHIFT 5
163 #define UDC_CSR_NE_CFG_SHIFT 7
164 #define UDC_CSR_NE_INTF_SHIFT 11
165 #define UDC_CSR_NE_ALT_SHIFT 15
166 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
167
168 #define UDC_CSR_NE_NUM_MASK 0x0000000f
169 #define UDC_CSR_NE_DIR_MASK 0x00000010
170 #define UDC_CSR_NE_TYPE_MASK 0x00000060
171 #define UDC_CSR_NE_CFG_MASK 0x00000780
172 #define UDC_CSR_NE_INTF_MASK 0x00007800
173 #define UDC_CSR_NE_ALT_MASK 0x00078000
174 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
175
176 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
177 #define PCH_UDC_EPINT(in, num)\
178 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
179
180
181 #define UDC_EP0IN_IDX 0
182 #define UDC_EP0OUT_IDX 1
183 #define UDC_EPIN_IDX(ep) (ep * 2)
184 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
185 #define PCH_UDC_EP0 0
186 #define PCH_UDC_EP1 1
187 #define PCH_UDC_EP2 2
188 #define PCH_UDC_EP3 3
189
190
191 #define PCH_UDC_EP_NUM 32
192 #define PCH_UDC_USED_EP_NUM 4
193
194 #define PCH_UDC_BRLEN 0x0F
195 #define PCH_UDC_THLEN 0x1F
196
197 #define UDC_EP0IN_BUFF_SIZE 16
198 #define UDC_EPIN_BUFF_SIZE 256
199 #define UDC_EP0OUT_BUFF_SIZE 16
200 #define UDC_EPOUT_BUFF_SIZE 256
201
202 #define UDC_EP0IN_MAX_PKT_SIZE 64
203 #define UDC_EP0OUT_MAX_PKT_SIZE 64
204 #define UDC_BULK_MAX_PKT_SIZE 512
205
206
207 #define DMA_DIR_RX 1
208 #define DMA_DIR_TX 2
209 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
210 #define UDC_DMA_MAXPACKET 65536
211
212
213
214
215
216
217
218
219
/**
 * struct pch_udc_data_dma_desc - DMA descriptor for a data stage transfer.
 * @status:	status quadlet (buffer/RxTx status bits, byte count —
 *		see PCH_UDC_BUFF_STS / PCH_UDC_RXTX_* masks below)
 * @reserved:	reserved quadlet
 * @dataptr:	DMA address of the data buffer
 * @next:	DMA address of the next descriptor in the chain
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};
226
227
228
229
230
231
232
233
234
/**
 * struct pch_udc_stp_dma_desc - DMA descriptor holding a received SETUP packet.
 * @status:	status quadlet (same encoding as the data descriptor status)
 * @reserved:	reserved quadlet
 * @request:	the 8-byte USB SETUP request written by the controller
 */
struct pch_udc_stp_dma_desc {
	u32 status;
	u32 reserved;
	struct usb_ctrlrequest request;
} __attribute((packed));
240
241
242
243 #define PCH_UDC_BUFF_STS 0xC0000000
244 #define PCH_UDC_BS_HST_RDY 0x00000000
245 #define PCH_UDC_BS_DMA_BSY 0x40000000
246 #define PCH_UDC_BS_DMA_DONE 0x80000000
247 #define PCH_UDC_BS_HST_BSY 0xC0000000
248
249 #define PCH_UDC_RXTX_STS 0x30000000
250 #define PCH_UDC_RTS_SUCC 0x00000000
251 #define PCH_UDC_RTS_DESERR 0x10000000
252 #define PCH_UDC_RTS_BUFERR 0x30000000
253
254 #define PCH_UDC_DMA_LAST 0x08000000
255
256 #define PCH_UDC_RXTX_BYTES 0x0000ffff
257
258
259
260
261
262
263
264
/**
 * struct pch_udc_cfg_data - current device configuration state.
 * @cur_cfg:	current configuration value selected by the host
 * @cur_intf:	current interface number
 * @cur_alt:	current alternate setting
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
/**
 * struct pch_udc_ep - per-endpoint driver state.
 * @ep:			embedded gadget-framework endpoint
 * @td_stp_phys:	DMA address of the SETUP descriptor (ep0 out only)
 * @td_data_phys:	DMA address of the data descriptor
 * @td_stp:		CPU pointer to the SETUP descriptor
 * @td_data:		CPU pointer to the data descriptor
 * @dev:		back pointer to the owning device
 * @offset_addr:	register offset of this endpoint's register bank
 * @queue:		list of pending pch_udc_request
 * @num:		endpoint number (0..15)
 * @in:		1 for an IN endpoint, 0 for OUT
 * @halted:		endpoint is halted (requests complete immediately)
 * @epsts:		endpoint status captured by the ISR for deferred handling
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	struct list_head queue;
	unsigned num:5,
		 in:1,
		 halted:1;
	unsigned long epsts;
};
301
302
303
304
305
306
307
308
309
/**
 * struct pch_vbus_gpio_data - VBUS-sensing GPIO state.
 * @port:		GPIO number used to sense VBUS (0 = not used)
 * @intr:		IRQ number obtained for the GPIO (0 = polling only)
 * @irq_work_fall:	work scheduled when VBUS appears to fall
 * @irq_work_rise:	work scheduled when VBUS appears to rise
 */
struct pch_vbus_gpio_data {
	int port;
	int intr;
	struct work_struct irq_work_fall;
	struct work_struct irq_work_rise;
};
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/**
 * struct pch_udc_dev - the whole UDC device state.
 * @gadget:		embedded gadget-framework device
 * @driver:		bound gadget function driver (NULL when unbound)
 * @pdev:		underlying PCI device
 * @ep:			endpoint array, interleaved IN/OUT (see UDC_EPIN_IDX)
 * @lock:		protects endpoint queues and device state
 * @stall:		device-level stall requested
 * @prot_stall:		protocol stall requested
 * @suspended:		device is suspended
 * @connected:		pullup is active
 * @vbus_session:	VBUS session is active
 * @set_cfg_not_acked:	SET_CONFIGURATION not yet acknowledged via CSR_DONE
 * @waiting_zlp_ack:	waiting for a zero-length-packet acknowledgement
 * @data_requests:	dma_pool for data descriptors
 * @stp_requests:	dma_pool for SETUP descriptors
 * @dma_addr:		DMA address of the ep0 setup buffer
 * @setup_data:		last received SETUP request
 * @base_addr:		mapped register base
 * @cfg_data:		current configuration/interface/alt-setting
 * @vbus_gpio:		VBUS GPIO sensing state
 */
struct pch_udc_dev {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct pci_dev *pdev;
	struct pch_udc_ep ep[PCH_UDC_EP_NUM];
	spinlock_t lock;
	unsigned
			stall:1,
			prot_stall:1,
			suspended:1,
			connected:1,
			vbus_session:1,
			set_cfg_not_acked:1,
			waiting_zlp_ack:1;
	struct dma_pool *data_requests;
	struct dma_pool *stp_requests;
	dma_addr_t dma_addr;
	struct usb_ctrlrequest setup_data;
	void __iomem *base_addr;
	struct pch_udc_cfg_data cfg_data;
	struct pch_vbus_gpio_data vbus_gpio;
};
363 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
364
365 #define PCH_UDC_PCI_BAR_QUARK_X1000 0
366 #define PCH_UDC_PCI_BAR 1
367
368 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
369 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
370
371 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
372 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
373
static const char ep0_string[] = "ep0in";	/* gadget-visible name of ep0 IN */
static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
/* Module parameter: force full-speed operation instead of high-speed. */
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
/**
 * struct pch_udc_request - driver-private wrapper around a usb_request.
 * @req:		embedded gadget-framework request
 * @td_data_phys:	DMA address of the first descriptor of the chain
 * @td_data:		first descriptor of the DMA chain
 * @td_data_last:	last descriptor of the DMA chain
 * @queue:		link on the owning endpoint's queue
 * @dma_going:		DMA transfer is in progress
 * @dma_mapped:		buffer was DMA-mapped by this driver (must unmap)
 * @dma_done:		DMA transfer has completed
 * @chain_len:		number of descriptors in the chain
 * @buf:		driver-allocated bounce buffer (when req.buf unusable)
 * @dma:		DMA address of @buf, or DMA_ADDR_INVALID
 */
struct pch_udc_request {
	struct usb_request req;
	dma_addr_t td_data_phys;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_data_dma_desc *td_data_last;
	struct list_head queue;
	unsigned dma_going:1,
		 dma_mapped:1,
		 dma_done:1;
	unsigned chain_len;
	void *buf;
	dma_addr_t dma;
};
407
/* Read a 32-bit device register at byte offset @reg from the mapped base. */
static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}
412
/* Write 32-bit @val to the device register at byte offset @reg. */
static inline void pch_udc_writel(struct pch_udc_dev *dev,
				  unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}
418
/* Read-modify-write: set @bitmask bits in device register @reg. */
static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}
425
/* Read-modify-write: clear @bitmask bits in device register @reg. */
static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}
432
/* Read a 32-bit endpoint register at offset @reg within @ep's register bank. */
static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}
437
/* Write 32-bit @val to the endpoint register at offset @reg in @ep's bank. */
static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				     unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}
443
/* Read-modify-write: set @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}
450
/* Read-modify-write: clear @bitmask bits in endpoint register @reg. */
static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
457
458
459
460
461
/**
 * pch_udc_csr_busy() - Spin until the CSR interface is no longer busy.
 * @dev:	reference to the UDC device
 *
 * Polls the CSR busy flag up to 200 times; logs an error on timeout.
 */
static void pch_udc_csr_busy(struct pch_udc_dev *dev)
{
	unsigned int count = 200;

	/* Wait till idle */
	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
		&& --count)
		cpu_relax();
	if (!count)
		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
}
473
474
475
476
477
478
479
/**
 * pch_udc_write_csr() - Write @val to the CSR register of endpoint @ep.
 * @dev:	reference to the UDC device
 * @val:	value to write
 * @ep:		endpoint CSR index (see UDC_EPIN_IDX/UDC_EPOUT_IDX)
 *
 * The CSR interface requires waiting for the busy flag both before and
 * after the access.
 */
static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
			      unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_writel(dev, val, reg);
	pch_udc_csr_busy(dev);		/* Wait till idle */
}
489
490
491
492
493
494
495
496
/**
 * pch_udc_read_csr() - Read the CSR register of endpoint @ep.
 * @dev:	reference to the UDC device
 * @ep:		endpoint CSR index
 *
 * A dummy read is required by the hardware before the real read; busy
 * waits bracket each access.
 *
 * Return: the CSR register contents.
 */
static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
{
	unsigned long reg = PCH_UDC_CSR(ep);

	pch_udc_csr_busy(dev);		/* Wait till idle */
	pch_udc_readl(dev, reg);	/* Dummy read */
	pch_udc_csr_busy(dev);		/* Wait till idle */
	return pch_udc_readl(dev, reg);
}
506
507
508
509
510
/**
 * pch_udc_rmt_wakeup() - Initiate remote wakeup signalling.
 * @dev:	reference to the UDC device
 *
 * Pulses the RES (resume) bit for ~1 ms.
 */
static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	mdelay(1);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
517
518
519
520
521
522
/**
 * pch_udc_get_frame() - Read the current USB frame number.
 * @dev:	reference to the UDC device
 *
 * Return: the frame number extracted from the device status register.
 */
static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
{
	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
}
528
529
530
531
532
/* Clear the self-powered bit in the device configuration register. */
static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
537
538
539
540
541
/* Set the self-powered bit in the device configuration register. */
static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
}
546
547
548
549
550
/* Assert soft-disconnect: the device drops off the bus. */
static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
}
555
556
557
558
559
/**
 * pch_udc_clear_disconnect() - Deassert soft-disconnect (reconnect to bus).
 * @dev:	reference to the UDC device
 *
 * Asserts resume signalling while clearing soft-disconnect, then releases
 * resume after ~1 ms.
 */
static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
{
	/* Assert resume while the disconnect bit is cleared */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
569
570
571
572
573
574
static void pch_udc_init(struct pch_udc_dev *dev);

/**
 * pch_udc_reconnect() - Fully reinitialize the controller and reconnect.
 * @dev:	reference to the UDC device
 *
 * Re-runs the hardware init sequence, re-enables the reset and enumeration
 * interrupts, then clears soft-disconnect so the host re-enumerates us.
 */
static void pch_udc_reconnect(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);

	/* enable device interrupts */
	/* pch_udc_enable_interrupts() */
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
			UDC_DEVINT_UR | UDC_DEVINT_ENUM);

	/* Clear the disconnect: assert resume while dropping SD */
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
	mdelay(1);
	/* Resume USB signalling */
	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
}
592
593
594
595
596
597
598
599
/**
 * pch_udc_vbus_session() - Handle a VBUS session change.
 * @dev:	reference to the UDC device
 * @is_active:	non-zero when VBUS became active
 *
 * On activation, reconnects to the bus. On deactivation, notifies the
 * gadget driver (under the device lock) and asserts soft-disconnect.
 */
static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
					int is_active)
{
	if (is_active) {
		pch_udc_reconnect(dev);
		dev->vbus_session = 1;
	} else {
		if (dev->driver && dev->driver->disconnect) {
			spin_lock(&dev->lock);
			dev->driver->disconnect(&dev->gadget);
			spin_unlock(&dev->lock);
		}
		pch_udc_set_disconnect(dev);
		dev->vbus_session = 0;
	}
}
616
617
618
619
620
/**
 * pch_udc_ep_set_stall() - Stall the endpoint.
 * @ep:	reference to the endpoint
 *
 * For an IN endpoint the TX FIFO is flushed before setting the stall bit.
 */
static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
{
	if (ep->in) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	} else {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	}
}
630
631
632
633
634
/**
 * pch_udc_ep_clear_stall() - Clear the endpoint stall condition.
 * @ep:	reference to the endpoint
 */
static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
{
	/* Clear the stall */
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
	/* Clear NAK by writing CNAK */
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
}
642
643
644
645
646
647
/**
 * pch_udc_ep_set_trfr_type() - Program the endpoint transfer type.
 * @ep:		reference to the endpoint
 * @type:	UDC_EPCTL_ET_* transfer type (only the low bits are kept)
 */
static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
					    u8 type)
{
	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
			  UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
}
654
655
656
657
658
659
/**
 * pch_udc_ep_set_bufsz() - Program the endpoint buffer size.
 * @ep:		reference to the endpoint
 * @buf_size:	buffer size in 32-bit words
 * @ep_in:	non-zero for an IN endpoint
 *
 * IN size lives in the low half of BUFIN_FRAMENUM; OUT size in the high
 * half of BUFOUT_MAXPKT.
 */
static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
				 u32 buf_size, u32 ep_in)
{
	u32 data;
	if (ep_in) {
		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
		data = (data & 0xffff0000) | (buf_size & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
	} else {
		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
		data = (buf_size << 16) | (data & 0xffff);
		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
	}
}
674
675
676
677
678
679
/**
 * pch_udc_ep_set_maxpkt() - Program the endpoint maximum packet size.
 * @ep:		reference to the endpoint
 * @pkt_size:	max packet size in bytes (low half of BUFOUT_MAXPKT)
 */
static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
{
	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
	data = (data & 0xffff0000) | (pkt_size & 0xffff);
	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
}
686
687
688
689
690
691
/* Program the SETUP buffer descriptor DMA address for @ep. */
static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
}
696
697
698
699
700
701
/* Program the data descriptor DMA address for @ep. */
static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
{
	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
}
706
707
708
709
710
/* Set the poll-demand bit: ask the controller to start an IN transfer. */
static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
}
715
716
717
718
719
/* Set receive-ready: the endpoint may accept OUT data via DMA. */
static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
724
725
726
727
728
/* Clear receive-ready: stop accepting OUT data on this endpoint. */
static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
}
733
734
735
736
737
738
739
740
741
742 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
743 {
744 if (dir == DMA_DIR_RX)
745 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
746 else if (dir == DMA_DIR_TX)
747 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
748 }
749
750
751
752
753
754
755
756
757
758 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
759 {
760 if (dir == DMA_DIR_RX)
761 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
762 else if (dir == DMA_DIR_TX)
763 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
764 }
765
766
767
768
769
770
/* Signal CSR_DONE: acknowledge completion of CSR programming to hardware. */
static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
{
	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
}
775
776
777
778
779
780
/* Mask (disable) the device interrupts selected by @mask. */
static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
					      u32 mask)
{
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
}
786
787
788
789
790
791
/* Unmask (enable) the device interrupts selected by @mask. */
static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
					     u32 mask)
{
	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
}
797
798
799
800
801
802
/* Mask (disable) the endpoint interrupts selected by @mask. */
static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
						 u32 mask)
{
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
}
808
809
810
811
812
813
/* Unmask (enable) the endpoint interrupts selected by @mask. */
static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
						u32 mask)
{
	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
}
819
820
821
822
823
824
/* Read the pending device interrupt status bits. */
static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
}
829
830
831
832
833
834
/* Acknowledge (write-to-clear) device interrupt status bits in @val. */
static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
						   u32 val)
{
	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
}
840
841
842
843
844
845
/* Read the pending endpoint interrupt status bits. */
static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
}
850
851
852
853
854
855
/* Acknowledge (write-to-clear) endpoint interrupt status bits in @val. */
static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
					       u32 val)
{
	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
}
861
862
863
864
865
866
/* Read the device status register (speed, config, intf, alt, frame). */
static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
{
	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
}
871
872
873
874
875
876
/* Read the endpoint control register of @ep. */
static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
}
881
882
883
884
885
886
887 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
888 {
889 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
890 }
891
892
893
894
895
896
/* Read the endpoint status register of @ep. */
static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
{
	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
}
901
902
903
904
905
906
907 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
908 u32 stat)
909 {
910 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
911 }
912
913
914
915
916
917
/* Set NAK on @ep: the endpoint NAKs all tokens from the host. */
static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
{
	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
}
922
923
924
925
926
927
/**
 * pch_udc_ep_clear_nak() - Clear the NAK condition on @ep.
 * @ep:	reference to the endpoint
 *
 * For an OUT endpoint, first waits for the RX FIFO to drain; then
 * repeatedly writes CNAK until the hardware reports NAK cleared.
 * Errors are logged on timeout; the function returns regardless.
 */
static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
{
	unsigned int loopcnt = 0;
	struct pch_udc_dev *dev = ep->dev;

	/* Nothing to do if NAK is not currently set */
	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
		return;
	if (!ep->in) {
		loopcnt = 10000;
		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
			--loopcnt)
			udelay(5);
		if (!loopcnt)
			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
				__func__);
	}
	loopcnt = 10000;
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
		udelay(5);
	}
	if (!loopcnt)
		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
			__func__, ep->num, (ep->in ? "in" : "out"));
}
953
954
955
956
957
958
959
960
/**
 * pch_udc_ep_fifo_flush() - Flush the endpoint FIFO.
 * @ep:		reference to the endpoint
 * @dir:	non-zero for IN direction; OUT flush is a no-op here
 */
static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
{
	if (dir) {	/* IN endpoint: flush the TX FIFO */
		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
		return;
	}
}
968
969
970
971
972
973
/**
 * pch_udc_ep_enable() - Program an endpoint from its USB descriptor.
 * @ep:		reference to the endpoint
 * @cfg:	current configuration/interface/alt-setting
 * @desc:	the endpoint descriptor to program
 *
 * Sets transfer type, buffer size, max packet size, NAKs the endpoint,
 * flushes its FIFO, and writes the composed UDC_CSR_NE_* word to the
 * endpoint's CSR slot.
 */
static void pch_udc_ep_enable(struct pch_udc_ep *ep,
			      struct pch_udc_cfg_data *cfg,
			      const struct usb_endpoint_descriptor *desc)
{
	u32 val = 0;
	u32 buff_size = 0;

	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
	if (ep->in)
		buff_size = UDC_EPIN_BUFF_SIZE;
	else
		buff_size = UDC_EPOUT_BUFF_SIZE;
	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
	pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
	pch_udc_ep_set_nak(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	/* Compose the endpoint's CSR NE (endpoint information) word */
	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
		UDC_CSR_NE_TYPE_SHIFT) |
	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
	      usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;

	if (ep->in)
		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
	else
		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
}
1004
1005
1006
1007
1008
/**
 * pch_udc_ep_disable() - Quiesce an endpoint.
 * @ep:	reference to the endpoint
 *
 * Flushes (IN only), sets NAK, clears the IN status bit, and zeroes the
 * descriptor pointer so no further DMA is started.
 */
static void pch_udc_ep_disable(struct pch_udc_ep *ep)
{
	if (ep->in) {
		/* flush the TX fifo */
		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
	} else {
		/* set NAK */
		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
	}
	/* reset desc pointer */
	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
}
1024
1025
1026
1027
1028
/**
 * pch_udc_wait_ep_stall() - Busy-wait until the endpoint stall bit clears.
 * @ep:	reference to the endpoint
 *
 * Polls up to 10000 * 5 us; logs an error on timeout.
 */
static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
{
	unsigned int count = 10000;

	/* Wait till idle */
	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
		udelay(5);
	if (!count)
		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
}
1039
1040
1041
1042
1043
1044 static void pch_udc_init(struct pch_udc_dev *dev)
1045 {
1046 if (NULL == dev) {
1047 pr_err("%s: Invalid address\n", __func__);
1048 return;
1049 }
1050
1051 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1052 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1053 mdelay(1);
1054 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1055 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1056 mdelay(1);
1057
1058 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1059 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1060
1061
1062 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1063 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1064
1065
1066 if (speed_fs)
1067 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1068 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1069 else
1070 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1071 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1072 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1073 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1074 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1075 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1076 UDC_DEVCTL_THE);
1077 }
1078
1079
1080
1081
1082
/**
 * pch_udc_exit() - Quiesce the controller on teardown.
 * @dev:	reference to the UDC device
 *
 * Masks all interrupts and asserts soft-disconnect.
 */
static void pch_udc_exit(struct pch_udc_dev *dev)
{
	/* mask all device interrupts */
	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
	/* mask all ep interrupts */
	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
	/* put device in disconnected state */
	pch_udc_set_disconnect(dev);
}
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1102 {
1103 struct pch_udc_dev *dev;
1104
1105 if (!gadget)
1106 return -EINVAL;
1107 dev = container_of(gadget, struct pch_udc_dev, gadget);
1108 return pch_udc_get_frame(dev);
1109 }
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1120 {
1121 struct pch_udc_dev *dev;
1122 unsigned long flags;
1123
1124 if (!gadget)
1125 return -EINVAL;
1126 dev = container_of(gadget, struct pch_udc_dev, gadget);
1127 spin_lock_irqsave(&dev->lock, flags);
1128 pch_udc_rmt_wakeup(dev);
1129 spin_unlock_irqrestore(&dev->lock, flags);
1130 return 0;
1131 }
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1144 {
1145 struct pch_udc_dev *dev;
1146
1147 if (!gadget)
1148 return -EINVAL;
1149 gadget->is_selfpowered = (value != 0);
1150 dev = container_of(gadget, struct pch_udc_dev, gadget);
1151 if (value)
1152 pch_udc_set_selfpowered(dev);
1153 else
1154 pch_udc_clear_selfpowered(dev);
1155 return 0;
1156 }
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1169 {
1170 struct pch_udc_dev *dev;
1171
1172 if (!gadget)
1173 return -EINVAL;
1174 dev = container_of(gadget, struct pch_udc_dev, gadget);
1175 if (is_on) {
1176 pch_udc_reconnect(dev);
1177 } else {
1178 if (dev->driver && dev->driver->disconnect) {
1179 spin_lock(&dev->lock);
1180 dev->driver->disconnect(&dev->gadget);
1181 spin_unlock(&dev->lock);
1182 }
1183 pch_udc_set_disconnect(dev);
1184 }
1185
1186 return 0;
1187 }
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1201 {
1202 struct pch_udc_dev *dev;
1203
1204 if (!gadget)
1205 return -EINVAL;
1206 dev = container_of(gadget, struct pch_udc_dev, gadget);
1207 pch_udc_vbus_session(dev, is_active);
1208 return 0;
1209 }
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
/**
 * pch_udc_pcd_vbus_draw() - gadget-ops vbus_draw callback.
 * @gadget:	gadget driver handle
 * @mA:		requested current draw in mA
 *
 * This controller cannot limit VBUS current draw.
 *
 * Return: always -EOPNOTSUPP.
 */
static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	return -EOPNOTSUPP;
}
1226
static int pch_udc_start(struct usb_gadget *g,
			 struct usb_gadget_driver *driver);
static int pch_udc_stop(struct usb_gadget *g);

/* Gadget-framework operations table registered with the UDC core. */
static const struct usb_gadget_ops pch_udc_ops = {
	.get_frame = pch_udc_pcd_get_frame,
	.wakeup = pch_udc_pcd_wakeup,
	.set_selfpowered = pch_udc_pcd_selfpowered,
	.pullup = pch_udc_pcd_pullup,
	.vbus_session = pch_udc_pcd_vbus_session,
	.vbus_draw = pch_udc_pcd_vbus_draw,
	.udc_start = pch_udc_start,
	.udc_stop = pch_udc_stop,
};
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1252 {
1253 int vbus = 0;
1254
1255 if (dev->vbus_gpio.port)
1256 vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
1257 else
1258 vbus = -1;
1259
1260 return vbus;
1261 }
1262
1263
1264
1265
1266
1267
1268
1269 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1270 {
1271 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1272 struct pch_vbus_gpio_data, irq_work_fall);
1273 struct pch_udc_dev *dev =
1274 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1275 int vbus_saved = -1;
1276 int vbus;
1277 int count;
1278
1279 if (!dev->vbus_gpio.port)
1280 return;
1281
1282 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1283 count++) {
1284 vbus = pch_vbus_gpio_get_value(dev);
1285
1286 if ((vbus_saved == vbus) && (vbus == 0)) {
1287 dev_dbg(&dev->pdev->dev, "VBUS fell");
1288 if (dev->driver
1289 && dev->driver->disconnect) {
1290 dev->driver->disconnect(
1291 &dev->gadget);
1292 }
1293 if (dev->vbus_gpio.intr)
1294 pch_udc_init(dev);
1295 else
1296 pch_udc_reconnect(dev);
1297 return;
1298 }
1299 vbus_saved = vbus;
1300 mdelay(PCH_VBUS_INTERVAL);
1301 }
1302 }
1303
1304
1305
1306
1307
1308
1309
1310 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1311 {
1312 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1313 struct pch_vbus_gpio_data, irq_work_rise);
1314 struct pch_udc_dev *dev =
1315 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1316 int vbus;
1317
1318 if (!dev->vbus_gpio.port)
1319 return;
1320
1321 mdelay(PCH_VBUS_INTERVAL);
1322 vbus = pch_vbus_gpio_get_value(dev);
1323
1324 if (vbus == 1) {
1325 dev_dbg(&dev->pdev->dev, "VBUS rose");
1326 pch_udc_reconnect(dev);
1327 return;
1328 }
1329 }
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1341 {
1342 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1343
1344 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1345 return IRQ_NONE;
1346
1347 if (pch_vbus_gpio_get_value(dev))
1348 schedule_work(&dev->vbus_gpio.irq_work_rise);
1349 else
1350 schedule_work(&dev->vbus_gpio.irq_work_fall);
1351
1352 return IRQ_HANDLED;
1353 }
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
/**
 * pch_vbus_gpio_init() - Claim the VBUS-sensing GPIO and optionally its IRQ.
 * @dev:		reference to the UDC device
 * @vbus_gpio_port:	GPIO number to use, or negative to disable
 *
 * On success the GPIO is configured as input and the falling-edge worker
 * is set up; if an IRQ can be obtained, edge interrupts and the rising
 * worker are also set up. If the IRQ cannot be requested the driver falls
 * back to polling mode (still returns 0).
 *
 * Return: 0 on success (or when running IRQ-less), -EINVAL on bad/busy GPIO.
 */
static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
{
	int err;
	int irq_num = 0;

	dev->vbus_gpio.port = 0;
	dev->vbus_gpio.intr = 0;

	if (vbus_gpio_port <= -1)
		return -EINVAL;

	err = gpio_is_valid(vbus_gpio_port);
	if (!err) {
		pr_err("%s: gpio port %d is invalid\n",
			__func__, vbus_gpio_port);
		return -EINVAL;
	}

	err = gpio_request(vbus_gpio_port, "pch_vbus");
	if (err) {
		pr_err("%s: can't request gpio port %d, err: %d\n",
			__func__, vbus_gpio_port, err);
		return -EINVAL;
	}

	dev->vbus_gpio.port = vbus_gpio_port;
	gpio_direction_input(vbus_gpio_port);
	INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);

	irq_num = gpio_to_irq(vbus_gpio_port);
	if (irq_num > 0) {
		irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
		err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
				  "vbus_detect", dev);
		if (!err) {
			dev->vbus_gpio.intr = irq_num;
			INIT_WORK(&dev->vbus_gpio.irq_work_rise,
				  pch_vbus_gpio_work_rise);
		} else {
			/* IRQ unavailable: fall back to polling mode */
			pr_err("%s: can't request irq %d, err: %d\n",
				__func__, irq_num, err);
		}
	}

	return 0;
}
1410
1411
1412
1413
1414
/**
 * pch_vbus_gpio_free() - Release the VBUS GPIO and its IRQ, if claimed.
 * @dev:	reference to the UDC device
 */
static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
{
	if (dev->vbus_gpio.intr)
		free_irq(dev->vbus_gpio.intr, dev);

	if (dev->vbus_gpio.port)
		gpio_free(dev->vbus_gpio.port);
}
1423
1424
1425
1426
1427
1428
1429
1430
/**
 * complete_req() - Retire a request and give it back to the gadget driver.
 * @ep:		endpoint the request was queued on
 * @req:	request to complete
 * @status:	completion status; applied only if the request is still
 *		marked -EINPROGRESS, otherwise the existing status wins
 *
 * Removes the request from the endpoint queue, undoes whichever DMA
 * mapping was made for it (the caller's buffer mapped directly, or the
 * driver's aligned bounce buffer), and invokes the completion callback
 * with dev->lock temporarily dropped.
 */
static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
			 int status)
__releases(&dev->lock)
__acquires(&dev->lock)
{
	struct pch_udc_dev *dev;
	unsigned halted = ep->halted;

	list_del_init(&req->queue);

	/* Keep a completion status that was already set (e.g. -ECONNRESET) */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->dma_mapped) {
		if (req->dma == DMA_ADDR_INVALID) {
			/* The caller's buffer was mapped directly */
			if (ep->in)
				dma_unmap_single(&dev->pdev->dev, req->req.dma,
						 req->req.length,
						 DMA_TO_DEVICE);
			else
				dma_unmap_single(&dev->pdev->dev, req->req.dma,
						 req->req.length,
						 DMA_FROM_DEVICE);
			req->req.dma = DMA_ADDR_INVALID;
		} else {
			/* A driver-owned bounce buffer was used (buffer was
			 * not 4-byte aligned at queue time) */
			if (ep->in)
				dma_unmap_single(&dev->pdev->dev, req->dma,
						 req->req.length,
						 DMA_TO_DEVICE);
			else {
				dma_unmap_single(&dev->pdev->dev, req->dma,
						 req->req.length,
						 DMA_FROM_DEVICE);
				/* OUT: copy received data back to the caller */
				memcpy(req->req.buf, req->buf, req->req.length);
			}
			kfree(req->buf);
			req->dma = DMA_ADDR_INVALID;
		}
		req->dma_mapped = 0;
	}
	ep->halted = 1;
	/* Drop the lock across the gadget callback; it may re-queue */
	spin_unlock(&dev->lock);
	if (!ep->in)
		pch_udc_ep_clear_rrdy(ep);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
1483
1484
1485
1486
1487
1488 static void empty_req_queue(struct pch_udc_ep *ep)
1489 {
1490 struct pch_udc_request *req;
1491
1492 ep->halted = 1;
1493 while (!list_empty(&ep->queue)) {
1494 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1495 complete_req(ep, req, -ESHUTDOWN);
1496 }
1497 }
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
/**
 * pch_udc_free_dma_chain() - Free all but the head descriptor of a chain.
 * @dev:	device instance
 * @req:	request whose descriptor chain is torn down
 *
 * Walks the chain through the hardware 'next' pointers, returning each
 * trailing descriptor to the DMA pool.  The head (req->td_data) is kept
 * for reuse and chain_len is reset to 1.
 *
 * NOTE(review): phys_to_virt() on a dma_pool address assumes a 1:1
 * DMA/physical mapping — confirm this holds on IOMMU-equipped setups.
 */
static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
				   struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td = req->td_data;
	unsigned i = req->chain_len;

	dma_addr_t addr2;
	dma_addr_t addr = (dma_addr_t)td->next;
	/* Detach the chain from the head before freeing its members */
	td->next = 0x00;
	for (; i > 1; --i) {
		/* 'addr' is the bus address the descriptor was allocated at */
		td = phys_to_virt(addr);
		addr2 = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr2;
	}
	req->chain_len = 1;
}
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
/**
 * pch_udc_create_dma_chain() - Build a descriptor chain for a request.
 * @ep:		endpoint the request belongs to
 * @req:	request to build the chain for
 * @buf_len:	payload size per descriptor (the endpoint's max packet)
 * @gfp_flags:	allocation flags for extra descriptors
 *
 * Splits req->req.length bytes into buf_len-sized chunks, one DMA
 * descriptor each.  The head descriptor is req->td_data (preallocated);
 * additional descriptors come from the device's DMA pool.  The last
 * descriptor is marked PCH_UDC_DMA_LAST and its 'next' wraps back to
 * the head's bus address.
 *
 * Return: 0 on success, -ENOMEM if a descriptor allocation fails (any
 * partially built chain is freed and chain_len reset to 1).
 */
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
				    struct pch_udc_request *req,
				    unsigned long buf_len,
				    gfp_t gfp_flags)
{
	struct pch_udc_data_dma_desc *td = req->td_data, *last;
	unsigned long bytes = req->req.length, i = 0;
	dma_addr_t dma_addr;
	unsigned len = 1;

	/* Drop any chain left over from a previous use of this request */
	if (req->chain_len > 1)
		pch_udc_free_dma_chain(ep->dev, req);

	/* Source address: caller's mapped buffer or the bounce buffer */
	if (req->dma == DMA_ADDR_INVALID)
		td->dataptr = req->req.dma;
	else
		td->dataptr = req->dma;

	td->status = PCH_UDC_BS_HST_BSY;
	for (; ; bytes -= buf_len, ++len) {
		/* Chunk size is encoded in the low bits of 'status' */
		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
		if (bytes <= buf_len)
			break;
		last = td;
		td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
		if (!td)
			goto nomem;
		i += buf_len;
		/* Each chunk advances through the same contiguous buffer */
		td->dataptr = req->td_data->dataptr + i;
		last->next = dma_addr;
	}

	req->td_data_last = td;
	td->status |= PCH_UDC_DMA_LAST;
	/* Close the ring: last descriptor points back at the head */
	td->next = req->td_data_phys;
	req->chain_len = len;
	return 0;

nomem:
	if (len > 1) {
		/* Record how far we got so the partial chain can be freed */
		req->chain_len = len;
		pch_udc_free_dma_chain(ep->dev, req);
	}
	req->chain_len = 1;
	return -ENOMEM;
}
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1599 gfp_t gfp)
1600 {
1601 int retval;
1602
1603
1604 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1605 if (retval) {
1606 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1607 return retval;
1608 }
1609 if (ep->in)
1610 req->td_data->status = (req->td_data->status &
1611 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1612 return 0;
1613 }
1614
1615
1616
1617
1618
1619
1620
1621 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1622 {
1623 struct pch_udc_dev *dev = ep->dev;
1624
1625
1626 complete_req(ep, req, 0);
1627
1628
1629
1630
1631 if (dev->set_cfg_not_acked) {
1632 pch_udc_set_csr_done(dev);
1633 dev->set_cfg_not_acked = 0;
1634 }
1635
1636 if (!dev->stall && dev->waiting_zlp_ack) {
1637 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1638 dev->waiting_zlp_ack = 0;
1639 }
1640 }
1641
1642
1643
1644
1645
1646
/**
 * pch_udc_start_rxrequest() - Start receive on an OUT endpoint.
 * @ep:		OUT endpoint to start
 * @req:	request whose descriptor chain receives the data
 *
 * Marks every descriptor in the chain host-ready, programs the chain's
 * head into the endpoint's data descriptor pointer, then re-enables RX
 * DMA, un-NAKs the endpoint and sets receive-ready.
 */
static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
				    struct pch_udc_request *req)
{
	struct pch_udc_data_dma_desc *td_data;

	/* Stop RX DMA while the descriptors are being rewritten */
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	td_data = req->td_data;

	/* Hand the whole chain to the hardware (host-ready) */
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}

	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	req->dma_going = 1;
	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_clear_nak(ep);
	pch_udc_ep_set_rrdy(ep);
}
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1683 const struct usb_endpoint_descriptor *desc)
1684 {
1685 struct pch_udc_ep *ep;
1686 struct pch_udc_dev *dev;
1687 unsigned long iflags;
1688
1689 if (!usbep || (usbep->name == ep0_string) || !desc ||
1690 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1691 return -EINVAL;
1692
1693 ep = container_of(usbep, struct pch_udc_ep, ep);
1694 dev = ep->dev;
1695 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1696 return -ESHUTDOWN;
1697 spin_lock_irqsave(&dev->lock, iflags);
1698 ep->ep.desc = desc;
1699 ep->halted = 0;
1700 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1701 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1702 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1703 spin_unlock_irqrestore(&dev->lock, iflags);
1704 return 0;
1705 }
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1717 {
1718 struct pch_udc_ep *ep;
1719 unsigned long iflags;
1720
1721 if (!usbep)
1722 return -EINVAL;
1723
1724 ep = container_of(usbep, struct pch_udc_ep, ep);
1725 if ((usbep->name == ep0_string) || !ep->ep.desc)
1726 return -EINVAL;
1727
1728 spin_lock_irqsave(&ep->dev->lock, iflags);
1729 empty_req_queue(ep);
1730 ep->halted = 1;
1731 pch_udc_ep_disable(ep);
1732 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1733 ep->ep.desc = NULL;
1734 INIT_LIST_HEAD(&ep->queue);
1735 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1736 return 0;
1737 }
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1750 gfp_t gfp)
1751 {
1752 struct pch_udc_request *req;
1753 struct pch_udc_ep *ep;
1754 struct pch_udc_data_dma_desc *dma_desc;
1755
1756 if (!usbep)
1757 return NULL;
1758 ep = container_of(usbep, struct pch_udc_ep, ep);
1759 req = kzalloc(sizeof *req, gfp);
1760 if (!req)
1761 return NULL;
1762 req->req.dma = DMA_ADDR_INVALID;
1763 req->dma = DMA_ADDR_INVALID;
1764 INIT_LIST_HEAD(&req->queue);
1765 if (!ep->dev->dma_addr)
1766 return &req->req;
1767
1768 dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1769 &req->td_data_phys);
1770 if (NULL == dma_desc) {
1771 kfree(req);
1772 return NULL;
1773 }
1774
1775 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1776 dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
1777 req->td_data = dma_desc;
1778 req->td_data_last = dma_desc;
1779 req->chain_len = 1;
1780 return &req->req;
1781 }
1782
1783
1784
1785
1786
1787
1788
1789 static void pch_udc_free_request(struct usb_ep *usbep,
1790 struct usb_request *usbreq)
1791 {
1792 struct pch_udc_ep *ep;
1793 struct pch_udc_request *req;
1794 struct pch_udc_dev *dev;
1795
1796 if (!usbep || !usbreq)
1797 return;
1798 ep = container_of(usbep, struct pch_udc_ep, ep);
1799 req = container_of(usbreq, struct pch_udc_request, req);
1800 dev = ep->dev;
1801 if (!list_empty(&req->queue))
1802 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1803 __func__, usbep->name, req);
1804 if (req->td_data != NULL) {
1805 if (req->chain_len > 1)
1806 pch_udc_free_dma_chain(ep->dev, req);
1807 dma_pool_free(ep->dev->data_requests, req->td_data,
1808 req->td_data_phys);
1809 }
1810 kfree(req);
1811 }
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1825 gfp_t gfp)
1826 {
1827 int retval = 0;
1828 struct pch_udc_ep *ep;
1829 struct pch_udc_dev *dev;
1830 struct pch_udc_request *req;
1831 unsigned long iflags;
1832
1833 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1834 return -EINVAL;
1835 ep = container_of(usbep, struct pch_udc_ep, ep);
1836 dev = ep->dev;
1837 if (!ep->ep.desc && ep->num)
1838 return -EINVAL;
1839 req = container_of(usbreq, struct pch_udc_request, req);
1840 if (!list_empty(&req->queue))
1841 return -EINVAL;
1842 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1843 return -ESHUTDOWN;
1844 spin_lock_irqsave(&dev->lock, iflags);
1845
1846 if (usbreq->length &&
1847 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1848 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1849 if (ep->in)
1850 usbreq->dma = dma_map_single(&dev->pdev->dev,
1851 usbreq->buf,
1852 usbreq->length,
1853 DMA_TO_DEVICE);
1854 else
1855 usbreq->dma = dma_map_single(&dev->pdev->dev,
1856 usbreq->buf,
1857 usbreq->length,
1858 DMA_FROM_DEVICE);
1859 } else {
1860 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1861 if (!req->buf) {
1862 retval = -ENOMEM;
1863 goto probe_end;
1864 }
1865 if (ep->in) {
1866 memcpy(req->buf, usbreq->buf, usbreq->length);
1867 req->dma = dma_map_single(&dev->pdev->dev,
1868 req->buf,
1869 usbreq->length,
1870 DMA_TO_DEVICE);
1871 } else
1872 req->dma = dma_map_single(&dev->pdev->dev,
1873 req->buf,
1874 usbreq->length,
1875 DMA_FROM_DEVICE);
1876 }
1877 req->dma_mapped = 1;
1878 }
1879 if (usbreq->length > 0) {
1880 retval = prepare_dma(ep, req, GFP_ATOMIC);
1881 if (retval)
1882 goto probe_end;
1883 }
1884 usbreq->actual = 0;
1885 usbreq->status = -EINPROGRESS;
1886 req->dma_done = 0;
1887 if (list_empty(&ep->queue) && !ep->halted) {
1888
1889 if (!usbreq->length) {
1890 process_zlp(ep, req);
1891 retval = 0;
1892 goto probe_end;
1893 }
1894 if (!ep->in) {
1895 pch_udc_start_rxrequest(ep, req);
1896 } else {
1897
1898
1899
1900
1901
1902 pch_udc_wait_ep_stall(ep);
1903 pch_udc_ep_clear_nak(ep);
1904 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1905 }
1906 }
1907
1908 if (req != NULL)
1909 list_add_tail(&req->queue, &ep->queue);
1910
1911 probe_end:
1912 spin_unlock_irqrestore(&dev->lock, iflags);
1913 return retval;
1914 }
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1927 struct usb_request *usbreq)
1928 {
1929 struct pch_udc_ep *ep;
1930 struct pch_udc_request *req;
1931 unsigned long flags;
1932 int ret = -EINVAL;
1933
1934 ep = container_of(usbep, struct pch_udc_ep, ep);
1935 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1936 return ret;
1937 req = container_of(usbreq, struct pch_udc_request, req);
1938 spin_lock_irqsave(&ep->dev->lock, flags);
1939
1940 list_for_each_entry(req, &ep->queue, queue) {
1941 if (&req->req == usbreq) {
1942 pch_udc_ep_set_nak(ep);
1943 if (!list_empty(&req->queue))
1944 complete_req(ep, req, -ECONNRESET);
1945 ret = 0;
1946 break;
1947 }
1948 }
1949 spin_unlock_irqrestore(&ep->dev->lock, flags);
1950 return ret;
1951 }
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1964 {
1965 struct pch_udc_ep *ep;
1966 unsigned long iflags;
1967 int ret;
1968
1969 if (!usbep)
1970 return -EINVAL;
1971 ep = container_of(usbep, struct pch_udc_ep, ep);
1972 if (!ep->ep.desc && !ep->num)
1973 return -EINVAL;
1974 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1975 return -ESHUTDOWN;
1976 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1977 if (list_empty(&ep->queue)) {
1978 if (halt) {
1979 if (ep->num == PCH_UDC_EP0)
1980 ep->dev->stall = 1;
1981 pch_udc_ep_set_stall(ep);
1982 pch_udc_enable_ep_interrupts(
1983 ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1984 } else {
1985 pch_udc_ep_clear_stall(ep);
1986 }
1987 ret = 0;
1988 } else {
1989 ret = -EAGAIN;
1990 }
1991 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1992 return ret;
1993 }
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2006 {
2007 struct pch_udc_ep *ep;
2008 unsigned long iflags;
2009 int ret;
2010
2011 if (!usbep)
2012 return -EINVAL;
2013 ep = container_of(usbep, struct pch_udc_ep, ep);
2014 if (!ep->ep.desc && !ep->num)
2015 return -EINVAL;
2016 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2017 return -ESHUTDOWN;
2018 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2019 if (!list_empty(&ep->queue)) {
2020 ret = -EAGAIN;
2021 } else {
2022 if (ep->num == PCH_UDC_EP0)
2023 ep->dev->stall = 1;
2024 pch_udc_ep_set_stall(ep);
2025 pch_udc_enable_ep_interrupts(ep->dev,
2026 PCH_UDC_EPINT(ep->in, ep->num));
2027 ep->dev->prot_stall = 1;
2028 ret = 0;
2029 }
2030 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2031 return ret;
2032 }
2033
2034
2035
2036
2037
2038 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2039 {
2040 struct pch_udc_ep *ep;
2041
2042 if (!usbep)
2043 return;
2044
2045 ep = container_of(usbep, struct pch_udc_ep, ep);
2046 if (ep->ep.desc || !ep->num)
2047 pch_udc_ep_fifo_flush(ep, ep->in);
2048 }
2049
/* Endpoint operations table exposed to the gadget core via usb_ep->ops.
 * fifo_status is unsupported and therefore left NULL. */
static const struct usb_ep_ops pch_udc_ep_ops = {
	.enable = pch_udc_pcd_ep_enable,
	.disable = pch_udc_pcd_ep_disable,
	.alloc_request = pch_udc_alloc_request,
	.free_request = pch_udc_free_request,
	.queue = pch_udc_pcd_queue,
	.dequeue = pch_udc_pcd_dequeue,
	.set_halt = pch_udc_pcd_set_halt,
	.set_wedge = pch_udc_pcd_set_wedge,
	.fifo_status = NULL,
	.fifo_flush = pch_udc_pcd_fifo_flush,
};
2062
2063
2064
2065
2066
2067 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2068 {
2069 static u32 pky_marker;
2070
2071 if (!td_stp)
2072 return;
2073 td_stp->reserved = ++pky_marker;
2074 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2075 td_stp->status = PCH_UDC_BS_HST_RDY;
2076 }
2077
2078
2079
2080
2081
2082
/**
 * pch_udc_start_next_txrequest() - Start the next queued IN transfer.
 * @ep: IN endpoint to service
 *
 * If the endpoint is idle (no poll bit set, queue head not already in
 * flight), marks the head request's descriptor chain host-ready,
 * programs it into the endpoint and kicks TX DMA.
 */
static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_data_dma_desc *td_data;

	/* Poll bit set means the hardware is still busy on this endpoint */
	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
		return;

	if (list_empty(&ep->queue))
		return;

	/* Head of the queue is the next transfer candidate */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	if (req->dma_going)
		return;
	if (!req->td_data)
		return;
	pch_udc_wait_ep_stall(ep);
	req->dma_going = 1;
	/* Clear the descriptor pointer before rewriting the chain */
	pch_udc_ep_set_ddptr(ep, 0);
	td_data = req->td_data;
	while (1) {
		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
				  PCH_UDC_BS_HST_RDY;
		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
			break;
		td_data = phys_to_virt(td_data->next);
	}
	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
	pch_udc_ep_set_pd(ep);
	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
	pch_udc_ep_clear_nak(ep);
}
2117
2118
2119
2120
2121
/**
 * pch_udc_complete_transfer() - Finish the head IN transfer if done.
 * @ep: IN endpoint to check
 *
 * Verifies that the last descriptor of the head request reports
 * DMA-done with a successful RX/TX status, then completes the request
 * and re-arms the endpoint for the next queued transfer (or masks its
 * interrupts when the queue is empty).
 */
static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;

	if (list_empty(&ep->queue))
		return;
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	/* Not finished yet: the hardware has not marked the tail done */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
	    PCH_UDC_BS_DMA_DONE)
		return;
	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
	    PCH_UDC_RTS_SUCC) {
		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
			"epstatus=0x%08x\n",
			(req->td_data_last->status & PCH_UDC_RXTX_STS),
			(int)(ep->epsts));
		return;
	}

	req->req.actual = req->req.length;
	/* Reclaim the descriptors (host-busy) before completing */
	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
	complete_req(ep, req, 0);
	req->dma_going = 0;
	if (!list_empty(&ep->queue)) {
		/* More work queued: re-enable the endpoint */
		pch_udc_wait_ep_stall(ep);
		pch_udc_ep_clear_nak(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	} else {
		pch_udc_disable_ep_interrupts(ep->dev,
					      PCH_UDC_EPINT(ep->in, ep->num));
	}
}
2157
2158
2159
2160
2161
/**
 * pch_udc_complete_receiver() - Finish the head OUT transfer.
 * @ep: OUT endpoint to service
 *
 * Walks the head request's descriptor chain looking for the descriptor
 * marked DMA-done + LAST, extracts the received byte count from it,
 * completes the request and starts the next queued RX transfer.
 */
static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
{
	struct pch_udc_request *req;
	struct pch_udc_dev *dev = ep->dev;
	unsigned int count;
	struct pch_udc_data_dma_desc *td;
	dma_addr_t addr;

	if (list_empty(&ep->queue))
		return;

	/* Stop RX DMA while the chain is inspected */
	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
	pch_udc_ep_set_ddptr(ep, 0);
	/* Start the walk at the tail if it already completed, else the head */
	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
	    PCH_UDC_BS_DMA_DONE)
		td = req->td_data_last;
	else
		td = req->td_data;

	while (1) {
		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
				"epstatus=0x%08x\n",
				(req->td_data->status & PCH_UDC_RXTX_STS),
				(int)(ep->epsts));
			return;
		}
		/* 'count' is assigned exactly once, on the DONE+LAST
		 * descriptor; every other loop exit returns early */
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			if (td->status & PCH_UDC_DMA_LAST) {
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
		if (td == req->td_data_last) {
			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
			return;
		}
		addr = (dma_addr_t)td->next;
		td = phys_to_virt(addr);
	}

	/* A full-length transfer reports 0 bytes; fix the count up */
	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
		count = UDC_DMA_MAXPACKET;
	req->td_data->status |= PCH_UDC_DMA_LAST;
	td->status |= PCH_UDC_BS_HST_BSY;

	req->dma_going = 0;
	req->req.actual = count;
	complete_req(ep, req, 0);

	/* Kick the next queued RX request, if any */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
		pch_udc_start_rxrequest(ep, req);
	}
}
2217
2218
2219
2220
2221
2222
2223
/**
 * pch_udc_svc_data_in() - Service an IN data endpoint interrupt.
 * @dev:	device instance
 * @ep_num:	endpoint number (without direction encoding)
 *
 * Consumes the status latched by pch_udc_read_all_epstatus(), handles
 * host stall set/clear, completes a finished transfer and starts the
 * next queued one when the endpoint goes idle.
 */
static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
	epsts = ep->epsts;
	ep->epsts = 0;

	/* Nothing we handle was signalled */
	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
		return;
	/* Buffer-not-available / host-error: nothing recoverable here */
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* Host asked for a stall */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		/* Host asked to clear the stall, unless wedged */
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	/* Transmit-DMA-complete: retire the head request */
	if (epsts & UDC_EPSTS_TDC)
		pch_udc_complete_transfer(ep);

	/* IN token with nothing in flight: push the next queued request */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2262
2263
2264
2265
2266
2267
/**
 * pch_udc_svc_data_out() - Service an OUT data endpoint interrupt.
 * @dev:	device instance
 * @ep_num:	endpoint number (without direction encoding)
 *
 * Handles buffer-not-available by (re)starting the head RX request,
 * processes host stall set/clear, completes received data and re-arms
 * RX DMA when the queue drains.
 */
static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_request *req = NULL;

	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
	epsts = ep->epsts;
	ep->epsts = 0;

	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
		/* BNA with a queued request: restart it if it never ran */
		req = list_entry(ep->queue.next, struct pch_udc_request,
				 queue);
		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
		    PCH_UDC_BS_DMA_DONE) {
			if (!req->dma_going)
				pch_udc_start_rxrequest(ep, req);
			return;
		}
	}
	/* Host error: nothing recoverable here */
	if (epsts & UDC_EPSTS_HE)
		return;
	if (epsts & UDC_EPSTS_RSS) {
		/* Host asked for a stall */
		pch_udc_ep_set_stall(ep);
		pch_udc_enable_ep_interrupts(ep->dev,
					     PCH_UDC_EPINT(ep->in, ep->num));
	}
	if (epsts & UDC_EPSTS_RCS) {
		/* Host asked to clear the stall, unless wedged */
		if (!dev->prot_stall) {
			pch_udc_ep_clear_stall(ep);
		} else {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
		}
	}
	/* Data phase finished: hand the received bytes to the gadget */
	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_DATA) {
		if (ep->dev->prot_stall == 1) {
			pch_udc_ep_set_stall(ep);
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
		} else {
			pch_udc_complete_receiver(ep);
		}
	}
	/* Queue drained: leave RX DMA enabled for future setup traffic */
	if (list_empty(&ep->queue))
		pch_udc_set_dma(dev, DMA_DIR_RX);
}
2318
2319
2320
2321
2322
/**
 * pch_udc_svc_control_in() - Service an ep0-IN interrupt.
 * @dev: device instance
 *
 * Completes a finished control-IN transfer, re-arms ep0-OUT for the
 * status/next-setup stage, and starts the next queued IN request when
 * an IN token arrives with nothing in flight.
 */
static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
{
	u32 epsts;
	struct pch_udc_ep *ep;
	struct pch_udc_ep *ep_out;

	ep = &dev->ep[UDC_EP0IN_IDX];
	ep_out = &dev->ep[UDC_EP0OUT_IDX];
	epsts = ep->epsts;
	ep->epsts = 0;

	/* Nothing we handle was signalled */
	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
		       UDC_EPSTS_XFERDONE)))
		return;
	if ((epsts & UDC_EPSTS_BNA))
		return;
	if (epsts & UDC_EPSTS_HE)
		return;
	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
		/* IN data stage done; prepare ep0-OUT for what follows */
		pch_udc_complete_transfer(ep);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		ep_out->td_data->status = (ep_out->td_data->status &
					   ~PCH_UDC_BUFF_STS) |
					  PCH_UDC_BS_HST_RDY;
		pch_udc_ep_clear_nak(ep_out);
		pch_udc_set_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_rrdy(ep_out);
	}

	/* IN token with nothing pending: push the next queued request */
	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
	    !(epsts & UDC_EPSTS_TXEMPTY))
		pch_udc_start_next_txrequest(ep);
}
2357
2358
2359
2360
2361
2362
/**
 * pch_udc_svc_control_out() - Service an ep0-OUT interrupt.
 * @dev: device instance
 *
 * Two cases, decoded from the OUT status field:
 *  - SETUP received: dispatch the request to the gadget driver's
 *    setup() callback (with dev->lock held around it) and program the
 *    control endpoints for the resulting data/status stage.
 *  - DATA received: forward to the OUT data path.
 * Called with dev->lock held; releases and re-acquires it around the
 * gadget callback.
 */
static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
__releases(&dev->lock)
__acquires(&dev->lock)
{
	u32 stat;
	int setup_supported;
	struct pch_udc_ep *ep;

	ep = &dev->ep[UDC_EP0OUT_IDX];
	stat = ep->epsts;
	ep->epsts = 0;

	/* SETUP packet arrived */
	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
	    UDC_EPSTS_OUT_SETUP) {
		dev->stall = 0;
		dev->ep[UDC_EP0IN_IDX].halted = 0;
		dev->ep[UDC_EP0OUT_IDX].halted = 0;
		/* Snapshot the request and re-arm the setup descriptor */
		dev->setup_data = ep->td_stp->request;
		pch_udc_init_setup_buff(ep->td_stp);
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
				      dev->ep[UDC_EP0IN_IDX].in);
		/* Point gadget.ep0 at the endpoint of the data direction */
		if ((dev->setup_data.bRequestType & USB_DIR_IN))
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
		else
			dev->gadget.ep0 = &ep->ep;
		spin_lock(&dev->lock);
		/* Vendor-specific request (0x21/0xFF) clears a wedge;
		 * NOTE(review): non-standard, presumably a class quirk —
		 * confirm against the gadget it was added for */
		if ((dev->setup_data.bRequestType == 0x21) &&
		    (dev->setup_data.bRequest == 0xFF))
			dev->prot_stall = 0;
		/* Hand the setup packet to the gadget driver */
		setup_supported = dev->driver->setup(&dev->gadget,
						     &dev->setup_data);
		spin_unlock(&dev->lock);

		if (dev->setup_data.bRequestType & USB_DIR_IN) {
			/* Prepare ep0-OUT to receive the status stage */
			ep->td_data->status = (ep->td_data->status &
					       ~PCH_UDC_BUFF_STS) |
					      PCH_UDC_BS_HST_RDY;
			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
		}

		if (setup_supported >= 0 && setup_supported <
		    UDC_EP0IN_MAX_PKT_SIZE) {
			/* Request handled: let the data stage proceed */
			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));

			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
				pch_udc_set_dma(dev, DMA_DIR_RX);
				pch_udc_ep_clear_nak(ep);
			}
		} else if (setup_supported < 0) {
			/* Request rejected: stall ep0-in */
			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
			pch_udc_enable_ep_interrupts(ep->dev,
						     PCH_UDC_EPINT(ep->in, ep->num));
			dev->stall = 0;
			pch_udc_set_dma(dev, DMA_DIR_RX);
		} else {
			/* Gadget will queue a ZLP for the status stage */
			dev->waiting_zlp_ack = 1;
		}
	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
		    UDC_EPSTS_OUT_DATA) && !dev->stall) {
		/* OUT data stage: route through the regular data-out path */
		pch_udc_clear_dma(dev, DMA_DIR_RX);
		pch_udc_ep_set_ddptr(ep, 0);
		if (!list_empty(&ep->queue)) {
			ep->epsts = stat;
			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
		}
		pch_udc_set_dma(dev, DMA_DIR_RX);
	}
	pch_udc_ep_set_rrdy(ep);
}
2438
2439
2440
2441
2442
2443
2444
2445
2446 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2447 {
2448 struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2449 if (list_empty(&ep->queue))
2450 return;
2451 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2452 pch_udc_ep_clear_nak(ep);
2453 }
2454
2455
2456
2457
2458
2459
2460 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2461 {
2462 int i;
2463 struct pch_udc_ep *ep;
2464
2465 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2466
2467 if (ep_intr & (0x1 << i)) {
2468 ep = &dev->ep[UDC_EPIN_IDX(i)];
2469 ep->epsts = pch_udc_read_ep_status(ep);
2470 pch_udc_clear_ep_status(ep, ep->epsts);
2471 }
2472
2473 if (ep_intr & (0x10000 << i)) {
2474 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2475 ep->epsts = pch_udc_read_ep_status(ep);
2476 pch_udc_clear_ep_status(ep, ep->epsts);
2477 }
2478 }
2479 }
2480
2481
2482
2483
2484
2485
/**
 * pch_udc_activate_control_ep() - Initialise both ep0 directions.
 * @dev: device instance
 *
 * Resets ep0-IN and ep0-OUT control state, programs buffer/packet
 * sizes, arms the setup and data descriptors for ep0-OUT and releases
 * its NAK.  Called from the enumeration-done interrupt.
 */
static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	u32 val;

	/* ep0-IN: reset control state and size registers */
	ep = &dev->ep[UDC_EP0IN_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);

	/* ep0-IN uses no descriptors of its own */
	ep->td_data = NULL;
	ep->td_stp = NULL;
	ep->td_data_phys = 0;
	ep->td_stp_phys = 0;

	/* ep0-OUT: reset control state, sizes and CSR max packet */
	ep = &dev->ep[UDC_EP0OUT_IDX];
	pch_udc_clear_ep_control(ep);
	pch_udc_ep_fifo_flush(ep, ep->in);
	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);

	/* Arm the setup-packet descriptor */
	pch_udc_init_setup_buff(ep->td_stp);

	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);

	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);

	/* Single self-linked data descriptor pointing at the DMA buffer */
	ep->td_data->status = PCH_UDC_DMA_LAST;
	ep->td_data->dataptr = dev->dma_addr;
	ep->td_data->next = ep->td_data_phys;

	pch_udc_ep_clear_nak(ep);
}
2526
2527
2528
2529
2530
2531
/**
 * pch_udc_svc_ur_interrupt() - Service a USB reset interrupt.
 * @dev: device instance
 *
 * Stops DMA in both directions, masks and clears all endpoint
 * interrupts and state, aborts every queued request, then notifies the
 * gadget driver of the bus reset (with dev->lock dropped around the
 * callback).
 */
static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
{
	struct pch_udc_ep *ep;
	int i;

	pch_udc_clear_dma(dev, DMA_DIR_TX);
	pch_udc_clear_dma(dev, DMA_DIR_RX);

	/* Mask all endpoint interrupts and ack anything pending */
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* Reset hardware state of every endpoint */
	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
		ep = &dev->ep[i];
		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
		pch_udc_clear_ep_control(ep);
		pch_udc_ep_set_ddptr(ep, 0);
		pch_udc_write_csr(ep->dev, 0x00, i);
	}
	dev->stall = 0;
	dev->prot_stall = 0;
	dev->waiting_zlp_ack = 0;
	dev->set_cfg_not_acked = 0;

	/* Abort all outstanding requests on the endpoints in use */
	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
		ep = &dev->ep[i];
		pch_udc_ep_set_nak(ep);
		pch_udc_ep_fifo_flush(ep, ep->in);
		/* Completes everything queued with -ESHUTDOWN */
		empty_req_queue(ep);
	}
	if (dev->driver) {
		/* Gadget callback may sleep/re-enter; drop the lock */
		spin_unlock(&dev->lock);
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);
	}
}
2570
2571
2572
2573
2574
2575
/**
 * pch_udc_svc_enum_interrupt() - Service an enumeration-done interrupt.
 * @dev: device instance
 *
 * Reads the negotiated speed from the device status register, brings
 * up both ep0 directions, enables their interrupts and DMA, and
 * unmasks the device-level interrupt sources.
 */
static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
{
	u32 dev_stat, dev_speed;
	u32 speed = USB_SPEED_FULL;

	dev_stat = pch_udc_read_device_status(dev);
	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
		    UDC_DEVSTS_ENUM_SPEED_SHIFT;
	switch (dev_speed) {
	case UDC_DEVSTS_ENUM_SPEED_HIGH:
		speed = USB_SPEED_HIGH;
		break;
	case UDC_DEVSTS_ENUM_SPEED_FULL:
		speed = USB_SPEED_FULL;
		break;
	case UDC_DEVSTS_ENUM_SPEED_LOW:
		speed = USB_SPEED_LOW;
		break;
	default:
		/* Remaining encoding is reserved by the hardware */
		BUG();
	}
	dev->gadget.speed = speed;
	/* Prepare ep0 and start accepting control traffic */
	pch_udc_activate_control_ep(dev);
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
	pch_udc_set_dma(dev, DMA_DIR_TX);
	pch_udc_set_dma(dev, DMA_DIR_RX);
	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));

	/* Unmask the device-level interrupt sources we service */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
				  UDC_DEVINT_ES | UDC_DEVINT_ENUM |
				  UDC_DEVINT_SI | UDC_DEVINT_SC);
}
2609
2610
2611
2612
2613
2614
/**
 * pch_udc_svc_intf_interrupt() - Service a SET_INTERFACE interrupt.
 * @dev: device instance
 *
 * The hardware decodes SET_INTERFACE itself; this handler reads the
 * chosen interface/alternate setting from the status register, mirrors
 * them into the ep0 CSR, clears stalls, and synthesises an equivalent
 * setup packet for the gadget driver (lock dropped around the call).
 */
static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
{
	u32 reg, dev_stat = 0;
	int i;

	dev_stat = pch_udc_read_device_status(dev);
	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
				 UDC_DEVSTS_INTF_SHIFT;
	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
				UDC_DEVSTS_ALT_SHIFT;
	/* The hardware ack (CSR done) is deferred until a ZLP completes */
	dev->set_cfg_not_acked = 1;

	/* Build the SET_INTERFACE request the hardware consumed */
	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);

	/* Mirror interface/alt into the ep0-OUT CSR */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* Interface change clears any endpoint stalls */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;
	spin_unlock(&dev->lock);
	dev->driver->setup(&dev->gadget, &dev->setup_data);
	spin_lock(&dev->lock);
}
2650
2651
2652
2653
2654
2655
/**
 * pch_udc_svc_cfg_interrupt() - Service a SET_CONFIGURATION interrupt.
 * @dev: device instance
 *
 * The hardware decodes SET_CONFIGURATION itself; this handler reads
 * the new configuration from the status register, mirrors it into the
 * ep0 CSR, clears stalls, and synthesises an equivalent setup packet
 * for the gadget driver (lock dropped around the call).
 */
static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
{
	int i;
	u32 reg, dev_stat = 0;

	dev_stat = pch_udc_read_device_status(dev);
	/* The hardware ack (CSR done) is deferred until a ZLP completes */
	dev->set_cfg_not_acked = 1;
	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
				UDC_DEVSTS_CFG_SHIFT;

	/* Build the SET_CONFIGURATION request the hardware consumed */
	memset(&dev->setup_data, 0 , sizeof dev->setup_data);
	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);

	/* Mirror the configuration number into the ep0-OUT CSR */
	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
		/* Configuration change clears any endpoint stalls */
		pch_udc_ep_clear_stall(&(dev->ep[i]));
		dev->ep[i].halted = 0;
	}
	dev->stall = 0;

	/* Gadget callback may sleep/re-enter; drop the lock */
	spin_unlock(&dev->lock);
	dev->driver->setup(&dev->gadget, &dev->setup_data);
	spin_lock(&dev->lock);
}
2687
2688
2689
2690
2691
2692
2693
/**
 * pch_udc_dev_isr() - Dispatch device-level interrupt causes.
 * @dev:	device instance
 * @dev_intr:	device interrupt status bits read by the top-level ISR
 *
 * Handles USB reset, enumeration done, SET_INTERFACE, SET_CONFIG and
 * suspend; the remaining bits (SOF/ES/RWKP) are only logged.  Called
 * with dev->lock held.
 */
static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
{
	int vbus;

	/* USB reset */
	if (dev_intr & UDC_DEVINT_UR) {
		pch_udc_svc_ur_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_RESET\n");
	}
	/* Enumeration (speed negotiation) done */
	if (dev_intr & UDC_DEVINT_ENUM) {
		pch_udc_svc_enum_interrupt(dev);
		dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
	}

	if (dev_intr & UDC_DEVINT_SI)
		pch_udc_svc_intf_interrupt(dev);

	if (dev_intr & UDC_DEVINT_SC)
		pch_udc_svc_cfg_interrupt(dev);

	/* USB suspend */
	if (dev_intr & UDC_DEVINT_US) {
		if (dev->driver
			&& dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}

		/* Suspend with VBUS gone means a real disconnect */
		vbus = pch_vbus_gpio_get_value(dev);
		if ((dev->vbus_session == 0)
			&& (vbus != 1)) {
			if (dev->driver && dev->driver->disconnect) {
				spin_unlock(&dev->lock);
				dev->driver->disconnect(&dev->gadget);
				spin_lock(&dev->lock);
			}
			pch_udc_reconnect(dev);
		} else if ((dev->vbus_session == 0)
			&& (vbus == 1)
			&& !dev->vbus_gpio.intr)
			/* No VBUS irq available: poll via the fall worker */
			schedule_work(&dev->vbus_gpio.irq_work_fall);

		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
	}

	if (dev_intr & UDC_DEVINT_SOF)
		dev_dbg(&dev->pdev->dev, "SOF\n");

	if (dev_intr & UDC_DEVINT_ES)
		dev_dbg(&dev->pdev->dev, "ES\n");

	if (dev_intr & UDC_DEVINT_RWKP)
		dev_dbg(&dev->pdev->dev, "RWKP\n");
}
2749
2750
2751
2752
2753
2754
/**
 * pch_udc_isr() - Top-level IRQ handler for the UDC.
 * @irq:	IRQ line (unused; @pdev identifies the device)
 * @pdev:	Opaque pointer to our struct pch_udc_dev
 *
 * Acknowledges all pending device- and endpoint-level interrupts, then
 * services them under dev->lock.
 *
 * Return: IRQ_HANDLED if anything was serviced (or the controller was
 *	   soft-reset after a hang), IRQ_NONE when no source was pending.
 */
static irqreturn_t pch_udc_isr(int irq, void *pdev)
{
	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
	u32 dev_intr, ep_intr;
	int i;

	dev_intr = pch_udc_read_device_interrupts(dev);
	ep_intr = pch_udc_read_ep_interrupts(dev);

	/*
	 * Hang detection: when the controller wedges, unrelated registers
	 * read back the same bogus value. If both interrupt status
	 * registers also match DEVCFG, assume a hang and soft-reset.
	 */
	if (dev_intr == ep_intr)
		if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
			dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
			/* The controller is reset */
			pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
			return IRQ_HANDLED;
		}
	if (dev_intr)
		/* Clear device interrupts */
		pch_udc_write_device_interrupts(dev, dev_intr);
	if (ep_intr)
		/* Clear ep interrupts */
		pch_udc_write_ep_interrupts(dev, ep_intr);
	if (!dev_intr && !ep_intr)
		return IRQ_NONE;
	spin_lock(&dev->lock);
	if (dev_intr)
		pch_udc_dev_isr(dev, dev_intr);
	if (ep_intr) {
		pch_udc_read_all_epstatus(dev, ep_intr);
		/* Process Control-IN (ep0 in) interrupts */
		if (ep_intr & UDC_EPINT_IN_EP0) {
			pch_udc_svc_control_in(dev);
			pch_udc_postsvc_epinters(dev, 0);
		}
		/* Process Control-OUT (ep0 out) interrupts */
		if (ep_intr & UDC_EPINT_OUT_EP0)
			pch_udc_svc_control_out(dev);
		/* Process data-IN interrupts (low bits, ep1..n) */
		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
			if (ep_intr & (1 << i)) {
				pch_udc_svc_data_in(dev, i);
				pch_udc_postsvc_epinters(dev, i);
			}
		}
		/* Process data-OUT interrupts (bits above OUT_SHIFT) */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
						 PCH_UDC_USED_EP_NUM); i++)
			if (ep_intr & (1 << i))
				pch_udc_svc_data_out(dev, i -
							 UDC_EPINT_OUT_SHIFT);
	}
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
2810
2811
2812
2813
2814
2815 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2816 {
2817
2818 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2819 UDC_EPINT_OUT_EP0);
2820
2821 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2822 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2823 UDC_DEVINT_SI | UDC_DEVINT_SC);
2824 }
2825
2826
2827
2828
2829
2830 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2831 {
2832 const char *const ep_string[] = {
2833 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2834 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2835 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2836 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2837 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2838 "ep15in", "ep15out",
2839 };
2840 int i;
2841
2842 dev->gadget.speed = USB_SPEED_UNKNOWN;
2843 INIT_LIST_HEAD(&dev->gadget.ep_list);
2844
2845
2846 memset(dev->ep, 0, sizeof dev->ep);
2847 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2848 struct pch_udc_ep *ep = &dev->ep[i];
2849 ep->dev = dev;
2850 ep->halted = 1;
2851 ep->num = i / 2;
2852 ep->in = ~i & 1;
2853 ep->ep.name = ep_string[i];
2854 ep->ep.ops = &pch_udc_ep_ops;
2855 if (ep->in) {
2856 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2857 ep->ep.caps.dir_in = true;
2858 } else {
2859 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2860 UDC_EP_REG_SHIFT;
2861 ep->ep.caps.dir_out = true;
2862 }
2863 if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2864 ep->ep.caps.type_control = true;
2865 } else {
2866 ep->ep.caps.type_iso = true;
2867 ep->ep.caps.type_bulk = true;
2868 ep->ep.caps.type_int = true;
2869 }
2870
2871 usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2872 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2873 INIT_LIST_HEAD(&ep->queue);
2874 }
2875 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2876 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2877
2878
2879 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2880 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2881
2882 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2883 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2884 }
2885
2886
2887
2888
2889
2890
2891
2892
/**
 * pch_udc_pcd_init() - Initialize the controller hardware, the gadget
 *			endpoint bookkeeping, and VBUS GPIO sensing.
 * @dev:	Reference to the device structure
 *
 * Return: 0 always. The pch_vbus_gpio_init() result is discarded here —
 *	   presumably GPIO-based VBUS sensing is optional; confirm against
 *	   pch_vbus_gpio_init()'s contract.
 */
static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
	pch_udc_init(dev);
	pch_udc_pcd_reinit(dev);
	pch_vbus_gpio_init(dev, vbus_gpio_port);
	return 0;
}
2900
2901
2902
2903
2904
2905 static int init_dma_pools(struct pch_udc_dev *dev)
2906 {
2907 struct pch_udc_stp_dma_desc *td_stp;
2908 struct pch_udc_data_dma_desc *td_data;
2909 void *ep0out_buf;
2910
2911
2912 dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2913 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2914 if (!dev->data_requests) {
2915 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2916 __func__);
2917 return -ENOMEM;
2918 }
2919
2920
2921 dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2922 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2923 if (!dev->stp_requests) {
2924 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2925 __func__);
2926 return -ENOMEM;
2927 }
2928
2929 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2930 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2931 if (!td_stp) {
2932 dev_err(&dev->pdev->dev,
2933 "%s: can't allocate setup dma descriptor\n", __func__);
2934 return -ENOMEM;
2935 }
2936 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2937
2938
2939 td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2940 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2941 if (!td_data) {
2942 dev_err(&dev->pdev->dev,
2943 "%s: can't allocate data dma descriptor\n", __func__);
2944 return -ENOMEM;
2945 }
2946 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2947 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2948 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2949 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2950 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2951
2952 ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
2953 GFP_KERNEL);
2954 if (!ep0out_buf)
2955 return -ENOMEM;
2956 dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
2957 UDC_EP0OUT_BUFF_SIZE * 4,
2958 DMA_FROM_DEVICE);
2959 return 0;
2960 }
2961
2962 static int pch_udc_start(struct usb_gadget *g,
2963 struct usb_gadget_driver *driver)
2964 {
2965 struct pch_udc_dev *dev = to_pch_udc(g);
2966
2967 driver->driver.bus = NULL;
2968 dev->driver = driver;
2969
2970
2971 pch_udc_setup_ep0(dev);
2972
2973
2974 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
2975 pch_udc_clear_disconnect(dev);
2976
2977 dev->connected = 1;
2978 return 0;
2979 }
2980
/**
 * pch_udc_stop() - usb_gadget_ops .udc_stop: unbind the gadget driver.
 * @g:	gadget being unbound
 *
 * Masks device interrupts, drops the driver reference, and asserts
 * soft-disconnect so the host sees the device disappear.
 *
 * Return: 0 always.
 */
static int pch_udc_stop(struct usb_gadget *g)
{
	struct pch_udc_dev *dev = to_pch_udc(g);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Assures that there are no pending requests with this driver */
	dev->driver = NULL;
	dev->connected = 0;

	/* set SD (soft-disconnect) */
	pch_udc_set_disconnect(dev);

	return 0;
}
2996
/**
 * pch_udc_shutdown() - PCI .shutdown callback: quiesce the controller.
 * @pdev:	PCI device
 *
 * Masks all device and endpoint interrupts and asserts soft-disconnect
 * so the host drops the link before reboot/power-off.
 */
static void pch_udc_shutdown(struct pci_dev *pdev)
{
	struct pch_udc_dev *dev = pci_get_drvdata(pdev);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* disable the pullup so the host will think it's gone */
	pch_udc_set_disconnect(dev);
}
3007
3008 static void pch_udc_remove(struct pci_dev *pdev)
3009 {
3010 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3011
3012 usb_del_gadget_udc(&dev->gadget);
3013
3014
3015 if (dev->driver)
3016 dev_err(&pdev->dev,
3017 "%s: gadget driver still bound!!!\n", __func__);
3018
3019 dma_pool_destroy(dev->data_requests);
3020
3021 if (dev->stp_requests) {
3022
3023 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3024 dma_pool_free(dev->stp_requests,
3025 dev->ep[UDC_EP0OUT_IDX].td_stp,
3026 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3027 }
3028 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3029 dma_pool_free(dev->stp_requests,
3030 dev->ep[UDC_EP0OUT_IDX].td_data,
3031 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3032 }
3033 dma_pool_destroy(dev->stp_requests);
3034 }
3035
3036 if (dev->dma_addr)
3037 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3038 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3039
3040 pch_vbus_gpio_free(dev);
3041
3042 pch_udc_exit(dev);
3043 }
3044
3045 #ifdef CONFIG_PM_SLEEP
/**
 * pch_udc_suspend() - System-sleep callback: mask all interrupts so the
 *		       controller stays quiet while suspended.
 * @d:	generic device
 *
 * Return: 0 always.
 */
static int pch_udc_suspend(struct device *d)
{
	struct pch_udc_dev *dev = dev_get_drvdata(d);

	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	return 0;
}
3055
/**
 * pch_udc_resume() - System-resume callback.
 * @d:	generic device
 *
 * Intentionally empty — interrupts stay masked after suspend.
 * NOTE(review): presumably a (re)bind or bus reset restores the needed
 * interrupt masks; confirm no register restore is required here.
 *
 * Return: 0 always.
 */
static int pch_udc_resume(struct device *d)
{
	return 0;
}
3060
3061 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3062 #define PCH_UDC_PM_OPS (&pch_udc_pm)
3063 #else
3064 #define PCH_UDC_PM_OPS NULL
3065 #endif
3066
3067 static int pch_udc_probe(struct pci_dev *pdev,
3068 const struct pci_device_id *id)
3069 {
3070 int bar;
3071 int retval;
3072 struct pch_udc_dev *dev;
3073
3074
3075 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3076 if (!dev)
3077 return -ENOMEM;
3078
3079
3080 retval = pcim_enable_device(pdev);
3081 if (retval)
3082 return retval;
3083
3084 pci_set_drvdata(pdev, dev);
3085
3086
3087 if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3088 bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3089 else
3090 bar = PCH_UDC_PCI_BAR;
3091
3092
3093 retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3094 if (retval)
3095 return retval;
3096
3097 dev->base_addr = pcim_iomap_table(pdev)[bar];
3098
3099
3100 if (pch_udc_pcd_init(dev))
3101 return -ENODEV;
3102
3103 pci_enable_msi(pdev);
3104
3105 retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3106 IRQF_SHARED, KBUILD_MODNAME, dev);
3107 if (retval) {
3108 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3109 pdev->irq);
3110 goto finished;
3111 }
3112
3113 pci_set_master(pdev);
3114 pci_try_set_mwi(pdev);
3115
3116
3117 spin_lock_init(&dev->lock);
3118 dev->pdev = pdev;
3119 dev->gadget.ops = &pch_udc_ops;
3120
3121 retval = init_dma_pools(dev);
3122 if (retval)
3123 goto finished;
3124
3125 dev->gadget.name = KBUILD_MODNAME;
3126 dev->gadget.max_speed = USB_SPEED_HIGH;
3127
3128
3129 pch_udc_set_disconnect(dev);
3130 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3131 if (retval)
3132 goto finished;
3133 return 0;
3134
3135 finished:
3136 pch_udc_remove(pdev);
3137 return retval;
3138 }
3139
/*
 * PCI IDs of the supported UDC functions. Each entry also matches on the
 * full USB-device-class code (class_mask 0xffffffff) so only the
 * device-mode function of each chip binds to this driver.
 */
static const struct pci_device_id pch_udc_pcidev_id[] = {
	{
		/* Intel Quark X1000 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			   PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* Intel EG20T */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* ROHM ML7213 IOH */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{
		/* ROHM ML7831 IOH */
		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
		.class = PCI_CLASS_SERIAL_USB_DEVICE,
		.class_mask = 0xffffffff,
	},
	{ 0 },	/* terminator */
};
3164
3165 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3166
/* PCI driver glue; .pm is only populated under CONFIG_PM_SLEEP. */
static struct pci_driver pch_udc_driver = {
	.name =	KBUILD_MODNAME,
	.id_table =	pch_udc_pcidev_id,
	.probe =	pch_udc_probe,
	.remove =	pch_udc_remove,
	.shutdown =	pch_udc_shutdown,
	.driver = {
		.pm = PCH_UDC_PM_OPS,
	},
};
3177
/* Register the PCI driver; probe/remove handle per-device setup. */
module_pci_driver(pch_udc_driver);

MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");