This source file includes the following definitions:
- queue_dbg_open
- queue_dbg_read
- queue_dbg_release
- regs_dbg_open
- regs_dbg_read
- regs_dbg_release
- usba_ep_init_debugfs
- usba_ep_cleanup_debugfs
- usba_init_debugfs
- usba_cleanup_debugfs
- usba_ep_init_debugfs
- usba_ep_cleanup_debugfs
- usba_init_debugfs
- usba_cleanup_debugfs
- usba_config_fifo_table
- usba_int_enb_get
- usba_int_enb_set
- usba_int_enb_clear
- vbus_is_present
- toggle_bias
- generate_bias_pulse
- next_fifo_transaction
- submit_request
- submit_next_request
- send_status
- receive_data
- request_complete
- request_complete_list
- usba_ep_enable
- usba_ep_disable
- usba_ep_alloc_request
- usba_ep_free_request
- queue_dma
- usba_ep_queue
- usba_update_req
- stop_dma
- usba_ep_dequeue
- usba_ep_set_halt
- usba_ep_fifo_status
- usba_ep_fifo_flush
- usba_udc_get_frame
- usba_udc_wakeup
- usba_udc_set_selfpowered
- atmel_usba_match_ep
- reset_all_endpoints
- get_ep_by_addr
- set_protocol_stall
- is_stalled
- set_address
- do_test_mode
- feature_is_dev_remote_wakeup
- feature_is_dev_test_mode
- feature_is_ep_halt
- handle_ep0_setup
- usba_control_irq
- usba_ep_irq
- usba_dma_irq
- usba_udc_irq
- start_clock
- stop_clock
- usba_start
- usba_stop
- usba_vbus_irq_thread
- atmel_usba_start
- atmel_usba_stop
- at91sam9rl_toggle_bias
- at91sam9g45_pulse_bias
- atmel_udc_of_init
- usba_udc_probe
- usba_udc_remove
- usba_udc_suspend
- usba_udc_resume
1 /*
2  * Driver for the Atmel USBA high speed USB device controller
3  *
4  * Author: Haavard Skinnemoen (Atmel)
5  */
6
7 #include <linux/clk.h>
8 #include <linux/clk/at91_pmc.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/slab.h>
14 #include <linux/device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/list.h>
17 #include <linux/mfd/syscon.h>
18 #include <linux/platform_device.h>
19 #include <linux/regmap.h>
20 #include <linux/ctype.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/delay.h>
24 #include <linux/of.h>
25 #include <linux/irq.h>
26 #include <linux/gpio/consumer.h>
27
28 #include "atmel_usba_udc.h"
29 #define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
30 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
31
32 #ifdef CONFIG_USB_GADGET_DEBUG_FS
33 #include <linux/debugfs.h>
34 #include <linux/uaccess.h>
35
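/*
 * The per-endpoint "queue" debugfs file: open() takes a snapshot of the
 * endpoint's request queue (copied under the UDC lock), read() formats
 * one line per request from that snapshot, and release() frees it.
 */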
36 static int queue_dbg_open(struct inode *inode, struct file *file)
37 {
38 struct usba_ep *ep = inode->i_private;
39 struct usba_request *req, *req_copy;
40 struct list_head *queue_data;
41
42 queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
43 if (!queue_data)
44 return -ENOMEM;
45 INIT_LIST_HEAD(queue_data);
46
47 spin_lock_irq(&ep->udc->lock);
48 list_for_each_entry(req, &ep->queue, queue) {
49 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
50 if (!req_copy)
51 goto fail;
52 list_add_tail(&req_copy->queue, queue_data);
53 }
54 spin_unlock_irq(&ep->udc->lock);
55
56 file->private_data = queue_data;
57 return 0;
58
59 fail:
60 spin_unlock_irq(&ep->udc->lock);
61 list_for_each_entry_safe(req, req_copy, queue_data, queue) {
62 list_del(&req->queue);
63 kfree(req);
64 }
65 kfree(queue_data);
66 return -ENOMEM;
67 }
68
69 /*
70  * Each line produced by queue_dbg_read() describes one queued request:
71  *
72  *   bbbbbbbb llllllll IZS sssss FDL
73  *
74  *   b: request buffer address       l: request length
75  *   i/I: no_interrupt set/clear     Z/z: zero set/clear
76  *   s/S: short_not_ok set/clear     sssss: request status
77  *   F/f: submitted to the FIFO or not
78  *   D/d: transferred by DMA or not
79  *   L/l: last transaction or not
80  */
81
82
83 static ssize_t queue_dbg_read(struct file *file, char __user *buf,
84 size_t nbytes, loff_t *ppos)
85 {
86 struct list_head *queue = file->private_data;
87 struct usba_request *req, *tmp_req;
88 size_t len, remaining, actual = 0;
89 char tmpbuf[38];
90
91 if (!access_ok(buf, nbytes))
92 return -EFAULT;
93
94 inode_lock(file_inode(file));
95 list_for_each_entry_safe(req, tmp_req, queue, queue) {
96 len = snprintf(tmpbuf, sizeof(tmpbuf),
97 "%8p %08x %c%c%c %5d %c%c%c\n",
98 req->req.buf, req->req.length,
99 req->req.no_interrupt ? 'i' : 'I',
100 req->req.zero ? 'Z' : 'z',
101 req->req.short_not_ok ? 's' : 'S',
102 req->req.status,
103 req->submitted ? 'F' : 'f',
104 req->using_dma ? 'D' : 'd',
105 req->last_transaction ? 'L' : 'l');
106 len = min(len, sizeof(tmpbuf));
107 if (len > nbytes)
108 break;
109
110 list_del(&req->queue);
111 kfree(req);
112
113 remaining = __copy_to_user(buf, tmpbuf, len);
114 actual += len - remaining;
115 if (remaining)
116 break;
117
118 nbytes -= len;
119 buf += len;
120 }
121 inode_unlock(file_inode(file));
122
123 return actual;
124 }
125
126 static int queue_dbg_release(struct inode *inode, struct file *file)
127 {
128 struct list_head *queue_data = file->private_data;
129 struct usba_request *req, *tmp_req;
130
131 list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
132 list_del(&req->queue);
133 kfree(req);
134 }
135 kfree(queue_data);
136 return 0;
137 }
138
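/*
 * The "regs" debugfs file: open() copies the whole register window into
 * a private buffer under the UDC lock, so subsequent reads return a
 * consistent snapshot rather than live (and possibly changing) values.
 */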
139 static int regs_dbg_open(struct inode *inode, struct file *file)
140 {
141 struct usba_udc *udc;
142 unsigned int i;
143 u32 *data;
144 int ret = -ENOMEM;
145
146 inode_lock(inode);
147 udc = inode->i_private;
148 data = kmalloc(inode->i_size, GFP_KERNEL);
149 if (!data)
150 goto out;
151
152 spin_lock_irq(&udc->lock);
153 for (i = 0; i < inode->i_size / 4; i++)
154 data[i] = readl_relaxed(udc->regs + i * 4);
155 spin_unlock_irq(&udc->lock);
156
157 file->private_data = data;
158 ret = 0;
159
160 out:
161 inode_unlock(inode);
162
163 return ret;
164 }
165
166 static ssize_t regs_dbg_read(struct file *file, char __user *buf,
167 size_t nbytes, loff_t *ppos)
168 {
169 struct inode *inode = file_inode(file);
170 int ret;
171
172 inode_lock(inode);
173 ret = simple_read_from_buffer(buf, nbytes, ppos,
174 file->private_data,
175 file_inode(file)->i_size);
176 inode_unlock(inode);
177
178 return ret;
179 }
180
181 static int regs_dbg_release(struct inode *inode, struct file *file)
182 {
183 kfree(file->private_data);
184 return 0;
185 }
186
187 const struct file_operations queue_dbg_fops = {
188 .owner = THIS_MODULE,
189 .open = queue_dbg_open,
190 .llseek = no_llseek,
191 .read = queue_dbg_read,
192 .release = queue_dbg_release,
193 };
194
195 const struct file_operations regs_dbg_fops = {
196 .owner = THIS_MODULE,
197 .open = regs_dbg_open,
198 .llseek = generic_file_llseek,
199 .read = regs_dbg_read,
200 .release = regs_dbg_release,
201 };
202
203 static void usba_ep_init_debugfs(struct usba_udc *udc,
204 struct usba_ep *ep)
205 {
206 struct dentry *ep_root;
207
208 ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
209 ep->debugfs_dir = ep_root;
210
211 debugfs_create_file("queue", 0400, ep_root, ep, &queue_dbg_fops);
212 if (ep->can_dma)
213 debugfs_create_u32("dma_status", 0400, ep_root,
214 &ep->last_dma_status);
215 if (ep_is_control(ep))
216 debugfs_create_u32("state", 0400, ep_root, &ep->state);
217 }
218
219 static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
220 {
221 debugfs_remove_recursive(ep->debugfs_dir);
222 }
223
224 static void usba_init_debugfs(struct usba_udc *udc)
225 {
226 struct dentry *root;
227 struct resource *regs_resource;
228
229 root = debugfs_create_dir(udc->gadget.name, NULL);
230 udc->debugfs_root = root;
231
232 regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
233 CTRL_IOMEM_ID);
234
235 if (regs_resource) {
236 debugfs_create_file_size("regs", 0400, root, udc,
237 &regs_dbg_fops,
238 resource_size(regs_resource));
239 }
240
241 usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
242 }
243
244 static void usba_cleanup_debugfs(struct usba_udc *udc)
245 {
246 usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
247 debugfs_remove_recursive(udc->debugfs_root);
248 }
249 #else
250 static inline void usba_ep_init_debugfs(struct usba_udc *udc,
251 struct usba_ep *ep)
252 {
253
254 }
255
256 static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
257 {
258
259 }
260
261 static inline void usba_init_debugfs(struct usba_udc *udc)
262 {
263
264 }
265
266 static inline void usba_cleanup_debugfs(struct usba_udc *udc)
267 {
268
269 }
270 #endif
271
272 static ushort fifo_mode;
273
274 module_param(fifo_mode, ushort, 0x0);
275 MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode");
276
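/*
 * fifo_mode == 0 leaves the endpoint FIFO layout to autoconfiguration:
 * sizes and bank counts come from the device tree and are tuned per
 * descriptor in atmel_usba_match_ep(). Modes 1-4 select one of the
 * fixed tables below; device tree values can only lower those values.
 */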
277
278
279
280 static struct usba_fifo_cfg mode_1_cfg[] = {
281 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
282 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, },
283 { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 1, },
284 { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 1, },
285 { .hw_ep_num = 4, .fifo_size = 1024, .nr_banks = 1, },
286 { .hw_ep_num = 5, .fifo_size = 1024, .nr_banks = 1, },
287 { .hw_ep_num = 6, .fifo_size = 1024, .nr_banks = 1, },
288 };
289
290
291 static struct usba_fifo_cfg mode_2_cfg[] = {
292 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
293 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 3, },
294 { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 2, },
295 { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 2, },
296 };
297
298
299 static struct usba_fifo_cfg mode_3_cfg[] = {
300 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
301 { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, },
302 { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, },
303 { .hw_ep_num = 3, .fifo_size = 512, .nr_banks = 2, },
304 { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, },
305 { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, },
306 { .hw_ep_num = 6, .fifo_size = 512, .nr_banks = 2, },
307 };
308
309
310 static struct usba_fifo_cfg mode_4_cfg[] = {
311 { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, },
312 { .hw_ep_num = 1, .fifo_size = 512, .nr_banks = 2, },
313 { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, },
314 { .hw_ep_num = 3, .fifo_size = 8, .nr_banks = 2, },
315 { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, },
316 { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, },
317 { .hw_ep_num = 6, .fifo_size = 16, .nr_banks = 2, },
318 { .hw_ep_num = 7, .fifo_size = 8, .nr_banks = 2, },
319 { .hw_ep_num = 8, .fifo_size = 8, .nr_banks = 2, },
320 };
321
322
323 static int usba_config_fifo_table(struct usba_udc *udc)
324 {
325 int n;
326
327 switch (fifo_mode) {
328 default:
329 fifo_mode = 0;
330 /* fall through */
331 case 0:
332 udc->fifo_cfg = NULL;
333 n = 0;
334 break;
335 case 1:
336 udc->fifo_cfg = mode_1_cfg;
337 n = ARRAY_SIZE(mode_1_cfg);
338 break;
339 case 2:
340 udc->fifo_cfg = mode_2_cfg;
341 n = ARRAY_SIZE(mode_2_cfg);
342 break;
343 case 3:
344 udc->fifo_cfg = mode_3_cfg;
345 n = ARRAY_SIZE(mode_3_cfg);
346 break;
347 case 4:
348 udc->fifo_cfg = mode_4_cfg;
349 n = ARRAY_SIZE(mode_4_cfg);
350 break;
351 }
352 DBG(DBG_HW, "Setup fifo_mode %d\n", fifo_mode);
353
354 return n;
355 }
356
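/*
 * Cached copy of the interrupt-enable mask: the helpers below update
 * both the cache and the INT_ENB register, so usba_int_enb_get() and
 * the interrupt handler never need to read the register back.
 */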
357 static inline u32 usba_int_enb_get(struct usba_udc *udc)
358 {
359 return udc->int_enb_cache;
360 }
361
362 static inline void usba_int_enb_set(struct usba_udc *udc, u32 mask)
363 {
364 u32 val;
365
366 val = udc->int_enb_cache | mask;
367 usba_writel(udc, INT_ENB, val);
368 udc->int_enb_cache = val;
369 }
370
371 static inline void usba_int_enb_clear(struct usba_udc *udc, u32 mask)
372 {
373 u32 val;
374
375 val = udc->int_enb_cache & ~mask;
376 usba_writel(udc, INT_ENB, val);
377 udc->int_enb_cache = val;
378 }
379
380 static int vbus_is_present(struct usba_udc *udc)
381 {
382 if (udc->vbus_pin)
383 return gpiod_get_value(udc->vbus_pin);
384
385 /* No VBUS-sense GPIO: assume VBUS is always present */
386 return 1;
387 }
388
389 static void toggle_bias(struct usba_udc *udc, int is_on)
390 {
391 if (udc->errata && udc->errata->toggle_bias)
392 udc->errata->toggle_bias(udc, is_on);
393 }
394
395 static void generate_bias_pulse(struct usba_udc *udc)
396 {
397 if (!udc->bias_pulse_needed)
398 return;
399
400 if (udc->errata && udc->errata->pulse_bias)
401 udc->errata->pulse_bias(udc);
402
403 udc->bias_pulse_needed = false;
404 }
405
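/*
 * PIO transmit path: copy at most one maxpacket-sized chunk of the
 * request into the endpoint FIFO and raise TX_PK_RDY. last_transaction
 * stays clear while more data remains, or when a trailing zero-length
 * packet is still owed (req->req.zero with a full-size final packet).
 */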
406 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
407 {
408 unsigned int transaction_len;
409
410 transaction_len = req->req.length - req->req.actual;
411 req->last_transaction = 1;
412 if (transaction_len > ep->ep.maxpacket) {
413 transaction_len = ep->ep.maxpacket;
414 req->last_transaction = 0;
415 } else if (transaction_len == ep->ep.maxpacket && req->req.zero)
416 req->last_transaction = 0;
417
418 DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
419 ep->ep.name, req, transaction_len,
420 req->last_transaction ? ", done" : "");
421
422 memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
423 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
424 req->req.actual += transaction_len;
425 }
426
427 static void submit_request(struct usba_ep *ep, struct usba_request *req)
428 {
429 DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
430 ep->ep.name, req, req->req.length);
431
432 req->req.actual = 0;
433 req->submitted = 1;
434
435 if (req->using_dma) {
436 if (req->req.length == 0) {
437 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
438 return;
439 }
440
441 if (req->req.zero)
442 usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
443 else
444 usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
445
446 usba_dma_writel(ep, ADDRESS, req->req.dma);
447 usba_dma_writel(ep, CONTROL, req->ctrl);
448 } else {
449 next_fifo_transaction(ep, req);
450 if (req->last_transaction) {
451 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
452 if (ep_is_control(ep))
453 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
454 } else {
455 if (ep_is_control(ep))
456 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
457 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
458 }
459 }
460 }
461
462 static void submit_next_request(struct usba_ep *ep)
463 {
464 struct usba_request *req;
465
466 if (list_empty(&ep->queue)) {
467 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
468 return;
469 }
470
471 req = list_entry(ep->queue.next, struct usba_request, queue);
472 if (!req->submitted)
473 submit_request(ep, req);
474 }
475
476 static void send_status(struct usba_udc *udc, struct usba_ep *ep)
477 {
478 ep->state = STATUS_STAGE_IN;
479 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
480 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
481 }
482
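/*
 * Drain the busy FIFO banks into the request at the head of the queue.
 * The request completes once it is full or the controller flags the
 * transfer as finished (STA bit 31); for control endpoints the status
 * stage is started right away.
 */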
483 static void receive_data(struct usba_ep *ep)
484 {
485 struct usba_udc *udc = ep->udc;
486 struct usba_request *req;
487 unsigned long status;
488 unsigned int bytecount, nr_busy;
489 int is_complete = 0;
490
491 status = usba_ep_readl(ep, STA);
492 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
493
494 DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
495
496 while (nr_busy > 0) {
497 if (list_empty(&ep->queue)) {
498 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
499 break;
500 }
501 req = list_entry(ep->queue.next,
502 struct usba_request, queue);
503
504 bytecount = USBA_BFEXT(BYTE_COUNT, status);
505
506 if (status & (1 << 31))
507 is_complete = 1;
508 if (req->req.actual + bytecount >= req->req.length) {
509 is_complete = 1;
510 bytecount = req->req.length - req->req.actual;
511 }
512
513 memcpy_fromio(req->req.buf + req->req.actual,
514 ep->fifo, bytecount);
515 req->req.actual += bytecount;
516
517 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
518
519 if (is_complete) {
520 DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
521 req->req.status = 0;
522 list_del_init(&req->queue);
523 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
524 spin_unlock(&udc->lock);
525 usb_gadget_giveback_request(&ep->ep, &req->req);
526 spin_lock(&udc->lock);
527 }
528
529 status = usba_ep_readl(ep, STA);
530 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
531
532 if (is_complete && ep_is_control(ep)) {
533 send_status(udc, ep);
534 break;
535 }
536 }
537 }
538
539 static void
540 request_complete(struct usba_ep *ep, struct usba_request *req, int status)
541 {
542 struct usba_udc *udc = ep->udc;
543
544 WARN_ON(!list_empty(&req->queue));
545
546 if (req->req.status == -EINPROGRESS)
547 req->req.status = status;
548
549 if (req->using_dma)
550 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
551
552 DBG(DBG_GADGET | DBG_REQ,
553 "%s: req %p complete: status %d, actual %u\n",
554 ep->ep.name, req, req->req.status, req->req.actual);
555
556 spin_unlock(&udc->lock);
557 usb_gadget_giveback_request(&ep->ep, &req->req);
558 spin_lock(&udc->lock);
559 }
560
561 static void
562 request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
563 {
564 struct usba_request *req, *tmp_req;
565
566 list_for_each_entry_safe(req, tmp_req, list, queue) {
567 list_del_init(&req->queue);
568 request_complete(ep, req, status);
569 }
570 }
571
572 static int
573 usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
574 {
575 struct usba_ep *ep = to_usba_ep(_ep);
576 struct usba_udc *udc = ep->udc;
577 unsigned long flags, maxpacket;
578 unsigned int nr_trans;
579
580 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
581
582 maxpacket = usb_endpoint_maxp(desc);
583
584 if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
585 || ep->index == 0
586 || desc->bDescriptorType != USB_DT_ENDPOINT
587 || maxpacket == 0
588 || maxpacket > ep->fifo_size) {
589 DBG(DBG_ERR, "ep_enable: Invalid argument");
590 return -EINVAL;
591 }
592
593 ep->is_isoc = 0;
594 ep->is_in = 0;
595
596 DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
597 ep->ep.name, ep->ept_cfg, maxpacket);
598
599 if (usb_endpoint_dir_in(desc)) {
600 ep->is_in = 1;
601 ep->ept_cfg |= USBA_EPT_DIR_IN;
602 }
603
604 switch (usb_endpoint_type(desc)) {
605 case USB_ENDPOINT_XFER_CONTROL:
606 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
607 break;
608 case USB_ENDPOINT_XFER_ISOC:
609 if (!ep->can_isoc) {
610 DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
611 ep->ep.name);
612 return -EINVAL;
613 }
614
615 /*
616  * High-bandwidth isochronous: usb_endpoint_maxp_mult() reports the
617  * number of transactions per microframe (at most 3).
618  */
619 nr_trans = usb_endpoint_maxp_mult(desc);
620 if (nr_trans > 3)
621 return -EINVAL;
622
623 ep->is_isoc = 1;
624 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
625 ep->ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
626
627 break;
628 case USB_ENDPOINT_XFER_BULK:
629 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
630 break;
631 case USB_ENDPOINT_XFER_INT:
632 ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
633 break;
634 }
635
636 spin_lock_irqsave(&ep->udc->lock, flags);
637
638 ep->ep.desc = desc;
639 ep->ep.maxpacket = maxpacket;
640
641 usba_ep_writel(ep, CFG, ep->ept_cfg);
642 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
643
644 if (ep->can_dma) {
645 u32 ctrl;
646
647 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index) |
648 USBA_BF(DMA_INT, 1 << ep->index));
649 ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
650 usba_ep_writel(ep, CTL_ENB, ctrl);
651 } else {
652 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1 << ep->index));
653 }
654
655 spin_unlock_irqrestore(&udc->lock, flags);
656
657 DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
658 (unsigned long)usba_ep_readl(ep, CFG));
659 DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
660 (unsigned long)usba_int_enb_get(udc));
661
662 return 0;
663 }
664
665 static int usba_ep_disable(struct usb_ep *_ep)
666 {
667 struct usba_ep *ep = to_usba_ep(_ep);
668 struct usba_udc *udc = ep->udc;
669 LIST_HEAD(req_list);
670 unsigned long flags;
671
672 DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
673
674 spin_lock_irqsave(&udc->lock, flags);
675
676 if (!ep->ep.desc) {
677 spin_unlock_irqrestore(&udc->lock, flags);
678 /*
679  * The endpoint may already have been disabled by a bus reset or
680  * disconnect; only complain while the gadget is still attached.
681  */
682 if (udc->gadget.speed != USB_SPEED_UNKNOWN)
683 DBG(DBG_ERR, "ep_disable: %s not enabled\n",
684 ep->ep.name);
685 return -EINVAL;
686 }
687 ep->ep.desc = NULL;
688
689 list_splice_init(&ep->queue, &req_list);
690 if (ep->can_dma) {
691 usba_dma_writel(ep, CONTROL, 0);
692 usba_dma_writel(ep, ADDRESS, 0);
693 usba_dma_readl(ep, STATUS);
694 }
695 usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
696 usba_int_enb_clear(udc, USBA_BF(EPT_INT, 1 << ep->index));
697
698 request_complete_list(ep, &req_list, -ESHUTDOWN);
699
700 spin_unlock_irqrestore(&udc->lock, flags);
701
702 return 0;
703 }
704
705 static struct usb_request *
706 usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
707 {
708 struct usba_request *req;
709
710 DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
711
712 req = kzalloc(sizeof(*req), gfp_flags);
713 if (!req)
714 return NULL;
715
716 INIT_LIST_HEAD(&req->queue);
717
718 return &req->req;
719 }
720
721 static void
722 usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
723 {
724 struct usba_request *req = to_usba_req(_req);
725
726 DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
727
728 kfree(req);
729 }
730
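/*
 * DMA transfer path: map the request, build the channel control word,
 * and start the channel from submit_request() when the endpoint queue
 * is empty. A single DMA request is limited to 64 KiB (the 0x10000
 * check below); OUT endpoints additionally enable end-of-transfer
 * detection (USBA_DMA_END_TR_EN).
 */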
731 static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
732 struct usba_request *req, gfp_t gfp_flags)
733 {
734 unsigned long flags;
735 int ret;
736
737 DBG(DBG_DMA, "%s: req l/%u d/%pad %c%c%c\n",
738 ep->ep.name, req->req.length, &req->req.dma,
739 req->req.zero ? 'Z' : 'z',
740 req->req.short_not_ok ? 'S' : 's',
741 req->req.no_interrupt ? 'I' : 'i');
742
743 if (req->req.length > 0x10000) {
744
745 DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
746 return -EINVAL;
747 }
748
749 ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
750 if (ret)
751 return ret;
752
753 req->using_dma = 1;
754 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
755 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
756 | USBA_DMA_END_BUF_EN;
757
758 if (!ep->is_in)
759 req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
760
761 /*
762  * Add the request to the queue and start the DMA channel if the
763  * queue was empty. Re-check under the lock that the endpoint is
764  * still enabled: it may have been disabled by a reset meanwhile.
765  */
766 ret = -ESHUTDOWN;
767 spin_lock_irqsave(&udc->lock, flags);
768 if (ep->ep.desc) {
769 if (list_empty(&ep->queue))
770 submit_request(ep, req);
771
772 list_add_tail(&req->queue, &ep->queue);
773 ret = 0;
774 }
775 spin_unlock_irqrestore(&udc->lock, flags);
776
777 return ret;
778 }
779
780 static int
781 usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
782 {
783 struct usba_request *req = to_usba_req(_req);
784 struct usba_ep *ep = to_usba_ep(_ep);
785 struct usba_udc *udc = ep->udc;
786 unsigned long flags;
787 int ret;
788
789 DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
790 ep->ep.name, req, _req->length);
791
792 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
793 !ep->ep.desc)
794 return -ESHUTDOWN;
795
796 req->submitted = 0;
797 req->using_dma = 0;
798 req->last_transaction = 0;
799
800 _req->status = -EINPROGRESS;
801 _req->actual = 0;
802
803 if (ep->can_dma)
804 return queue_dma(udc, ep, req, gfp_flags);
805
806 /* Re-check under the lock: the endpoint may have been disabled */
807 ret = -ESHUTDOWN;
808 spin_lock_irqsave(&udc->lock, flags);
809 if (ep->ep.desc) {
810 list_add_tail(&req->queue, &ep->queue);
811
812 if ((!ep_is_control(ep) && ep->is_in) ||
813 (ep_is_control(ep)
814 && (ep->state == DATA_STAGE_IN
815 || ep->state == STATUS_STAGE_IN)))
816 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
817 else
818 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
819 ret = 0;
820 }
821 spin_unlock_irqrestore(&udc->lock, flags);
822
823 return ret;
824 }
825
826 static void
827 usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
828 {
829 req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
830 }
831
832 static int stop_dma(struct usba_ep *ep, u32 *pstatus)
833 {
834 unsigned int timeout;
835 u32 status;
836
837 /*
838  * Disable the DMA channel, then wait below for the transaction
839  * that is currently in progress to finish.
840  */
841 usba_dma_writel(ep, CONTROL, 0);
842
843 /* Wait for the channel to become idle */
844 for (timeout = 40; timeout; --timeout) {
845 status = usba_dma_readl(ep, STATUS);
846 if (!(status & USBA_DMA_CH_EN))
847 break;
848 udelay(1);
849 }
850
851 if (pstatus)
852 *pstatus = status;
853
854 if (timeout == 0) {
855 dev_err(&ep->udc->pdev->dev,
856 "%s: timed out waiting for DMA FIFO to empty\n",
857 ep->ep.name);
858 return -ETIMEDOUT;
859 }
860
861 return 0;
862 }
863
864 static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
865 {
866 struct usba_ep *ep = to_usba_ep(_ep);
867 struct usba_udc *udc = ep->udc;
868 struct usba_request *req;
869 unsigned long flags;
870 u32 status;
871
872 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
873 ep->ep.name, _req);
874
875 spin_lock_irqsave(&udc->lock, flags);
876
877 list_for_each_entry(req, &ep->queue, queue) {
878 if (&req->req == _req)
879 break;
880 }
881
882 if (&req->req != _req) {
883 spin_unlock_irqrestore(&udc->lock, flags);
884 return -EINVAL;
885 }
886
887 if (req->using_dma) {
888 /*
889  * If this request is currently being transferred, stop the DMA
890  * channel and reset the endpoint FIFO before completing it.
891  */
892 if (ep->queue.next == &req->queue) {
893 status = usba_dma_readl(ep, STATUS);
894 if (status & USBA_DMA_CH_EN)
895 stop_dma(ep, &status);
896
897 #ifdef CONFIG_USB_GADGET_DEBUG_FS
898 ep->last_dma_status = status;
899 #endif
900
901 usba_writel(udc, EPT_RST, 1 << ep->index);
902
903 usba_update_req(ep, req, status);
904 }
905 }
906
907
908
909
910
911 list_del_init(&req->queue);
912
913 request_complete(ep, req, -ECONNRESET);
914
915
916 submit_next_request(ep);
917 spin_unlock_irqrestore(&udc->lock, flags);
918
919 return 0;
920 }
921
922 static int usba_ep_set_halt(struct usb_ep *_ep, int value)
923 {
924 struct usba_ep *ep = to_usba_ep(_ep);
925 struct usba_udc *udc = ep->udc;
926 unsigned long flags;
927 int ret = 0;
928
929 DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
930 value ? "set" : "clear");
931
932 if (!ep->ep.desc) {
933 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
934 ep->ep.name);
935 return -ENODEV;
936 }
937 if (ep->is_isoc) {
938 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
939 ep->ep.name);
940 return -ENOTTY;
941 }
942
943 spin_lock_irqsave(&udc->lock, flags);
944
945 /*
946  * Refuse to change the halt state while requests are still queued,
947  * and refuse to halt an IN endpoint whose FIFO banks are busy.
948  */
949 if (!list_empty(&ep->queue)
950 || ((value && ep->is_in && (usba_ep_readl(ep, STA)
951 & USBA_BF(BUSY_BANKS, -1L))))) {
952 ret = -EAGAIN;
953 } else {
954 if (value)
955 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
956 else
957 usba_ep_writel(ep, CLR_STA,
958 USBA_FORCE_STALL | USBA_TOGGLE_CLR);
959 usba_ep_readl(ep, STA);
960 }
961
962 spin_unlock_irqrestore(&udc->lock, flags);
963
964 return ret;
965 }
966
967 static int usba_ep_fifo_status(struct usb_ep *_ep)
968 {
969 struct usba_ep *ep = to_usba_ep(_ep);
970
971 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
972 }
973
974 static void usba_ep_fifo_flush(struct usb_ep *_ep)
975 {
976 struct usba_ep *ep = to_usba_ep(_ep);
977 struct usba_udc *udc = ep->udc;
978
979 usba_writel(udc, EPT_RST, 1 << ep->index);
980 }
981
982 static const struct usb_ep_ops usba_ep_ops = {
983 .enable = usba_ep_enable,
984 .disable = usba_ep_disable,
985 .alloc_request = usba_ep_alloc_request,
986 .free_request = usba_ep_free_request,
987 .queue = usba_ep_queue,
988 .dequeue = usba_ep_dequeue,
989 .set_halt = usba_ep_set_halt,
990 .fifo_status = usba_ep_fifo_status,
991 .fifo_flush = usba_ep_fifo_flush,
992 };
993
994 static int usba_udc_get_frame(struct usb_gadget *gadget)
995 {
996 struct usba_udc *udc = to_usba_udc(gadget);
997
998 return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
999 }
1000
1001 static int usba_udc_wakeup(struct usb_gadget *gadget)
1002 {
1003 struct usba_udc *udc = to_usba_udc(gadget);
1004 unsigned long flags;
1005 u32 ctrl;
1006 int ret = -EINVAL;
1007
1008 spin_lock_irqsave(&udc->lock, flags);
1009 if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
1010 ctrl = usba_readl(udc, CTRL);
1011 usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
1012 ret = 0;
1013 }
1014 spin_unlock_irqrestore(&udc->lock, flags);
1015
1016 return ret;
1017 }
1018
1019 static int
1020 usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
1021 {
1022 struct usba_udc *udc = to_usba_udc(gadget);
1023 unsigned long flags;
1024
1025 gadget->is_selfpowered = (is_selfpowered != 0);
1026 spin_lock_irqsave(&udc->lock, flags);
1027 if (is_selfpowered)
1028 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
1029 else
1030 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
1031 spin_unlock_irqrestore(&udc->lock, flags);
1032
1033 return 0;
1034 }
1035
1036 static int atmel_usba_start(struct usb_gadget *gadget,
1037 struct usb_gadget_driver *driver);
1038 static int atmel_usba_stop(struct usb_gadget *gadget);
1039
1040 static struct usb_ep *atmel_usba_match_ep(struct usb_gadget *gadget,
1041 struct usb_endpoint_descriptor *desc,
1042 struct usb_ss_ep_comp_descriptor *ep_comp)
1043 {
1044 struct usb_ep *_ep;
1045 struct usba_ep *ep;
1046
1047
1048 list_for_each_entry(_ep, &gadget->ep_list, ep_list) {
1049 if (usb_gadget_ep_match_desc(gadget, _ep, desc, ep_comp))
1050 goto found_ep;
1051 }
1052
1053 return NULL;
1054
1055 found_ep:
1056
1057 if (fifo_mode == 0) {
1058 /* Autoconfiguration: pick FIFO size and bank count per endpoint type */
1059 ep = to_usba_ep(_ep);
1060
1061 switch (usb_endpoint_type(desc)) {
1062 case USB_ENDPOINT_XFER_CONTROL:
1063 break;
1064
1065 case USB_ENDPOINT_XFER_ISOC:
1066 ep->fifo_size = 1024;
1067 ep->nr_banks = 2;
1068 break;
1069
1070 case USB_ENDPOINT_XFER_BULK:
1071 ep->fifo_size = 512;
1072 ep->nr_banks = 1;
1073 break;
1074
1075 case USB_ENDPOINT_XFER_INT:
1076 if (desc->wMaxPacketSize == 0)
1077 ep->fifo_size =
1078 roundup_pow_of_two(_ep->maxpacket_limit);
1079 else
1080 ep->fifo_size =
1081 roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize));
1082 ep->nr_banks = 1;
1083 break;
1084 }
1085
1086
1087 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
1088
1089 /* Generate ept_cfg from the chosen FIFO size and bank count */
1090 if (ep->fifo_size <= 8)
1091 ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
1092 else
1093
1094 ep->ept_cfg =
1095 USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);
1096
1097 ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
1098
1099 ep->udc->configured_ep++;
1100 }
1101
1102 return _ep;
1103 }
1104
1105 static const struct usb_gadget_ops usba_udc_ops = {
1106 .get_frame = usba_udc_get_frame,
1107 .wakeup = usba_udc_wakeup,
1108 .set_selfpowered = usba_udc_set_selfpowered,
1109 .udc_start = atmel_usba_start,
1110 .udc_stop = atmel_usba_stop,
1111 .match_ep = atmel_usba_match_ep,
1112 };
1113
1114 static struct usb_endpoint_descriptor usba_ep0_desc = {
1115 .bLength = USB_DT_ENDPOINT_SIZE,
1116 .bDescriptorType = USB_DT_ENDPOINT,
1117 .bEndpointAddress = 0,
1118 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1119 .wMaxPacketSize = cpu_to_le16(64),
1120
1121 .bInterval = 1,
1122 };
1123
1124 static struct usb_gadget usba_gadget_template = {
1125 .ops = &usba_udc_ops,
1126 .max_speed = USB_SPEED_HIGH,
1127 .name = "atmel_usba_udc",
1128 };
1129
1130 /*
1131  * Called with interrupts disabled and udc->lock held.
1132  */
1133 static void reset_all_endpoints(struct usba_udc *udc)
1134 {
1135 struct usba_ep *ep;
1136 struct usba_request *req, *tmp_req;
1137
1138 usba_writel(udc, EPT_RST, ~0UL);
1139
1140 ep = to_usba_ep(udc->gadget.ep0);
1141 list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
1142 list_del_init(&req->queue);
1143 request_complete(ep, req, -ECONNRESET);
1144 }
1145 }
1146
1147 static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
1148 {
1149 struct usba_ep *ep;
1150
1151 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1152 return to_usba_ep(udc->gadget.ep0);
1153
1154 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
1155 u8 bEndpointAddress;
1156
1157 if (!ep->ep.desc)
1158 continue;
1159 bEndpointAddress = ep->ep.desc->bEndpointAddress;
1160 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1161 continue;
1162 if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
1163 == (wIndex & USB_ENDPOINT_NUMBER_MASK))
1164 return ep;
1165 }
1166
1167 return NULL;
1168 }
1169
1170
1171 static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
1172 {
1173 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
1174 ep->state = WAIT_FOR_SETUP;
1175 }
1176
1177 static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
1178 {
1179 if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
1180 return 1;
1181 return 0;
1182 }
1183
1184 static inline void set_address(struct usba_udc *udc, unsigned int addr)
1185 {
1186 u32 regval;
1187
1188 DBG(DBG_BUS, "setting address %u...\n", addr);
1189 regval = usba_readl(udc, CTRL);
1190 regval = USBA_BFINS(DEV_ADDR, addr, regval);
1191 usba_writel(udc, CTRL, regval);
1192 }
1193
1194 static int do_test_mode(struct usba_udc *udc)
1195 {
1196 static const char test_packet_buffer[] = {
1197
1198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1199
1200 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1201
1202 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1203
1204 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1205 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1206
1207 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1208
1209 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
1210 };
1211 struct usba_ep *ep;
1212 struct device *dev = &udc->pdev->dev;
1213 int test_mode;
1214
1215 test_mode = udc->test_mode;
1216
1217
1218 /* Start from a clean state */
1219
1220 switch (test_mode) {
1221 case 0x0100:
1222 /* Test_J */
1223 usba_writel(udc, TST, USBA_TST_J_MODE);
1224 dev_info(dev, "Entering Test_J mode...\n");
1225 break;
1226 case 0x0200:
1227 /* Test_K */
1228 usba_writel(udc, TST, USBA_TST_K_MODE);
1229 dev_info(dev, "Entering Test_K mode...\n");
1230 break;
1231 case 0x0300:
1232 /*
1233  * Test_SE0_NAK: force high-speed operation and configure ep0 as
1234  * a Bulk IN endpoint.
1235  */
1236 ep = &udc->usba_ep[0];
1237 usba_writel(udc, TST,
1238 USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
1239 usba_ep_writel(ep, CFG,
1240 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1241 | USBA_EPT_DIR_IN
1242 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
1243 | USBA_BF(BK_NUMBER, 1));
1244 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
1245 set_protocol_stall(udc, ep);
1246 dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
1247 } else {
1248 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
1249 dev_info(dev, "Entering Test_SE0_NAK mode...\n");
1250 }
1251 break;
1252 case 0x0400:
1253 /* Test_Packet */
1254 ep = &udc->usba_ep[0];
1255 usba_ep_writel(ep, CFG,
1256 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1257 | USBA_EPT_DIR_IN
1258 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
1259 | USBA_BF(BK_NUMBER, 1));
1260 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
1261 set_protocol_stall(udc, ep);
1262 dev_err(dev, "Test_Packet: ep0 not mapped\n");
1263 } else {
1264 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
1265 usba_writel(udc, TST, USBA_TST_PKT_MODE);
1266 memcpy_toio(ep->fifo, test_packet_buffer,
1267 sizeof(test_packet_buffer));
1268 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1269 dev_info(dev, "Entering Test_Packet mode...\n");
1270 }
1271 break;
1272 default:
1273 dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
1274 return -EINVAL;
1275 }
1276
1277 return 0;
1278 }
1279
1280
1281 static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
1282 {
1283 if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
1284 return true;
1285 return false;
1286 }
1287
1288 static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
1289 {
1290 if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
1291 return true;
1292 return false;
1293 }
1294
1295 static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
1296 {
1297 if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
1298 return true;
1299 return false;
1300 }
1301
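/*
 * Handle a SETUP packet on ep0. Standard requests that can be answered
 * locally (GET_STATUS, CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS) are
 * handled here; everything else is delegated to the gadget driver's
 * setup() callback. A negative return value tells the caller to stall
 * the control endpoint.
 */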
1302 static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
1303 struct usb_ctrlrequest *crq)
1304 {
1305 int retval = 0;
1306
1307 switch (crq->bRequest) {
1308 case USB_REQ_GET_STATUS: {
1309 u16 status;
1310
1311 if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
1312 status = cpu_to_le16(udc->devstatus);
1313 } else if (crq->bRequestType
1314 == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
1315 status = cpu_to_le16(0);
1316 } else if (crq->bRequestType
1317 == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
1318 struct usba_ep *target;
1319
1320 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1321 if (!target)
1322 goto stall;
1323
1324 status = 0;
1325 if (is_stalled(udc, target))
1326 status |= cpu_to_le16(1);
1327 } else
1328 goto delegate;
1329
1330 /* Write the two status bytes directly to the FIFO */
1331 if (crq->wLength != cpu_to_le16(sizeof(status)))
1332 goto stall;
1333 ep->state = DATA_STAGE_IN;
1334 writew_relaxed(status, ep->fifo);
1335 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1336 break;
1337 }
1338
1339 case USB_REQ_CLEAR_FEATURE: {
1340 if (crq->bRequestType == USB_RECIP_DEVICE) {
1341 if (feature_is_dev_remote_wakeup(crq))
1342 udc->devstatus
1343 &= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
1344 else
1345
1346 goto stall;
1347 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1348 struct usba_ep *target;
1349
1350 if (crq->wLength != cpu_to_le16(0)
1351 || !feature_is_ep_halt(crq))
1352 goto stall;
1353 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1354 if (!target)
1355 goto stall;
1356
1357 usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
1358 if (target->index != 0)
1359 usba_ep_writel(target, CLR_STA,
1360 USBA_TOGGLE_CLR);
1361 } else {
1362 goto delegate;
1363 }
1364
1365 send_status(udc, ep);
1366 break;
1367 }
1368
1369 case USB_REQ_SET_FEATURE: {
1370 if (crq->bRequestType == USB_RECIP_DEVICE) {
1371 if (feature_is_dev_test_mode(crq)) {
1372 send_status(udc, ep);
1373 ep->state = STATUS_STAGE_TEST;
1374 udc->test_mode = le16_to_cpu(crq->wIndex);
1375 return 0;
1376 } else if (feature_is_dev_remote_wakeup(crq)) {
1377 udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
1378 } else {
1379 goto stall;
1380 }
1381 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1382 struct usba_ep *target;
1383
1384 if (crq->wLength != cpu_to_le16(0)
1385 || !feature_is_ep_halt(crq))
1386 goto stall;
1387
1388 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1389 if (!target)
1390 goto stall;
1391
1392 usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
1393 } else
1394 goto delegate;
1395
1396 send_status(udc, ep);
1397 break;
1398 }
1399
1400 case USB_REQ_SET_ADDRESS:
1401 if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
1402 goto delegate;
1403
1404 set_address(udc, le16_to_cpu(crq->wValue));
1405 send_status(udc, ep);
1406 ep->state = STATUS_STAGE_ADDR;
1407 break;
1408
1409 default:
1410 delegate:
1411 spin_unlock(&udc->lock);
1412 retval = udc->driver->setup(&udc->gadget, crq);
1413 spin_lock(&udc->lock);
1414 }
1415
1416 return retval;
1417
1418 stall:
1419 pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
1420 "halting endpoint...\n",
1421 ep->ep.name, crq->bRequestType, crq->bRequest,
1422 le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
1423 le16_to_cpu(crq->wLength));
1424 set_protocol_stall(udc, ep);
1425 return -1;
1426 }
1427
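/*
 * Interrupt handling for the control endpoint: drives the ep0 state
 * machine (WAIT_FOR_SETUP and the DATA/STATUS stages) from the
 * TX_PK_RDY, TX_COMPLETE, RX_BK_RDY and RX_SETUP flags, looping via
 * "restart" until no more work is pending.
 */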
1428 static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
1429 {
1430 struct usba_request *req;
1431 u32 epstatus;
1432 u32 epctrl;
1433
1434 restart:
1435 epstatus = usba_ep_readl(ep, STA);
1436 epctrl = usba_ep_readl(ep, CTL);
1437
1438 DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
1439 ep->ep.name, ep->state, epstatus, epctrl);
1440
1441 req = NULL;
1442 if (!list_empty(&ep->queue))
1443 req = list_entry(ep->queue.next,
1444 struct usba_request, queue);
1445
1446 if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
1447 if (req->submitted)
1448 next_fifo_transaction(ep, req);
1449 else
1450 submit_request(ep, req);
1451
1452 if (req->last_transaction) {
1453 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
1454 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
1455 }
1456 goto restart;
1457 }
1458 if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
1459 usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);
1460
1461 switch (ep->state) {
1462 case DATA_STAGE_IN:
1463 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
1464 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1465 ep->state = STATUS_STAGE_OUT;
1466 break;
1467 case STATUS_STAGE_ADDR:
1468 /* Activate the address that was latched by set_address() */
1469 usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
1470 | USBA_FADDR_EN));
1471 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1472 ep->state = WAIT_FOR_SETUP;
1473 break;
1474 case STATUS_STAGE_IN:
1475 if (req) {
1476 list_del_init(&req->queue);
1477 request_complete(ep, req, 0);
1478 submit_next_request(ep);
1479 }
1480 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1481 ep->state = WAIT_FOR_SETUP;
1482 break;
1483 case STATUS_STAGE_TEST:
1484 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1485 ep->state = WAIT_FOR_SETUP;
1486 if (do_test_mode(udc))
1487 set_protocol_stall(udc, ep);
1488 break;
1489 default:
1490 pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
1491 "halting endpoint...\n",
1492 ep->ep.name, ep->state);
1493 set_protocol_stall(udc, ep);
1494 break;
1495 }
1496
1497 goto restart;
1498 }
1499 if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1500 switch (ep->state) {
1501 case STATUS_STAGE_OUT:
1502 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1503 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1504
1505 if (req) {
1506 list_del_init(&req->queue);
1507 request_complete(ep, req, 0);
1508 }
1509 ep->state = WAIT_FOR_SETUP;
1510 break;
1511
1512 case DATA_STAGE_OUT:
1513 receive_data(ep);
1514 break;
1515
1516 default:
1517 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1518 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1519 pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
1520 "halting endpoint...\n",
1521 ep->ep.name, ep->state);
1522 set_protocol_stall(udc, ep);
1523 break;
1524 }
1525
1526 goto restart;
1527 }
1528 if (epstatus & USBA_RX_SETUP) {
1529 union {
1530 struct usb_ctrlrequest crq;
1531 unsigned long data[2];
1532 } crq;
1533 unsigned int pkt_len;
1534 int ret;
1535
1536 if (ep->state != WAIT_FOR_SETUP) {
1537 /*
1538  * We did not expect a SETUP packet in this state. Terminate any
1539  * request that is still pending on the endpoint before handling
1540  * the new SETUP.
1541  */
1542 int status = -EPROTO;
1543
1544 /*
1545  * If we were only waiting for the status packet, assume it was
1546  * overtaken by the new SETUP and complete the pending request
1547  * as successful.
1548  */
1549 if (ep->state == STATUS_STAGE_OUT
1550 || ep->state == STATUS_STAGE_IN) {
1551 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1552 status = 0;
1553 }
1554
1555 if (req) {
1556 list_del_init(&req->queue);
1557 request_complete(ep, req, status);
1558 }
1559 }
1560
1561 pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
1562 DBG(DBG_HW, "Packet length: %u\n", pkt_len);
1563 if (pkt_len != sizeof(crq)) {
1564 pr_warn("udc: Invalid packet length %u (expected %zu)\n",
1565 pkt_len, sizeof(crq));
1566 set_protocol_stall(udc, ep);
1567 return;
1568 }
1569
1570 DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
1571 memcpy_fromio(crq.data, ep->fifo, sizeof(crq));
1572
1573 /* Acknowledge the SETUP packet right away so that a reply can be
1574  * generated or received without delay. */
1575 usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);
1576
1577
1578
1579
1580
1581 if (crq.crq.bRequestType & USB_DIR_IN) {
1582 /*
1583  * IN request: always enter the data stage, even when wLength is
1584  * zero; the gadget driver's setup() callback decides how much
1585  * data is actually returned.
1586  */
1587
1588 ep->state = DATA_STAGE_IN;
1589 } else {
1590 if (crq.crq.wLength != cpu_to_le16(0))
1591 ep->state = DATA_STAGE_OUT;
1592 else
1593 ep->state = STATUS_STAGE_IN;
1594 }
1595
1596 ret = -1;
1597 if (ep->index == 0)
1598 ret = handle_ep0_setup(udc, ep, &crq.crq);
1599 else {
1600 spin_unlock(&udc->lock);
1601 ret = udc->driver->setup(&udc->gadget, &crq.crq);
1602 spin_lock(&udc->lock);
1603 }
1604
1605 DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
1606 crq.crq.bRequestType, crq.crq.bRequest,
1607 le16_to_cpu(crq.crq.wLength), ep->state, ret);
1608
1609 if (ret < 0) {
1610
1611 set_protocol_stall(udc, ep);
1612 }
1613 }
1614 }
1615
1616 static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
1617 {
1618 struct usba_request *req;
1619 u32 epstatus;
1620 u32 epctrl;
1621
1622 epstatus = usba_ep_readl(ep, STA);
1623 epctrl = usba_ep_readl(ep, CTL);
1624
1625 DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);
1626
1627 while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
1628 DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);
1629
1630 if (list_empty(&ep->queue)) {
1631 dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
1632 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
1633 return;
1634 }
1635
1636 req = list_entry(ep->queue.next, struct usba_request, queue);
1637
1638 if (req->using_dma) {
1639 /* Send a zero-length packet */
1640 usba_ep_writel(ep, SET_STA,
1641 USBA_TX_PK_RDY);
1642 usba_ep_writel(ep, CTL_DIS,
1643 USBA_TX_PK_RDY);
1644 list_del_init(&req->queue);
1645 submit_next_request(ep);
1646 request_complete(ep, req, 0);
1647 } else {
1648 if (req->submitted)
1649 next_fifo_transaction(ep, req);
1650 else
1651 submit_request(ep, req);
1652
1653 if (req->last_transaction) {
1654 list_del_init(&req->queue);
1655 submit_next_request(ep);
1656 request_complete(ep, req, 0);
1657 }
1658 }
1659
1660 epstatus = usba_ep_readl(ep, STA);
1661 epctrl = usba_ep_readl(ep, CTL);
1662 }
1663 if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1664 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
1665 receive_data(ep);
1666 }
1667 }
1668
1669 static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
1670 {
1671 struct usba_request *req;
1672 u32 status, control, pending;
1673
1674 status = usba_dma_readl(ep, STATUS);
1675 control = usba_dma_readl(ep, CONTROL);
1676 #ifdef CONFIG_USB_GADGET_DEBUG_FS
1677 ep->last_dma_status = status;
1678 #endif
1679 pending = status & control;
1680 DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);
1681
1682 if (status & USBA_DMA_CH_EN) {
1683 dev_err(&udc->pdev->dev,
1684 "DMA_CH_EN is set after transfer is finished!\n");
1685 dev_err(&udc->pdev->dev,
1686 "status=%#08x, pending=%#08x, control=%#08x\n",
1687 status, pending, control);
1688
1689
1690
1691
1692
1693 }
1694
1695 if (list_empty(&ep->queue))
1696 /* Might happen if a reset comes along at the right moment */
1697 return;
1698
1699 if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
1700 req = list_entry(ep->queue.next, struct usba_request, queue);
1701 usba_update_req(ep, req, status);
1702
1703 list_del_init(&req->queue);
1704 submit_next_request(ep);
1705 request_complete(ep, req, 0);
1706 }
1707 }
1708
1709 static int start_clock(struct usba_udc *udc);
1710 static void stop_clock(struct usba_udc *udc);
1711
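/*
 * Top-level UDC interrupt handler. Suspend and wake-up events swap the
 * DET_SUSPEND/WAKE_UP interrupt enables and gate the clocks; DMA and
 * endpoint interrupts are dispatched to the per-endpoint handlers; an
 * end-of-reset reprograms ep0 (and the already-configured endpoints)
 * and records the negotiated speed.
 */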
1712 static irqreturn_t usba_udc_irq(int irq, void *devid)
1713 {
1714 struct usba_udc *udc = devid;
1715 u32 status, int_enb;
1716 u32 dma_status;
1717 u32 ep_status;
1718
1719 spin_lock(&udc->lock);
1720
1721 int_enb = usba_int_enb_get(udc);
1722 status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
1723 DBG(DBG_INT, "irq, status=%#08x\n", status);
1724
1725 if (status & USBA_DET_SUSPEND) {
1726 usba_writel(udc, INT_CLR, USBA_DET_SUSPEND|USBA_WAKE_UP);
1727 usba_int_enb_set(udc, USBA_WAKE_UP);
1728 usba_int_enb_clear(udc, USBA_DET_SUSPEND);
1729 udc->suspended = true;
1730 toggle_bias(udc, 0);
1731 udc->bias_pulse_needed = true;
1732 stop_clock(udc);
1733 DBG(DBG_BUS, "Suspend detected\n");
1734 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1735 && udc->driver && udc->driver->suspend) {
1736 spin_unlock(&udc->lock);
1737 udc->driver->suspend(&udc->gadget);
1738 spin_lock(&udc->lock);
1739 }
1740 }
1741
1742 if (status & USBA_WAKE_UP) {
1743 start_clock(udc);
1744 toggle_bias(udc, 1);
1745 usba_writel(udc, INT_CLR, USBA_WAKE_UP);
1746 DBG(DBG_BUS, "Wake Up CPU detected\n");
1747 }
1748
1749 if (status & USBA_END_OF_RESUME) {
1750 udc->suspended = false;
1751 usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
1752 usba_int_enb_clear(udc, USBA_WAKE_UP);
1753 usba_int_enb_set(udc, USBA_DET_SUSPEND);
1754 generate_bias_pulse(udc);
1755 DBG(DBG_BUS, "Resume detected\n");
1756 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1757 && udc->driver && udc->driver->resume) {
1758 spin_unlock(&udc->lock);
1759 udc->driver->resume(&udc->gadget);
1760 spin_lock(&udc->lock);
1761 }
1762 }
1763
1764 dma_status = USBA_BFEXT(DMA_INT, status);
1765 if (dma_status) {
1766 int i;
1767
1768 usba_int_enb_set(udc, USBA_DET_SUSPEND);
1769
1770 for (i = 1; i <= USBA_NR_DMAS; i++)
1771 if (dma_status & (1 << i))
1772 usba_dma_irq(udc, &udc->usba_ep[i]);
1773 }
1774
1775 ep_status = USBA_BFEXT(EPT_INT, status);
1776 if (ep_status) {
1777 int i;
1778
1779 usba_int_enb_set(udc, USBA_DET_SUSPEND);
1780
1781 for (i = 0; i < udc->num_ep; i++)
1782 if (ep_status & (1 << i)) {
1783 if (ep_is_control(&udc->usba_ep[i]))
1784 usba_control_irq(udc, &udc->usba_ep[i]);
1785 else
1786 usba_ep_irq(udc, &udc->usba_ep[i]);
1787 }
1788 }
1789
1790 if (status & USBA_END_OF_RESET) {
1791 struct usba_ep *ep0, *ep;
1792 int i, n;
1793
1794 usba_writel(udc, INT_CLR,
1795 USBA_END_OF_RESET|USBA_END_OF_RESUME
1796 |USBA_DET_SUSPEND|USBA_WAKE_UP);
1797 generate_bias_pulse(udc);
1798 reset_all_endpoints(udc);
1799
1800 if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
1801 udc->gadget.speed = USB_SPEED_UNKNOWN;
1802 spin_unlock(&udc->lock);
1803 usb_gadget_udc_reset(&udc->gadget, udc->driver);
1804 spin_lock(&udc->lock);
1805 }
1806
1807 if (status & USBA_HIGH_SPEED)
1808 udc->gadget.speed = USB_SPEED_HIGH;
1809 else
1810 udc->gadget.speed = USB_SPEED_FULL;
1811 DBG(DBG_BUS, "%s bus reset detected\n",
1812 usb_speed_string(udc->gadget.speed));
1813
1814 ep0 = &udc->usba_ep[0];
1815 ep0->ep.desc = &usba_ep0_desc;
1816 ep0->state = WAIT_FOR_SETUP;
1817 usba_ep_writel(ep0, CFG,
1818 (USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
1819 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
1820 | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
1821 usba_ep_writel(ep0, CTL_ENB,
1822 USBA_EPT_ENABLE | USBA_RX_SETUP);
1823
1824 /* A bus reset also ends any suspended state */
1825 udc->suspended = false;
1826 usba_int_enb_clear(udc, USBA_WAKE_UP);
1827
1828 usba_int_enb_set(udc, USBA_BF(EPT_INT, 1) |
1829 USBA_DET_SUSPEND | USBA_END_OF_RESUME);
1830
1831
1832
1833
1834
1835 if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
1836 dev_err(&udc->pdev->dev,
1837 "ODD: EP0 configuration is invalid!\n");
1838
1839 /* Reprogram the other endpoints that are already configured */
1840 n = fifo_mode ? udc->num_ep : udc->configured_ep;
1841 for (i = 1; i < n; i++) {
1842 ep = &udc->usba_ep[i];
1843 usba_ep_writel(ep, CFG, ep->ept_cfg);
1844 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
1845 dev_err(&udc->pdev->dev,
1846 "ODD: EP%d configuration is invalid!\n", i);
1847 }
1848 }
1849
1850 spin_unlock(&udc->lock);
1851
1852 return IRQ_HANDLED;
1853 }
1854
1855 static int start_clock(struct usba_udc *udc)
1856 {
1857 int ret;
1858
1859 if (udc->clocked)
1860 return 0;
1861
1862 pm_stay_awake(&udc->pdev->dev);
1863
1864 ret = clk_prepare_enable(udc->pclk);
1865 if (ret)
1866 return ret;
1867 ret = clk_prepare_enable(udc->hclk);
1868 if (ret) {
1869 clk_disable_unprepare(udc->pclk);
1870 return ret;
1871 }
1872
1873 udc->clocked = true;
1874 return 0;
1875 }
1876
1877 static void stop_clock(struct usba_udc *udc)
1878 {
1879 if (!udc->clocked)
1880 return;
1881
1882 clk_disable_unprepare(udc->hclk);
1883 clk_disable_unprepare(udc->pclk);
1884
1885 udc->clocked = false;
1886
1887 pm_relax(&udc->pdev->dev);
1888 }
1889
1890 static int usba_start(struct usba_udc *udc)
1891 {
1892 unsigned long flags;
1893 int ret;
1894
1895 ret = start_clock(udc);
1896 if (ret)
1897 return ret;
1898
1899 if (udc->suspended)
1900 return 0;
1901
1902 spin_lock_irqsave(&udc->lock, flags);
1903 toggle_bias(udc, 1);
1904 usba_writel(udc, CTRL, USBA_ENABLE_MASK);
1905
1906 usba_writel(udc, INT_ENB, 0);
1907 udc->int_enb_cache = 0;
1908 usba_writel(udc, INT_CLR,
1909 USBA_END_OF_RESET|USBA_END_OF_RESUME
1910 |USBA_DET_SUSPEND|USBA_WAKE_UP);
1911
1912 usba_int_enb_set(udc, USBA_END_OF_RESET);
1913 spin_unlock_irqrestore(&udc->lock, flags);
1914
1915 return 0;
1916 }
1917
1918 static void usba_stop(struct usba_udc *udc)
1919 {
1920 unsigned long flags;
1921
1922 if (udc->suspended)
1923 return;
1924
1925 spin_lock_irqsave(&udc->lock, flags);
1926 udc->gadget.speed = USB_SPEED_UNKNOWN;
1927 reset_all_endpoints(udc);
1928
1929
1930 toggle_bias(udc, 0);
1931 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1932 spin_unlock_irqrestore(&udc->lock, flags);
1933
1934 stop_clock(udc);
1935 }
1936
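/*
 * Threaded handler for the optional VBUS-sense GPIO: wait briefly for
 * the signal to settle, then start or stop the controller only when
 * the VBUS state has actually changed since the last check.
 */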
1937 static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
1938 {
1939 struct usba_udc *udc = devid;
1940 int vbus;
1941
1942 /* Debounce: give the VBUS signal a moment to settle */
1943 udelay(10);
1944
1945 mutex_lock(&udc->vbus_mutex);
1946
1947 vbus = vbus_is_present(udc);
1948 if (vbus != udc->vbus_prev) {
1949 if (vbus) {
1950 usba_start(udc);
1951 } else {
1952 udc->suspended = false;
1953 if (udc->driver->disconnect)
1954 udc->driver->disconnect(&udc->gadget);
1955
1956 usba_stop(udc);
1957 }
1958 udc->vbus_prev = vbus;
1959 }
1960
1961 mutex_unlock(&udc->vbus_mutex);
1962 return IRQ_HANDLED;
1963 }
1964
1965 static int atmel_usba_start(struct usb_gadget *gadget,
1966 struct usb_gadget_driver *driver)
1967 {
1968 int ret;
1969 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
1970 unsigned long flags;
1971
1972 spin_lock_irqsave(&udc->lock, flags);
1973 udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
1974 udc->driver = driver;
1975 spin_unlock_irqrestore(&udc->lock, flags);
1976
1977 mutex_lock(&udc->vbus_mutex);
1978
1979 if (udc->vbus_pin)
1980 enable_irq(gpiod_to_irq(udc->vbus_pin));
1981
1982 /* If VBUS is already present, start the controller right away */
1983 udc->vbus_prev = vbus_is_present(udc);
1984 if (udc->vbus_prev) {
1985 ret = usba_start(udc);
1986 if (ret)
1987 goto err;
1988 }
1989
1990 mutex_unlock(&udc->vbus_mutex);
1991 return 0;
1992
1993 err:
1994 if (udc->vbus_pin)
1995 disable_irq(gpiod_to_irq(udc->vbus_pin));
1996
1997 mutex_unlock(&udc->vbus_mutex);
1998
1999 spin_lock_irqsave(&udc->lock, flags);
2000 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
2001 udc->driver = NULL;
2002 spin_unlock_irqrestore(&udc->lock, flags);
2003 return ret;
2004 }
2005
2006 static int atmel_usba_stop(struct usb_gadget *gadget)
2007 {
2008 struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
2009
2010 if (udc->vbus_pin)
2011 disable_irq(gpiod_to_irq(udc->vbus_pin));
2012
2013 if (fifo_mode == 0)
2014 udc->configured_ep = 1;
2015
2016 udc->suspended = false;
2017 usba_stop(udc);
2018
2019 udc->driver = NULL;
2020
2021 return 0;
2022 }
2023
2024 static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
2025 {
2026 regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
2027 is_on ? AT91_PMC_BIASEN : 0);
2028 }
2029
2030 static void at91sam9g45_pulse_bias(struct usba_udc *udc)
2031 {
2032 regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
2033 regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
2034 AT91_PMC_BIASEN);
2035 }
2036
2037 static const struct usba_udc_errata at91sam9rl_errata = {
2038 .toggle_bias = at91sam9rl_toggle_bias,
2039 };
2040
2041 static const struct usba_udc_errata at91sam9g45_errata = {
2042 .pulse_bias = at91sam9g45_pulse_bias,
2043 };
2044
2045 static const struct of_device_id atmel_udc_dt_ids[] = {
2046 { .compatible = "atmel,at91sam9rl-udc", .data = &at91sam9rl_errata },
2047 { .compatible = "atmel,at91sam9g45-udc", .data = &at91sam9g45_errata },
2048 { .compatible = "atmel,sama5d3-udc" },
2049 { }
2050 };
2051
2052 MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
2053
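/*
 * Build the endpoint array from the controller's device tree child
 * nodes. With fifo_mode != 0 the table chosen by
 * usba_config_fifo_table() provides the endpoint count and hardware
 * endpoint numbers, and caps the fifo-size/nb-banks values taken from
 * the device tree.
 */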
2054 static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
2055 struct usba_udc *udc)
2056 {
2057 u32 val;
2058 struct device_node *np = pdev->dev.of_node;
2059 const struct of_device_id *match;
2060 struct device_node *pp;
2061 int i, ret;
2062 struct usba_ep *eps, *ep;
2063
2064 match = of_match_node(atmel_udc_dt_ids, np);
2065 if (!match)
2066 return ERR_PTR(-EINVAL);
2067
2068 udc->errata = match->data;
2069 udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
2070 if (IS_ERR(udc->pmc))
2071 udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
2072 if (IS_ERR(udc->pmc))
2073 udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
2074 if (udc->errata && IS_ERR(udc->pmc))
2075 return ERR_CAST(udc->pmc);
2076
2077 udc->num_ep = 0;
2078
2079 udc->vbus_pin = devm_gpiod_get_optional(&pdev->dev, "atmel,vbus",
2080 GPIOD_IN);
2081
2082 if (fifo_mode == 0) {
2083 pp = NULL;
2084 while ((pp = of_get_next_child(np, pp)))
2085 udc->num_ep++;
2086 udc->configured_ep = 1;
2087 } else {
2088 udc->num_ep = usba_config_fifo_table(udc);
2089 }
2090
2091 eps = devm_kcalloc(&pdev->dev, udc->num_ep, sizeof(struct usba_ep),
2092 GFP_KERNEL);
2093 if (!eps)
2094 return ERR_PTR(-ENOMEM);
2095
2096 udc->gadget.ep0 = &eps[0].ep;
2097
2098 INIT_LIST_HEAD(&eps[0].ep.ep_list);
2099
2100 pp = NULL;
2101 i = 0;
2102 while ((pp = of_get_next_child(np, pp)) && i < udc->num_ep) {
2103 ep = &eps[i];
2104
2105 ret = of_property_read_u32(pp, "reg", &val);
2106 if (ret) {
2107 dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
2108 goto err;
2109 }
2110 ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : val;
2111
2112 ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
2113 if (ret) {
2114 dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
2115 goto err;
2116 }
2117 if (fifo_mode) {
2118 if (val < udc->fifo_cfg[i].fifo_size) {
2119 dev_warn(&pdev->dev,
2120 "Using max fifo-size value from DT\n");
2121 ep->fifo_size = val;
2122 } else {
2123 ep->fifo_size = udc->fifo_cfg[i].fifo_size;
2124 }
2125 } else {
2126 ep->fifo_size = val;
2127 }
2128
2129 ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
2130 if (ret) {
2131 dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
2132 goto err;
2133 }
2134 if (fifo_mode) {
2135 if (val < udc->fifo_cfg[i].nr_banks) {
2136 dev_warn(&pdev->dev,
2137 "Using max nb-banks value from DT\n");
2138 ep->nr_banks = val;
2139 } else {
2140 ep->nr_banks = udc->fifo_cfg[i].nr_banks;
2141 }
2142 } else {
2143 ep->nr_banks = val;
2144 }
2145
2146 ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
2147 ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
2148
2149 sprintf(ep->name, "ep%d", ep->index);
2150 ep->ep.name = ep->name;
2151
2152 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
2153 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
2154 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
2155 ep->ep.ops = &usba_ep_ops;
2156 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
2157 ep->udc = udc;
2158 INIT_LIST_HEAD(&ep->queue);
2159
2160 if (ep->index == 0) {
2161 ep->ep.caps.type_control = true;
2162 } else {
2163 ep->ep.caps.type_iso = ep->can_isoc;
2164 ep->ep.caps.type_bulk = true;
2165 ep->ep.caps.type_int = true;
2166 }
2167
2168 ep->ep.caps.dir_in = true;
2169 ep->ep.caps.dir_out = true;
2170
2171 if (fifo_mode != 0) {
2172 /*
2173  * Generate ept_cfg from the FIFO size and bank count; EPT_SIZE
2174  * encodes the size as a power of two (16 -> 1, ..., 1024 -> 7,
2175  * with 8 bytes using USBA_EPT_SIZE_8).
2175  */
2176 if (ep->fifo_size <= 8)
2177 ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
2178 else
2179
2180 ep->ept_cfg =
2181 USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);
2182
2183 ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
2184 }
2185
2186 if (i)
2187 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2188
2189 i++;
2190 }
2191
2192 if (i == 0) {
2193 dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
2194 ret = -EINVAL;
2195 goto err;
2196 }
2197
2198 return eps;
2199 err:
2200 return ERR_PTR(ret);
2201 }
2202
2203 static int usba_udc_probe(struct platform_device *pdev)
2204 {
2205 struct resource *res;
2206 struct clk *pclk, *hclk;
2207 struct usba_udc *udc;
2208 int irq, ret, i;
2209
2210 udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2211 if (!udc)
2212 return -ENOMEM;
2213
2214 udc->gadget = usba_gadget_template;
2215 INIT_LIST_HEAD(&udc->gadget.ep_list);
2216
2217 res = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
2218 udc->regs = devm_ioremap_resource(&pdev->dev, res);
2219 if (IS_ERR(udc->regs))
2220 return PTR_ERR(udc->regs);
2221 dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n",
2222 res, udc->regs);
2223
2224 res = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
2225 udc->fifo = devm_ioremap_resource(&pdev->dev, res);
2226 if (IS_ERR(udc->fifo))
2227 return PTR_ERR(udc->fifo);
2228 dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo);
2229
2230 irq = platform_get_irq(pdev, 0);
2231 if (irq < 0)
2232 return irq;
2233
2234 pclk = devm_clk_get(&pdev->dev, "pclk");
2235 if (IS_ERR(pclk))
2236 return PTR_ERR(pclk);
2237 hclk = devm_clk_get(&pdev->dev, "hclk");
2238 if (IS_ERR(hclk))
2239 return PTR_ERR(hclk);
2240
2241 spin_lock_init(&udc->lock);
2242 mutex_init(&udc->vbus_mutex);
2243 udc->pdev = pdev;
2244 udc->pclk = pclk;
2245 udc->hclk = hclk;
2246
2247 platform_set_drvdata(pdev, udc);
2248
2249 /* Briefly enable pclk so the controller starts from a known, disabled state */
2250 ret = clk_prepare_enable(pclk);
2251 if (ret) {
2252 dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
2253 return ret;
2254 }
2255
2256 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
2257 clk_disable_unprepare(pclk);
2258
2259 udc->usba_ep = atmel_udc_of_init(pdev, udc);
2260
2261 toggle_bias(udc, 0);
2262
2263 if (IS_ERR(udc->usba_ep))
2264 return PTR_ERR(udc->usba_ep);
2265
2266 ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0,
2267 "atmel_usba_udc", udc);
2268 if (ret) {
2269 dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
2270 irq, ret);
2271 return ret;
2272 }
2273 udc->irq = irq;
2274
2275 if (udc->vbus_pin) {
2276 irq_set_status_flags(gpiod_to_irq(udc->vbus_pin), IRQ_NOAUTOEN);
2277 ret = devm_request_threaded_irq(&pdev->dev,
2278 gpiod_to_irq(udc->vbus_pin), NULL,
2279 usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
2280 "atmel_usba_udc", udc);
2281 if (ret) {
2282 udc->vbus_pin = NULL;
2283 dev_warn(&udc->pdev->dev,
2284 "failed to request vbus irq; "
2285 "assuming always on\n");
2286 }
2287 }
2288
2289 ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2290 if (ret)
2291 return ret;
2292 device_init_wakeup(&pdev->dev, 1);
2293
2294 usba_init_debugfs(udc);
2295 for (i = 1; i < udc->num_ep; i++)
2296 usba_ep_init_debugfs(udc, &udc->usba_ep[i]);
2297
2298 return 0;
2299 }
2300
2301 static int usba_udc_remove(struct platform_device *pdev)
2302 {
2303 struct usba_udc *udc;
2304 int i;
2305
2306 udc = platform_get_drvdata(pdev);
2307
2308 device_init_wakeup(&pdev->dev, 0);
2309 usb_del_gadget_udc(&udc->gadget);
2310
2311 for (i = 1; i < udc->num_ep; i++)
2312 usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
2313 usba_cleanup_debugfs(udc);
2314
2315 return 0;
2316 }
2317
2318 #ifdef CONFIG_PM_SLEEP
2319 static int usba_udc_suspend(struct device *dev)
2320 {
2321 struct usba_udc *udc = dev_get_drvdata(dev);
2322
2323
2324 if (!udc->driver)
2325 return 0;
2326
2327 mutex_lock(&udc->vbus_mutex);
2328
2329 if (!device_may_wakeup(dev)) {
2330 udc->suspended = false;
2331 usba_stop(udc);
2332 goto out;
2333 }
2334
2335 /*
2336  * The device may wake the system up: keep the wakeup sources
2337  * armed (the VBUS GPIO if available, plus the UDC interrupt).
2338  */
2339 if (udc->vbus_pin) {
2340
2341 usba_stop(udc);
2342 enable_irq_wake(gpiod_to_irq(udc->vbus_pin));
2343 }
2344
2345 enable_irq_wake(udc->irq);
2346
2347 out:
2348 mutex_unlock(&udc->vbus_mutex);
2349 return 0;
2350 }
2351
2352 static int usba_udc_resume(struct device *dev)
2353 {
2354 struct usba_udc *udc = dev_get_drvdata(dev);
2355
2356
2357 if (!udc->driver)
2358 return 0;
2359
2360 if (device_may_wakeup(dev)) {
2361 if (udc->vbus_pin)
2362 disable_irq_wake(gpiod_to_irq(udc->vbus_pin));
2363
2364 disable_irq_wake(udc->irq);
2365 }
2366
2367
2368 mutex_lock(&udc->vbus_mutex);
2369 udc->vbus_prev = vbus_is_present(udc);
2370 if (udc->vbus_prev)
2371 usba_start(udc);
2372 mutex_unlock(&udc->vbus_mutex);
2373
2374 return 0;
2375 }
2376 #endif
2377
2378 static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
2379
2380 static struct platform_driver udc_driver = {
2381 .remove = usba_udc_remove,
2382 .driver = {
2383 .name = "atmel_usba_udc",
2384 .pm = &usba_udc_pm_ops,
2385 .of_match_table = atmel_udc_dt_ids,
2386 },
2387 };
2388
2389 module_platform_driver_probe(udc_driver, usba_udc_probe);
2390
2391 MODULE_DESCRIPTION("Atmel USBA UDC driver");
2392 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2393 MODULE_LICENSE("GPL");
2394 MODULE_ALIAS("platform:atmel_usba_udc");