Searched refs: c67x00 (Results 1 - 8 of 8), sorted by relevance

/linux-4.1.27/drivers/usb/c67x00/
Makefile
5 obj-$(CONFIG_USB_C67X00_HCD) += c67x00.o
7 c67x00-y := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o
c67x00-drv.c
2 * c67x00-drv.c: Cypress C67X00 USB Common infrastructure
25 * This file implements the common infrastructure for using the c67x00.
30 * The c67x00 has 2 SIE's (serial interface engine) which can be configured
43 #include <linux/usb/c67x00.h>
45 #include "c67x00.h"
46 #include "c67x00-hcd.h"
88 struct c67x00_device *c67x00 = __dev; c67x00_irq() local
93 int_status = c67x00_ll_hpi_status(c67x00); c67x00_irq()
98 c67x00_ll_irq(c67x00, int_status); c67x00_irq()
100 sie = &c67x00->sie[i]; c67x00_irq()
103 msg = c67x00_ll_fetch_siemsg(c67x00, i); c67x00_irq()
107 int_status = c67x00_ll_hpi_status(c67x00); c67x00_irq()
111 dev_warn(&c67x00->pdev->dev, "Not all interrupts handled! " c67x00_irq()
121 struct c67x00_device *c67x00; c67x00_drv_probe() local
138 c67x00 = kzalloc(sizeof(*c67x00), GFP_KERNEL); c67x00_drv_probe()
139 if (!c67x00) c67x00_drv_probe()
148 c67x00->hpi.base = ioremap(res->start, resource_size(res)); c67x00_drv_probe()
149 if (!c67x00->hpi.base) { c67x00_drv_probe()
155 spin_lock_init(&c67x00->hpi.lock); c67x00_drv_probe()
156 c67x00->hpi.regstep = pdata->hpi_regstep; c67x00_drv_probe()
157 c67x00->pdata = dev_get_platdata(&pdev->dev); c67x00_drv_probe()
158 c67x00->pdev = pdev; c67x00_drv_probe()
160 c67x00_ll_init(c67x00); c67x00_drv_probe()
161 c67x00_ll_hpi_reg_init(c67x00); c67x00_drv_probe()
163 ret = request_irq(res2->start, c67x00_irq, 0, pdev->name, c67x00); c67x00_drv_probe()
169 ret = c67x00_ll_reset(c67x00); c67x00_drv_probe()
176 c67x00_probe_sie(&c67x00->sie[i], c67x00, i); c67x00_drv_probe()
178 platform_set_drvdata(pdev, c67x00); c67x00_drv_probe()
183 free_irq(res2->start, c67x00); c67x00_drv_probe()
185 iounmap(c67x00->hpi.base); c67x00_drv_probe()
189 kfree(c67x00); c67x00_drv_probe()
196 struct c67x00_device *c67x00 = platform_get_drvdata(pdev); c67x00_drv_remove() local
201 c67x00_remove_sie(&c67x00->sie[i]); c67x00_drv_remove()
203 c67x00_ll_release(c67x00); c67x00_drv_remove()
207 free_irq(res->start, c67x00); c67x00_drv_remove()
209 iounmap(c67x00->hpi.base); c67x00_drv_remove()
215 kfree(c67x00); c67x00_drv_remove()
224 .name = "c67x00",
233 MODULE_ALIAS("platform:c67x00");
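
The matches above trace the common infrastructure's life cycle: c67x00_drv_probe() allocates the device structure, maps the HPI window, requests the shared interrupt with the device as cookie, and unwinds everything in reverse on failure and in c67x00_drv_remove(). Below is a minimal sketch of that platform-driver skeleton, not the actual c67x00 code; the foo_* names, the single MEM resource and the use of platform_get_irq() are illustrative assumptions.

/*
 * Minimal platform-driver skeleton in the spirit of c67x00_drv_probe()/
 * c67x00_drv_remove() above.  All foo_* identifiers are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_device {
        void __iomem *base;
        int irq;
};

static irqreturn_t foo_irq(int irq, void *__dev)
{
        struct foo_device *foo = __dev;         /* same cookie pattern as c67x00_irq() */

        /* a real handler would read the interrupt status via foo->base here */
        if (!foo->base)
                return IRQ_NONE;
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct foo_device *foo;
        int ret;

        if (!res)
                return -ENODEV;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        foo->base = ioremap(res->start, resource_size(res));
        if (!foo->base) {
                ret = -ENOMEM;
                goto err_free;
        }

        foo->irq = platform_get_irq(pdev, 0);
        if (foo->irq < 0) {
                ret = foo->irq;
                goto err_unmap;
        }

        ret = request_irq(foo->irq, foo_irq, 0, pdev->name, foo);
        if (ret)
                goto err_unmap;

        platform_set_drvdata(pdev, foo);
        return 0;

err_unmap:
        iounmap(foo->base);
err_free:
        kfree(foo);
        return ret;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_device *foo = platform_get_drvdata(pdev);

        free_irq(foo->irq, foo);                /* reverse order of probe */
        iounmap(foo->base);
        kfree(foo);
        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name = "foo-usb",              /* hypothetical; the real driver name is "c67x00" */
        },
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");
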
c67x00-hcd.c
2 * c67x00-hcd.c: Cypress C67X00 USB Host Controller Driver
28 #include "c67x00.h"
29 #include "c67x00-hcd.h"
49 struct c67x00_hcd *c67x00 = sie->private_data; c67x00_hub_reset_host_port() local
54 spin_lock_irqsave(&c67x00->lock, flags); c67x00_hub_reset_host_port()
56 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_hub_reset_host_port()
63 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_hub_status_data() local
64 struct c67x00_sie *sie = c67x00->sie; c67x00_hub_status_data()
83 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_hub_control() local
84 struct c67x00_sie *sie = c67x00->sie; c67x00_hub_control()
113 c67x00->low_speed_ports |= (1 << port); c67x00_hub_control()
115 c67x00->low_speed_ports &= ~(1 << port); c67x00_hub_control()
144 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
160 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
173 /* Reset the port so that the c67x00 also notices the c67x00_hub_control()
180 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
186 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
192 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
198 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
209 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
215 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
221 dev_dbg(c67x00_hcd_dev(c67x00), c67x00_hub_control()
234 dev_dbg(c67x00_hcd_dev(c67x00), "%s: unknown\n", __func__); c67x00_hub_control()
248 * This function is called from the interrupt handler in c67x00-drv.c
252 struct c67x00_hcd *c67x00 = sie->private_data; c67x00_hcd_irq() local
253 struct usb_hcd *hcd = c67x00_hcd_to_hcd(c67x00); c67x00_hcd_irq()
258 c67x00_sched_kick(c67x00); c67x00_hcd_irq()
260 dev_warn(c67x00_hcd_dev(c67x00), c67x00_hcd_irq()
273 c67x00_sched_kick(c67x00); c67x00_hcd_irq()
299 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_hcd_get_frame() local
302 dev_dbg(c67x00_hcd_dev(c67x00), "%s\n", __func__); c67x00_hcd_get_frame()
303 temp_val = c67x00_ll_husb_get_frame(c67x00->sie); c67x00_hcd_get_frame()
309 .description = "c67x00-hcd",
345 struct c67x00_hcd *c67x00; c67x00_hcd_probe() local
358 c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_hcd_probe()
360 spin_lock_init(&c67x00->lock); c67x00_hcd_probe()
361 c67x00->sie = sie; c67x00_hcd_probe()
363 INIT_LIST_HEAD(&c67x00->list[PIPE_ISOCHRONOUS]); c67x00_hcd_probe()
364 INIT_LIST_HEAD(&c67x00->list[PIPE_INTERRUPT]); c67x00_hcd_probe()
365 INIT_LIST_HEAD(&c67x00->list[PIPE_CONTROL]); c67x00_hcd_probe()
366 INIT_LIST_HEAD(&c67x00->list[PIPE_BULK]); c67x00_hcd_probe()
367 c67x00->urb_count = 0; c67x00_hcd_probe()
368 INIT_LIST_HEAD(&c67x00->td_list); c67x00_hcd_probe()
369 c67x00->td_base_addr = CY_HCD_BUF_ADDR + SIE_TD_OFFSET(sie->sie_num); c67x00_hcd_probe()
370 c67x00->buf_base_addr = CY_HCD_BUF_ADDR + SIE_BUF_OFFSET(sie->sie_num); c67x00_hcd_probe()
371 c67x00->max_frame_bw = MAX_FRAME_BW_STD; c67x00_hcd_probe()
375 init_completion(&c67x00->endpoint_disable); c67x00_hcd_probe()
376 retval = c67x00_sched_start_scheduler(c67x00); c67x00_hcd_probe()
390 sie->private_data = c67x00; c67x00_hcd_probe()
397 c67x00_sched_stop_scheduler(c67x00); c67x00_hcd_probe()
407 struct c67x00_hcd *c67x00 = sie->private_data; c67x00_hcd_remove() local
408 struct usb_hcd *hcd = c67x00_hcd_to_hcd(c67x00); c67x00_hcd_remove()
410 c67x00_sched_stop_scheduler(c67x00); c67x00_hcd_remove()
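
The HCD side keeps its per-SIE state (struct c67x00_hcd) in the private area that usbcore reserves behind struct usb_hcd, which is what hcd_to_c67x00_hcd() above retrieves. Below is a sketch of how such a private area is declared and the host controller registered; the foo_* names, the callback list and the dev_name() bus name are placeholders rather than the real c67x00 entries.

#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

struct foo_hcd {
        spinlock_t lock;
        /* urb lists, TD bookkeeping, ... as in struct c67x00_hcd */
};

static const struct hc_driver foo_hc_driver = {
        .description   = "foo-hcd",
        .hcd_priv_size = sizeof(struct foo_hcd),  /* room reserved behind struct usb_hcd */
        .flags         = HCD_USB11,               /* full/low speed controller */
        /* .urb_enqueue, .urb_dequeue, .endpoint_disable, .hub_status_data,
         * .hub_control, .get_frame_number, ... would be filled in here */
};

static int foo_hcd_attach(struct device *parent)
{
        struct usb_hcd *hcd;
        struct foo_hcd *foo;
        int ret;

        hcd = usb_create_hcd(&foo_hc_driver, parent, dev_name(parent));
        if (!hcd)
                return -ENOMEM;

        foo = (struct foo_hcd *)hcd->hcd_priv;    /* the reserved private area */
        spin_lock_init(&foo->lock);

        /* IRQ 0: interrupts arrive via the common c67x00-drv.c handler instead */
        ret = usb_add_hcd(hcd, 0, 0);
        if (ret)
                usb_put_hcd(hcd);
        return ret;
}
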
c67x00-sched.c
2 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
27 #include "c67x00.h"
28 #include "c67x00-hcd.h"
150 static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) dbg_td() argument
152 struct device *dev = c67x00_hcd_dev(c67x00); dbg_td()
173 static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00) c67x00_get_current_frame_number() argument
175 return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK; c67x00_get_current_frame_number()
212 static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb) c67x00_release_urb() argument
219 c67x00->urb_count--; c67x00_release_urb()
222 c67x00->urb_iso_count--; c67x00_release_urb()
223 if (c67x00->urb_iso_count == 0) c67x00_release_urb()
224 c67x00->max_frame_bw = MAX_FRAME_BW_STD; c67x00_release_urb()
232 list_for_each_entry(td, &c67x00->td_list, td_list) c67x00_release_urb()
245 c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb) c67x00_ep_data_alloc() argument
251 c67x00->current_frame = c67x00_get_current_frame_number(c67x00); c67x00_ep_data_alloc()
256 if (frame_after(c67x00->current_frame, ep_data->next_frame)) c67x00_ep_data_alloc()
258 frame_add(c67x00->current_frame, 1); c67x00_ep_data_alloc()
262 /* Allocate and initialize a new c67x00 endpoint data structure */ c67x00_ep_data_alloc()
277 ep_data->next_frame = frame_add(c67x00->current_frame, 1); c67x00_ep_data_alloc()
283 list_add(&ep_data->node, &c67x00->list[type]); c67x00_ep_data_alloc()
287 list_for_each_entry(prev, &c67x00->list[type], node) { c67x00_ep_data_alloc()
321 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_endpoint_disable() local
325 dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n"); c67x00_endpoint_disable()
327 spin_lock_irqsave(&c67x00->lock, flags); c67x00_endpoint_disable()
332 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_endpoint_disable()
337 reinit_completion(&c67x00->endpoint_disable); c67x00_endpoint_disable()
338 c67x00_sched_kick(c67x00); c67x00_endpoint_disable()
339 wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ); c67x00_endpoint_disable()
341 spin_lock_irqsave(&c67x00->lock, flags); c67x00_endpoint_disable()
344 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_endpoint_disable()
362 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_urb_enqueue() local
372 spin_lock_irqsave(&c67x00->lock, flags); c67x00_urb_enqueue()
388 urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb); c67x00_urb_enqueue()
411 if (c67x00->urb_iso_count == 0) c67x00_urb_enqueue()
412 c67x00->max_frame_bw = MAX_FRAME_BW_ISO; c67x00_urb_enqueue()
413 c67x00->urb_iso_count++; c67x00_urb_enqueue()
437 if (!c67x00->urb_count++) c67x00_urb_enqueue()
438 c67x00_ll_hpi_enable_sofeop(c67x00->sie); c67x00_urb_enqueue()
440 c67x00_sched_kick(c67x00); c67x00_urb_enqueue()
441 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_urb_enqueue()
448 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_urb_enqueue()
457 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd); c67x00_urb_dequeue() local
461 spin_lock_irqsave(&c67x00->lock, flags); c67x00_urb_dequeue()
466 c67x00_release_urb(c67x00, urb); c67x00_urb_dequeue()
469 spin_unlock(&c67x00->lock); c67x00_urb_dequeue()
471 spin_lock(&c67x00->lock); c67x00_urb_dequeue()
473 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_urb_dequeue()
478 spin_unlock_irqrestore(&c67x00->lock, flags); c67x00_urb_dequeue()
485 * pre: c67x00 locked, urb unlocked
488 c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) c67x00_giveback_urb() argument
500 c67x00_release_urb(c67x00, urb); c67x00_giveback_urb()
501 usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); c67x00_giveback_urb()
502 spin_unlock(&c67x00->lock); c67x00_giveback_urb()
503 usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); c67x00_giveback_urb()
504 spin_lock(&c67x00->lock); c67x00_giveback_urb()
509 static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb, c67x00_claim_frame_bw() argument
549 if (unlikely(bit_time + c67x00->bandwidth_allocated >= c67x00_claim_frame_bw()
550 c67x00->max_frame_bw)) c67x00_claim_frame_bw()
553 if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >= c67x00_claim_frame_bw()
554 c67x00->td_base_addr + SIE_TD_SIZE)) c67x00_claim_frame_bw()
557 if (unlikely(c67x00->next_buf_addr + len >= c67x00_claim_frame_bw()
558 c67x00->buf_base_addr + SIE_TD_BUF_SIZE)) c67x00_claim_frame_bw()
562 if (unlikely(bit_time + c67x00->periodic_bw_allocated >= c67x00_claim_frame_bw()
563 MAX_PERIODIC_BW(c67x00->max_frame_bw))) c67x00_claim_frame_bw()
565 c67x00->periodic_bw_allocated += bit_time; c67x00_claim_frame_bw()
568 c67x00->bandwidth_allocated += bit_time; c67x00_claim_frame_bw()
577 static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb, c67x00_create_td() argument
587 if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe) c67x00_create_td()
599 !(c67x00->low_speed_ports & (1 << urbp->port))) c67x00_create_td()
624 td->td_addr = c67x00->next_td_addr; c67x00_create_td()
625 c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE; c67x00_create_td()
628 td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr); c67x00_create_td()
629 td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) | c67x00_create_td()
638 td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr); c67x00_create_td()
645 c67x00->next_buf_addr += (len + 1) & ~0x01; /* properly align */ c67x00_create_td()
647 list_add_tail(&td->td_list, &c67x00->td_list); c67x00_create_td()
659 static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb) c67x00_add_data_urb() argument
688 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle, c67x00_add_data_urb()
705 static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb) c67x00_add_ctrl_urb() argument
713 ret = c67x00_create_td(c67x00, urb, urb->setup_packet, c67x00_add_ctrl_urb()
723 ret = c67x00_add_data_urb(c67x00, urb); c67x00_add_ctrl_urb()
730 ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1, c67x00_add_ctrl_urb()
743 static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb) c67x00_add_int_urb() argument
747 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) { c67x00_add_int_urb()
750 return c67x00_add_data_urb(c67x00, urb); c67x00_add_int_urb()
755 static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb) c67x00_add_iso_urb() argument
759 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) { c67x00_add_iso_urb()
770 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0, c67x00_add_iso_urb()
773 dev_dbg(c67x00_hcd_dev(c67x00), "create failed: %d\n", c67x00_add_iso_urb()
778 c67x00_giveback_urb(c67x00, urb, 0); c67x00_add_iso_urb()
790 static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type, c67x00_fill_from_list() argument
797 list_for_each_entry(ep_data, &c67x00->list[type], node) { c67x00_fill_from_list()
804 add(c67x00, urb); c67x00_fill_from_list()
809 static void c67x00_fill_frame(struct c67x00_hcd *c67x00) c67x00_fill_frame() argument
814 if (!list_empty(&c67x00->td_list)) { c67x00_fill_frame()
815 dev_warn(c67x00_hcd_dev(c67x00), c67x00_fill_frame()
817 list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) { c67x00_fill_frame()
818 dbg_td(c67x00, td, "Unprocessed td"); c67x00_fill_frame()
824 c67x00->bandwidth_allocated = 0; c67x00_fill_frame()
825 c67x00->periodic_bw_allocated = 0; c67x00_fill_frame()
827 c67x00->next_td_addr = c67x00->td_base_addr; c67x00_fill_frame()
828 c67x00->next_buf_addr = c67x00->buf_base_addr; c67x00_fill_frame()
831 c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb); c67x00_fill_frame()
832 c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb); c67x00_fill_frame()
833 c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb); c67x00_fill_frame()
834 c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb); c67x00_fill_frame()
843 c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td) c67x00_parse_td() argument
845 c67x00_ll_read_mem_le16(c67x00->sie->dev, c67x00_parse_td()
849 c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td), c67x00_parse_td()
853 static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td) c67x00_td_to_error() argument
856 dbg_td(c67x00, td, "ERROR_FLAG"); c67x00_td_to_error()
860 /* dbg_td(c67x00, td, "STALL"); */ c67x00_td_to_error()
864 dbg_td(c67x00, td, "TIMEOUT"); c67x00_td_to_error()
902 static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00, c67x00_clear_pipe() argument
908 while (td->td_list.next != &c67x00->td_list) { c67x00_clear_pipe()
920 static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00, c67x00_handle_successful_td() argument
945 c67x00_clear_pipe(c67x00, td); c67x00_handle_successful_td()
951 c67x00_giveback_urb(c67x00, urb, 0); c67x00_handle_successful_td()
959 c67x00_clear_pipe(c67x00, td); c67x00_handle_successful_td()
960 c67x00_giveback_urb(c67x00, urb, 0); c67x00_handle_successful_td()
966 static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td) c67x00_handle_isoc() argument
982 urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td); c67x00_handle_isoc()
984 c67x00_giveback_urb(c67x00, urb, 0); c67x00_handle_isoc()
990 * c67x00_check_td_list - handle tds which have been processed by the c67x00
993 static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00) c67x00_check_td_list() argument
1000 list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) { c67x00_check_td_list()
1002 c67x00_parse_td(c67x00, td); c67x00_check_td_list()
1010 c67x00_handle_isoc(c67x00, td); c67x00_check_td_list()
1018 c67x00_giveback_urb(c67x00, urb, c67x00_check_td_list()
1019 c67x00_td_to_error(c67x00, td)); c67x00_check_td_list()
1033 c67x00_giveback_urb(c67x00, urb, -EOVERFLOW); c67x00_check_td_list()
1039 c67x00_handle_successful_td(c67x00, td); c67x00_check_td_list()
1043 c67x00_clear_pipe(c67x00, td); c67x00_check_td_list()
1056 static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00) c67x00_all_tds_processed() argument
1061 return !c67x00_ll_husb_get_current_td(c67x00->sie); c67x00_all_tds_processed()
1067 static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td) c67x00_send_td() argument
1072 c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td), c67x00_send_td()
1075 c67x00_ll_write_mem_le16(c67x00->sie->dev, c67x00_send_td()
1079 static void c67x00_send_frame(struct c67x00_hcd *c67x00) c67x00_send_frame() argument
1083 if (list_empty(&c67x00->td_list)) c67x00_send_frame()
1084 dev_warn(c67x00_hcd_dev(c67x00), c67x00_send_frame()
1088 list_for_each_entry(td, &c67x00->td_list, td_list) { c67x00_send_frame()
1089 if (td->td_list.next == &c67x00->td_list) c67x00_send_frame()
1092 c67x00_send_td(c67x00, td); c67x00_send_frame()
1095 c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr); c67x00_send_frame()
1103 static void c67x00_do_work(struct c67x00_hcd *c67x00) c67x00_do_work() argument
1105 spin_lock(&c67x00->lock); c67x00_do_work()
1107 if (!c67x00_all_tds_processed(c67x00)) c67x00_do_work()
1110 c67x00_check_td_list(c67x00); c67x00_do_work()
1114 complete(&c67x00->endpoint_disable); c67x00_do_work()
1116 if (!list_empty(&c67x00->td_list)) c67x00_do_work()
1119 c67x00->current_frame = c67x00_get_current_frame_number(c67x00); c67x00_do_work()
1120 if (c67x00->current_frame == c67x00->last_frame) c67x00_do_work()
1122 c67x00->last_frame = c67x00->current_frame; c67x00_do_work()
1125 if (!c67x00->urb_count) { c67x00_do_work()
1126 c67x00_ll_hpi_disable_sofeop(c67x00->sie); c67x00_do_work()
1130 c67x00_fill_frame(c67x00); c67x00_do_work()
1131 if (!list_empty(&c67x00->td_list)) c67x00_do_work()
1133 c67x00_send_frame(c67x00); c67x00_do_work()
1136 spin_unlock(&c67x00->lock); c67x00_do_work()
1143 struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00; c67x00_sched_tasklet() local
1144 c67x00_do_work(c67x00); c67x00_sched_tasklet()
1147 void c67x00_sched_kick(struct c67x00_hcd *c67x00) c67x00_sched_kick() argument
1149 tasklet_hi_schedule(&c67x00->tasklet); c67x00_sched_kick()
1152 int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00) c67x00_sched_start_scheduler() argument
1154 tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet, c67x00_sched_start_scheduler()
1155 (unsigned long)c67x00); c67x00_sched_start_scheduler()
1159 void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00) c67x00_sched_stop_scheduler() argument
1161 tasklet_kill(&c67x00->tasklet); c67x00_sched_stop_scheduler()
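
The scheduler above keys everything to the hardware frame counter (c67x00_get_current_frame_number() masks it with HOST_FRAME_MASK) and compares frames with frame_after()/frame_add() so that wraparound is handled. The following standalone sketch shows that kind of modular frame arithmetic, assuming the usual 11-bit USB frame number; the mask value and the helper bodies are illustrative, not the driver's definitions.

/* Wraparound-safe frame arithmetic, in the spirit of frame_add()/frame_after(). */
#include <stdint.h>
#include <stdio.h>

#define FRAME_MASK 0x07FF                       /* USB frame numbers are 11 bits wide */

static uint16_t frame_add(uint16_t a, uint16_t b)
{
        return (a + b) & FRAME_MASK;            /* advance and wrap at 2048 */
}

static int frame_after(uint16_t a, uint16_t b)
{
        /* "a is after b" if a lies less than half the ring ahead of b */
        return ((a - b) & FRAME_MASK) != 0 &&
               ((a - b) & FRAME_MASK) < (FRAME_MASK / 2);
}

int main(void)
{
        uint16_t cur = 2046;
        uint16_t next = frame_add(cur, 4);      /* wraps around to 2 */

        printf("next=%u after=%d\n", (unsigned)next, frame_after(next, cur));
        return 0;
}
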
c67x00-hcd.h
2 * c67x00-hcd.h: Cypress C67X00 USB HCD
32 #include "c67x00.h"
106 static inline struct usb_hcd *c67x00_hcd_to_hcd(struct c67x00_hcd *c67x00) c67x00_hcd_to_hcd() argument
108 return container_of((void *)c67x00, struct usb_hcd, hcd_priv); c67x00_hcd_to_hcd()
112 * Functions used by c67x00-drv
127 void c67x00_sched_kick(struct c67x00_hcd *c67x00);
128 int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00);
129 void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00);
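
The scheduler entry points declared here (c67x00_sched_kick/start/stop) map onto a tasklet, as the c67x00-sched.c matches above show: the IRQ path only schedules the tasklet, the tasklet does the per-frame work, and teardown kills it. A small sketch of that pattern, with hypothetical foo_* names and a stubbed work function:

#include <linux/interrupt.h>

struct foo_hcd_state {
        struct tasklet_struct tasklet;
};

static void foo_sched_tasklet(unsigned long __foo)
{
        struct foo_hcd_state *foo = (struct foo_hcd_state *)__foo;

        /* c67x00_do_work() equivalent: check finished TDs, fill the next frame */
        (void)foo;
}

static void foo_sched_kick(struct foo_hcd_state *foo)
{
        tasklet_hi_schedule(&foo->tasklet);     /* safe to call from IRQ context */
}

static void foo_sched_start(struct foo_hcd_state *foo)
{
        tasklet_init(&foo->tasklet, foo_sched_tasklet, (unsigned long)foo);
}

static void foo_sched_stop(struct foo_hcd_state *foo)
{
        tasklet_kill(&foo->tasklet);            /* wait for any running instance */
}
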
c67x00-ll-hpi.c
2 * c67x00-ll-hpi.c: Cypress C67X00 USB Low level interface using HPI
28 #include <linux/usb/c67x00.h>
29 #include "c67x00.h"
65 * The c67x00 chip also support control via SPI or HSS serial
413 * c67x00_ll_write_mem_le16 - write into c67x00 memory
452 * c67x00_ll_read_mem_le16 - read from c67x00 memory
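
The low-level helpers move 16-bit little-endian words to and from the chip's internal memory, which is why the TD fields in c67x00-sched.c above are filled with __cpu_to_le16(). The sketch below illustrates that endianness discipline for a hypothetical two-field descriptor; foo_td and the 0x3ff length mask are assumptions, and only the SIE-number-in-bit-15 part mirrors the port_length assignment shown earlier.

#include <linux/types.h>
#include <asm/byteorder.h>

struct foo_td {
        __le16 ly_base_addr;                    /* buffer address inside chip memory */
        __le16 port_length;                     /* bit 15: SIE/port, low bits: length */
};

static void foo_fill_td(struct foo_td *td, u16 buf_addr, u16 sie_num, u16 len)
{
        td->ly_base_addr = cpu_to_le16(buf_addr);
        td->port_length  = cpu_to_le16((sie_num << 15) | (len & 0x3ff));
}

static u16 foo_td_len(const struct foo_td *td)
{
        return le16_to_cpu(td->port_length) & 0x3ff;
}
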
c67x00.h
2 * c67x00.h: Cypress C67X00 USB register and field definitions
244 * struct c67x00_device - Common data associated with a c67x00 instance
/linux-4.1.27/arch/xtensa/platforms/xtfpga/
setup.c
192 #include <linux/usb/c67x00.h>
259 .name = "c67x00",
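
The xtfpga board file is the only hit outside drivers/usb/c67x00/: it registers a platform device named "c67x00" with platform data from <linux/usb/c67x00.h>. The sketch below shows what such a board-side registration looks like; the address, IRQ number and regstep value are placeholders rather than the xtfpga values, and only hpi_regstep is shown because it is the field c67x00_drv_probe() above reads.

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/usb/c67x00.h>

static struct resource foo_c67x00_resources[] = {
        DEFINE_RES_MEM(0x0d0d0000, 0x100),      /* placeholder HPI window */
        DEFINE_RES_IRQ(10),                     /* placeholder interrupt line */
};

static struct c67x00_platform_data foo_c67x00_pdata = {
        .hpi_regstep = 4,       /* spacing of the HPI registers (see the probe above);
                                 * the remaining fields are in <linux/usb/c67x00.h> */
};

static struct platform_device foo_c67x00_device = {
        .name          = "c67x00",              /* must match the driver name above */
        .id            = -1,                    /* single instance */
        .resource      = foo_c67x00_resources,
        .num_resources = ARRAY_SIZE(foo_c67x00_resources),
        .dev           = {
                .platform_data = &foo_c67x00_pdata,
        },
};

/* A board init function would then call platform_device_register(&foo_c67x00_device); */
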

Completed in 172 milliseconds