Searched refs:ep (Results 1 - 200 of 469) sorted by relevance


/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_cm.c  111 static void connect_reply_upcall(struct iwch_ep *ep, int status);
113 static void start_ep_timer(struct iwch_ep *ep) start_ep_timer() argument
115 PDBG("%s ep %p\n", __func__, ep); start_ep_timer()
116 if (timer_pending(&ep->timer)) { start_ep_timer()
117 PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); start_ep_timer()
118 del_timer_sync(&ep->timer); start_ep_timer()
120 get_ep(&ep->com); start_ep_timer()
121 ep->timer.expires = jiffies + ep_timeout_secs * HZ; start_ep_timer()
122 ep->timer.data = (unsigned long)ep; start_ep_timer()
123 ep->timer.function = ep_timeout; start_ep_timer()
124 add_timer(&ep->timer); start_ep_timer()
127 static void stop_ep_timer(struct iwch_ep *ep) stop_ep_timer() argument
129 PDBG("%s ep %p\n", __func__, ep); stop_ep_timer()
130 if (!timer_pending(&ep->timer)) { stop_ep_timer()
131 WARN(1, "%s timer stopped when its not running! ep %p state %u\n", stop_ep_timer()
132 __func__, ep, ep->com.state); stop_ep_timer()
135 del_timer_sync(&ep->timer); stop_ep_timer()
136 put_ep(&ep->com); stop_ep_timer()
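
The start/stop pair above encodes a kernel idiom worth spelling out: the armed timer owns one reference to the endpoint (the `} else get_ep(...)` branch and the early `return` after the WARN are elided from the excerpt), so the object cannot be freed while ep_timeout() may still fire, and stop_ep_timer() drops that reference only after del_timer_sync() guarantees the handler has finished. A minimal user-space sketch of the same ownership rule, with an atomic counter standing in for kref (all names here are illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ep {
        atomic_int refcnt;
        int timer_armed;              /* stands in for timer_pending() */
    };

    static void get_ep(struct ep *e) { atomic_fetch_add(&e->refcnt, 1); }

    static void put_ep(struct ep *e)
    {
        if (atomic_fetch_sub(&e->refcnt, 1) == 1)
            free(e);                  /* last reference: __free_ep() */
    }

    static void start_timer(struct ep *e)
    {
        if (!e->timer_armed)
            get_ep(e);                /* the armed timer owns one reference */
        e->timer_armed = 1;           /* add_timer() in the driver */
    }

    static void stop_timer(struct ep *e)
    {
        if (!e->timer_armed)
            return;                   /* WARN() and bail in the driver */
        e->timer_armed = 0;           /* del_timer_sync() in the driver */
        put_ep(e);                    /* drop the timer's reference */
    }
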
186 int iwch_quiesce_tid(struct iwch_ep *ep) iwch_quiesce_tid() argument
195 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); iwch_quiesce_tid()
196 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); iwch_quiesce_tid()
204 return iwch_cxgb3_ofld_send(ep->com.tdev, skb); iwch_quiesce_tid()
207 int iwch_resume_tid(struct iwch_ep *ep) iwch_resume_tid() argument
216 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); iwch_resume_tid()
217 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); iwch_resume_tid()
225 return iwch_cxgb3_ofld_send(ep->com.tdev, skb); iwch_resume_tid()
228 static void set_emss(struct iwch_ep *ep, u16 opt) set_emss() argument
230 PDBG("%s ep %p opt %u\n", __func__, ep, opt); set_emss()
231 ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40; set_emss()
233 ep->emss -= 12; set_emss()
234 if (ep->emss < 128) set_emss()
235 ep->emss = 128; set_emss()
236 PDBG("emss=%d\n", ep->emss); set_emss()
276 PDBG("%s alloc ep %p\n", __func__, epc); alloc_ep()
282 struct iwch_ep *ep; __free_ep() local
283 ep = container_of(container_of(kref, struct iwch_ep_common, kref), __free_ep()
285 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); __free_ep()
286 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { __free_ep()
287 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); __free_ep()
288 dst_release(ep->dst); __free_ep()
289 l2t_release(ep->com.tdev, ep->l2t); __free_ep()
291 kfree(ep); __free_ep()
294 static void release_ep_resources(struct iwch_ep *ep) release_ep_resources() argument
296 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); release_ep_resources()
297 set_bit(RELEASE_RESOURCES, &ep->com.flags); release_ep_resources()
298 put_ep(&ep->com); release_ep_resources()
387 static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) send_halfclose() argument
392 PDBG("%s ep %p\n", __func__, ep); send_halfclose()
402 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); send_halfclose()
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); send_halfclose()
404 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); send_halfclose()
407 static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) send_abort() argument
411 PDBG("%s ep %p\n", __func__, ep); send_abort()
423 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); send_abort()
424 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); send_abort()
426 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); send_abort()
429 static int send_connect(struct iwch_ep *ep) send_connect() argument
437 PDBG("%s ep %p\n", __func__, ep); send_connect()
445 mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst)); send_connect()
453 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); send_connect()
454 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); send_connect()
462 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid)); send_connect()
463 req->local_port = ep->com.local_addr.sin_port; send_connect()
464 req->peer_port = ep->com.remote_addr.sin_port; send_connect()
465 req->local_ip = ep->com.local_addr.sin_addr.s_addr; send_connect()
466 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; send_connect()
471 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); send_connect()
474 static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) send_mpa_req() argument
481 PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen); send_mpa_req()
485 mpalen = sizeof(*mpa) + ep->plen; send_mpa_req()
490 connect_reply_upcall(ep, -ENOMEM); send_mpa_req()
503 mpa->private_data_size = htons(ep->plen); send_mpa_req()
506 if (ep->plen) send_mpa_req()
507 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); send_mpa_req()
520 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); send_mpa_req()
522 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | send_mpa_req()
525 req->sndseq = htonl(ep->snd_seq); send_mpa_req()
526 BUG_ON(ep->mpa_skb); send_mpa_req()
527 ep->mpa_skb = skb; send_mpa_req()
528 iwch_l2t_send(ep->com.tdev, skb, ep->l2t); send_mpa_req()
529 start_ep_timer(ep); send_mpa_req()
530 state_set(&ep->com, MPA_REQ_SENT); send_mpa_req()
534 static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) send_mpa_reject() argument
541 PDBG("%s ep %p plen %d\n", __func__, ep, plen); send_mpa_reject()
571 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); send_mpa_reject()
573 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | send_mpa_reject()
576 req->sndseq = htonl(ep->snd_seq); send_mpa_reject()
577 BUG_ON(ep->mpa_skb); send_mpa_reject()
578 ep->mpa_skb = skb; send_mpa_reject()
579 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); send_mpa_reject()
582 static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) send_mpa_reply() argument
590 PDBG("%s ep %p plen %d\n", __func__, ep, plen); send_mpa_reply()
604 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | send_mpa_reply()
622 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); send_mpa_reply()
624 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | send_mpa_reply()
627 req->sndseq = htonl(ep->snd_seq); send_mpa_reply()
628 ep->mpa_skb = skb; send_mpa_reply()
629 state_set(&ep->com, MPA_REP_SENT); send_mpa_reply()
630 return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); send_mpa_reply()
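All three send_mpa_* functions serialize the same frame: an MPA header followed by ep->plen bytes of ULP private data, with the length stored big-endian (the htons(ep->plen) above). The wire layout per RFC 5044, which the driver's struct mpa_message mirrors, is roughly:

    #include <stdint.h>

    /* MPA request/reply frame as it crosses the wire (RFC 5044). */
    struct mpa_frame {
        uint8_t  key[16];            /* "MPA ID Req Frame" / "MPA ID Rep Frame" */
        uint8_t  flags;              /* MPA_CRC | MPA_MARKERS | MPA_REJECT bits */
        uint8_t  revision;           /* mpa_rev */
        uint16_t private_data_size;  /* big-endian, == ep->plen */
        /* followed by private_data_size bytes of private data */
    };
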
635 struct iwch_ep *ep = ctx; act_establish() local
639 PDBG("%s ep %p tid %d\n", __func__, ep, tid); act_establish()
641 dst_confirm(ep->dst); act_establish()
644 ep->hwtid = tid; act_establish()
645 cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid); act_establish()
647 ep->snd_seq = ntohl(req->snd_isn); act_establish()
648 ep->rcv_seq = ntohl(req->rcv_isn); act_establish()
650 set_emss(ep, ntohs(req->tcp_opt)); act_establish()
653 cxgb3_free_atid(ep->com.tdev, ep->atid); act_establish()
656 send_mpa_req(ep, skb); act_establish()
661 static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) abort_connection() argument
663 PDBG("%s ep %p\n", __FILE__, ep); abort_connection()
664 state_set(&ep->com, ABORTING); abort_connection()
665 send_abort(ep, skb, gfp); abort_connection()
668 static void close_complete_upcall(struct iwch_ep *ep) close_complete_upcall() argument
672 PDBG("%s ep %p\n", __func__, ep); close_complete_upcall()
675 if (ep->com.cm_id) { close_complete_upcall()
676 PDBG("close complete delivered ep %p cm_id %p tid %d\n", close_complete_upcall()
677 ep, ep->com.cm_id, ep->hwtid); close_complete_upcall()
678 ep->com.cm_id->event_handler(ep->com.cm_id, &event); close_complete_upcall()
679 ep->com.cm_id->rem_ref(ep->com.cm_id); close_complete_upcall()
680 ep->com.cm_id = NULL; close_complete_upcall()
681 ep->com.qp = NULL; close_complete_upcall()
685 static void peer_close_upcall(struct iwch_ep *ep) peer_close_upcall() argument
689 PDBG("%s ep %p\n", __func__, ep); peer_close_upcall()
692 if (ep->com.cm_id) { peer_close_upcall()
693 PDBG("peer close delivered ep %p cm_id %p tid %d\n", peer_close_upcall()
694 ep, ep->com.cm_id, ep->hwtid); peer_close_upcall()
695 ep->com.cm_id->event_handler(ep->com.cm_id, &event); peer_close_upcall()
699 static void peer_abort_upcall(struct iwch_ep *ep) peer_abort_upcall() argument
703 PDBG("%s ep %p\n", __func__, ep); peer_abort_upcall()
707 if (ep->com.cm_id) { peer_abort_upcall()
708 PDBG("abort delivered ep %p cm_id %p tid %d\n", ep, peer_abort_upcall()
709 ep->com.cm_id, ep->hwtid); peer_abort_upcall()
710 ep->com.cm_id->event_handler(ep->com.cm_id, &event); peer_abort_upcall()
711 ep->com.cm_id->rem_ref(ep->com.cm_id); peer_abort_upcall()
712 ep->com.cm_id = NULL; peer_abort_upcall()
713 ep->com.qp = NULL; peer_abort_upcall()
717 static void connect_reply_upcall(struct iwch_ep *ep, int status) connect_reply_upcall() argument
721 PDBG("%s ep %p status %d\n", __func__, ep, status); connect_reply_upcall()
725 memcpy(&event.local_addr, &ep->com.local_addr, connect_reply_upcall()
726 sizeof(ep->com.local_addr)); connect_reply_upcall()
727 memcpy(&event.remote_addr, &ep->com.remote_addr, connect_reply_upcall()
728 sizeof(ep->com.remote_addr)); connect_reply_upcall()
731 event.private_data_len = ep->plen; connect_reply_upcall()
732 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); connect_reply_upcall()
734 if (ep->com.cm_id) { connect_reply_upcall()
735 PDBG("%s ep %p tid %d status %d\n", __func__, ep, connect_reply_upcall()
736 ep->hwtid, status); connect_reply_upcall()
737 ep->com.cm_id->event_handler(ep->com.cm_id, &event); connect_reply_upcall()
740 ep->com.cm_id->rem_ref(ep->com.cm_id); connect_reply_upcall()
741 ep->com.cm_id = NULL; connect_reply_upcall()
742 ep->com.qp = NULL; connect_reply_upcall()
746 static void connect_request_upcall(struct iwch_ep *ep) connect_request_upcall() argument
750 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); connect_request_upcall()
753 memcpy(&event.local_addr, &ep->com.local_addr, connect_request_upcall()
754 sizeof(ep->com.local_addr)); connect_request_upcall()
755 memcpy(&event.remote_addr, &ep->com.remote_addr, connect_request_upcall()
756 sizeof(ep->com.local_addr)); connect_request_upcall()
757 event.private_data_len = ep->plen; connect_request_upcall()
758 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); connect_request_upcall()
759 event.provider_data = ep; connect_request_upcall()
765 if (state_read(&ep->parent_ep->com) != DEAD) { connect_request_upcall()
766 get_ep(&ep->com); connect_request_upcall()
767 ep->parent_ep->com.cm_id->event_handler( connect_request_upcall()
768 ep->parent_ep->com.cm_id, connect_request_upcall()
771 put_ep(&ep->parent_ep->com); connect_request_upcall()
772 ep->parent_ep = NULL; connect_request_upcall()
775 static void established_upcall(struct iwch_ep *ep) established_upcall() argument
779 PDBG("%s ep %p\n", __func__, ep); established_upcall()
787 if (ep->com.cm_id) { established_upcall()
788 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); established_upcall()
789 ep->com.cm_id->event_handler(ep->com.cm_id, &event); established_upcall()
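The cluster of *_upcall() helpers above shares one shape: build an iw_cm_event, hand it to cm_id->event_handler(), and on terminal events (close complete, peer abort, failed connect reply) also drop the cm_id reference and sever the cm_id/qp association so no further events can be delivered. A condensed model of that shape (types reduced to the essentials; this is not the driver's actual API):

    #include <stddef.h>

    struct cm_event { int type; int status; };
    struct cm_id {
        void (*event_handler)(struct cm_id *id, const struct cm_event *ev);
        void (*rem_ref)(struct cm_id *id);
    };
    struct conn { struct cm_id *cm_id; void *qp; };

    /* Deliver an event upward; terminal events sever the association. */
    static void deliver_upcall(struct conn *c, struct cm_event ev, int terminal)
    {
        if (!c->cm_id)
            return;                          /* already severed */
        c->cm_id->event_handler(c->cm_id, &ev);
        if (terminal) {
            c->cm_id->rem_ref(c->cm_id);
            c->cm_id = NULL;
            c->qp = NULL;
        }
    }
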
793 static int update_rx_credits(struct iwch_ep *ep, u32 credits) update_rx_credits() argument
798 PDBG("%s ep %p credits %u\n", __func__, ep, credits); update_rx_credits()
807 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); update_rx_credits()
810 iwch_cxgb3_ofld_send(ep->com.tdev, skb); update_rx_credits()
814 static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) process_mpa_reply() argument
822 PDBG("%s ep %p\n", __func__, ep); process_mpa_reply()
829 stop_ep_timer(ep); process_mpa_reply()
830 if (state_read(&ep->com) != MPA_REQ_SENT) process_mpa_reply()
837 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { process_mpa_reply()
845 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), process_mpa_reply()
847 ep->mpa_pkt_len += skb->len; process_mpa_reply()
852 if (ep->mpa_pkt_len < sizeof(*mpa)) process_mpa_reply()
854 mpa = (struct mpa_message *) ep->mpa_pkt; process_mpa_reply()
879 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { process_mpa_reply()
884 ep->plen = (u8) plen; process_mpa_reply()
890 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) process_mpa_reply()
903 state_set(&ep->com, FPDU_MODE); process_mpa_reply()
904 ep->mpa_attr.initiator = 1; process_mpa_reply()
905 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; process_mpa_reply()
906 ep->mpa_attr.recv_marker_enabled = markers_enabled; process_mpa_reply()
907 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; process_mpa_reply()
908 ep->mpa_attr.version = mpa_rev; process_mpa_reply()
911 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, process_mpa_reply()
912 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); process_mpa_reply()
914 attrs.mpa_attr = ep->mpa_attr; process_mpa_reply()
915 attrs.max_ird = ep->ird; process_mpa_reply()
916 attrs.max_ord = ep->ord; process_mpa_reply()
917 attrs.llp_stream_handle = ep; process_mpa_reply()
925 err = iwch_modify_qp(ep->com.qp->rhp, process_mpa_reply()
926 ep->com.qp, mask, &attrs, 1); process_mpa_reply()
930 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { process_mpa_reply()
931 iwch_post_zb_read(ep); process_mpa_reply()
936 abort_connection(ep, skb, GFP_KERNEL); process_mpa_reply()
938 connect_reply_upcall(ep, err); process_mpa_reply()
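process_mpa_reply() (and process_mpa_request() below) reassemble the MPA frame from however many TCP segments it arrives in: each segment is appended to ep->mpa_pkt, and parsing proceeds only once the header and then the full private-data payload are present, aborting on overflow or trailing bytes. A self-contained sketch of that accumulate-and-validate loop (offset 18 is the big-endian length field after the 16-byte key, flags, and revision; names illustrative):

    #include <stdint.h>
    #include <string.h>

    enum mpa_status { MPA_NEED_MORE, MPA_COMPLETE, MPA_ERROR };

    #define MPA_HDR_LEN 20   /* key[16] + flags + revision + be16 length */

    /* Append one segment to the reassembly buffer; report whether a
     * complete frame (header + private data, nothing extra) is in. */
    static enum mpa_status mpa_feed(uint8_t *pkt, size_t *pkt_len, size_t cap,
                                    const uint8_t *seg, size_t seg_len)
    {
        if (*pkt_len + seg_len > cap)
            return MPA_ERROR;                   /* overflow -> abort */
        memcpy(pkt + *pkt_len, seg, seg_len);
        *pkt_len += seg_len;

        if (*pkt_len < MPA_HDR_LEN)
            return MPA_NEED_MORE;               /* header incomplete */

        size_t plen = (size_t)pkt[18] << 8 | pkt[19];
        if (*pkt_len > MPA_HDR_LEN + plen)
            return MPA_ERROR;                   /* trailing bytes */
        if (*pkt_len < MPA_HDR_LEN + plen)
            return MPA_NEED_MORE;               /* private data pending */
        return MPA_COMPLETE;
    }
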
942 static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb) process_mpa_request() argument
947 PDBG("%s ep %p\n", __func__, ep); process_mpa_request()
954 stop_ep_timer(ep); process_mpa_request()
955 if (state_read(&ep->com) != MPA_REQ_WAIT) process_mpa_request()
962 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { process_mpa_request()
963 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
972 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), process_mpa_request()
974 ep->mpa_pkt_len += skb->len; process_mpa_request()
980 if (ep->mpa_pkt_len < sizeof(*mpa)) process_mpa_request()
983 mpa = (struct mpa_message *) ep->mpa_pkt; process_mpa_request()
989 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
994 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1004 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1011 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { process_mpa_request()
1012 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1015 ep->plen = (u8) plen; process_mpa_request()
1020 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) process_mpa_request()
1027 ep->mpa_attr.initiator = 0; process_mpa_request()
1028 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; process_mpa_request()
1029 ep->mpa_attr.recv_marker_enabled = markers_enabled; process_mpa_request()
1030 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; process_mpa_request()
1031 ep->mpa_attr.version = mpa_rev; process_mpa_request()
1034 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, process_mpa_request()
1035 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); process_mpa_request()
1037 state_set(&ep->com, MPA_REQ_RCVD); process_mpa_request()
1040 connect_request_upcall(ep); process_mpa_request()
1046 struct iwch_ep *ep = ctx; rx_data() local
1050 PDBG("%s ep %p dlen %u\n", __func__, ep, dlen); rx_data()
1055 ep->rcv_seq += dlen; rx_data()
1056 BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); rx_data()
1058 switch (state_read(&ep->com)) { rx_data()
1060 process_mpa_reply(ep, skb); rx_data()
1063 process_mpa_request(ep, skb); rx_data()
1069 " ep %p state %d tid %d\n", rx_data()
1070 __func__, ep, state_read(&ep->com), ep->hwtid); rx_data()
1073 * The ep will timeout and inform the ULP of the failure. rx_data()
1080 update_rx_credits(ep, dlen); rx_data()
1092 struct iwch_ep *ep = ctx; tx_ack() local
1098 PDBG("%s ep %p credits %u\n", __func__, ep, credits); tx_ack()
1101 PDBG("%s 0 credit ack ep %p state %u\n", tx_ack()
1102 __func__, ep, state_read(&ep->com)); tx_ack()
1106 spin_lock_irqsave(&ep->com.lock, flags); tx_ack()
1108 dst_confirm(ep->dst); tx_ack()
1109 if (!ep->mpa_skb) { tx_ack()
1110 PDBG("%s rdma_init wr_ack ep %p state %u\n", tx_ack()
1111 __func__, ep, ep->com.state); tx_ack()
1112 if (ep->mpa_attr.initiator) { tx_ack()
1113 PDBG("%s initiator ep %p state %u\n", tx_ack()
1114 __func__, ep, ep->com.state); tx_ack()
1115 if (peer2peer && ep->com.state == FPDU_MODE) tx_ack()
1118 PDBG("%s responder ep %p state %u\n", tx_ack()
1119 __func__, ep, ep->com.state); tx_ack()
1120 if (ep->com.state == MPA_REQ_RCVD) { tx_ack()
1121 ep->com.rpl_done = 1; tx_ack()
1122 wake_up(&ep->com.waitq); tx_ack()
1126 PDBG("%s lsm ack ep %p state %u freeing skb\n", tx_ack()
1127 __func__, ep, ep->com.state); tx_ack()
1128 kfree_skb(ep->mpa_skb); tx_ack()
1129 ep->mpa_skb = NULL; tx_ack()
1131 spin_unlock_irqrestore(&ep->com.lock, flags); tx_ack()
1133 iwch_post_zb_read(ep); tx_ack()
1139 struct iwch_ep *ep = ctx; abort_rpl() local
1143 PDBG("%s ep %p\n", __func__, ep); abort_rpl()
1144 BUG_ON(!ep); abort_rpl()
1150 if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) { abort_rpl()
1154 spin_lock_irqsave(&ep->com.lock, flags); abort_rpl()
1155 switch (ep->com.state) { abort_rpl()
1157 close_complete_upcall(ep); abort_rpl()
1158 __state_set(&ep->com, DEAD); abort_rpl()
1162 printk(KERN_ERR "%s ep %p state %d\n", abort_rpl()
1163 __func__, ep, ep->com.state); abort_rpl()
1166 spin_unlock_irqrestore(&ep->com.lock, flags); abort_rpl()
1169 release_ep_resources(ep); abort_rpl()
1184 struct iwch_ep *ep = ctx; act_open_rpl() local
1187 PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status, act_open_rpl()
1189 connect_reply_upcall(ep, status2errno(rpl->status)); act_open_rpl()
1190 state_set(&ep->com, DEAD); act_open_rpl()
1191 if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status)) act_open_rpl()
1192 release_tid(ep->com.tdev, GET_TID(rpl), NULL); act_open_rpl()
1193 cxgb3_free_atid(ep->com.tdev, ep->atid); act_open_rpl()
1194 dst_release(ep->dst); act_open_rpl()
1195 l2t_release(ep->com.tdev, ep->l2t); act_open_rpl()
1196 put_ep(&ep->com); act_open_rpl()
1200 static int listen_start(struct iwch_listen_ep *ep) listen_start() argument
1205 PDBG("%s ep %p\n", __func__, ep); listen_start()
1214 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid)); listen_start()
1215 req->local_port = ep->com.local_addr.sin_port; listen_start()
1216 req->local_ip = ep->com.local_addr.sin_addr.s_addr; listen_start()
1225 return iwch_cxgb3_ofld_send(ep->com.tdev, skb); listen_start()
1230 struct iwch_listen_ep *ep = ctx; pass_open_rpl() local
1233 PDBG("%s ep %p status %d error %d\n", __func__, ep, pass_open_rpl()
1235 ep->com.rpl_err = status2errno(rpl->status); pass_open_rpl()
1236 ep->com.rpl_done = 1; pass_open_rpl()
1237 wake_up(&ep->com.waitq); pass_open_rpl()
1242 static int listen_stop(struct iwch_listen_ep *ep) listen_stop() argument
1247 PDBG("%s ep %p\n", __func__, ep); listen_stop()
1256 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); listen_stop()
1258 return iwch_cxgb3_ofld_send(ep->com.tdev, skb); listen_stop()
1264 struct iwch_listen_ep *ep = ctx; close_listsrv_rpl() local
1267 PDBG("%s ep %p\n", __func__, ep); close_listsrv_rpl()
1268 ep->com.rpl_err = status2errno(rpl->status); close_listsrv_rpl()
1269 ep->com.rpl_done = 1; close_listsrv_rpl()
1270 wake_up(&ep->com.waitq); close_listsrv_rpl()
1274 static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) accept_cr() argument
1281 PDBG("%s ep %p\n", __func__, ep); accept_cr()
1285 mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst)); accept_cr()
1293 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); accept_cr()
1294 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); accept_cr()
1300 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid)); accept_cr()
1307 iwch_l2t_send(ep->com.tdev, skb, ep->l2t); accept_cr()
1350 PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); pass_accept_req()
1353 printk(KERN_ERR "%s - listening ep not in LISTEN\n", pass_accept_req()
1390 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", pass_accept_req()
1423 struct iwch_ep *ep = ctx; pass_establish() local
1426 PDBG("%s ep %p\n", __func__, ep); pass_establish()
1427 ep->snd_seq = ntohl(req->snd_isn); pass_establish()
1428 ep->rcv_seq = ntohl(req->rcv_isn); pass_establish()
1430 set_emss(ep, ntohs(req->tcp_opt)); pass_establish()
1432 dst_confirm(ep->dst); pass_establish()
1433 state_set(&ep->com, MPA_REQ_WAIT); pass_establish()
1434 start_ep_timer(ep); pass_establish()
1441 struct iwch_ep *ep = ctx; peer_close() local
1447 PDBG("%s ep %p\n", __func__, ep); peer_close()
1448 dst_confirm(ep->dst); peer_close()
1450 spin_lock_irqsave(&ep->com.lock, flags); peer_close()
1451 switch (ep->com.state) { peer_close()
1453 __state_set(&ep->com, CLOSING); peer_close()
1456 __state_set(&ep->com, CLOSING); peer_close()
1457 connect_reply_upcall(ep, -ECONNRESET); peer_close()
1467 __state_set(&ep->com, CLOSING); peer_close()
1468 ep->com.rpl_done = 1; peer_close()
1469 ep->com.rpl_err = -ECONNRESET; peer_close()
1470 PDBG("waking up ep %p\n", ep); peer_close()
1471 wake_up(&ep->com.waitq); peer_close()
1474 __state_set(&ep->com, CLOSING); peer_close()
1475 ep->com.rpl_done = 1; peer_close()
1476 ep->com.rpl_err = -ECONNRESET; peer_close()
1477 PDBG("waking up ep %p\n", ep); peer_close()
1478 wake_up(&ep->com.waitq); peer_close()
1481 start_ep_timer(ep); peer_close()
1482 __state_set(&ep->com, CLOSING); peer_close()
1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
1486 peer_close_upcall(ep); peer_close()
1492 __state_set(&ep->com, MORIBUND); peer_close()
1496 stop_ep_timer(ep); peer_close()
1497 if (ep->com.cm_id && ep->com.qp) { peer_close()
1499 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
1502 close_complete_upcall(ep); peer_close()
1503 __state_set(&ep->com, DEAD); peer_close()
1513 spin_unlock_irqrestore(&ep->com.lock, flags); peer_close()
1515 iwch_ep_disconnect(ep, 0, GFP_KERNEL); peer_close()
1517 release_ep_resources(ep); peer_close()
1533 struct iwch_ep *ep = ctx; peer_abort() local
1542 PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep, peer_abort()
1543 ep->hwtid); peer_abort()
1544 t3_l2t_send_event(ep->com.tdev, ep->l2t); peer_abort()
1552 if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) { peer_abort()
1556 spin_lock_irqsave(&ep->com.lock, flags); peer_abort()
1557 PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state); peer_abort()
1558 switch (ep->com.state) { peer_abort()
1562 stop_ep_timer(ep); peer_abort()
1565 stop_ep_timer(ep); peer_abort()
1566 connect_reply_upcall(ep, -ECONNRESET); peer_abort()
1569 ep->com.rpl_done = 1; peer_abort()
1570 ep->com.rpl_err = -ECONNRESET; peer_abort()
1571 PDBG("waking up ep %p\n", ep); peer_abort()
1572 wake_up(&ep->com.waitq); peer_abort()
1582 ep->com.rpl_done = 1; peer_abort()
1583 ep->com.rpl_err = -ECONNRESET; peer_abort()
1584 PDBG("waking up ep %p\n", ep); peer_abort()
1585 wake_up(&ep->com.waitq); peer_abort()
1589 stop_ep_timer(ep); peer_abort()
1592 if (ep->com.cm_id && ep->com.qp) { peer_abort()
1594 ret = iwch_modify_qp(ep->com.qp->rhp, peer_abort()
1595 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, peer_abort()
1602 peer_abort_upcall(ep); peer_abort()
1608 spin_unlock_irqrestore(&ep->com.lock, flags); peer_abort()
1614 dst_confirm(ep->dst); peer_abort()
1615 if (ep->com.state != ABORTING) { peer_abort()
1616 __state_set(&ep->com, DEAD); peer_abort()
1619 spin_unlock_irqrestore(&ep->com.lock, flags); peer_abort()
1631 rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); peer_abort()
1632 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); peer_abort()
1634 iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb); peer_abort()
1637 release_ep_resources(ep); peer_abort()
1643 struct iwch_ep *ep = ctx; close_con_rpl() local
1648 PDBG("%s ep %p\n", __func__, ep); close_con_rpl()
1649 BUG_ON(!ep); close_con_rpl()
1652 spin_lock_irqsave(&ep->com.lock, flags); close_con_rpl()
1653 switch (ep->com.state) { close_con_rpl()
1655 __state_set(&ep->com, MORIBUND); close_con_rpl()
1658 stop_ep_timer(ep); close_con_rpl()
1659 if ((ep->com.cm_id) && (ep->com.qp)) { close_con_rpl()
1661 iwch_modify_qp(ep->com.qp->rhp, close_con_rpl()
1662 ep->com.qp, close_con_rpl()
1666 close_complete_upcall(ep); close_con_rpl()
1667 __state_set(&ep->com, DEAD); close_con_rpl()
1677 spin_unlock_irqrestore(&ep->com.lock, flags); close_con_rpl()
1679 release_ep_resources(ep); close_con_rpl()
1697 struct iwch_ep *ep = ctx; terminate() local
1699 if (state_read(&ep->com) != FPDU_MODE) terminate()
1702 PDBG("%s ep %p\n", __func__, ep); terminate()
1705 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, terminate()
1707 ep->com.qp->attr.terminate_msg_len = skb->len; terminate()
1708 ep->com.qp->attr.is_terminate_local = 0; terminate()
1715 struct iwch_ep *ep = ctx; ec_status() local
1717 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, ec_status()
1723 __func__, ep->hwtid); ec_status()
1724 stop_ep_timer(ep); ec_status()
1726 iwch_modify_qp(ep->com.qp->rhp, ec_status()
1727 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, ec_status()
1729 abort_connection(ep, NULL, GFP_KERNEL); ec_status()
1736 struct iwch_ep *ep = (struct iwch_ep *)arg; ep_timeout() local
1741 spin_lock_irqsave(&ep->com.lock, flags); ep_timeout()
1742 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, ep_timeout()
1743 ep->com.state); ep_timeout()
1744 switch (ep->com.state) { ep_timeout()
1746 __state_set(&ep->com, ABORTING); ep_timeout()
1747 connect_reply_upcall(ep, -ETIMEDOUT); ep_timeout()
1750 __state_set(&ep->com, ABORTING); ep_timeout()
1754 if (ep->com.cm_id && ep->com.qp) { ep_timeout()
1756 iwch_modify_qp(ep->com.qp->rhp, ep_timeout()
1757 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, ep_timeout()
1760 __state_set(&ep->com, ABORTING); ep_timeout()
1763 WARN(1, "%s unexpected state ep %p state %u\n", ep_timeout()
1764 __func__, ep, ep->com.state); ep_timeout()
1767 spin_unlock_irqrestore(&ep->com.lock, flags); ep_timeout()
1769 abort_connection(ep, NULL, GFP_ATOMIC); ep_timeout()
1770 put_ep(&ep->com); ep_timeout()
1776 struct iwch_ep *ep = to_ep(cm_id); iwch_reject_cr() local
1777 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); iwch_reject_cr()
1779 if (state_read(&ep->com) == DEAD) { iwch_reject_cr()
1780 put_ep(&ep->com); iwch_reject_cr()
1783 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); iwch_reject_cr()
1785 abort_connection(ep, NULL, GFP_KERNEL); iwch_reject_cr()
1787 err = send_mpa_reject(ep, pdata, pdata_len); iwch_reject_cr()
1788 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); iwch_reject_cr()
1790 put_ep(&ep->com); iwch_reject_cr()
1799 struct iwch_ep *ep = to_ep(cm_id); iwch_accept_cr() local
1803 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); iwch_accept_cr()
1804 if (state_read(&ep->com) == DEAD) { iwch_accept_cr()
1809 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); iwch_accept_cr()
1814 abort_connection(ep, NULL, GFP_KERNEL); iwch_accept_cr()
1820 ep->com.cm_id = cm_id; iwch_accept_cr()
1821 ep->com.qp = qp; iwch_accept_cr()
1823 ep->ird = conn_param->ird; iwch_accept_cr()
1824 ep->ord = conn_param->ord; iwch_accept_cr()
1826 if (peer2peer && ep->ird == 0) iwch_accept_cr()
1827 ep->ird = 1; iwch_accept_cr()
1829 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); iwch_accept_cr()
1832 attrs.mpa_attr = ep->mpa_attr; iwch_accept_cr()
1833 attrs.max_ird = ep->ird; iwch_accept_cr()
1834 attrs.max_ord = ep->ord; iwch_accept_cr()
1835 attrs.llp_stream_handle = ep; iwch_accept_cr()
1845 err = iwch_modify_qp(ep->com.qp->rhp, iwch_accept_cr()
1846 ep->com.qp, mask, &attrs, 1); iwch_accept_cr()
1852 wait_event(ep->com.waitq, ep->com.rpl_done); iwch_accept_cr()
1853 err = ep->com.rpl_err; iwch_accept_cr()
1858 err = send_mpa_reply(ep, conn_param->private_data, iwch_accept_cr()
1864 state_set(&ep->com, FPDU_MODE); iwch_accept_cr()
1865 established_upcall(ep); iwch_accept_cr()
1866 put_ep(&ep->com); iwch_accept_cr()
1869 ep->com.cm_id = NULL; iwch_accept_cr()
1870 ep->com.qp = NULL; iwch_accept_cr()
1873 put_ep(&ep->com); iwch_accept_cr()
1892 struct iwch_ep *ep; iwch_connect() local
1908 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); iwch_connect()
1909 if (!ep) { iwch_connect()
1910 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); iwch_connect()
1914 init_timer(&ep->timer); iwch_connect()
1915 ep->plen = conn_param->private_data_len; iwch_connect()
1916 if (ep->plen) iwch_connect()
1917 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), iwch_connect()
1918 conn_param->private_data, ep->plen); iwch_connect()
1919 ep->ird = conn_param->ird; iwch_connect()
1920 ep->ord = conn_param->ord; iwch_connect()
1922 if (peer2peer && ep->ord == 0) iwch_connect()
1923 ep->ord = 1; iwch_connect()
1925 ep->com.tdev = h->rdev.t3cdev_p; iwch_connect()
1928 ep->com.cm_id = cm_id; iwch_connect()
1929 ep->com.qp = get_qhp(h, conn_param->qpn); iwch_connect()
1930 BUG_ON(!ep->com.qp); iwch_connect()
1932 ep->com.qp, cm_id); iwch_connect()
1937 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep); iwch_connect()
1938 if (ep->atid == -1) { iwch_connect()
1953 ep->dst = &rt->dst; iwch_connect()
1954 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL, iwch_connect()
1956 if (!ep->l2t) { iwch_connect()
1962 state_set(&ep->com, CONNECTING); iwch_connect()
1963 ep->tos = IPTOS_LOWDELAY; iwch_connect()
1964 memcpy(&ep->com.local_addr, &cm_id->local_addr, iwch_connect()
1965 sizeof(ep->com.local_addr)); iwch_connect()
1966 memcpy(&ep->com.remote_addr, &cm_id->remote_addr, iwch_connect()
1967 sizeof(ep->com.remote_addr)); iwch_connect()
1970 err = send_connect(ep); iwch_connect()
1974 l2t_release(h->rdev.t3cdev_p, ep->l2t); iwch_connect()
1976 dst_release(ep->dst); iwch_connect()
1978 cxgb3_free_atid(ep->com.tdev, ep->atid); iwch_connect()
1981 put_ep(&ep->com); iwch_connect()
1990 struct iwch_listen_ep *ep; iwch_create_listen() local
2000 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); iwch_create_listen()
2001 if (!ep) { iwch_create_listen()
2002 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); iwch_create_listen()
2006 PDBG("%s ep %p\n", __func__, ep); iwch_create_listen()
2007 ep->com.tdev = h->rdev.t3cdev_p; iwch_create_listen()
2009 ep->com.cm_id = cm_id; iwch_create_listen()
2010 ep->backlog = backlog; iwch_create_listen()
2011 memcpy(&ep->com.local_addr, &cm_id->local_addr, iwch_create_listen()
2012 sizeof(ep->com.local_addr)); iwch_create_listen()
2017 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep); iwch_create_listen()
2018 if (ep->stid == -1) { iwch_create_listen()
2024 state_set(&ep->com, LISTEN); iwch_create_listen()
2025 err = listen_start(ep); iwch_create_listen()
2030 wait_event(ep->com.waitq, ep->com.rpl_done); iwch_create_listen()
2031 err = ep->com.rpl_err; iwch_create_listen()
2033 cm_id->provider_data = ep; iwch_create_listen()
2037 cxgb3_free_stid(ep->com.tdev, ep->stid); iwch_create_listen()
2040 put_ep(&ep->com); iwch_create_listen()
2049 struct iwch_listen_ep *ep = to_listen_ep(cm_id); iwch_destroy_listen() local
2051 PDBG("%s ep %p\n", __func__, ep); iwch_destroy_listen()
2054 state_set(&ep->com, DEAD); iwch_destroy_listen()
2055 ep->com.rpl_done = 0; iwch_destroy_listen()
2056 ep->com.rpl_err = 0; iwch_destroy_listen()
2057 err = listen_stop(ep); iwch_destroy_listen()
2060 wait_event(ep->com.waitq, ep->com.rpl_done); iwch_destroy_listen()
2061 cxgb3_free_stid(ep->com.tdev, ep->stid); iwch_destroy_listen()
2063 err = ep->com.rpl_err; iwch_destroy_listen()
2065 put_ep(&ep->com); iwch_destroy_listen()
2069 int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) iwch_ep_disconnect() argument
2078 spin_lock_irqsave(&ep->com.lock, flags); iwch_ep_disconnect()
2080 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, iwch_ep_disconnect()
2081 states[ep->com.state], abrupt); iwch_ep_disconnect()
2083 tdev = (struct t3cdev *)ep->com.tdev; iwch_ep_disconnect()
2087 close_complete_upcall(ep); iwch_ep_disconnect()
2088 ep->com.state = DEAD; iwch_ep_disconnect()
2090 switch (ep->com.state) { iwch_ep_disconnect()
2098 ep->com.state = ABORTING; iwch_ep_disconnect()
2100 ep->com.state = CLOSING; iwch_ep_disconnect()
2101 start_ep_timer(ep); iwch_ep_disconnect()
2103 set_bit(CLOSE_SENT, &ep->com.flags); iwch_ep_disconnect()
2106 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { iwch_ep_disconnect()
2109 stop_ep_timer(ep); iwch_ep_disconnect()
2110 ep->com.state = ABORTING; iwch_ep_disconnect()
2112 ep->com.state = MORIBUND; iwch_ep_disconnect()
2118 PDBG("%s ignoring disconnect ep %p state %u\n", iwch_ep_disconnect()
2119 __func__, ep, ep->com.state); iwch_ep_disconnect()
2126 spin_unlock_irqrestore(&ep->com.lock, flags); iwch_ep_disconnect()
2129 ret = send_abort(ep, NULL, gfp); iwch_ep_disconnect()
2131 ret = send_halfclose(ep, gfp); iwch_ep_disconnect()
2136 release_ep_resources(ep); iwch_ep_disconnect()
2143 struct iwch_ep *ep = ctx; iwch_ep_redirect() local
2145 if (ep->dst != old) iwch_ep_redirect()
2148 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, iwch_ep_redirect()
2151 l2t_release(ep->com.tdev, ep->l2t); iwch_ep_redirect()
2152 ep->l2t = l2t; iwch_ep_redirect()
2154 ep->dst = new; iwch_ep_redirect()
2183 void *ep; process_work() local
2188 ep = *((void **) (skb->cb)); process_work()
2190 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep); process_work()
2195 * ep was referenced in sched(), and is freed here. process_work()
2197 put_ep((struct iwch_ep_common *)ep); process_work()
iwch_cm.h  56 #define put_ep(ep) { \
57 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
58 ep, atomic_read(&((ep)->kref.refcount))); \
59 WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
60 kref_put(&((ep)->kref), __free_ep); \
63 #define get_ep(ep) { \
64 PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
65 ep, atomic_read(&((ep)->kref.refcount))); \
66 kref_get(&((ep)->kref)); \
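These macros wrap kref_get()/kref_put() with a debug print and an underflow WARN_ON; the release callback is __free_ep(), which, as shown earlier in iwch_cm.c, recovers the outer endpoint from the embedded kref with two nested container_of() steps. A user-space model of that recovery (container_of defined inline here; in the kernel it comes from <linux/kernel.h>):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kref      { int refcount; };
    struct ep_common { struct kref kref;      /* cm_id, qp, state, flags ... */ };
    struct ep        { struct ep_common com;  /* hwtid, l2t, timer ... */ };

    /* Release path: kref -> ep_common -> ep, as in __free_ep(). */
    static struct ep *ep_from_kref(struct kref *k)
    {
        return container_of(container_of(k, struct ep_common, kref),
                            struct ep, com);
    }
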
222 int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
223 int iwch_quiesce_tid(struct iwch_ep *ep);
224 int iwch_resume_tid(struct iwch_ep *ep);
226 void iwch_rearp(struct iwch_ep *ep);
iwch_qp.c  743 int iwch_post_zb_read(struct iwch_ep *ep) iwch_post_zb_read() argument
766 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)| iwch_post_zb_read()
769 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); iwch_post_zb_read()
799 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)); iwch_post_terminate()
903 init_attr.tid = qhp->ep->hwtid; rdma_init()
922 init_attr.tcp_emss = qhp->ep->emss; rdma_init()
929 init_attr.chan = qhp->ep->l2t->smt_idx; rdma_init()
938 init_attr.irs = qhp->ep->rcv_seq; rdma_init()
960 struct iwch_ep *ep = NULL; iwch_modify_qp() local
962 PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__, iwch_modify_qp()
963 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state, iwch_modify_qp()
1018 qhp->ep = qhp->attr.llp_stream_handle; iwch_modify_qp()
1027 get_ep(&qhp->ep->com); iwch_modify_qp()
1046 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); iwch_modify_qp()
1051 ep = qhp->ep; iwch_modify_qp()
1052 get_ep(&ep->com); iwch_modify_qp()
1067 ep = qhp->ep; iwch_modify_qp()
1068 get_ep(&ep->com); iwch_modify_qp()
1087 put_ep(&qhp->ep->com); iwch_modify_qp()
1088 qhp->ep = NULL; iwch_modify_qp()
1127 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, iwch_modify_qp()
1132 ep = qhp->ep; iwch_modify_qp()
1133 qhp->ep = NULL; iwch_modify_qp()
1137 BUG_ON(!ep); iwch_modify_qp()
1151 iwch_ep_disconnect(ep, abort, GFP_KERNEL); iwch_modify_qp()
1152 put_ep(&ep->com); iwch_modify_qp()
1160 put_ep(&ep->com); iwch_modify_qp()
iwch_ev.c  140 PDBG("%s QPID 0x%x ep %p disconnecting\n", iwch_ev_dispatch()
141 __func__, qhp->wq.qpid, qhp->ep); iwch_ev_dispatch()
142 iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC); iwch_ev_dispatch()
148 iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC); iwch_ev_dispatch()
175 if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) iwch_ev_dispatch()
176 dst_confirm(qhp->ep->dst); iwch_ev_dispatch()
/linux-4.4.14/sound/usb/
endpoint.h  11 int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
21 int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
22 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
23 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
24 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
25 void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
26 void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
27 void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
29 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
30 int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
32 void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
endpoint.c  90 usb_free_coherent(u->ep->chip->dev, u->buffer_size, release_urb_ctx()
125 * @ep: The snd_usb_endpoint
130 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep) snd_usb_endpoint_implicit_feedback_sink() argument
132 return ep->sync_master && snd_usb_endpoint_implicit_feedback_sink()
133 ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA && snd_usb_endpoint_implicit_feedback_sink()
134 ep->type == SND_USB_ENDPOINT_TYPE_DATA && snd_usb_endpoint_implicit_feedback_sink()
135 usb_pipeout(ep->pipe); snd_usb_endpoint_implicit_feedback_sink()
145 int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) snd_usb_endpoint_next_packet_size() argument
150 if (ep->fill_max) snd_usb_endpoint_next_packet_size()
151 return ep->maxframesize; snd_usb_endpoint_next_packet_size()
153 spin_lock_irqsave(&ep->lock, flags); snd_usb_endpoint_next_packet_size()
154 ep->phase = (ep->phase & 0xffff) snd_usb_endpoint_next_packet_size()
155 + (ep->freqm << ep->datainterval); snd_usb_endpoint_next_packet_size()
156 ret = min(ep->phase >> 16, ep->maxframesize); snd_usb_endpoint_next_packet_size()
157 spin_unlock_irqrestore(&ep->lock, flags); snd_usb_endpoint_next_packet_size()
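snd_usb_endpoint_next_packet_size() is a Q16.16 fixed-point accumulator: only the fractional part of ep->phase survives each call, the measured rate ep->freqm (samples per frame, in Q16.16) is added, and the integer part becomes the next packet size in frames. A runnable simulation for 44.1 kHz at USB full speed, where the nominal rate works out to ~44.1 samples per 1 ms frame (the constant matches get_usb_full_speed_rate() in the driver):

    #include <stdio.h>

    int main(void)
    {
        /* 44100 Hz -> Q16.16 samples per 1 ms frame: 2890138 ~= 44.1 */
        unsigned int freqm = ((44100u << 13) + 62) / 125;
        unsigned int phase = 0;

        for (int i = 0; i < 10; i++) {
            phase = (phase & 0xffff) + freqm;  /* keep fraction, add rate */
            printf("%u ", phase >> 16);        /* packet size in frames */
        }
        printf("\n");   /* prints: 44 44 44 44 44 44 44 44 44 45 */
        return 0;
    }

Nine 44-frame packets plus one 45-frame packet average exactly 44.1 frames per millisecond, which is how the driver tracks non-integer rates without drift.
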
162 static void retire_outbound_urb(struct snd_usb_endpoint *ep, retire_outbound_urb() argument
165 if (ep->retire_data_urb) retire_outbound_urb()
166 ep->retire_data_urb(ep->data_subs, urb_ctx->urb); retire_outbound_urb()
169 static void retire_inbound_urb(struct snd_usb_endpoint *ep, retire_inbound_urb() argument
174 if (unlikely(ep->skip_packets > 0)) { retire_inbound_urb()
175 ep->skip_packets--; retire_inbound_urb()
179 if (ep->sync_slave) retire_inbound_urb()
180 snd_usb_handle_sync_urb(ep->sync_slave, ep, urb); retire_inbound_urb()
182 if (ep->retire_data_urb) retire_inbound_urb()
183 ep->retire_data_urb(ep->data_subs, urb); retire_inbound_urb()
186 static void prepare_silent_urb(struct snd_usb_endpoint *ep, prepare_silent_urb() argument
196 if (ep->chip->tx_length_quirk) prepare_silent_urb()
207 counts = snd_usb_endpoint_next_packet_size(ep); prepare_silent_urb()
209 length = counts * ep->stride; /* number of silent bytes */ prepare_silent_urb()
210 offset = offs * ep->stride + extra * i; prepare_silent_urb()
219 ep->silence_value, length); prepare_silent_urb()
224 urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra; prepare_silent_urb()
230 static void prepare_outbound_urb(struct snd_usb_endpoint *ep, prepare_outbound_urb() argument
236 urb->dev = ep->chip->dev; /* we need to set this at each time */ prepare_outbound_urb()
238 switch (ep->type) { prepare_outbound_urb()
240 if (ep->prepare_data_urb) { prepare_outbound_urb()
241 ep->prepare_data_urb(ep->data_subs, urb); prepare_outbound_urb()
244 prepare_silent_urb(ep, ctx); prepare_outbound_urb()
249 if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) { prepare_outbound_urb()
256 cp[0] = ep->freqn; prepare_outbound_urb()
257 cp[1] = ep->freqn >> 8; prepare_outbound_urb()
258 cp[2] = ep->freqn >> 16; prepare_outbound_urb()
259 cp[3] = ep->freqn >> 24; prepare_outbound_urb()
267 cp[0] = ep->freqn >> 2; prepare_outbound_urb()
268 cp[1] = ep->freqn >> 10; prepare_outbound_urb()
269 cp[2] = ep->freqn >> 18; prepare_outbound_urb()
279 static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep, prepare_inbound_urb() argument
285 urb->dev = ep->chip->dev; /* we need to set this at each time */ prepare_inbound_urb()
287 switch (ep->type) { prepare_inbound_urb()
292 urb->iso_frame_desc[i].length = ep->curpacksize; prepare_inbound_urb()
293 offs += ep->curpacksize; prepare_inbound_urb()
301 urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize); prepare_inbound_urb()
309 * from ep->ready_playback_urbs and in case there aren't any available
321 static void queue_pending_output_urbs(struct snd_usb_endpoint *ep) queue_pending_output_urbs() argument
323 while (test_bit(EP_FLAG_RUNNING, &ep->flags)) { queue_pending_output_urbs()
331 spin_lock_irqsave(&ep->lock, flags); queue_pending_output_urbs()
332 if (ep->next_packet_read_pos != ep->next_packet_write_pos) { queue_pending_output_urbs()
333 packet = ep->next_packet + ep->next_packet_read_pos; queue_pending_output_urbs()
334 ep->next_packet_read_pos++; queue_pending_output_urbs()
335 ep->next_packet_read_pos %= MAX_URBS; queue_pending_output_urbs()
338 if (!list_empty(&ep->ready_playback_urbs)) queue_pending_output_urbs()
339 ctx = list_first_entry(&ep->ready_playback_urbs, queue_pending_output_urbs()
342 spin_unlock_irqrestore(&ep->lock, flags); queue_pending_output_urbs()
355 prepare_outbound_urb(ep, ctx); queue_pending_output_urbs()
359 usb_audio_err(ep->chip, queue_pending_output_urbs()
363 set_bit(ctx->index, &ep->active_mask); queue_pending_output_urbs()
373 struct snd_usb_endpoint *ep = ctx->ep; snd_complete_urb() local
384 if (unlikely(atomic_read(&ep->chip->shutdown))) snd_complete_urb()
387 if (usb_pipeout(ep->pipe)) { snd_complete_urb()
388 retire_outbound_urb(ep, ctx); snd_complete_urb()
390 if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags))) snd_complete_urb()
393 if (snd_usb_endpoint_implicit_feedback_sink(ep)) { snd_complete_urb()
394 spin_lock_irqsave(&ep->lock, flags); snd_complete_urb()
395 list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs); snd_complete_urb()
396 spin_unlock_irqrestore(&ep->lock, flags); snd_complete_urb()
397 queue_pending_output_urbs(ep); snd_complete_urb()
402 prepare_outbound_urb(ep, ctx); snd_complete_urb()
404 retire_inbound_urb(ep, ctx); snd_complete_urb()
406 if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags))) snd_complete_urb()
409 prepare_inbound_urb(ep, ctx); snd_complete_urb()
416 usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err); snd_complete_urb()
417 if (ep->data_subs && ep->data_subs->pcm_substream) { snd_complete_urb()
418 substream = ep->data_subs->pcm_substream; snd_complete_urb()
423 clear_bit(ctx->index, &ep->active_mask); snd_complete_urb()
449 struct snd_usb_endpoint *ep; snd_usb_add_endpoint() local
457 list_for_each_entry(ep, &chip->ep_list, list) { snd_usb_add_endpoint()
458 if (ep->ep_num == ep_num && snd_usb_add_endpoint()
459 ep->iface == alts->desc.bInterfaceNumber && snd_usb_add_endpoint()
460 ep->altsetting == alts->desc.bAlternateSetting) { snd_usb_add_endpoint()
461 usb_audio_dbg(ep->chip, snd_usb_add_endpoint()
463 ep_num, ep->iface, ep->altsetting, ep); snd_usb_add_endpoint()
473 ep = kzalloc(sizeof(*ep), GFP_KERNEL); snd_usb_add_endpoint()
474 if (!ep) snd_usb_add_endpoint()
477 ep->chip = chip; snd_usb_add_endpoint()
478 spin_lock_init(&ep->lock); snd_usb_add_endpoint()
479 ep->type = type; snd_usb_add_endpoint()
480 ep->ep_num = ep_num; snd_usb_add_endpoint()
481 ep->iface = alts->desc.bInterfaceNumber; snd_usb_add_endpoint()
482 ep->altsetting = alts->desc.bAlternateSetting; snd_usb_add_endpoint()
483 INIT_LIST_HEAD(&ep->ready_playback_urbs); snd_usb_add_endpoint()
487 ep->pipe = usb_sndisocpipe(chip->dev, ep_num); snd_usb_add_endpoint()
489 ep->pipe = usb_rcvisocpipe(chip->dev, ep_num); snd_usb_add_endpoint()
495 ep->syncinterval = get_endpoint(alts, 1)->bRefresh; snd_usb_add_endpoint()
497 ep->syncinterval = 1; snd_usb_add_endpoint()
500 ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1; snd_usb_add_endpoint()
502 ep->syncinterval = 3; snd_usb_add_endpoint()
504 ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize); snd_usb_add_endpoint()
507 ep->syncmaxsize == 4) snd_usb_add_endpoint()
508 ep->udh01_fb_quirk = 1; snd_usb_add_endpoint()
511 list_add_tail(&ep->list, &chip->ep_list); snd_usb_add_endpoint()
516 return ep; snd_usb_add_endpoint()
522 static int wait_clear_urbs(struct snd_usb_endpoint *ep) wait_clear_urbs() argument
528 alive = bitmap_weight(&ep->active_mask, ep->nurbs); wait_clear_urbs()
536 usb_audio_err(ep->chip, wait_clear_urbs()
538 alive, ep->ep_num); wait_clear_urbs()
539 clear_bit(EP_FLAG_STOPPING, &ep->flags); wait_clear_urbs()
547 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep) snd_usb_endpoint_sync_pending_stop() argument
549 if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags)) snd_usb_endpoint_sync_pending_stop()
550 wait_clear_urbs(ep); snd_usb_endpoint_sync_pending_stop()
556 static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force) deactivate_urbs() argument
560 if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */ deactivate_urbs()
563 clear_bit(EP_FLAG_RUNNING, &ep->flags); deactivate_urbs()
565 INIT_LIST_HEAD(&ep->ready_playback_urbs); deactivate_urbs()
566 ep->next_packet_read_pos = 0; deactivate_urbs()
567 ep->next_packet_write_pos = 0; deactivate_urbs()
569 for (i = 0; i < ep->nurbs; i++) { deactivate_urbs()
570 if (test_bit(i, &ep->active_mask)) { deactivate_urbs()
571 if (!test_and_set_bit(i, &ep->unlink_mask)) { deactivate_urbs()
572 struct urb *u = ep->urb[i].urb; deactivate_urbs()
584 static void release_urbs(struct snd_usb_endpoint *ep, int force) release_urbs() argument
589 ep->retire_data_urb = NULL; release_urbs()
590 ep->prepare_data_urb = NULL; release_urbs()
593 deactivate_urbs(ep, force); release_urbs()
594 wait_clear_urbs(ep); release_urbs()
596 for (i = 0; i < ep->nurbs; i++) release_urbs()
597 release_urb_ctx(&ep->urb[i]); release_urbs()
599 if (ep->syncbuf) release_urbs()
600 usb_free_coherent(ep->chip->dev, SYNC_URBS * 4, release_urbs()
601 ep->syncbuf, ep->sync_dma); release_urbs()
603 ep->syncbuf = NULL; release_urbs()
604 ep->nurbs = 0; release_urbs()
610 static int data_ep_set_params(struct snd_usb_endpoint *ep, data_ep_set_params() argument
623 int tx_length_quirk = (ep->chip->tx_length_quirk && data_ep_set_params()
624 usb_pipeout(ep->pipe)); data_ep_set_params()
635 ep->datainterval = fmt->datainterval; data_ep_set_params()
636 ep->stride = frame_bits >> 3; data_ep_set_params()
637 ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0; data_ep_set_params()
640 ep->freqmax = ep->freqn + (ep->freqn >> 2); data_ep_set_params()
646 * the data interval is more than 1 (i.e. ep->datainterval > 0), data_ep_set_params()
651 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the data_ep_set_params()
653 * USB high speed, noting that ep->freqmax is in units of data_ep_set_params()
656 maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) * data_ep_set_params()
661 if (ep->maxpacksize && ep->maxpacksize < maxsize) { data_ep_set_params()
663 unsigned int data_maxsize = maxsize = ep->maxpacksize; data_ep_set_params()
668 ep->freqmax = (data_maxsize / (frame_bits >> 3)) data_ep_set_params()
669 << (16 - ep->datainterval); data_ep_set_params()
672 if (ep->fill_max) data_ep_set_params()
673 ep->curpacksize = ep->maxpacksize; data_ep_set_params()
675 ep->curpacksize = maxsize; data_ep_set_params()
677 if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) { data_ep_set_params()
678 packs_per_ms = 8 >> ep->datainterval; data_ep_set_params()
684 if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep)) data_ep_set_params()
687 max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval); data_ep_set_params()
697 if (usb_pipein(ep->pipe) || data_ep_set_params()
698 snd_usb_endpoint_implicit_feedback_sink(ep)) { data_ep_set_params()
707 if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) { data_ep_set_params()
708 int interval = ep->datainterval; data_ep_set_params()
718 ep->nurbs = MAX_URBS; data_ep_set_params()
728 minsize = (ep->freqn >> (16 - ep->datainterval)) * data_ep_set_params()
745 ep->max_urb_frames = DIV_ROUND_UP(frames_per_period, data_ep_set_params()
751 ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer); data_ep_set_params()
755 for (i = 0; i < ep->nurbs; i++) { data_ep_set_params()
756 struct snd_urb_ctx *u = &ep->urb[i]; data_ep_set_params()
758 u->ep = ep; data_ep_set_params()
769 usb_alloc_coherent(ep->chip->dev, u->buffer_size, data_ep_set_params()
773 u->urb->pipe = ep->pipe; data_ep_set_params()
775 u->urb->interval = 1 << ep->datainterval; data_ep_set_params()
784 release_urbs(ep, 0); data_ep_set_params()
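The packet-size math in data_ep_set_params() falls out of the Q16.16 representation: ep->freqmax is the nominal rate plus 25% slack (freqn + (freqn >> 2) above), and maxsize rounds that up to whole frames before multiplying by the bytes per frame. A runnable restatement with the numbers for 48 kHz stereo S16 at USB high speed:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rate = 48000, channels = 2, sample_bits = 16;
        unsigned int datainterval = 0;    /* one packet per 125 us microframe */

        unsigned int freqn = ((rate << 10) + 62) / 125;  /* Q16.16: 6.0 frames */
        unsigned int freqmax = freqn + (freqn >> 2);     /* +25% slack: 7.5 */
        unsigned int frame_bytes = channels * sample_bits / 8;       /* 4 */

        /* round the Q16.16 frame count up, then convert frames to bytes */
        unsigned int maxsize = (((freqmax << datainterval) + 0xffff) >> 16)
                               * frame_bytes;
        printf("maxsize = %u\n", maxsize);  /* 8 frames * 4 bytes = 32 */
        return 0;
    }
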
791 static int sync_ep_set_params(struct snd_usb_endpoint *ep) sync_ep_set_params() argument
795 ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4, sync_ep_set_params()
796 GFP_KERNEL, &ep->sync_dma); sync_ep_set_params()
797 if (!ep->syncbuf) sync_ep_set_params()
801 struct snd_urb_ctx *u = &ep->urb[i]; sync_ep_set_params()
803 u->ep = ep; sync_ep_set_params()
808 u->urb->transfer_buffer = ep->syncbuf + i * 4; sync_ep_set_params()
809 u->urb->transfer_dma = ep->sync_dma + i * 4; sync_ep_set_params()
811 u->urb->pipe = ep->pipe; sync_ep_set_params()
814 u->urb->interval = 1 << ep->syncinterval; sync_ep_set_params()
819 ep->nurbs = SYNC_URBS; sync_ep_set_params()
824 release_urbs(ep, 0); sync_ep_set_params()
831 * @ep: the snd_usb_endpoint to configure
845 int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, snd_usb_endpoint_set_params() argument
857 if (ep->use_count != 0) { snd_usb_endpoint_set_params()
858 usb_audio_warn(ep->chip, snd_usb_endpoint_set_params()
859 "Unable to change format on ep #%x: already in use\n", snd_usb_endpoint_set_params()
860 ep->ep_num); snd_usb_endpoint_set_params()
865 release_urbs(ep, 0); snd_usb_endpoint_set_params()
867 ep->datainterval = fmt->datainterval; snd_usb_endpoint_set_params()
868 ep->maxpacksize = fmt->maxpacksize; snd_usb_endpoint_set_params()
869 ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX); snd_usb_endpoint_set_params()
871 if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) snd_usb_endpoint_set_params()
872 ep->freqn = get_usb_full_speed_rate(rate); snd_usb_endpoint_set_params()
874 ep->freqn = get_usb_high_speed_rate(rate); snd_usb_endpoint_set_params()
877 ep->freqm = ep->freqn; snd_usb_endpoint_set_params()
878 ep->freqshift = INT_MIN; snd_usb_endpoint_set_params()
880 ep->phase = 0; snd_usb_endpoint_set_params()
882 switch (ep->type) { snd_usb_endpoint_set_params()
884 err = data_ep_set_params(ep, pcm_format, channels, snd_usb_endpoint_set_params()
889 err = sync_ep_set_params(ep); snd_usb_endpoint_set_params()
895 usb_audio_dbg(ep->chip, snd_usb_endpoint_set_params()
896 "Setting params for ep #%x (type %d, %d urbs), ret=%d\n", snd_usb_endpoint_set_params()
897 ep->ep_num, ep->type, ep->nurbs, err); snd_usb_endpoint_set_params()
905 * @ep: the endpoint to start
917 int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep) snd_usb_endpoint_start() argument
922 if (atomic_read(&ep->chip->shutdown)) snd_usb_endpoint_start()
926 if (++ep->use_count != 1) snd_usb_endpoint_start()
930 deactivate_urbs(ep, false); snd_usb_endpoint_start()
932 wait_clear_urbs(ep); snd_usb_endpoint_start()
934 ep->active_mask = 0; snd_usb_endpoint_start()
935 ep->unlink_mask = 0; snd_usb_endpoint_start()
936 ep->phase = 0; snd_usb_endpoint_start()
938 snd_usb_endpoint_start_quirk(ep); snd_usb_endpoint_start()
947 set_bit(EP_FLAG_RUNNING, &ep->flags); snd_usb_endpoint_start()
949 if (snd_usb_endpoint_implicit_feedback_sink(ep)) { snd_usb_endpoint_start()
950 for (i = 0; i < ep->nurbs; i++) { snd_usb_endpoint_start()
951 struct snd_urb_ctx *ctx = ep->urb + i; snd_usb_endpoint_start()
952 list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs); snd_usb_endpoint_start()
958 for (i = 0; i < ep->nurbs; i++) { snd_usb_endpoint_start()
959 struct urb *urb = ep->urb[i].urb; snd_usb_endpoint_start()
964 if (usb_pipeout(ep->pipe)) { snd_usb_endpoint_start()
965 prepare_outbound_urb(ep, urb->context); snd_usb_endpoint_start()
967 prepare_inbound_urb(ep, urb->context); snd_usb_endpoint_start()
972 usb_audio_err(ep->chip, snd_usb_endpoint_start()
977 set_bit(i, &ep->active_mask); snd_usb_endpoint_start()
983 clear_bit(EP_FLAG_RUNNING, &ep->flags); snd_usb_endpoint_start()
984 ep->use_count--; snd_usb_endpoint_start()
985 deactivate_urbs(ep, false); snd_usb_endpoint_start()
992 * @ep: the endpoint to stop (may be NULL)
1003 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep) snd_usb_endpoint_stop() argument
1005 if (!ep) snd_usb_endpoint_stop()
1008 if (snd_BUG_ON(ep->use_count == 0)) snd_usb_endpoint_stop()
1011 if (--ep->use_count == 0) { snd_usb_endpoint_stop()
1012 deactivate_urbs(ep, false); snd_usb_endpoint_stop()
1013 ep->data_subs = NULL; snd_usb_endpoint_stop()
1014 ep->sync_slave = NULL; snd_usb_endpoint_stop()
1015 ep->retire_data_urb = NULL; snd_usb_endpoint_stop()
1016 ep->prepare_data_urb = NULL; snd_usb_endpoint_stop()
1017 set_bit(EP_FLAG_STOPPING, &ep->flags); snd_usb_endpoint_stop()
1024 * @ep: the endpoint to deactivate
1031 void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep) snd_usb_endpoint_deactivate() argument
1033 if (!ep) snd_usb_endpoint_deactivate()
1036 if (ep->use_count != 0) snd_usb_endpoint_deactivate()
1039 deactivate_urbs(ep, true); snd_usb_endpoint_deactivate()
1040 wait_clear_urbs(ep); snd_usb_endpoint_deactivate()
1046 * @ep: the endpoint to release
1051 void snd_usb_endpoint_release(struct snd_usb_endpoint *ep) snd_usb_endpoint_release() argument
1053 release_urbs(ep, 1); snd_usb_endpoint_release()
1059 * @ep: the endpoint to free
1061 * This frees all resources of the given ep.
1063 void snd_usb_endpoint_free(struct snd_usb_endpoint *ep) snd_usb_endpoint_free() argument
1065 kfree(ep); snd_usb_endpoint_free()
1071 * @ep: the endpoint to handle the packet
1078 void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, snd_usb_handle_sync_urb() argument
1086 snd_BUG_ON(ep == sender); snd_usb_handle_sync_urb()
1094 if (snd_usb_endpoint_implicit_feedback_sink(ep) && snd_usb_handle_sync_urb()
1095 ep->use_count != 0) { snd_usb_handle_sync_urb()
1116 spin_lock_irqsave(&ep->lock, flags); snd_usb_handle_sync_urb()
1117 out_packet = ep->next_packet + ep->next_packet_write_pos; snd_usb_handle_sync_urb()
1138 ep->next_packet_write_pos++; snd_usb_handle_sync_urb()
1139 ep->next_packet_write_pos %= MAX_URBS; snd_usb_handle_sync_urb()
1140 spin_unlock_irqrestore(&ep->lock, flags); snd_usb_handle_sync_urb()
1141 queue_pending_output_urbs(ep); snd_usb_handle_sync_urb()
1179 if (f < ep->freqn - 0x8000) snd_usb_handle_sync_urb()
1181 else if (f > ep->freqn + 0x8000) snd_usb_handle_sync_urb()
1183 } else if (unlikely(ep->freqshift == INT_MIN)) { snd_usb_handle_sync_urb()
1191 while (f < ep->freqn - ep->freqn / 4) { snd_usb_handle_sync_urb()
1195 while (f > ep->freqn + ep->freqn / 2) { snd_usb_handle_sync_urb()
1199 ep->freqshift = shift; snd_usb_handle_sync_urb()
1200 } else if (ep->freqshift >= 0) snd_usb_handle_sync_urb()
1201 f <<= ep->freqshift; snd_usb_handle_sync_urb()
1203 f >>= -ep->freqshift; snd_usb_handle_sync_urb()
1205 if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) { snd_usb_handle_sync_urb()
1210 spin_lock_irqsave(&ep->lock, flags); snd_usb_handle_sync_urb()
1211 ep->freqm = f; snd_usb_handle_sync_urb()
1212 spin_unlock_irqrestore(&ep->lock, flags); snd_usb_handle_sync_urb()
1218 ep->freqshift = INT_MIN; snd_usb_handle_sync_urb()
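The tail of snd_usb_handle_sync_urb() copes with devices whose feedback words arrive in the wrong fixed-point format: on the first sample (ep->freqshift still the INT_MIN sentinel) it searches for the power-of-two shift that lands the value near the nominal rate, caches it, and thereafter applies the cached shift and sanity-checks each sample before storing it in ep->freqm. A condensed model of that inference, using the same sentinel and window bounds as the code above:

    #include <limits.h>

    /* Normalize a raw feedback word toward Q16.16 samples-per-frame,
     * learning the device's shift on first use. Returns 0 when the
     * value falls outside the plausible window and should be dropped. */
    static unsigned int normalize_feedback(unsigned int f, unsigned int freqn,
                                           unsigned int freqmax, int *freqshift)
    {
        if (*freqshift == INT_MIN) {              /* format unknown so far */
            int shift = 0;
            while (f < freqn - freqn / 4) { f <<= 1; shift++; }
            while (f > freqn + freqn / 2) { f >>= 1; shift--; }
            *freqshift = shift;
        } else if (*freqshift >= 0) {
            f <<= *freqshift;
        } else {
            f >>= -*freqshift;
        }
        return (f >= freqn - freqn / 8 && f <= freqmax) ? f : 0;
    }
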
midi.c  103 void (*output)(struct snd_usb_midi_out_endpoint *ep, struct urb *urb);
140 struct snd_usb_midi_out_endpoint *ep; member in struct:snd_usb_midi_out_endpoint::out_urb_context
150 struct snd_usb_midi_out_endpoint *ep; member in struct:snd_usb_midi_out_endpoint::usbmidi_out_port
183 static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint *ep);
226 static void snd_usbmidi_input_data(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_input_data() argument
229 struct usbmidi_in_port *port = &ep->ports[portidx]; snd_usbmidi_input_data()
232 dev_dbg(&ep->umidi->dev->dev, "unexpected port %d!\n", portidx); snd_usbmidi_input_data()
235 if (!test_bit(port->substream->number, &ep->umidi->input_triggered)) snd_usbmidi_input_data()
257 struct snd_usb_midi_in_endpoint *ep = urb->context; snd_usbmidi_in_urb_complete() local
261 ep->umidi->usb_protocol_ops->input(ep, urb->transfer_buffer, snd_usbmidi_in_urb_complete()
267 ep->error_resubmit = 1; snd_usbmidi_in_urb_complete()
268 mod_timer(&ep->umidi->error_timer, snd_usbmidi_in_urb_complete()
275 urb->dev = ep->umidi->dev; snd_usbmidi_in_urb_complete()
282 struct snd_usb_midi_out_endpoint *ep = context->ep; snd_usbmidi_out_urb_complete() local
285 spin_lock(&ep->buffer_lock); snd_usbmidi_out_urb_complete()
286 urb_index = context - ep->urbs; snd_usbmidi_out_urb_complete()
287 ep->active_urbs &= ~(1 << urb_index); snd_usbmidi_out_urb_complete()
288 if (unlikely(ep->drain_urbs)) { snd_usbmidi_out_urb_complete()
289 ep->drain_urbs &= ~(1 << urb_index); snd_usbmidi_out_urb_complete()
290 wake_up(&ep->drain_wait); snd_usbmidi_out_urb_complete()
292 spin_unlock(&ep->buffer_lock); snd_usbmidi_out_urb_complete()
297 mod_timer(&ep->umidi->error_timer, snd_usbmidi_out_urb_complete()
302 snd_usbmidi_do_output(ep); snd_usbmidi_out_urb_complete()
309 static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint *ep) snd_usbmidi_do_output() argument
315 spin_lock_irqsave(&ep->buffer_lock, flags); snd_usbmidi_do_output()
316 if (ep->umidi->disconnected) { snd_usbmidi_do_output()
317 spin_unlock_irqrestore(&ep->buffer_lock, flags); snd_usbmidi_do_output()
321 urb_index = ep->next_urb; snd_usbmidi_do_output()
323 if (!(ep->active_urbs & (1 << urb_index))) { snd_usbmidi_do_output()
324 urb = ep->urbs[urb_index].urb; snd_usbmidi_do_output()
326 ep->umidi->usb_protocol_ops->output(ep, urb); snd_usbmidi_do_output()
332 urb->dev = ep->umidi->dev; snd_usbmidi_do_output()
335 ep->active_urbs |= 1 << urb_index; snd_usbmidi_do_output()
339 if (urb_index == ep->next_urb) snd_usbmidi_do_output()
342 ep->next_urb = urb_index; snd_usbmidi_do_output()
343 spin_unlock_irqrestore(&ep->buffer_lock, flags); snd_usbmidi_do_output()
348 struct snd_usb_midi_out_endpoint *ep = snd_usbmidi_out_tasklet() local
351 snd_usbmidi_do_output(ep); snd_usbmidi_out_tasklet()
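snd_usbmidi_do_output() and its completion handler above cooperate through one bitmask: each output URB owns a bit in ep->active_urbs, the completion handler clears it, and the output path scans from ep->next_urb for a clear bit before submitting. A hedged userspace analogue of that bookkeeping (OUTPUT_URBS and the helper names here are illustrative, not the driver's API):

#include <stdio.h>

#define OUTPUT_URBS 7

static unsigned int active_urbs;	/* bit n set: urbs[n] is in flight */
static int next_urb;			/* round-robin scan start */

static int claim_free_urb(void)
{
	int i, idx = next_urb;

	for (i = 0; i < OUTPUT_URBS; i++) {
		if (!(active_urbs & (1u << idx))) {
			active_urbs |= 1u << idx;	/* mark in flight */
			next_urb = (idx + 1) % OUTPUT_URBS;
			return idx;			/* caller submits urbs[idx] */
		}
		idx = (idx + 1) % OUTPUT_URBS;
	}
	return -1;					/* all URBs busy */
}

static void complete_urb(int idx)
{
	active_urbs &= ~(1u << idx);			/* slot reusable */
}

int main(void)
{
	printf("%d %d\n", claim_free_urb(), claim_free_urb());	/* 0 1 */
	complete_urb(0);
	printf("%d\n", claim_free_urb());			/* 2: scan resumes */
	return 0;
}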
383 static int send_bulk_static_data(struct snd_usb_midi_out_endpoint *ep, send_bulk_static_data() argument
391 if (ep->urbs[0].urb) send_bulk_static_data()
392 err = usb_bulk_msg(ep->umidi->dev, ep->urbs[0].urb->pipe, send_bulk_static_data()
404 static void snd_usbmidi_standard_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_standard_input() argument
413 snd_usbmidi_input_data(ep, cable, &buffer[i + 1], snd_usbmidi_standard_input()
418 static void snd_usbmidi_midiman_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_midiman_input() argument
427 snd_usbmidi_input_data(ep, port, &buffer[i], length); snd_usbmidi_midiman_input()
436 struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_maudio_broken_running_status_input()
445 struct usbmidi_in_port *port = &ep->ports[cable]; snd_usbmidi_maudio_broken_running_status_input()
467 snd_usbmidi_input_data(ep, cable, &buffer[i + 1], snd_usbmidi_maudio_broken_running_status_input()
476 static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep, ch345_broken_sysex_input() argument
485 if (ep->in_sysex && ch345_broken_sysex_input()
486 cin == ep->last_cin && ch345_broken_sysex_input()
498 snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length); ch345_broken_sysex_input()
499 ep->in_sysex = cin == 0x4; ch345_broken_sysex_input()
500 if (!ep->in_sysex) ch345_broken_sysex_input()
501 ep->last_cin = cin; ch345_broken_sysex_input()
509 static void snd_usbmidi_cme_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_cme_input() argument
513 snd_usbmidi_standard_input(ep, buffer, buffer_length); snd_usbmidi_cme_input()
515 snd_usbmidi_input_data(ep, buffer[0] >> 4, snd_usbmidi_cme_input()
561 port->ep->umidi->usb_protocol_ops->output_packet; snd_usbmidi_transmit_byte()
653 static void snd_usbmidi_standard_output(struct snd_usb_midi_out_endpoint *ep, snd_usbmidi_standard_output() argument
660 struct usbmidi_out_port *port = &ep->ports[p]; snd_usbmidi_standard_output()
663 while (urb->transfer_buffer_length + 3 < ep->max_transfer) { snd_usbmidi_standard_output()
719 static void snd_usbmidi_akai_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_akai_input() argument
729 snd_usbmidi_input_data(ep, 0, &buffer[pos], msg_len); snd_usbmidi_akai_input()
736 static void snd_usbmidi_akai_output(struct snd_usb_midi_out_endpoint *ep, snd_usbmidi_akai_output() argument
742 struct snd_rawmidi_substream *substream = ep->ports[0].substream; snd_usbmidi_akai_output()
744 if (!ep->ports[0].active) snd_usbmidi_akai_output()
748 buf_end = ep->max_transfer - MAX_AKAI_SYSEX_LEN - 1; snd_usbmidi_akai_output()
755 ep->ports[0].active = 0; snd_usbmidi_akai_output()
790 ep->ports[0].active = 0; snd_usbmidi_akai_output()
809 static void snd_usbmidi_novation_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_novation_input() argument
814 snd_usbmidi_input_data(ep, 0, &buffer[2], buffer[0] - 1); snd_usbmidi_novation_input()
817 static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint *ep, snd_usbmidi_novation_output() argument
823 if (!ep->ports[0].active) snd_usbmidi_novation_output()
826 count = snd_rawmidi_transmit(ep->ports[0].substream, snd_usbmidi_novation_output()
828 ep->max_transfer - 2); snd_usbmidi_novation_output()
830 ep->ports[0].active = 0; snd_usbmidi_novation_output()
847 static void snd_usbmidi_raw_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_raw_input() argument
850 snd_usbmidi_input_data(ep, 0, buffer, buffer_length); snd_usbmidi_raw_input()
853 static void snd_usbmidi_raw_output(struct snd_usb_midi_out_endpoint *ep, snd_usbmidi_raw_output() argument
858 if (!ep->ports[0].active) snd_usbmidi_raw_output()
860 count = snd_rawmidi_transmit(ep->ports[0].substream, snd_usbmidi_raw_output()
862 ep->max_transfer); snd_usbmidi_raw_output()
864 ep->ports[0].active = 0; snd_usbmidi_raw_output()
879 static void snd_usbmidi_ftdi_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_ftdi_input() argument
883 snd_usbmidi_input_data(ep, 0, buffer + 2, buffer_length - 2); snd_usbmidi_ftdi_input()
891 static void snd_usbmidi_us122l_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_us122l_input() argument
900 snd_usbmidi_input_data(ep, 0, buffer, buffer_length); snd_usbmidi_us122l_input()
903 static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep, snd_usbmidi_us122l_output() argument
908 if (!ep->ports[0].active) snd_usbmidi_us122l_output()
910 switch (snd_usb_get_speed(ep->umidi->dev)) { snd_usbmidi_us122l_output()
918 count = snd_rawmidi_transmit(ep->ports[0].substream, snd_usbmidi_us122l_output()
922 ep->ports[0].active = 0; snd_usbmidi_us122l_output()
926 memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count); snd_usbmidi_us122l_output()
927 urb->transfer_buffer_length = ep->max_transfer; snd_usbmidi_us122l_output()
939 static void snd_usbmidi_emagic_init_out(struct snd_usb_midi_out_endpoint *ep) snd_usbmidi_emagic_init_out() argument
951 send_bulk_static_data(ep, init_data, sizeof(init_data)); snd_usbmidi_emagic_init_out()
953 send_bulk_static_data(ep, init_data, sizeof(init_data)); snd_usbmidi_emagic_init_out()
956 static void snd_usbmidi_emagic_finish_out(struct snd_usb_midi_out_endpoint *ep) snd_usbmidi_emagic_finish_out() argument
969 send_bulk_static_data(ep, finish_data, sizeof(finish_data)); snd_usbmidi_emagic_finish_out()
972 static void snd_usbmidi_emagic_input(struct snd_usb_midi_in_endpoint *ep, snd_usbmidi_emagic_input() argument
985 if (ep->seen_f5) snd_usbmidi_emagic_input()
993 snd_usbmidi_input_data(ep, ep->current_port, buffer, i); snd_usbmidi_emagic_input()
1000 ep->seen_f5 = 1; snd_usbmidi_emagic_input()
1008 ep->current_port = (buffer[0] - 1) & 15; snd_usbmidi_emagic_input()
1012 ep->seen_f5 = 0; snd_usbmidi_emagic_input()
1016 static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint *ep, snd_usbmidi_emagic_output() argument
1019 int port0 = ep->current_port; snd_usbmidi_emagic_output()
1021 int buf_free = ep->max_transfer; snd_usbmidi_emagic_output()
1027 struct usbmidi_out_port *port = &ep->ports[portnum]; snd_usbmidi_emagic_output()
1036 if (portnum != ep->current_port) { snd_usbmidi_emagic_output()
1039 ep->current_port = portnum; snd_usbmidi_emagic_output()
1056 if (buf_free < ep->max_transfer && buf_free > 0) { snd_usbmidi_emagic_output()
1060 urb->transfer_buffer_length = ep->max_transfer - buf_free; snd_usbmidi_emagic_output()
1172 if (port->ep->umidi->disconnected) { snd_usbmidi_output_trigger()
1179 tasklet_schedule(&port->ep->tasklet); snd_usbmidi_output_trigger()
1186 struct snd_usb_midi_out_endpoint *ep = port->ep; snd_usbmidi_output_drain() local
1191 if (ep->umidi->disconnected) snd_usbmidi_output_drain()
1197 spin_lock_irq(&ep->buffer_lock); snd_usbmidi_output_drain()
1198 drain_urbs = ep->active_urbs; snd_usbmidi_output_drain()
1200 ep->drain_urbs |= drain_urbs; snd_usbmidi_output_drain()
1202 prepare_to_wait(&ep->drain_wait, &wait, snd_usbmidi_output_drain()
1204 spin_unlock_irq(&ep->buffer_lock); snd_usbmidi_output_drain()
1206 spin_lock_irq(&ep->buffer_lock); snd_usbmidi_output_drain()
1207 drain_urbs &= ep->drain_urbs; snd_usbmidi_output_drain()
1209 finish_wait(&ep->drain_wait, &wait); snd_usbmidi_output_drain()
1211 spin_unlock_irq(&ep->buffer_lock); snd_usbmidi_output_drain()
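The drain path above snapshots the in-flight set (ep->drain_urbs |= ep->active_urbs) and then sleeps on drain_wait until the completion handler has cleared every snapshotted bit, so it waits only for URBs that were pending when draining started. A pthread rendering of that handshake; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drain_wait = PTHREAD_COND_INITIALIZER;
static unsigned int active_urbs = 0x5;	/* pretend URBs 0 and 2 are in flight */
static unsigned int drain_urbs;

static void *completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	active_urbs = 0;		/* completions ran */
	drain_urbs = 0;			/* ...and cleared the drain bits */
	pthread_cond_broadcast(&drain_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	drain_urbs |= active_urbs;	/* snapshot what must finish */
	pthread_create(&t, NULL, completer, NULL);
	while (drain_urbs)		/* wait for the snapshot only */
		pthread_cond_wait(&drain_wait, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("drained");
	return 0;
}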
1258 * May be called when ep hasn't been initialized completely.
1260 static void snd_usbmidi_in_endpoint_delete(struct snd_usb_midi_in_endpoint *ep) snd_usbmidi_in_endpoint_delete() argument
1265 if (ep->urbs[i]) snd_usbmidi_in_endpoint_delete()
1266 free_urb_and_buffer(ep->umidi, ep->urbs[i], snd_usbmidi_in_endpoint_delete()
1267 ep->urbs[i]->transfer_buffer_length); snd_usbmidi_in_endpoint_delete()
1268 kfree(ep); snd_usbmidi_in_endpoint_delete()
1278 struct snd_usb_midi_in_endpoint *ep; snd_usbmidi_in_endpoint_create() local
1285 ep = kzalloc(sizeof(*ep), GFP_KERNEL); snd_usbmidi_in_endpoint_create()
1286 if (!ep) snd_usbmidi_in_endpoint_create()
1288 ep->umidi = umidi; snd_usbmidi_in_endpoint_create()
1291 ep->urbs[i] = usb_alloc_urb(0, GFP_KERNEL); snd_usbmidi_in_endpoint_create()
1292 if (!ep->urbs[i]) { snd_usbmidi_in_endpoint_create()
1293 snd_usbmidi_in_endpoint_delete(ep); snd_usbmidi_in_endpoint_create()
1304 &ep->urbs[i]->transfer_dma); snd_usbmidi_in_endpoint_create()
1306 snd_usbmidi_in_endpoint_delete(ep); snd_usbmidi_in_endpoint_create()
1310 usb_fill_int_urb(ep->urbs[i], umidi->dev, snd_usbmidi_in_endpoint_create()
1313 ep, ep_info->in_interval); snd_usbmidi_in_endpoint_create()
1315 usb_fill_bulk_urb(ep->urbs[i], umidi->dev, snd_usbmidi_in_endpoint_create()
1317 snd_usbmidi_in_urb_complete, ep); snd_usbmidi_in_endpoint_create()
1318 ep->urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; snd_usbmidi_in_endpoint_create()
1321 rep->in = ep; snd_usbmidi_in_endpoint_create()
1327 * May be called when ep hasn't been initialized completely.
1329 static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep) snd_usbmidi_out_endpoint_clear() argument
1334 if (ep->urbs[i].urb) { snd_usbmidi_out_endpoint_clear()
1335 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, snd_usbmidi_out_endpoint_clear()
1336 ep->max_transfer); snd_usbmidi_out_endpoint_clear()
1337 ep->urbs[i].urb = NULL; snd_usbmidi_out_endpoint_clear()
1341 static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep) snd_usbmidi_out_endpoint_delete() argument
1343 snd_usbmidi_out_endpoint_clear(ep); snd_usbmidi_out_endpoint_delete()
1344 kfree(ep); snd_usbmidi_out_endpoint_delete()
1354 struct snd_usb_midi_out_endpoint *ep; snd_usbmidi_out_endpoint_create() local
1360 ep = kzalloc(sizeof(*ep), GFP_KERNEL); snd_usbmidi_out_endpoint_create()
1361 if (!ep) snd_usbmidi_out_endpoint_create()
1363 ep->umidi = umidi; snd_usbmidi_out_endpoint_create()
1366 ep->urbs[i].urb = usb_alloc_urb(0, GFP_KERNEL); snd_usbmidi_out_endpoint_create()
1367 if (!ep->urbs[i].urb) { snd_usbmidi_out_endpoint_create()
1368 snd_usbmidi_out_endpoint_delete(ep); snd_usbmidi_out_endpoint_create()
1371 ep->urbs[i].ep = ep; snd_usbmidi_out_endpoint_create()
1379 ep->max_transfer = usb_maxpacket(umidi->dev, pipe, 1); snd_usbmidi_out_endpoint_create()
1392 ep->max_transfer = 4; snd_usbmidi_out_endpoint_create()
1399 ep->max_transfer = 9; snd_usbmidi_out_endpoint_create()
1404 ep->max_transfer, GFP_KERNEL, snd_usbmidi_out_endpoint_create()
1405 &ep->urbs[i].urb->transfer_dma); snd_usbmidi_out_endpoint_create()
1407 snd_usbmidi_out_endpoint_delete(ep); snd_usbmidi_out_endpoint_create()
1411 usb_fill_int_urb(ep->urbs[i].urb, umidi->dev, snd_usbmidi_out_endpoint_create()
1412 pipe, buffer, ep->max_transfer, snd_usbmidi_out_endpoint_create()
1414 &ep->urbs[i], ep_info->out_interval); snd_usbmidi_out_endpoint_create()
1416 usb_fill_bulk_urb(ep->urbs[i].urb, umidi->dev, snd_usbmidi_out_endpoint_create()
1417 pipe, buffer, ep->max_transfer, snd_usbmidi_out_endpoint_create()
1419 &ep->urbs[i]); snd_usbmidi_out_endpoint_create()
1420 ep->urbs[i].urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; snd_usbmidi_out_endpoint_create()
1423 spin_lock_init(&ep->buffer_lock); snd_usbmidi_out_endpoint_create()
1424 tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep); snd_usbmidi_out_endpoint_create()
1425 init_waitqueue_head(&ep->drain_wait); snd_usbmidi_out_endpoint_create()
1429 ep->ports[i].ep = ep; snd_usbmidi_out_endpoint_create()
1430 ep->ports[i].cable = i << 4; snd_usbmidi_out_endpoint_create()
1434 umidi->usb_protocol_ops->init_out_endpoint(ep); snd_usbmidi_out_endpoint_create()
1436 rep->out = ep; snd_usbmidi_out_endpoint_create()
1448 struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; snd_usbmidi_free() local
1449 if (ep->out) snd_usbmidi_free()
1450 snd_usbmidi_out_endpoint_delete(ep->out); snd_usbmidi_free()
1451 if (ep->in) snd_usbmidi_free()
1452 snd_usbmidi_in_endpoint_delete(ep->in); snd_usbmidi_free()
1479 struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; snd_usbmidi_disconnect() local
1480 if (ep->out) snd_usbmidi_disconnect()
1481 tasklet_kill(&ep->out->tasklet); snd_usbmidi_disconnect()
1482 if (ep->out) { snd_usbmidi_disconnect()
1484 usb_kill_urb(ep->out->urbs[j].urb); snd_usbmidi_disconnect()
1486 umidi->usb_protocol_ops->finish_out_endpoint(ep->out); snd_usbmidi_disconnect()
1487 ep->out->active_urbs = 0; snd_usbmidi_disconnect()
1488 if (ep->out->drain_urbs) { snd_usbmidi_disconnect()
1489 ep->out->drain_urbs = 0; snd_usbmidi_disconnect()
1490 wake_up(&ep->out->drain_wait); snd_usbmidi_disconnect()
1493 if (ep->in) snd_usbmidi_disconnect()
1495 usb_kill_urb(ep->in->urbs[j]); snd_usbmidi_disconnect()
1497 if (ep->out) snd_usbmidi_disconnect()
1498 snd_usbmidi_out_endpoint_clear(ep->out); snd_usbmidi_disconnect()
1499 if (ep->in) { snd_usbmidi_disconnect()
1500 snd_usbmidi_in_endpoint_delete(ep->in); snd_usbmidi_disconnect()
1501 ep->in = NULL; snd_usbmidi_disconnect()
1816 struct usb_endpoint_descriptor *ep; snd_usbmidi_get_ms_info() local
1839 ep = get_ep_desc(hostep); snd_usbmidi_get_ms_info()
1840 if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) snd_usbmidi_get_ms_info()
1848 if (usb_endpoint_dir_out(ep)) { snd_usbmidi_get_ms_info()
1856 endpoints[epidx].out_ep = usb_endpoint_num(ep); snd_usbmidi_get_ms_info()
1857 if (usb_endpoint_xfer_int(ep)) snd_usbmidi_get_ms_info()
1858 endpoints[epidx].out_interval = ep->bInterval; snd_usbmidi_get_ms_info()
1869 ep->bEndpointAddress, ms_ep->bNumEmbMIDIJack); snd_usbmidi_get_ms_info()
1878 endpoints[epidx].in_ep = usb_endpoint_num(ep); snd_usbmidi_get_ms_info()
1879 if (usb_endpoint_xfer_int(ep)) snd_usbmidi_get_ms_info()
1880 endpoints[epidx].in_interval = ep->bInterval; snd_usbmidi_get_ms_info()
1886 ep->bEndpointAddress, ms_ep->bNumEmbMIDIJack); snd_usbmidi_get_ms_info()
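The descriptor checks in snd_usbmidi_get_ms_info() above (usb_endpoint_xfer_bulk/int, usb_endpoint_dir_out, usb_endpoint_num) are thin wrappers over bit tests on bmAttributes and bEndpointAddress. A self-contained sketch of those tests using the standard USB 2.0 field layout; only main()'s sample endpoint is invented:

#include <stdbool.h>
#include <stdio.h>

#define USB_ENDPOINT_XFERTYPE_MASK	0x03
#define USB_ENDPOINT_XFER_BULK		0x02
#define USB_ENDPOINT_XFER_INT		0x03
#define USB_ENDPOINT_NUMBER_MASK	0x0f
#define USB_DIR_IN			0x80

static bool xfer_bulk_or_int(unsigned char bmAttributes)
{
	unsigned char type = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return type == USB_ENDPOINT_XFER_BULK || type == USB_ENDPOINT_XFER_INT;
}

int main(void)
{
	unsigned char bEndpointAddress = 0x81;	/* EP 1, IN */
	unsigned char bmAttributes = 0x02;	/* bulk */

	printf("num=%d dir_in=%d bulk_or_int=%d\n",
	       bEndpointAddress & USB_ENDPOINT_NUMBER_MASK,
	       !!(bEndpointAddress & USB_DIR_IN),
	       xfer_bulk_or_int(bmAttributes));
	return 0;
}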
1958 dev_dbg(&umidi->dev->dev, "switching to altsetting %d with int ep\n", snd_usbmidi_switch_roland_altsetting()
2252 struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; snd_usbmidi_input_stop() local
2253 if (ep->in) snd_usbmidi_input_stop()
2255 usb_kill_urb(ep->in->urbs[j]); snd_usbmidi_input_stop()
2261 static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep) snd_usbmidi_input_start_ep() argument
2265 if (!ep) snd_usbmidi_input_start_ep()
2268 struct urb *urb = ep->urbs[i]; snd_usbmidi_input_start_ep()
2269 urb->dev = ep->umidi->dev; snd_usbmidi_input_start_ep()
435 snd_usbmidi_maudio_broken_running_status_input( struct snd_usb_midi_in_endpoint *ep, uint8_t *buffer, int buffer_length) snd_usbmidi_maudio_broken_running_status_input() argument
helper.h
22 #define get_endpoint(alt,ep) (&(alt)->endpoint[ep].desc)
23 #define get_ep_desc(ep) (&(ep)->desc)
pcm.c
158 unsigned int ep; init_pitch_v1() local
164 ep = get_endpoint(alts, 0)->bEndpointAddress; init_pitch_v1()
169 UAC_EP_CS_ATTR_PITCH_CONTROL << 8, ep, init_pitch_v1()
172 iface, ep); init_pitch_v1()
229 struct snd_usb_endpoint *ep = subs->data_endpoint; start_endpoints() local
231 dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep); start_endpoints()
233 ep->data_subs = subs; start_endpoints()
234 err = snd_usb_endpoint_start(ep, can_sleep); start_endpoints()
243 struct snd_usb_endpoint *ep = subs->sync_endpoint; start_endpoints() local
260 dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep); start_endpoints()
262 ep->sync_slave = subs->data_endpoint; start_endpoints()
263 err = snd_usb_endpoint_start(ep, can_sleep); start_endpoints()
290 unsigned int *ep) search_roland_implicit_fb()
312 *ep = epd->bEndpointAddress; search_roland_implicit_fb()
323 unsigned int ep; set_sync_ep_implicit_fb_quirk() local
332 ep = 0x81; set_sync_ep_implicit_fb_quirk()
343 ep = 0x81; set_sync_ep_implicit_fb_quirk()
359 &alts, &ep) >= 0) { set_sync_ep_implicit_fb_quirk()
368 alts, ep, !subs->direction, set_sync_ep_implicit_fb_quirk()
385 unsigned int ep, attr; set_sync_endpoint() local
445 ep = get_endpoint(alts, 1)->bEndpointAddress; set_sync_endpoint()
447 ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || set_sync_endpoint()
448 (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) { set_sync_endpoint()
450 "%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n", set_sync_endpoint()
452 is_playback, ep, get_endpoint(alts, 0)->bSynchAddress); set_sync_endpoint()
462 alts, ep, !subs->direction, set_sync_endpoint()
608 * Configure the sync ep using the rate and pcm format of the data ep.
645 "%s: no valid audioformat for sync ep %x found\n", configure_sync_endpoint()
652 * data and sync ep audioformat. configure_sync_endpoint()
658 "%s: adjusted sync ep period bytes (%d -> %d)\n", configure_sync_endpoint()
1444 struct snd_usb_endpoint *ep = subs->data_endpoint; prepare_playback_urb() local
1455 subs->frame_limit += ep->max_urb_frames; prepare_playback_urb()
1460 counts = snd_usb_endpoint_next_packet_size(ep); prepare_playback_urb()
1463 urb->iso_frame_desc[i].offset = frames * ep->stride; prepare_playback_urb()
1464 urb->iso_frame_desc[i].length = counts * ep->stride; prepare_playback_urb()
1479 counts * ep->stride; prepare_playback_urb()
1486 frames * ep->stride; prepare_playback_urb()
1496 !snd_usb_endpoint_implicit_feedback_sink(ep)) prepare_playback_urb()
1499 bytes = frames * ep->stride; prepare_playback_urb()
1558 struct snd_usb_endpoint *ep = subs->data_endpoint; retire_playback_urb() local
1559 int processed = urb->transfer_buffer_length / ep->stride; retire_playback_urb()
287 search_roland_implicit_fb(struct usb_device *dev, int ifnum, unsigned int altsetting, struct usb_host_interface **alts, unsigned int *ep) search_roland_implicit_fb() argument
midi.h
9 int8_t out_ep; /* ep number, 0 autodetect */
quirks.h
29 void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep);
clock.c
284 unsigned int ep; set_sample_rate_v1() local
290 ep = get_endpoint(alts, 0)->bEndpointAddress; set_sample_rate_v1()
301 UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, set_sample_rate_v1()
303 dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n", set_sample_rate_v1()
304 iface, fmt->altsetting, rate, ep); set_sample_rate_v1()
315 UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, set_sample_rate_v1()
317 dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n", set_sample_rate_v1()
318 iface, fmt->altsetting, ep); set_sample_rate_v1()
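set_sample_rate_v1() above addresses the endpoint's sampling-frequency control (UAC_EP_CS_ATTR_SAMPLE_RATE in wValue's high byte, the endpoint address in wIndex) and carries the rate as a 3-byte little-endian payload. The kernel side goes through snd_usb_ctl_msg(); the sketch below shows only the payload packing, which is the part fixed by the UAC v1 spec:

#include <stdint.h>
#include <stdio.h>

static void pack_uac1_rate(uint8_t buf[3], unsigned int rate)
{
	buf[0] = rate;			/* little-endian, 24-bit sample rate */
	buf[1] = rate >> 8;
	buf[2] = rate >> 16;
}

int main(void)
{
	uint8_t data[3];

	pack_uac1_rate(data, 44100);
	printf("%02x %02x %02x\n", data[0], data[1], data[2]);	/* 44 ac 00 */
	return 0;
}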
/linux-4.4.14/drivers/usb/gadget/udc/
fsl_qe_udc.c
56 /* the ep name is important in the gadget framework; it should obey the convention of ep_match() */
80 static void done(struct qe_ep *ep, struct qe_req *req, int status) done() argument
82 struct qe_udc *udc = ep->udc; done()
83 unsigned char stopped = ep->stopped; done()
87 * so here the req will be dropped from the ep->queue done()
100 ep_is_in(ep) done()
108 ep_is_in(ep) done()
114 ep->ep.name, &req->req, status, done()
118 ep->stopped = 1; done()
121 usb_gadget_giveback_request(&ep->ep, &req->req); done()
125 ep->stopped = stopped; done()
129 * nuke(): delete all requests related to this ep
131 static void nuke(struct qe_ep *ep, int status) nuke() argument
134 while (!list_empty(&ep->queue)) { nuke()
136 req = list_entry(ep->queue.next, struct qe_req, queue); nuke()
138 done(ep, req, status); nuke()
146 static int qe_eprx_stall_change(struct qe_ep *ep, int value) qe_eprx_stall_change() argument
149 u8 epnum = ep->epnum; qe_eprx_stall_change()
150 struct qe_udc *udc = ep->udc; qe_eprx_stall_change()
156 else if (ep->dir == USB_DIR_IN) qe_eprx_stall_change()
163 static int qe_eptx_stall_change(struct qe_ep *ep, int value) qe_eptx_stall_change() argument
166 u8 epnum = ep->epnum; qe_eptx_stall_change()
167 struct qe_udc *udc = ep->udc; qe_eptx_stall_change()
173 else if (ep->dir == USB_DIR_OUT) qe_eptx_stall_change()
190 static int qe_eprx_nack(struct qe_ep *ep) qe_eprx_nack() argument
192 u8 epnum = ep->epnum; qe_eprx_nack()
193 struct qe_udc *udc = ep->udc; qe_eprx_nack()
195 if (ep->state == EP_STATE_IDLE) { qe_eprx_nack()
196 /* Set the ep's nack */ qe_eprx_nack()
204 ep->state = EP_STATE_NACK; qe_eprx_nack()
209 static int qe_eprx_normal(struct qe_ep *ep) qe_eprx_normal() argument
211 struct qe_udc *udc = ep->udc; qe_eprx_normal()
213 if (ep->state == EP_STATE_NACK) { qe_eprx_normal()
214 clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum], qe_eprx_normal()
223 ep->state = EP_STATE_IDLE; qe_eprx_normal()
224 ep->has_data = 0; qe_eprx_normal()
230 static int qe_ep_cmd_stoptx(struct qe_ep *ep) qe_ep_cmd_stoptx() argument
232 if (ep->udc->soc_type == PORT_CPM) qe_ep_cmd_stoptx()
233 cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT), qe_ep_cmd_stoptx()
237 ep->epnum, 0); qe_ep_cmd_stoptx()
242 static int qe_ep_cmd_restarttx(struct qe_ep *ep) qe_ep_cmd_restarttx() argument
244 if (ep->udc->soc_type == PORT_CPM) qe_ep_cmd_restarttx()
245 cpm_command(CPM_USB_RESTART_TX | (ep->epnum << qe_ep_cmd_restarttx()
249 ep->epnum, 0); qe_ep_cmd_restarttx()
254 static int qe_ep_flushtxfifo(struct qe_ep *ep) qe_ep_flushtxfifo() argument
256 struct qe_udc *udc = ep->udc; qe_ep_flushtxfifo()
259 i = (int)ep->epnum; qe_ep_flushtxfifo()
261 qe_ep_cmd_stoptx(ep); qe_ep_flushtxfifo()
263 USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum))); qe_ep_flushtxfifo()
268 ep->c_txbd = ep->txbase; qe_ep_flushtxfifo()
269 ep->n_txbd = ep->txbase; qe_ep_flushtxfifo()
270 qe_ep_cmd_restarttx(ep); qe_ep_flushtxfifo()
274 static int qe_ep_filltxfifo(struct qe_ep *ep) qe_ep_filltxfifo() argument
276 struct qe_udc *udc = ep->udc; qe_ep_filltxfifo()
279 USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum))); qe_ep_filltxfifo()
285 struct qe_ep *ep; qe_epbds_reset() local
290 ep = &udc->eps[pipe_num]; qe_epbds_reset()
292 if (ep->dir == USB_DIR_OUT) qe_epbds_reset()
297 bd = ep->rxbase; qe_epbds_reset()
304 bd = ep->txbase; qe_epbds_reset()
317 struct qe_ep *ep; qe_ep_reset() local
320 ep = &udc->eps[pipe_num]; qe_ep_reset()
324 switch (ep->dir) { qe_ep_reset()
326 qe_ep_flushtxfifo(ep); qe_ep_reset()
332 qe_ep_flushtxfifo(ep); qe_ep_reset()
345 static int qe_ep_toggledata01(struct qe_ep *ep) qe_ep_toggledata01() argument
347 ep->data01 ^= 0x1; qe_ep_toggledata01()
353 struct qe_ep *ep = &udc->eps[pipe_num]; qe_ep_bd_init() local
360 if (ep->dir == USB_DIR_OUT) qe_ep_bd_init()
366 /* alloc multi-ram for BD rings and set the ep parameters */ qe_ep_bd_init()
379 ep->rxbase = cpm_muram_addr(tmp_addr); qe_ep_bd_init()
380 ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd) qe_ep_bd_init()
382 ep->n_rxbd = ep->rxbase; qe_ep_bd_init()
383 ep->e_rxbd = ep->rxbase; qe_ep_bd_init()
384 ep->n_txbd = ep->txbase; qe_ep_bd_init()
385 ep->c_txbd = ep->txbase; qe_ep_bd_init()
386 ep->data01 = 0; /* data0 */ qe_ep_bd_init()
389 bd = ep->rxbase; qe_ep_bd_init()
398 bd = ep->txbase; qe_ep_bd_init()
410 static int qe_ep_rxbd_update(struct qe_ep *ep) qe_ep_rxbd_update() argument
418 if (ep->rxbase == NULL) qe_ep_rxbd_update()
421 bd = ep->rxbase; qe_ep_rxbd_update()
423 ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC); qe_ep_rxbd_update()
424 if (ep->rxframe == NULL) { qe_ep_rxbd_update()
425 dev_err(ep->udc->dev, "malloc rxframe failed\n"); qe_ep_rxbd_update()
429 qe_frame_init(ep->rxframe); qe_ep_rxbd_update()
431 if (ep->dir == USB_DIR_OUT) qe_ep_rxbd_update()
436 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1); qe_ep_rxbd_update()
437 ep->rxbuffer = kzalloc(size, GFP_ATOMIC); qe_ep_rxbd_update()
438 if (ep->rxbuffer == NULL) { qe_ep_rxbd_update()
439 dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n", qe_ep_rxbd_update()
441 kfree(ep->rxframe); qe_ep_rxbd_update()
445 ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer); qe_ep_rxbd_update()
446 if (ep->rxbuf_d == DMA_ADDR_INVALID) { qe_ep_rxbd_update()
447 ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent, qe_ep_rxbd_update()
448 ep->rxbuffer, qe_ep_rxbd_update()
451 ep->rxbufmap = 1; qe_ep_rxbd_update()
453 dma_sync_single_for_device(ep->udc->gadget.dev.parent, qe_ep_rxbd_update()
454 ep->rxbuf_d, size, qe_ep_rxbd_update()
456 ep->rxbufmap = 0; qe_ep_rxbd_update()
459 size = ep->ep.maxpacket + USB_CRC_SIZE + 2; qe_ep_rxbd_update()
460 tmp = ep->rxbuf_d; qe_ep_rxbd_update()
477 struct qe_ep *ep = &udc->eps[pipe_num]; qe_ep_register_init() local
486 logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); qe_ep_register_init()
489 switch (ep->ep.desc->bmAttributes & 0x03) { qe_ep_register_init()
504 switch (ep->dir) { qe_ep_register_init()
520 tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE); qe_ep_register_init()
532 struct qe_ep *ep = &udc->eps[pipe_num]; qe_ep_init() local
545 if (strstr(ep->ep.name, "-iso") qe_ep_init()
546 || strstr(ep->ep.name, "-int")) qe_ep_init()
567 if (strstr(ep->ep.name, "-iso")) /* bulk is ok */ qe_ep_init()
583 if (strstr(ep->ep.name, "-bulk") qe_ep_init()
584 || strstr(ep->ep.name, "-int")) qe_ep_init()
598 if (strstr(ep->ep.name, "-iso") qe_ep_init()
599 || strstr(ep->ep.name, "-int")) qe_ep_init()
638 /* initialize ep structure */ qe_ep_init()
639 ep->ep.maxpacket = max; qe_ep_init()
640 ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); qe_ep_init()
641 ep->ep.desc = desc; qe_ep_init()
642 ep->stopped = 0; qe_ep_init()
643 ep->init = 1; qe_ep_init()
646 ep->dir = USB_DIR_BOTH; qe_ep_init()
652 ep->dir = USB_DIR_OUT; qe_ep_init()
655 ep->dir = USB_DIR_IN; qe_ep_init()
663 if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) { qe_ep_init()
664 reval = qe_ep_rxbd_update(ep); qe_ep_init()
669 if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) { qe_ep_init()
670 ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC); qe_ep_init()
671 if (ep->txframe == NULL) { qe_ep_init()
675 qe_frame_init(ep->txframe); qe_ep_init()
686 kfree(ep->rxbuffer); qe_ep_init()
687 kfree(ep->rxframe); qe_ep_init()
691 dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name); qe_ep_init()
713 static void recycle_one_rxbd(struct qe_ep *ep) recycle_one_rxbd() argument
717 bdstatus = in_be32((u32 __iomem *)ep->e_rxbd); recycle_one_rxbd()
719 out_be32((u32 __iomem *)ep->e_rxbd, bdstatus); recycle_one_rxbd()
722 ep->e_rxbd = ep->rxbase; recycle_one_rxbd()
724 ep->e_rxbd++; recycle_one_rxbd()
727 static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext) recycle_rxbds() argument
733 nextbd = ep->n_rxbd; recycle_rxbds()
734 bd = ep->e_rxbd; recycle_rxbds()
742 bd = ep->rxbase; recycle_rxbds()
751 ep->e_rxbd = bd; recycle_rxbds()
754 static void ep_recycle_rxbds(struct qe_ep *ep) ep_recycle_rxbds() argument
756 struct qe_bd __iomem *bd = ep->n_rxbd; ep_recycle_rxbds()
758 u8 epnum = ep->epnum; ep_recycle_rxbds()
759 struct qe_udc *udc = ep->udc; ep_recycle_rxbds()
763 bd = ep->rxbase + ep_recycle_rxbds()
770 bd = ep->rxbase; ep_recycle_rxbds()
774 ep->e_rxbd = bd; ep_recycle_rxbds()
775 recycle_rxbds(ep, 0); ep_recycle_rxbds()
776 ep->e_rxbd = ep->n_rxbd; ep_recycle_rxbds()
778 recycle_rxbds(ep, 1); ep_recycle_rxbds()
783 if (ep->has_data <= 0 && (!list_empty(&ep->queue))) ep_recycle_rxbds()
784 qe_eprx_normal(ep); ep_recycle_rxbds()
786 ep->localnack = 0; ep_recycle_rxbds()
791 static int qe_ep_rxframe_handle(struct qe_ep *ep);
796 struct qe_ep *ep = &udc->eps[0]; ep0_setup_handle() local
801 pframe = ep->rxframe; ep0_setup_handle()
809 ep->data01 = 1; ep0_setup_handle()
820 struct qe_ep *ep = &udc->eps[0]; qe_ep0_rx() local
826 pframe = ep->rxframe; qe_ep0_rx()
828 if (ep->dir == USB_DIR_IN) { qe_ep0_rx()
833 bd = ep->n_rxbd; qe_ep0_rx()
866 qe_ep_rxframe_handle(ep); qe_ep0_rx()
873 recycle_one_rxbd(ep); qe_ep0_rx()
877 bd = ep->rxbase; qe_ep0_rx()
886 ep->n_rxbd = bd; qe_ep0_rx()
891 static int qe_ep_rxframe_handle(struct qe_ep *ep) qe_ep_rxframe_handle() argument
899 pframe = ep->rxframe; qe_ep_rxframe_handle()
904 if (framepid != ep->data01) { qe_ep_rxframe_handle()
905 dev_err(ep->udc->dev, "the data01 error!\n"); qe_ep_rxframe_handle()
910 if (list_empty(&ep->queue)) { qe_ep_rxframe_handle()
911 dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name); qe_ep_rxframe_handle()
913 req = list_entry(ep->queue.next, struct qe_req, queue); qe_ep_rxframe_handle()
919 if ((fsize < ep->ep.maxpacket) || qe_ep_rxframe_handle()
921 if (ep->epnum == 0) qe_ep_rxframe_handle()
922 ep0_req_complete(ep->udc, req); qe_ep_rxframe_handle()
924 done(ep, req, 0); qe_ep_rxframe_handle()
925 if (list_empty(&ep->queue) && ep->epnum != 0) qe_ep_rxframe_handle()
926 qe_eprx_nack(ep); qe_ep_rxframe_handle()
931 qe_ep_toggledata01(ep); qe_ep_rxframe_handle()
939 struct qe_ep *ep; ep_rx_tasklet() local
949 ep = &udc->eps[i]; ep_rx_tasklet()
951 if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) { ep_rx_tasklet()
953 "This is a transmit ep or disable tasklet!\n"); ep_rx_tasklet()
957 pframe = ep->rxframe; ep_rx_tasklet()
958 bd = ep->n_rxbd; ep_rx_tasklet()
963 if (list_empty(&ep->queue)) { ep_rx_tasklet()
964 qe_eprx_nack(ep); ep_rx_tasklet()
967 ep->has_data); ep_rx_tasklet()
991 qe_ep_rxframe_handle(ep); ep_rx_tasklet()
999 ep->has_data--; ep_rx_tasklet()
1000 if (!(ep->localnack)) ep_rx_tasklet()
1001 recycle_one_rxbd(ep); ep_rx_tasklet()
1005 bd = ep->rxbase; ep_rx_tasklet()
1013 ep->n_rxbd = bd; ep_rx_tasklet()
1015 if (ep->localnack) ep_rx_tasklet()
1016 ep_recycle_rxbds(ep); ep_rx_tasklet()
1018 ep->enable_tasklet = 0; ep_rx_tasklet()
1024 static int qe_ep_rx(struct qe_ep *ep) qe_ep_rx() argument
1031 udc = ep->udc; qe_ep_rx()
1032 pframe = ep->rxframe; qe_ep_rx()
1034 if (ep->dir == USB_DIR_IN) { qe_ep_rx()
1035 dev_err(udc->dev, "transmit ep in rx function\n"); qe_ep_rx()
1039 bd = ep->n_rxbd; qe_ep_rx()
1041 swoffs = (u16)(bd - ep->rxbase); qe_ep_rx()
1042 ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) - qe_ep_rx()
1043 in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3); qe_ep_rx()
1050 qe_eprx_nack(ep); qe_ep_rx()
1051 ep->localnack = 1; qe_ep_rx()
1054 ep->has_data = USB_BDRING_LEN_RX - emptybds; qe_ep_rx()
1056 if (list_empty(&ep->queue)) { qe_ep_rx()
1057 qe_eprx_nack(ep); qe_ep_rx()
1059 ep->has_data); qe_ep_rx()
1064 ep->enable_tasklet = 1; qe_ep_rx()
1070 static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame) qe_ep_tx() argument
1072 struct qe_udc *udc = ep->udc; qe_ep_tx()
1078 if (ep->dir == USB_DIR_OUT) { qe_ep_tx()
1079 dev_err(udc->dev, "receive ep passed to tx function\n"); qe_ep_tx()
1088 bd = ep->n_txbd; qe_ep_tx()
1108 if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP)) qe_ep_tx()
1109 ep->data01 = 0x1; qe_ep_tx()
1111 if (ep->data01) { qe_ep_tx()
1121 qe_ep_filltxfifo(ep); qe_ep_tx()
1126 qe_ep_toggledata01(ep); qe_ep_tx()
1128 ep->n_txbd = ep->txbase; qe_ep_tx()
1130 ep->n_txbd++; qe_ep_tx()
1142 static int txcomplete(struct qe_ep *ep, unsigned char restart) txcomplete() argument
1144 if (ep->tx_req != NULL) { txcomplete()
1145 struct qe_req *req = ep->tx_req; txcomplete()
1148 last_len = min_t(unsigned, req->req.length - ep->sent, txcomplete()
1149 ep->ep.maxpacket); txcomplete()
1152 int asent = ep->last; txcomplete()
1153 ep->sent += asent; txcomplete()
1154 ep->last -= asent; txcomplete()
1156 ep->last = 0; txcomplete()
1162 (req->req.length % ep->ep.maxpacket) != 0) txcomplete()
1170 if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { txcomplete()
1171 done(ep, ep->tx_req, 0); txcomplete()
1172 ep->tx_req = NULL; txcomplete()
1173 ep->last = 0; txcomplete()
1174 ep->sent = 0; txcomplete()
1179 if (ep->tx_req == NULL) { txcomplete()
1180 if (!list_empty(&ep->queue)) { txcomplete()
1181 ep->tx_req = list_entry(ep->queue.next, struct qe_req, txcomplete()
1183 ep->last = 0; txcomplete()
1184 ep->sent = 0; txcomplete()
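The zlp flag computed in txcomplete() above follows the usual gadget convention: a transfer whose length is a non-zero exact multiple of maxpacket is terminated by an extra zero-length packet when the request asked for one (req->req.zero). A sketch of that rule; the predicate name is illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool needs_zlp(unsigned int length, unsigned int maxpacket, bool zero)
{
	return zero && length != 0 && length % maxpacket == 0;
}

int main(void)
{
	printf("%d\n", needs_zlp(512, 64, true));	/* 1: exact multiple */
	printf("%d\n", needs_zlp(500, 64, true));	/* 0: short packet ends it */
	return 0;
}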
1192 static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame) qe_usb_senddata() argument
1198 size = min_t(u32, (ep->tx_req->req.length - ep->sent), qe_usb_senddata()
1199 ep->ep.maxpacket); qe_usb_senddata()
1200 buf = (u8 *)ep->tx_req->req.buf + ep->sent; qe_usb_senddata()
1202 ep->last = size; qe_usb_senddata()
1203 ep->tx_req->req.actual += size; qe_usb_senddata()
1208 return qe_ep_tx(ep, frame); qe_usb_senddata()
1214 static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor) sendnulldata() argument
1216 struct qe_udc *udc = ep->udc; sendnulldata()
1227 return qe_ep_tx(ep, frame); sendnulldata()
1230 static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame) frame_create_tx() argument
1232 struct qe_req *req = ep->tx_req; frame_create_tx()
1238 if ((req->req.length - ep->sent) > 0) frame_create_tx()
1239 reval = qe_usb_senddata(ep, frame); frame_create_tx()
1241 reval = sendnulldata(ep, frame, 0); frame_create_tx()
1252 struct qe_ep *ep = &udc->eps[0]; ep0_prime_status() local
1257 sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ); ep0_prime_status()
1269 struct qe_ep *ep = &udc->eps[0]; ep0_req_complete() local
1270 /* because the usb and ep status have already been set in ch9setaddress() */ ep0_req_complete()
1274 done(ep, req, 0); ep0_req_complete()
1281 done(ep, req, 0); ep0_req_complete()
1286 done(ep, req, 0); ep0_req_complete()
1293 done(ep, req, 0); ep0_req_complete()
1307 static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart) ep0_txcomplete() argument
1310 struct qe_frame *frame = ep->txframe; ep0_txcomplete()
1314 ep->udc->ep0_state = WAIT_FOR_SETUP; ep0_txcomplete()
1316 sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ); ep0_txcomplete()
1320 tx_req = ep->tx_req; ep0_txcomplete()
1323 int asent = ep->last; ep0_txcomplete()
1324 ep->sent += asent; ep0_txcomplete()
1325 ep->last -= asent; ep0_txcomplete()
1327 ep->last = 0; ep0_txcomplete()
1331 if ((ep->tx_req->req.length - ep->sent) <= 0) { ep0_txcomplete()
1332 ep->tx_req->req.actual = (unsigned int)ep->sent; ep0_txcomplete()
1333 ep0_req_complete(ep->udc, ep->tx_req); ep0_txcomplete()
1334 ep->tx_req = NULL; ep0_txcomplete()
1335 ep->last = 0; ep0_txcomplete()
1336 ep->sent = 0; ep0_txcomplete()
1339 dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n"); ep0_txcomplete()
1345 static int ep0_txframe_handle(struct qe_ep *ep) ep0_txframe_handle() argument
1348 if (frame_get_status(ep->txframe) & FRAME_ERROR) { ep0_txframe_handle()
1349 qe_ep_flushtxfifo(ep); ep0_txframe_handle()
1350 dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n"); ep0_txframe_handle()
1351 if (frame_get_info(ep->txframe) & PID_DATA0) ep0_txframe_handle()
1352 ep->data01 = 0; ep0_txframe_handle()
1354 ep->data01 = 1; ep0_txframe_handle()
1356 ep0_txcomplete(ep, 1); ep0_txframe_handle()
1358 ep0_txcomplete(ep, 0); ep0_txframe_handle()
1360 frame_create_tx(ep, ep->txframe); ep0_txframe_handle()
1364 static int qe_ep0_txconf(struct qe_ep *ep) qe_ep0_txconf() argument
1370 bd = ep->c_txbd; qe_ep0_txconf()
1373 pframe = ep->txframe; qe_ep0_txconf()
1379 ep->c_txbd = ep->txbase; qe_ep0_txconf()
1381 ep->c_txbd++; qe_ep0_txconf()
1383 if (ep->c_txbd == ep->n_txbd) { qe_ep0_txconf()
1391 ep0_txframe_handle(ep); qe_ep0_txconf()
1394 bd = ep->c_txbd; qe_ep0_txconf()
1401 static int ep_txframe_handle(struct qe_ep *ep) ep_txframe_handle() argument
1403 if (frame_get_status(ep->txframe) & FRAME_ERROR) { ep_txframe_handle()
1404 qe_ep_flushtxfifo(ep); ep_txframe_handle()
1405 dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n"); ep_txframe_handle()
1406 if (frame_get_info(ep->txframe) & PID_DATA0) ep_txframe_handle()
1407 ep->data01 = 0; ep_txframe_handle()
1409 ep->data01 = 1; ep_txframe_handle()
1411 txcomplete(ep, 1); ep_txframe_handle()
1413 txcomplete(ep, 0); ep_txframe_handle()
1415 frame_create_tx(ep, ep->txframe); /* send the data */ ep_txframe_handle()
1420 static int qe_ep_txconf(struct qe_ep *ep) qe_ep_txconf() argument
1427 bd = ep->c_txbd; qe_ep_txconf()
1430 pframe = ep->txframe; qe_ep_txconf()
1443 ep->c_txbd = ep->txbase; qe_ep_txconf()
1445 ep->c_txbd++; qe_ep_txconf()
1448 ep_txframe_handle(ep); qe_ep_txconf()
1449 bd = ep->c_txbd; qe_ep_txconf()
1459 static int ep_req_send(struct qe_ep *ep, struct qe_req *req) ep_req_send() argument
1463 if (ep->tx_req == NULL) { ep_req_send()
1464 ep->sent = 0; ep_req_send()
1465 ep->last = 0; ep_req_send()
1466 txcomplete(ep, 0); /* can gain a new tx_req */ ep_req_send()
1467 reval = frame_create_tx(ep, ep->txframe); ep_req_send()
1473 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req) ep_req_rx() argument
1475 struct qe_udc *udc = ep->udc; ep_req_rx()
1484 if (list_empty(&ep->queue)) { ep_req_rx()
1488 pframe = ep->rxframe; ep_req_rx()
1490 bd = ep->n_rxbd; ep_req_rx()
1517 if (framepid != ep->data01) { ep_req_rx()
1526 if ((fsize < ep->ep.maxpacket) ep_req_rx()
1530 done(ep, req, 0); ep_req_rx()
1531 if (list_empty(&ep->queue)) ep_req_rx()
1532 qe_eprx_nack(ep); ep_req_rx()
1535 qe_ep_toggledata01(ep); ep_req_rx()
1544 ep->has_data--; ep_req_rx()
1548 bd = ep->rxbase; ep_req_rx()
1556 ep->n_rxbd = bd; ep_req_rx()
1557 ep_recycle_rxbds(ep); ep_req_rx()
1563 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req) ep_req_receive() argument
1565 if (ep->state == EP_STATE_NACK) { ep_req_receive()
1566 if (ep->has_data <= 0) { ep_req_receive()
1568 qe_eprx_normal(ep); ep_req_receive()
1571 ep_req_rx(ep, req); ep_req_receive()
1589 struct qe_ep *ep; qe_ep_enable() local
1593 ep = container_of(_ep, struct qe_ep, ep); qe_ep_enable()
1600 udc = ep->udc; qe_ep_enable()
1608 cpm_muram_free(cpm_muram_offset(ep->rxbase)); qe_ep_enable()
1609 dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum); qe_ep_enable()
1612 dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum); qe_ep_enable()
1619 struct qe_ep *ep; qe_ep_disable() local
1623 ep = container_of(_ep, struct qe_ep, ep); qe_ep_disable()
1624 udc = ep->udc; qe_ep_disable()
1626 if (!_ep || !ep->ep.desc) { qe_ep_disable()
1627 dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL); qe_ep_disable()
1633 nuke(ep, -ESHUTDOWN); qe_ep_disable()
1634 ep->ep.desc = NULL; qe_ep_disable()
1635 ep->stopped = 1; qe_ep_disable()
1636 ep->tx_req = NULL; qe_ep_disable()
1637 qe_ep_reset(udc, ep->epnum); qe_ep_disable()
1640 cpm_muram_free(cpm_muram_offset(ep->rxbase)); qe_ep_disable()
1642 if (ep->dir == USB_DIR_OUT) qe_ep_disable()
1643 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * qe_ep_disable()
1646 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * qe_ep_disable()
1649 if (ep->dir != USB_DIR_IN) { qe_ep_disable()
1650 kfree(ep->rxframe); qe_ep_disable()
1651 if (ep->rxbufmap) { qe_ep_disable()
1653 ep->rxbuf_d, size, qe_ep_disable()
1655 ep->rxbuf_d = DMA_ADDR_INVALID; qe_ep_disable()
1659 ep->rxbuf_d, size, qe_ep_disable()
1662 kfree(ep->rxbuffer); qe_ep_disable()
1665 if (ep->dir != USB_DIR_OUT) qe_ep_disable()
1666 kfree(ep->txframe); qe_ep_disable()
1699 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); __qe_ep_queue() local
1704 udc = ep->udc; __qe_ep_queue()
1711 if (!_ep || (!ep->ep.desc && ep_index(ep))) { __qe_ep_queue()
1712 dev_dbg(udc->dev, "bad ep\n"); __qe_ep_queue()
1719 req->ep = ep; __qe_ep_queue()
1723 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, __qe_ep_queue()
1726 ep_is_in(ep) __qe_ep_queue()
1731 dma_sync_single_for_device(ep->udc->gadget.dev.parent, __qe_ep_queue()
1733 ep_is_in(ep) __qe_ep_queue()
1742 list_add_tail(&req->queue, &ep->queue); __qe_ep_queue()
1744 ep->name, req->req.length); __qe_ep_queue()
1747 if (ep_is_in(ep)) __qe_ep_queue()
1748 reval = ep_req_send(ep, req); __qe_ep_queue()
1751 if (ep_index(ep) == 0 && req->req.length > 0) { __qe_ep_queue()
1752 if (ep_is_in(ep)) __qe_ep_queue()
1758 if (ep->dir == USB_DIR_OUT) __qe_ep_queue()
1759 reval = ep_req_receive(ep, req); __qe_ep_queue()
1768 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); qe_ep_queue() local
1769 struct qe_udc *udc = ep->udc; qe_ep_queue()
1782 struct qe_ep *ep = container_of(_ep, struct qe_ep, ep); qe_ep_dequeue() local
1789 spin_lock_irqsave(&ep->udc->lock, flags); qe_ep_dequeue()
1792 list_for_each_entry(req, &ep->queue, queue) { qe_ep_dequeue()
1798 spin_unlock_irqrestore(&ep->udc->lock, flags); qe_ep_dequeue()
1802 done(ep, req, -ECONNRESET); qe_ep_dequeue()
1804 spin_unlock_irqrestore(&ep->udc->lock, flags); qe_ep_dequeue()
1810 * @ep: the non-isochronous endpoint being stalled
1816 struct qe_ep *ep; qe_ep_set_halt() local
1821 ep = container_of(_ep, struct qe_ep, ep); qe_ep_set_halt()
1822 if (!_ep || !ep->ep.desc) { qe_ep_set_halt()
1827 udc = ep->udc; qe_ep_set_halt()
1828 /* Attempt to halt IN ep will fail if any transfer requests qe_ep_set_halt()
1830 if (value && ep_is_in(ep) && !list_empty(&ep->queue)) { qe_ep_set_halt()
1836 spin_lock_irqsave(&ep->udc->lock, flags); qe_ep_set_halt()
1837 qe_eptx_stall_change(ep, value); qe_ep_set_halt()
1838 qe_eprx_stall_change(ep, value); qe_ep_set_halt()
1839 spin_unlock_irqrestore(&ep->udc->lock, flags); qe_ep_set_halt()
1841 if (ep->epnum == 0) { qe_ep_set_halt()
1848 ep->data01 = 0; qe_ep_set_halt()
1850 dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name, qe_ep_set_halt()
1904 struct qe_ep *ep = &udc->eps[pipe]; udc_reset_ep_queue() local
1906 nuke(ep, -ECONNRESET); udc_reset_ep_queue()
1907 ep->tx_req = NULL; udc_reset_ep_queue()
1952 struct qe_ep *ep; ch9getstatus() local
1955 ep = &udc->eps[0]; ch9getstatus()
1970 if (!target_ep->ep.desc) ch9getstatus()
1987 req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL), ch9getstatus()
1999 status = __qe_ep_queue(&ep->ep, &req->req); ch9getstatus()
2053 struct qe_ep *ep; setup_received_handle() local
2058 ep = &udc->eps[pipe]; setup_received_handle()
2061 qe_ep_set_halt(&ep->ep, setup_received_handle()
2174 struct qe_ep *ep; tx_irq() local
2183 ep = &udc->eps[i]; tx_irq()
2184 if (ep && ep->init && (ep->dir != USB_DIR_OUT)) { tx_irq()
2185 bd = ep->c_txbd; tx_irq()
2189 if (ep->epnum == 0) tx_irq()
2190 res = qe_ep0_txconf(ep); tx_irq()
2192 res = qe_ep_txconf(ep); tx_irq()
2203 struct qe_ep *ep; rx_irq() local
2208 ep = &udc->eps[i]; rx_irq()
2209 if (ep && ep->init && (ep->dir != USB_DIR_IN)) { rx_irq()
2210 bd = ep->n_rxbd; rx_irq()
2213 if (ep->epnum == 0) { rx_irq()
2217 qe_ep_rx(ep); rx_irq()
2329 list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list) fsl_qe_stop()
2338 /* udc structure's alloc and setup, include ep-param alloc */ qe_udc_config()
2414 struct qe_ep *ep = &udc->eps[pipe_num]; qe_ep_config() local
2416 ep->udc = udc; qe_ep_config()
2417 strcpy(ep->name, ep_name[pipe_num]); qe_ep_config()
2418 ep->ep.name = ep_name[pipe_num]; qe_ep_config()
2421 ep->ep.caps.type_control = true; qe_ep_config()
2423 ep->ep.caps.type_iso = true; qe_ep_config()
2424 ep->ep.caps.type_bulk = true; qe_ep_config()
2425 ep->ep.caps.type_int = true; qe_ep_config()
2428 ep->ep.caps.dir_in = true; qe_ep_config()
2429 ep->ep.caps.dir_out = true; qe_ep_config()
2431 ep->ep.ops = &qe_ep_ops; qe_ep_config()
2432 ep->stopped = 1; qe_ep_config()
2433 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); qe_ep_config()
2434 ep->ep.desc = NULL; qe_ep_config()
2435 ep->dir = 0xff; qe_ep_config()
2436 ep->epnum = (u8)pipe_num; qe_ep_config()
2437 ep->sent = 0; qe_ep_config()
2438 ep->last = 0; qe_ep_config()
2439 ep->init = 0; qe_ep_config()
2440 ep->rxframe = NULL; qe_ep_config()
2441 ep->txframe = NULL; qe_ep_config()
2442 ep->tx_req = NULL; qe_ep_config()
2443 ep->state = EP_STATE_IDLE; qe_ep_config()
2444 ep->has_data = 0; qe_ep_config()
2446 /* the queue lists any req for this ep */ qe_ep_config()
2447 INIT_LIST_HEAD(&ep->queue); qe_ep_config()
2451 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); qe_ep_config()
2453 ep->gadget = &udc->gadget; qe_ep_config()
2481 struct qe_ep *ep; qe_udc_probe() local
2517 udc->gadget.ep0 = &udc->eps[0].ep; qe_udc_probe()
2530 /* because the ep type isn't decided here, so qe_udc_probe()
2533 /* setup the qe_ep struct and link ep.ep.list qe_udc_probe()
2615 ep = &udc->eps[0]; qe_udc_probe()
2616 cpm_muram_free(cpm_muram_offset(ep->rxbase)); qe_udc_probe()
2617 kfree(ep->rxframe); qe_udc_probe()
2618 kfree(ep->rxbuffer); qe_udc_probe()
2619 kfree(ep->txframe); qe_udc_probe()
2642 struct qe_ep *ep; qe_udc_remove() local
2662 ep = &udc->eps[0]; qe_udc_remove()
2663 cpm_muram_free(cpm_muram_offset(ep->rxbase)); qe_udc_remove()
2664 size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1); qe_udc_remove()
2666 kfree(ep->rxframe); qe_udc_remove()
2667 if (ep->rxbufmap) { qe_udc_remove()
2669 ep->rxbuf_d, size, qe_udc_remove()
2671 ep->rxbuf_d = DMA_ADDR_INVALID; qe_udc_remove()
2674 ep->rxbuf_d, size, qe_udc_remove()
2678 kfree(ep->rxbuffer); qe_udc_remove()
2679 kfree(ep->txframe); qe_udc_remove()
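Every buffer-descriptor walk in fsl_qe_udc.c above (recycle_one_rxbd(), qe_ep_tx(), qe_ep0_txconf(), ep_req_rx()) follows one pattern: the cursor steps forward until it hits the descriptor carrying the ring's wrap flag, then returns to the ring base. A userspace sketch of that cursor movement; BD_WRAP's value and the ring length are assumptions, not the QE register layout:

#include <stdio.h>

#define BD_WRAP		0x2000u		/* assumed wrap bit in bd.status */
#define RING_LEN	4

struct bd { unsigned int status; };

static struct bd ring[RING_LEN];

static struct bd *bd_advance(struct bd *cur)
{
	if (cur->status & BD_WRAP)	/* last BD: wrap to base */
		return &ring[0];
	return cur + 1;			/* otherwise step forward */
}

int main(void)
{
	struct bd *cur = &ring[0];
	int i;

	ring[RING_LEN - 1].status = BD_WRAP;
	for (i = 0; i < 6; i++) {
		printf("%td ", cur - ring);
		cur = bd_advance(cur);
	}
	printf("\n");			/* 0 1 2 3 0 1 */
	return 0;
}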
goku_udc.c
17 * - Endpoint numbering is fixed: ep{1,2,3}-bulk
18 * - Gadget drivers can choose ep maxpacket (8/16/32/64)
97 struct goku_ep *ep; goku_ep_enable() local
102 ep = container_of(_ep, struct goku_ep, ep); goku_ep_enable()
106 dev = ep->dev; goku_ep_enable()
107 if (ep == &dev->ep[0]) goku_ep_enable()
111 if (ep->num != usb_endpoint_num(desc)) goku_ep_enable()
122 if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK) goku_ep_enable()
143 ep->is_in = usb_endpoint_dir_in(desc); goku_ep_enable()
144 if (ep->is_in) { goku_ep_enable()
146 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT); goku_ep_enable()
148 ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT); goku_ep_enable()
149 if (ep->dma) goku_ep_enable()
151 ep->ep.name); goku_ep_enable()
154 spin_lock_irqsave(&ep->dev->lock, flags); goku_ep_enable()
157 if (ep->num < 3) { goku_ep_enable()
158 struct goku_udc_regs __iomem *regs = ep->dev->regs; goku_ep_enable()
162 tmp = ((ep->dma || !ep->is_in) goku_ep_enable()
165 ) << ep->num; goku_ep_enable()
169 tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num; goku_ep_enable()
173 writel(mode, ep->reg_mode); goku_ep_enable()
174 command(ep->dev->regs, COMMAND_RESET, ep->num); goku_ep_enable()
175 ep->ep.maxpacket = max; goku_ep_enable()
176 ep->stopped = 0; goku_ep_enable()
177 ep->ep.desc = desc; goku_ep_enable()
178 spin_unlock_irqrestore(&ep->dev->lock, flags); goku_ep_enable()
180 DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name, goku_ep_enable()
181 ep->is_in ? "IN" : "OUT", goku_ep_enable()
182 ep->dma ? "dma" : "pio", goku_ep_enable()
188 static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep) ep_reset() argument
190 struct goku_udc *dev = ep->dev; ep_reset()
193 command(regs, COMMAND_INVALID, ep->num); ep_reset()
194 if (ep->num) { ep_reset()
195 if (ep->num == UDC_MSTWR_ENDPOINT) ep_reset()
198 else if (ep->num == UDC_MSTRD_ENDPOINT) ep_reset()
200 dev->int_enable &= ~INT_EPxDATASET (ep->num); ep_reset()
205 if (ep->num < 3) { ep_reset()
206 struct goku_udc_regs __iomem *r = ep->dev->regs; ep_reset()
210 tmp &= ~(0x11 << ep->num); ep_reset()
214 tmp &= ~(0x11 << ep->num); ep_reset()
218 if (ep->dma) { ep_reset()
222 if (ep->num == UDC_MSTWR_ENDPOINT) { ep_reset()
233 usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE); ep_reset()
234 ep->ep.desc = NULL; ep_reset()
235 ep->stopped = 1; ep_reset()
236 ep->irqs = 0; ep_reset()
237 ep->dma = 0; ep_reset()
242 struct goku_ep *ep; goku_ep_disable() local
246 ep = container_of(_ep, struct goku_ep, ep); goku_ep_disable()
247 if (!_ep || !ep->ep.desc) goku_ep_disable()
249 dev = ep->dev; goku_ep_disable()
256 nuke(ep, -ESHUTDOWN); goku_ep_disable()
257 ep_reset(dev->regs, ep); goku_ep_disable()
296 done(struct goku_ep *ep, struct goku_request *req, int status) done() argument
299 unsigned stopped = ep->stopped; done()
308 dev = ep->dev; done()
310 if (ep->dma) done()
311 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); done()
317 ep->ep.name, &req->req, status, done()
321 ep->stopped = 1; done()
323 usb_gadget_giveback_request(&ep->ep, &req->req); done()
325 ep->stopped = stopped; done()
345 static int write_fifo(struct goku_ep *ep, struct goku_request *req) write_fifo() argument
347 struct goku_udc *dev = ep->dev; write_fifo()
357 dev = ep->dev; write_fifo()
358 if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN)) write_fifo()
362 if (unlikely((tmp & DATASET_A(ep->num)) != 0)) write_fifo()
366 if (ep->num != 0) write_fifo()
367 writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status); write_fifo()
369 count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket); write_fifo()
372 if (unlikely(count != ep->ep.maxpacket)) { write_fifo()
373 writel(~(1<<ep->num), &dev->regs->EOP); write_fifo()
374 if (ep->num == 0) { write_fifo()
375 dev->ep[0].stopped = 1; write_fifo()
389 ep->ep.name, count, is_last ? "/last" : "", write_fifo()
397 done(ep, req, 0); write_fifo()
404 static int read_fifo(struct goku_ep *ep, struct goku_request *req) read_fifo() argument
411 regs = ep->dev->regs; read_fifo()
416 if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT)) read_fifo()
419 dbuff = (ep->num == 1 || ep->num == 2); read_fifo()
422 if (ep->num != 0) read_fifo()
423 writel(~INT_EPxDATASET(ep->num), &regs->int_status); read_fifo()
425 set = readl(&regs->DataSet) & DATASET_AB(ep->num); read_fifo()
426 size = readl(&regs->EPxSizeLA[ep->num]); read_fifo()
430 if (likely(ep->num != 0 || bufferspace != 0)) { read_fifo()
435 size = readl(&regs->EPxSizeLB[ep->num]); read_fifo()
446 is_short = (size < ep->ep.maxpacket); read_fifo()
448 VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n", read_fifo()
449 ep->ep.name, size, is_short ? "/S" : "", read_fifo()
453 u8 byte = (u8) readl(ep->reg_fifo); read_fifo()
461 DBG(ep->dev, "%s overflow %u\n", read_fifo()
462 ep->ep.name, size); read_fifo()
472 if (unlikely(ep->num == 0)) { read_fifo()
474 if (ep->dev->req_config) read_fifo()
475 writel(ep->dev->configured read_fifo()
481 ep->stopped = 1; read_fifo()
482 ep->dev->ep0state = EP0_STATUS; read_fifo()
484 done(ep, req, 0); read_fifo()
487 if (dbuff && !list_empty(&ep->queue)) { read_fifo()
488 req = list_entry(ep->queue.next, read_fifo()
517 pio_advance(struct goku_ep *ep) pio_advance() argument
521 if (unlikely(list_empty (&ep->queue))) pio_advance()
523 req = list_entry(ep->queue.next, struct goku_request, queue); pio_advance()
524 (ep->is_in ? write_fifo : read_fifo)(ep, req); pio_advance()
531 static int start_dma(struct goku_ep *ep, struct goku_request *req) start_dma() argument
533 struct goku_udc_regs __iomem *regs = ep->dev->regs; start_dma()
541 if (likely(ep->is_in)) { start_dma()
543 DBG (ep->dev, "start, IN active dma %03x!!\n", start_dma()
553 else if ((req->req.length % ep->ep.maxpacket) != 0 start_dma()
559 ep->dev->int_enable |= INT_MSTRDEND; start_dma()
568 DBG (ep->dev, "start, OUT active dma %03x!!\n", start_dma()
578 ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT; start_dma()
582 writel(ep->dev->int_enable, &regs->int_enable); start_dma()
586 static void dma_advance(struct goku_udc *dev, struct goku_ep *ep) dma_advance() argument
589 struct goku_udc_regs __iomem *regs = ep->dev->regs; dma_advance()
594 if (unlikely(list_empty(&ep->queue))) { dma_advance()
596 if (ep->is_in) dma_advance()
603 req = list_entry(ep->queue.next, struct goku_request, queue); dma_advance()
606 if (likely(ep->is_in)) { dma_advance()
624 ep->ep.name, ep->is_in ? "IN" : "OUT", dma_advance()
627 done(ep, req, 0); dma_advance()
628 if (list_empty(&ep->queue)) dma_advance()
630 req = list_entry(ep->queue.next, struct goku_request, queue); dma_advance()
631 (void) start_dma(ep, req); dma_advance()
634 static void abort_dma(struct goku_ep *ep, int status) abort_dma() argument
636 struct goku_udc_regs __iomem *regs = ep->dev->regs; abort_dma()
647 command(regs, COMMAND_FIFO_DISABLE, ep->num); abort_dma()
648 req = list_entry(ep->queue.next, struct goku_request, queue); abort_dma()
656 if (ep->is_in) { abort_dma()
669 DBG(ep->dev, "IN dma active after reset!\n"); abort_dma()
684 DBG(ep->dev, "OUT dma active after reset!\n"); abort_dma()
689 VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name, abort_dma()
690 ep->is_in ? "IN" : "OUT", abort_dma()
693 command(regs, COMMAND_FIFO_ENABLE, ep->num); abort_dma()
699 command(regs, COMMAND_FIFO_ENABLE, ep->num); abort_dma()
710 struct goku_ep *ep; goku_queue() local
720 ep = container_of(_ep, struct goku_ep, ep); goku_queue()
721 if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0))) goku_queue()
723 dev = ep->dev; goku_queue()
732 if (ep->dma) { goku_queue()
734 ep->is_in); goku_queue()
752 if (unlikely(ep->num == 0 && ep->is_in)) goku_queue()
757 if (list_empty(&ep->queue) && likely(!ep->stopped)) { goku_queue()
761 if (ep->dma) goku_queue()
762 status = start_dma(ep, req); goku_queue()
764 status = (ep->is_in ? write_fifo : read_fifo)(ep, req); goku_queue()
775 list_add_tail(&req->queue, &ep->queue); goku_queue()
777 if (likely(!list_empty(&ep->queue)) goku_queue()
778 && likely(ep->num != 0) goku_queue()
779 && !ep->dma goku_queue()
780 && !(dev->int_enable & INT_EPxDATASET (ep->num))) goku_queue()
781 pio_irq_enable(dev, dev->regs, ep->num); goku_queue()
790 static void nuke(struct goku_ep *ep, int status) nuke() argument
794 ep->stopped = 1; nuke()
795 if (list_empty(&ep->queue)) nuke()
797 if (ep->dma) nuke()
798 abort_dma(ep, status); nuke()
799 while (!list_empty(&ep->queue)) { nuke()
800 req = list_entry(ep->queue.next, struct goku_request, queue); nuke()
801 done(ep, req, status); nuke()
809 struct goku_ep *ep; goku_dequeue() local
813 ep = container_of(_ep, struct goku_ep, ep); goku_dequeue()
814 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0)) goku_dequeue()
816 dev = ep->dev; goku_dequeue()
825 ep->is_in ? "IN" : "OUT", goku_dequeue()
826 ep->dma ? "dma" : "pio", goku_dequeue()
832 list_for_each_entry (req, &ep->queue, queue) { goku_dequeue()
841 if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) { goku_dequeue()
842 abort_dma(ep, -ECONNRESET); goku_dequeue()
843 done(ep, req, -ECONNRESET); goku_dequeue()
844 dma_advance(dev, ep); goku_dequeue()
846 done(ep, req, -ECONNRESET); goku_dequeue()
856 static void goku_clear_halt(struct goku_ep *ep) goku_clear_halt() argument
858 // assert(ep->num != 0) goku_clear_halt()
859 VDBG(ep->dev, "%s clear halt\n", ep->ep.name); goku_clear_halt()
860 command(ep->dev->regs, COMMAND_SETDATA0, ep->num); goku_clear_halt()
861 command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num); goku_clear_halt()
862 if (ep->stopped) { goku_clear_halt()
863 ep->stopped = 0; goku_clear_halt()
864 if (ep->dma) { goku_clear_halt()
867 if (list_empty(&ep->queue)) goku_clear_halt()
869 req = list_entry(ep->queue.next, struct goku_request, goku_clear_halt()
871 (void) start_dma(ep, req); goku_clear_halt()
873 pio_advance(ep); goku_clear_halt()
879 struct goku_ep *ep; goku_set_halt() local
885 ep = container_of (_ep, struct goku_ep, ep); goku_set_halt()
887 if (ep->num == 0) { goku_set_halt()
889 ep->dev->ep0state = EP0_STALL; goku_set_halt()
890 ep->dev->ep[0].stopped = 1; goku_set_halt()
895 } else if (!ep->ep.desc) { goku_set_halt()
896 DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name); goku_set_halt()
900 spin_lock_irqsave(&ep->dev->lock, flags); goku_set_halt()
901 if (!list_empty(&ep->queue)) goku_set_halt()
903 else if (ep->is_in && value goku_set_halt()
905 && (readl(&ep->dev->regs->DataSet) goku_set_halt()
906 & DATASET_AB(ep->num))) goku_set_halt()
909 goku_clear_halt(ep); goku_set_halt()
911 ep->stopped = 1; goku_set_halt()
912 VDBG(ep->dev, "%s set halt\n", ep->ep.name); goku_set_halt()
913 command(ep->dev->regs, COMMAND_STALL, ep->num); goku_set_halt()
914 readl(ep->reg_status); goku_set_halt()
916 spin_unlock_irqrestore(&ep->dev->lock, flags); goku_set_halt()
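
Note how goku_set_halt() refuses to stall while the queue is non-empty and, for IN endpoints, while the hardware still holds data (the DataSet test): callers of usb_ep_set_halt() see that as -EAGAIN. A hedged caller-side sketch; retrying after a short sleep is a convention also seen in the mass-storage function, and my_stall_ep is a hypothetical name:

#include <linux/delay.h>
#include <linux/usb/gadget.h>

static void my_stall_ep(struct usb_ep *ep)
{
	/* -EAGAIN: queued IN data must drain before the stall can be set */
	while (usb_ep_set_halt(ep) == -EAGAIN)
		msleep(10);
}
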
922 struct goku_ep *ep; goku_fifo_status() local
928 ep = container_of(_ep, struct goku_ep, ep); goku_fifo_status()
931 if (ep->is_in) goku_fifo_status()
935 regs = ep->dev->regs; goku_fifo_status()
936 size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE; goku_fifo_status()
937 size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE; goku_fifo_status()
938 VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size); goku_fifo_status()
944 struct goku_ep *ep; goku_fifo_flush() local
950 ep = container_of(_ep, struct goku_ep, ep); goku_fifo_flush()
951 VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name); goku_fifo_flush()
954 if (!ep->ep.desc && ep->num != 0) { goku_fifo_flush()
955 DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name); goku_fifo_flush()
959 regs = ep->dev->regs; goku_fifo_flush()
960 size = readl(&regs->EPxSizeLA[ep->num]); goku_fifo_flush()
965 * the bytes out (PIO, if !ep->dma); for in, no choice. goku_fifo_flush()
968 command(regs, COMMAND_FIFO_CLEAR, ep->num); goku_fifo_flush()
998 struct usb_ep *ep; goku_match_ep() local
1003 ep = &dev->ep[3].ep; goku_match_ep()
1004 if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp)) goku_match_ep()
1005 return ep; goku_match_ep()
1010 ep = &dev->ep[2].ep; goku_match_ep()
1011 if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp)) goku_match_ep()
1012 return ep; goku_match_ep()
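
goku_match_ep() is this gadget's ->match_ep hook: in this kernel's epautoconf core it is consulted before the generic walk of gadget->ep_list via usb_gadget_ep_match_desc(). Function drivers never call it directly; they go through usb_ep_autoconfig(), roughly as in this sketch (my_bind_ep is illustrative):

#include <linux/usb/gadget.h>

static int my_bind_ep(struct usb_gadget *gadget,
		      struct usb_endpoint_descriptor *desc)
{
	struct usb_ep *ep = usb_ep_autoconfig(gadget, desc);

	if (!ep)
		return -ENODEV;	/* no capable, unclaimed endpoint */
	/* autoconfig filled in desc->bEndpointAddress and claimed the ep */
	return 0;
}
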
1193 struct goku_ep *ep = &dev->ep [i]; udc_proc_read() local
1196 if (i && !ep->ep.desc) udc_proc_read()
1199 tmp = readl(ep->reg_status); udc_proc_read()
1201 ep->ep.name, udc_proc_read()
1202 ep->is_in ? "in" : "out", udc_proc_read()
1203 ep->ep.maxpacket, udc_proc_read()
1204 ep->dma ? "dma" : "pio", udc_proc_read()
1205 ep->irqs, udc_proc_read()
1214 if (list_empty(&ep->queue)) { udc_proc_read()
1220 list_for_each_entry(req, &ep->queue, queue) { udc_proc_read()
1221 if (ep->dma && req->queue.prev == &ep->queue) { udc_proc_read()
1270 dev->gadget.ep0 = &dev->ep [0].ep; udc_reinit()
1276 struct goku_ep *ep = &dev->ep[i]; udc_reinit() local
1278 ep->num = i; udc_reinit()
1279 ep->ep.name = names[i]; udc_reinit()
1280 ep->reg_fifo = &dev->regs->ep_fifo [i]; udc_reinit()
1281 ep->reg_status = &dev->regs->ep_status [i]; udc_reinit()
1282 ep->reg_mode = &dev->regs->ep_mode[i]; udc_reinit()
1284 ep->ep.ops = &goku_ep_ops; udc_reinit()
1285 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list); udc_reinit()
1286 ep->dev = dev; udc_reinit()
1287 INIT_LIST_HEAD (&ep->queue); udc_reinit()
1289 ep_reset(NULL, ep); udc_reinit()
1292 ep->ep.caps.type_control = true; udc_reinit()
1294 ep->ep.caps.type_bulk = true; udc_reinit()
1296 ep->ep.caps.dir_in = true; udc_reinit()
1297 ep->ep.caps.dir_out = true; udc_reinit()
1300 dev->ep[0].reg_mode = NULL; udc_reinit()
1301 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE); udc_reinit()
1302 list_del_init (&dev->ep[0].ep.ep_list); udc_reinit()
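
Two details of udc_reinit() are worth calling out: every endpoint is linked onto dev->gadget.ep_list, and ep0 is then removed again (the list_del_init() above) because ep0 is addressed only through gadget->ep0. Gadget code walks the remaining endpoints with the stock helper, sketched here with a hypothetical my_dump_eps():

#include <linux/usb/gadget.h>

static void my_dump_eps(struct usb_gadget *gadget)
{
	struct usb_ep *tmp;

	/* iterates gadget->ep_list, so ep0 never shows up here */
	gadget_for_each_ep(tmp, gadget)
		pr_debug("%s maxpacket %d\n", tmp->name, tmp->maxpacket);
}
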
1341 dev->ep[i].irqs = 0; ep0_start()
1409 nuke(&dev->ep [i], -ESHUTDOWN); stop_activity()
1447 nuke(&dev->ep[0], 0); ep0_setup()
1448 dev->ep[0].stopped = 0; ep0_setup()
1450 dev->ep[0].is_in = 1; ep0_setup()
1455 dev->ep[0].is_in = 0; ep0_setup()
1469 (!dev->ep[tmp].ep.desc && tmp != 0)) ep0_setup()
1473 if (!dev->ep[tmp].is_in) ep0_setup()
1476 if (dev->ep[tmp].is_in) ep0_setup()
1483 goku_clear_halt(&dev->ep[tmp]); ep0_setup()
1487 dev->ep[0].stopped = 1; ep0_setup()
1533 dev->ep[0].stopped = 1; ep0_setup()
1550 struct goku_ep *ep; goku_irq() local
1640 dev->ep[0].irqs++; goku_irq()
1646 ep = &dev->ep[0]; goku_irq()
1647 ep->irqs++; goku_irq()
1648 nuke(ep, 0); goku_irq()
1655 ep = &dev->ep[0]; goku_irq()
1656 ep->irqs++; goku_irq()
1657 pio_advance(ep); goku_irq()
1663 ep = &dev->ep[UDC_MSTRD_ENDPOINT]; goku_irq()
1664 ep->irqs++; goku_irq()
1665 dma_advance(dev, ep); goku_irq()
1669 ep = &dev->ep[UDC_MSTWR_ENDPOINT]; goku_irq()
1670 ep->irqs++; goku_irq()
1671 dma_advance(dev, ep); goku_irq()
1675 ep = &dev->ep[UDC_MSTWR_ENDPOINT]; goku_irq()
1676 ep->irqs++; goku_irq()
1677 ERROR(dev, "%s write timeout ?\n", ep->ep.name); goku_irq()
1687 ep = &dev->ep[i]; goku_irq()
1688 pio_advance(ep); goku_irq()
1689 if (list_empty (&ep->queue)) goku_irq()
1693 ep->irqs++; goku_irq()
r8a66597-udc.c
38 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
39 static void irq_packet_write(struct r8a66597_ep *ep,
44 static void transfer_complete(struct r8a66597_ep *ep,
101 INIT_LIST_HEAD(&r8a66597->ep[0].queue);
301 struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum]; pipe_change() local
303 if (ep->use_dma) pipe_change()
304 r8a66597_bclr(r8a66597, DREQE, ep->fifosel); pipe_change()
306 r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel); pipe_change()
310 if (r8a66597_is_sudmac(r8a66597) && ep->use_dma) pipe_change()
311 r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel); pipe_change()
313 r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel); pipe_change()
315 if (ep->use_dma) pipe_change()
316 r8a66597_bset(r8a66597, DREQE, ep->fifosel); pipe_change()
394 static void pipe_initialize(struct r8a66597_ep *ep) pipe_initialize() argument
396 struct r8a66597 *r8a66597 = ep->r8a66597; pipe_initialize()
398 r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel); pipe_initialize()
400 r8a66597_write(r8a66597, ACLRM, ep->pipectr); pipe_initialize()
401 r8a66597_write(r8a66597, 0, ep->pipectr); pipe_initialize()
402 r8a66597_write(r8a66597, SQCLR, ep->pipectr); pipe_initialize()
403 if (ep->use_dma) { pipe_initialize()
404 r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel); pipe_initialize()
408 r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel); pipe_initialize()
413 struct r8a66597_ep *ep, r8a66597_ep_setting()
417 ep->use_dma = 0; r8a66597_ep_setting()
418 ep->fifoaddr = CFIFO; r8a66597_ep_setting()
419 ep->fifosel = CFIFOSEL; r8a66597_ep_setting()
420 ep->fifoctr = CFIFOCTR; r8a66597_ep_setting()
422 ep->pipectr = get_pipectr_addr(pipenum); r8a66597_ep_setting()
424 ep->pipetre = get_pipetre_addr(pipenum); r8a66597_ep_setting()
425 ep->pipetrn = get_pipetrn_addr(pipenum); r8a66597_ep_setting()
427 ep->pipetre = 0; r8a66597_ep_setting()
428 ep->pipetrn = 0; r8a66597_ep_setting()
430 ep->pipenum = pipenum; r8a66597_ep_setting()
431 ep->ep.maxpacket = usb_endpoint_maxp(desc); r8a66597_ep_setting()
432 r8a66597->pipenum2ep[pipenum] = ep; r8a66597_ep_setting()
434 = ep; r8a66597_ep_setting()
435 INIT_LIST_HEAD(&ep->queue); r8a66597_ep_setting()
438 static void r8a66597_ep_release(struct r8a66597_ep *ep) r8a66597_ep_release() argument
440 struct r8a66597 *r8a66597 = ep->r8a66597; r8a66597_ep_release()
441 u16 pipenum = ep->pipenum; r8a66597_ep_release()
446 if (ep->use_dma) r8a66597_ep_release()
448 ep->pipenum = 0; r8a66597_ep_release()
449 ep->busy = 0; r8a66597_ep_release()
450 ep->use_dma = 0; r8a66597_ep_release()
453 static int alloc_pipe_config(struct r8a66597_ep *ep, alloc_pipe_config() argument
456 struct r8a66597 *r8a66597 = ep->r8a66597; alloc_pipe_config()
462 ep->ep.desc = desc; alloc_pipe_config()
464 if (ep->pipenum) /* already allocated pipe */ alloc_pipe_config()
510 ep->type = info.type; alloc_pipe_config()
531 r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma); alloc_pipe_config()
532 pipe_initialize(ep); alloc_pipe_config()
537 static int free_pipe_config(struct r8a66597_ep *ep) free_pipe_config() argument
539 struct r8a66597 *r8a66597 = ep->r8a66597; free_pipe_config()
542 info.pipe = ep->pipenum; free_pipe_config()
543 info.type = ep->type; free_pipe_config()
545 r8a66597_ep_release(ep); free_pipe_config()
566 r8a66597->ep[0].internal_ccpl = ccpl; control_end()
571 static void start_ep0_write(struct r8a66597_ep *ep, start_ep0_write() argument
574 struct r8a66597 *r8a66597 = ep->r8a66597; start_ep0_write()
576 pipe_change(r8a66597, ep->pipenum); start_ep0_write()
578 r8a66597_write(r8a66597, BCLR, ep->fifoctr); start_ep0_write()
580 r8a66597_bset(r8a66597, BVAL, ep->fifoctr); start_ep0_write()
582 transfer_complete(ep, req, 0); start_ep0_write()
585 irq_ep0_write(ep, req); start_ep0_write()
602 struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum]; change_bfre_mode() local
626 r8a66597_bset(r8a66597, ACLRM, ep->pipectr); change_bfre_mode()
627 r8a66597_bclr(r8a66597, ACLRM, ep->pipectr); change_bfre_mode()
633 struct r8a66597_ep *ep, sudmac_alloc_channel()
642 if (!is_bulk_pipe(ep->pipenum)) sudmac_alloc_channel()
651 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) { sudmac_alloc_channel()
655 change_bfre_mode(r8a66597, ep->pipenum, 1); sudmac_alloc_channel()
659 ep->use_dma = 1; sudmac_alloc_channel()
660 ep->dma = dma; sudmac_alloc_channel()
661 ep->fifoaddr = D0FIFO; sudmac_alloc_channel()
662 ep->fifosel = D0FIFOSEL; sudmac_alloc_channel()
663 ep->fifoctr = D0FIFOCTR; sudmac_alloc_channel()
670 struct r8a66597_ep *ep, sudmac_free_channel()
676 usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir); sudmac_free_channel()
678 r8a66597_bclr(r8a66597, DREQE, ep->fifosel); sudmac_free_channel()
679 r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel); sudmac_free_channel()
681 ep->dma->used = 0; sudmac_free_channel()
682 ep->use_dma = 0; sudmac_free_channel()
683 ep->fifoaddr = CFIFO; sudmac_free_channel()
684 ep->fifosel = CFIFOSEL; sudmac_free_channel()
685 ep->fifoctr = CFIFOCTR; sudmac_free_channel()
688 static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, sudmac_start() argument
701 static void start_packet_write(struct r8a66597_ep *ep, start_packet_write() argument
704 struct r8a66597 *r8a66597 = ep->r8a66597; start_packet_write()
707 pipe_change(r8a66597, ep->pipenum); start_packet_write()
708 disable_irq_empty(r8a66597, ep->pipenum); start_packet_write()
709 pipe_start(r8a66597, ep->pipenum); start_packet_write()
712 transfer_complete(ep, req, 0); start_packet_write()
714 r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS); start_packet_write()
715 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) { start_packet_write()
717 pipe_change(r8a66597, ep->pipenum); start_packet_write()
718 disable_irq_empty(r8a66597, ep->pipenum); start_packet_write()
719 pipe_start(r8a66597, ep->pipenum); start_packet_write()
720 tmp = r8a66597_read(r8a66597, ep->fifoctr); start_packet_write()
722 pipe_irq_enable(r8a66597, ep->pipenum); start_packet_write()
724 irq_packet_write(ep, req); start_packet_write()
727 pipe_change(r8a66597, ep->pipenum); start_packet_write()
728 disable_irq_nrdy(r8a66597, ep->pipenum); start_packet_write()
729 pipe_start(r8a66597, ep->pipenum); start_packet_write()
730 enable_irq_nrdy(r8a66597, ep->pipenum); start_packet_write()
731 sudmac_start(r8a66597, ep, req); start_packet_write()
736 static void start_packet_read(struct r8a66597_ep *ep, start_packet_read() argument
739 struct r8a66597 *r8a66597 = ep->r8a66597; start_packet_read()
740 u16 pipenum = ep->pipenum; start_packet_read()
742 if (ep->pipenum == 0) { start_packet_read()
744 r8a66597_write(r8a66597, BCLR, ep->fifoctr); start_packet_read()
749 if (ep->pipetre) { start_packet_read()
751 r8a66597_write(r8a66597, TRCLR, ep->pipetre); start_packet_read()
753 DIV_ROUND_UP(req->req.length, ep->ep.maxpacket), start_packet_read()
754 ep->pipetrn); start_packet_read()
755 r8a66597_bset(r8a66597, TRENB, ep->pipetre); start_packet_read()
758 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) { start_packet_read()
760 change_bfre_mode(r8a66597, ep->pipenum, 0); start_packet_read()
765 sudmac_start(r8a66597, ep, req); start_packet_read()
771 static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req) start_packet() argument
773 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) start_packet()
774 start_packet_write(ep, req); start_packet()
776 start_packet_read(ep, req); start_packet()
779 static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req) start_ep0() argument
783 ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ; start_ep0()
787 start_ep0_write(ep, req); start_ep0()
790 start_packet_read(ep, req); start_ep0()
794 control_end(ep->r8a66597, 0); start_ep0()
797 dev_err(r8a66597_to_dev(ep->r8a66597), start_ep0()
895 static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep) get_request_from_ep() argument
897 return list_entry(ep->queue.next, struct r8a66597_request, queue); get_request_from_ep()
901 static void transfer_complete(struct r8a66597_ep *ep,
908 if (unlikely(ep->pipenum == 0)) {
909 if (ep->internal_ccpl) {
910 ep->internal_ccpl = 0;
916 if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
921 if (!list_empty(&ep->queue))
924 if (ep->use_dma)
925 sudmac_free_channel(ep->r8a66597, ep, req);
927 spin_unlock(&ep->r8a66597->lock);
928 usb_gadget_giveback_request(&ep->ep, &req->req);
929 spin_lock(&ep->r8a66597->lock);
932 req = get_request_from_ep(ep);
933 if (ep->ep.desc)
934 start_packet(ep, req);
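
Lines 927–929 of transfer_complete() show a pattern repeated by nearly every UDC in these results (see also fotg210_done() further down): the controller lock is dropped around usb_gadget_giveback_request() because the completion handler is allowed to requeue immediately, re-entering ->queue() which takes the same lock. Distilled into a sketch with hypothetical my_* types:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct my_request {
	struct usb_request req;
	struct list_head queue;
};

/* caller holds udc_lock; it is temporarily released for the callback */
static void my_done(spinlock_t *udc_lock, struct usb_ep *ep,
		    struct my_request *req, int status)
{
	list_del_init(&req->queue);		/* unlink while still locked */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	spin_unlock(udc_lock);
	usb_gadget_giveback_request(ep, &req->req);	/* may requeue */
	spin_lock(udc_lock);
}
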
938 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req) irq_ep0_write() argument
945 u16 pipenum = ep->pipenum; irq_ep0_write()
946 struct r8a66597 *r8a66597 = ep->r8a66597; irq_ep0_write()
949 r8a66597_bset(r8a66597, ISEL, ep->fifosel); irq_ep0_write()
953 tmp = r8a66597_read(r8a66597, ep->fifoctr); irq_ep0_write()
971 r8a66597_write_fifo(r8a66597, ep, buf, size); irq_ep0_write()
972 if ((size == 0) || ((size % ep->ep.maxpacket) != 0)) irq_ep0_write()
973 r8a66597_bset(r8a66597, BVAL, ep->fifoctr); irq_ep0_write()
981 || (size % ep->ep.maxpacket) irq_ep0_write()
992 static void irq_packet_write(struct r8a66597_ep *ep, irq_packet_write() argument
999 u16 pipenum = ep->pipenum; irq_packet_write()
1000 struct r8a66597 *r8a66597 = ep->r8a66597; irq_packet_write()
1003 tmp = r8a66597_read(r8a66597, ep->fifoctr); irq_packet_write()
1019 r8a66597_write_fifo(r8a66597, ep, buf, size); irq_packet_write()
1021 || ((size % ep->ep.maxpacket) != 0) irq_packet_write()
1022 || ((bufsize != ep->ep.maxpacket) irq_packet_write()
1024 r8a66597_bset(r8a66597, BVAL, ep->fifoctr); irq_packet_write()
1031 || (size % ep->ep.maxpacket) irq_packet_write()
1041 static void irq_packet_read(struct r8a66597_ep *ep, irq_packet_read() argument
1048 u16 pipenum = ep->pipenum; irq_packet_read()
1049 struct r8a66597 *r8a66597 = ep->r8a66597; irq_packet_read()
1053 tmp = r8a66597_read(r8a66597, ep->fifoctr); irq_packet_read()
1078 || (size % ep->ep.maxpacket) irq_packet_read()
1088 r8a66597_write(r8a66597, BCLR, ep->fifoctr); irq_packet_read()
1090 r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size); irq_packet_read()
1094 if ((ep->pipenum != 0) && finish) irq_packet_read()
1095 transfer_complete(ep, req, 0); irq_packet_read()
1102 struct r8a66597_ep *ep; irq_pipe_ready() local
1109 ep = &r8a66597->ep[0]; irq_pipe_ready()
1110 req = get_request_from_ep(ep); irq_pipe_ready()
1111 irq_packet_read(ep, req); irq_pipe_ready()
1117 ep = r8a66597->pipenum2ep[pipenum]; irq_pipe_ready()
1118 req = get_request_from_ep(ep); irq_pipe_ready()
1119 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) irq_pipe_ready()
1120 irq_packet_write(ep, req); irq_pipe_ready()
1122 irq_packet_read(ep, req); irq_pipe_ready()
1133 struct r8a66597_ep *ep; irq_pipe_empty() local
1139 ep = &r8a66597->ep[0]; irq_pipe_empty()
1140 req = get_request_from_ep(ep); irq_pipe_empty()
1141 irq_ep0_write(ep, req); irq_pipe_empty()
1152 ep = r8a66597->pipenum2ep[pipenum]; irq_pipe_empty()
1153 req = get_request_from_ep(ep); irq_pipe_empty()
1154 if (!list_empty(&ep->queue)) irq_pipe_empty()
1155 transfer_complete(ep, req, 0); irq_pipe_empty()
1166 struct r8a66597_ep *ep; variable in typeref:struct:r8a66597_ep
1179 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1180 pid = control_reg_get_pid(r8a66597, ep->pipenum);
1211 struct r8a66597_ep *ep; clear_feature() local
1215 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; clear_feature()
1216 if (!ep->wedge) { clear_feature()
1217 pipe_stop(r8a66597, ep->pipenum); clear_feature()
1218 control_reg_sqclr(r8a66597, ep->pipenum); clear_feature()
1220 usb_ep_clear_halt(&ep->ep); clear_feature()
1226 req = get_request_from_ep(ep); clear_feature()
1227 if (ep->busy) { clear_feature()
1228 ep->busy = 0; clear_feature()
1229 if (list_empty(&ep->queue)) clear_feature()
1231 start_packet(ep, req); clear_feature()
1232 } else if (!list_empty(&ep->queue)) clear_feature()
1233 pipe_start(r8a66597, ep->pipenum); clear_feature()
1272 struct r8a66597_ep *ep; set_feature() local
1275 ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; set_feature()
1276 pipe_stall(r8a66597, ep->pipenum); set_feature()
1373 struct r8a66597_ep *ep; variable in typeref:struct:r8a66597_ep
1375 ep = &r8a66597->ep[0];
1376 req = get_request_from_ep(ep);
1377 transfer_complete(ep, req, 0);
1403 static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep) sudmac_finish() argument
1410 pipenum = ep->pipenum; sudmac_finish()
1413 while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) { sudmac_finish()
1423 r8a66597_bset(r8a66597, BCLR, ep->fifoctr); sudmac_finish()
1424 req = get_request_from_ep(ep); sudmac_finish()
1435 || (len % ep->ep.maxpacket)) { sudmac_finish()
1436 if (ep->dma->dir) { sudmac_finish()
1442 transfer_complete(ep, req, 0); sudmac_finish()
1450 struct r8a66597_ep *ep; r8a66597_sudmac_irq() local
1457 ep = r8a66597->pipenum2ep[pipenum]; r8a66597_sudmac_irq()
1458 sudmac_finish(r8a66597, ep); r8a66597_sudmac_irq()
1559 struct r8a66597_ep *ep; r8a66597_enable() local
1561 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_enable()
1562 return alloc_pipe_config(ep, desc); r8a66597_enable()
1567 struct r8a66597_ep *ep; r8a66597_disable() local
1571 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_disable()
1572 BUG_ON(!ep); r8a66597_disable()
1574 while (!list_empty(&ep->queue)) { r8a66597_disable()
1575 req = get_request_from_ep(ep); r8a66597_disable()
1576 spin_lock_irqsave(&ep->r8a66597->lock, flags); r8a66597_disable()
1577 transfer_complete(ep, req, -ECONNRESET); r8a66597_disable()
1578 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); r8a66597_disable()
1581 pipe_irq_disable(ep->r8a66597, ep->pipenum); r8a66597_disable()
1582 return free_pipe_config(ep); r8a66597_disable()
1610 struct r8a66597_ep *ep; r8a66597_queue() local
1615 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_queue()
1618 if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN) r8a66597_queue()
1621 spin_lock_irqsave(&ep->r8a66597->lock, flags); r8a66597_queue()
1623 if (list_empty(&ep->queue)) r8a66597_queue()
1626 list_add_tail(&req->queue, &ep->queue); r8a66597_queue()
1630 if (ep->ep.desc == NULL) /* control */ r8a66597_queue()
1631 start_ep0(ep, req); r8a66597_queue()
1633 if (request && !ep->busy) r8a66597_queue()
1634 start_packet(ep, req); r8a66597_queue()
1637 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); r8a66597_queue()
1644 struct r8a66597_ep *ep; r8a66597_dequeue() local
1648 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_dequeue()
1651 spin_lock_irqsave(&ep->r8a66597->lock, flags); r8a66597_dequeue()
1652 if (!list_empty(&ep->queue)) r8a66597_dequeue()
1653 transfer_complete(ep, req, -ECONNRESET); r8a66597_dequeue()
1654 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); r8a66597_dequeue()
1661 struct r8a66597_ep *ep; r8a66597_set_halt() local
1666 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_set_halt()
1667 req = get_request_from_ep(ep); r8a66597_set_halt()
1669 spin_lock_irqsave(&ep->r8a66597->lock, flags); r8a66597_set_halt()
1670 if (!list_empty(&ep->queue)) { r8a66597_set_halt()
1675 ep->busy = 1; r8a66597_set_halt()
1676 pipe_stall(ep->r8a66597, ep->pipenum); r8a66597_set_halt()
1678 ep->busy = 0; r8a66597_set_halt()
1679 ep->wedge = 0; r8a66597_set_halt()
1680 pipe_stop(ep->r8a66597, ep->pipenum); r8a66597_set_halt()
1684 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); r8a66597_set_halt()
1690 struct r8a66597_ep *ep; r8a66597_set_wedge() local
1693 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_set_wedge()
1695 if (!ep || !ep->ep.desc) r8a66597_set_wedge()
1698 spin_lock_irqsave(&ep->r8a66597->lock, flags); r8a66597_set_wedge()
1699 ep->wedge = 1; r8a66597_set_wedge()
1700 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); r8a66597_set_wedge()
1707 struct r8a66597_ep *ep; r8a66597_fifo_flush() local
1710 ep = container_of(_ep, struct r8a66597_ep, ep); r8a66597_fifo_flush()
1711 spin_lock_irqsave(&ep->r8a66597->lock, flags); r8a66597_fifo_flush()
1712 if (list_empty(&ep->queue) && !ep->busy) { r8a66597_fifo_flush()
1713 pipe_stop(ep->r8a66597, ep->pipenum); r8a66597_fifo_flush()
1714 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); r8a66597_fifo_flush()
1715 r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr); r8a66597_fifo_flush()
1716 r8a66597_write(ep->r8a66597, 0, ep->pipectr); r8a66597_fifo_flush()
1718 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); r8a66597_fifo_flush()
1829 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req); r8a66597_remove()
1838 static void nop_completion(struct usb_ep *ep, struct usb_request *r) nop_completion() argument
1923 r8a66597->gadget.ep0 = &r8a66597->ep[0].ep; r8a66597_probe()
1926 struct r8a66597_ep *ep = &r8a66597->ep[i]; r8a66597_probe() local
1929 INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list); r8a66597_probe()
1930 list_add_tail(&r8a66597->ep[i].ep.ep_list, r8a66597_probe()
1933 ep->r8a66597 = r8a66597; r8a66597_probe()
1934 INIT_LIST_HEAD(&ep->queue); r8a66597_probe()
1935 ep->ep.name = r8a66597_ep_name[i]; r8a66597_probe()
1936 ep->ep.ops = &r8a66597_ep_ops; r8a66597_probe()
1937 usb_ep_set_maxpacket_limit(&ep->ep, 512); r8a66597_probe()
1940 ep->ep.caps.type_control = true; r8a66597_probe()
1942 ep->ep.caps.type_iso = true; r8a66597_probe()
1943 ep->ep.caps.type_bulk = true; r8a66597_probe()
1944 ep->ep.caps.type_int = true; r8a66597_probe()
1946 ep->ep.caps.dir_in = true; r8a66597_probe()
1947 ep->ep.caps.dir_out = true; r8a66597_probe()
1949 usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64); r8a66597_probe()
1950 r8a66597->ep[0].pipenum = 0; r8a66597_probe()
1951 r8a66597->ep[0].fifoaddr = CFIFO; r8a66597_probe()
1952 r8a66597->ep[0].fifosel = CFIFOSEL; r8a66597_probe()
1953 r8a66597->ep[0].fifoctr = CFIFOCTR; r8a66597_probe()
1954 r8a66597->ep[0].pipectr = get_pipectr_addr(0); r8a66597_probe()
1955 r8a66597->pipenum2ep[0] = &r8a66597->ep[0]; r8a66597_probe()
1956 r8a66597->epaddr2ep[0] = &r8a66597->ep[0]; r8a66597_probe()
1958 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep, r8a66597_probe()
1974 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req); r8a66597_probe()
1980 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req); r8a66597_probe()
412 r8a66597_ep_setting(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, const struct usb_endpoint_descriptor *desc, u16 pipenum, int dma) r8a66597_ep_setting() argument
632 sudmac_alloc_channel(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, struct r8a66597_request *req) sudmac_alloc_channel() argument
669 sudmac_free_channel(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, struct r8a66597_request *req) sudmac_free_channel() argument
net2272.c
58 "ep-a", "ep-b", "ep-c",
74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
103 * mode 0 == ep-{a,b,c} 512db each
104 * mode 1 == ep-a 1k, ep-{b,c} 512db
105 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
106 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
120 static void assert_out_naking(struct net2272_ep *ep, const char *where) assert_out_naking() argument
128 tmp = net2272_ep_read(ep, EP_STAT0); assert_out_naking()
130 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n", assert_out_naking()
131 ep->ep.name, where, tmp); assert_out_naking()
132 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); assert_out_naking()
135 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
137 static void stop_out_naking(struct net2272_ep *ep) stop_out_naking() argument
139 u8 tmp = net2272_ep_read(ep, EP_STAT0); stop_out_naking()
142 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); stop_out_naking()
192 struct net2272_ep *ep; net2272_enable() local
197 ep = container_of(_ep, struct net2272_ep, ep); net2272_enable()
198 if (!_ep || !desc || ep->desc || _ep->name == ep0name net2272_enable()
201 dev = ep->dev; net2272_enable()
209 ep->desc = desc; net2272_enable()
212 ep->stopped = 0; net2272_enable()
213 ep->wedged = 0; net2272_enable()
216 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff); net2272_enable()
217 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8); net2272_enable()
220 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); net2272_enable()
230 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0; net2272_enable()
237 ep->is_in = usb_endpoint_dir_in(desc); net2272_enable()
238 if (!ep->is_in) net2272_enable()
239 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); net2272_enable()
241 net2272_ep_write(ep, EP_CFG, tmp); net2272_enable()
244 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0); net2272_enable()
249 | net2272_ep_read(ep, EP_IRQENB); net2272_enable()
250 net2272_ep_write(ep, EP_IRQENB, tmp); net2272_enable()
253 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n", net2272_enable()
256 net2272_ep_read(ep, EP_CFG)); net2272_enable()
262 static void net2272_ep_reset(struct net2272_ep *ep) net2272_ep_reset() argument
266 ep->desc = NULL; net2272_ep_reset()
267 INIT_LIST_HEAD(&ep->queue); net2272_ep_reset()
269 usb_ep_set_maxpacket_limit(&ep->ep, ~0); net2272_ep_reset()
270 ep->ep.ops = &net2272_ep_ops; net2272_ep_reset()
273 net2272_ep_write(ep, EP_IRQENB, 0); net2272_ep_reset()
279 net2272_ep_write(ep, EP_RSPSET, tmp); net2272_ep_reset()
282 if (ep->num != 0) net2272_ep_reset()
285 net2272_ep_write(ep, EP_RSPCLR, tmp); net2272_ep_reset()
288 net2272_ep_write(ep, EP_STAT0, net2272_ep_reset()
295 net2272_ep_write(ep, EP_STAT1, net2272_ep_reset()
310 struct net2272_ep *ep; net2272_disable() local
313 ep = container_of(_ep, struct net2272_ep, ep); net2272_disable()
314 if (!_ep || !ep->desc || _ep->name == ep0name) net2272_disable()
317 spin_lock_irqsave(&ep->dev->lock, flags); net2272_disable()
318 net2272_dequeue_all(ep); net2272_disable()
319 net2272_ep_reset(ep); net2272_disable()
321 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name); net2272_disable()
323 spin_unlock_irqrestore(&ep->dev->lock, flags); net2272_disable()
332 struct net2272_ep *ep; net2272_alloc_request() local
337 ep = container_of(_ep, struct net2272_ep, ep); net2272_alloc_request()
351 struct net2272_ep *ep; net2272_free_request() local
354 ep = container_of(_ep, struct net2272_ep, ep); net2272_free_request()
364 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) net2272_done() argument
367 unsigned stopped = ep->stopped; net2272_done()
369 if (ep->num == 0) { net2272_done()
370 if (ep->dev->protocol_stall) { net2272_done()
371 ep->stopped = 1; net2272_done()
372 set_halt(ep); net2272_done()
374 allow_status(ep); net2272_done()
384 dev = ep->dev; net2272_done()
385 if (use_dma && ep->dma) net2272_done()
387 ep->is_in); net2272_done()
391 ep->ep.name, &req->req, status, net2272_done()
395 ep->stopped = 1; net2272_done()
397 usb_gadget_giveback_request(&ep->ep, &req->req); net2272_done()
399 ep->stopped = stopped; net2272_done()
403 net2272_write_packet(struct net2272_ep *ep, u8 *buf, net2272_write_packet() argument
406 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); net2272_write_packet()
414 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", net2272_write_packet()
415 ep->ep.name, req, max, length, net2272_write_packet()
416 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); net2272_write_packet()
430 tmp = net2272_read(ep->dev, LOCCTL); net2272_write_packet()
431 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH)); net2272_write_packet()
433 net2272_write(ep->dev, LOCCTL, tmp); net2272_write_packet()
440 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) net2272_write_fifo() argument
446 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n", net2272_write_fifo()
447 ep->ep.name, req->req.actual, req->req.length); net2272_write_fifo()
459 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); net2272_write_fifo()
460 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) { net2272_write_fifo()
465 net2272_ep_read(ep, EP_STAT0); net2272_write_fifo()
467 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_write_fifo()
468 (net2272_ep_read(ep, EP_AVAIL0)); net2272_write_fifo()
470 if (max < ep->ep.maxpacket) net2272_write_fifo()
471 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) net2272_write_fifo()
472 | (net2272_ep_read(ep, EP_AVAIL0)); net2272_write_fifo()
474 count = net2272_write_packet(ep, buf, req, max); net2272_write_fifo()
478 if (count < ep->ep.maxpacket) net2272_write_fifo()
479 set_fifo_bytecount(ep, 0); net2272_write_fifo()
480 net2272_done(ep, req, 0); net2272_write_fifo()
482 if (!list_empty(&ep->queue)) { net2272_write_fifo()
483 req = list_entry(ep->queue.next, net2272_write_fifo()
486 status = net2272_kick_dma(ep, req); net2272_write_fifo()
489 if ((net2272_ep_read(ep, EP_STAT0) net2272_write_fifo()
495 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); net2272_write_fifo()
501 net2272_out_flush(struct net2272_ep *ep) net2272_out_flush() argument
503 ASSERT_OUT_NAKING(ep); net2272_out_flush()
505 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT) net2272_out_flush()
507 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); net2272_out_flush()
511 net2272_read_packet(struct net2272_ep *ep, u8 *buf, net2272_read_packet() argument
514 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); net2272_read_packet()
520 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", net2272_read_packet()
521 ep->ep.name, req, avail, net2272_read_packet()
522 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); net2272_read_packet()
524 is_short = (avail < ep->ep.maxpacket); net2272_read_packet()
544 * ep stat0 twice in the case of a short transfer net2272_read_packet()
546 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) net2272_read_packet()
547 net2272_ep_read(ep, EP_STAT0); net2272_read_packet()
553 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) net2272_read_fifo() argument
562 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n", net2272_read_fifo()
563 ep->ep.name, req->req.actual, req->req.length); net2272_read_fifo()
570 count = (net2272_ep_read(ep, EP_AVAIL1) << 8) net2272_read_fifo()
571 | net2272_ep_read(ep, EP_AVAIL0); net2272_read_fifo()
573 net2272_ep_write(ep, EP_STAT0, net2272_read_fifo()
580 if ((tmp % ep->ep.maxpacket) != 0) { net2272_read_fifo()
581 dev_err(ep->dev->dev, net2272_read_fifo()
583 ep->ep.name, count, tmp); net2272_read_fifo()
589 is_short = net2272_read_packet(ep, buf, req, count); net2272_read_fifo()
597 net2272_out_flush(ep); net2272_read_fifo()
598 net2272_done(ep, req, -EOVERFLOW); net2272_read_fifo()
600 net2272_done(ep, req, 0); net2272_read_fifo()
606 if (unlikely(ep->num == 0)) { net2272_read_fifo()
607 net2272_ep_write(ep, EP_TRANSFER2, 0); net2272_read_fifo()
608 net2272_ep_write(ep, EP_TRANSFER1, 0); net2272_read_fifo()
609 net2272_ep_write(ep, EP_TRANSFER0, 0); net2272_read_fifo()
612 if (!list_empty(&ep->queue)) { net2272_read_fifo()
613 req = list_entry(ep->queue.next, net2272_read_fifo()
615 status = net2272_kick_dma(ep, req); net2272_read_fifo()
617 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))) net2272_read_fifo()
622 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))); net2272_read_fifo()
628 net2272_pio_advance(struct net2272_ep *ep) net2272_pio_advance() argument
632 if (unlikely(list_empty(&ep->queue))) net2272_pio_advance()
635 req = list_entry(ep->queue.next, struct net2272_request, queue); net2272_pio_advance()
636 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); net2272_pio_advance()
641 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf, net2272_request_dma() argument
644 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n", net2272_request_dma()
645 ep, buf, len, dir); net2272_request_dma()
697 ((ep >> 1) << DMA_ENDPOINT_SELECT)); net2272_request_dma()
720 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) net2272_kick_dma() argument
725 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma) net2272_kick_dma()
734 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n", net2272_kick_dma()
735 ep->ep.name, req, (unsigned long long) req->req.dma); net2272_kick_dma()
737 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); net2272_kick_dma()
740 if (ep->dev->dma_busy) net2272_kick_dma()
750 if (ep->is_in) { net2272_kick_dma()
752 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) net2272_kick_dma()
759 tmp = net2272_ep_read(ep, EP_STAT0); net2272_kick_dma()
762 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) net2272_kick_dma()
767 ep->not_empty = 1; net2272_kick_dma()
769 ep->not_empty = 0; net2272_kick_dma()
773 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); net2272_kick_dma()
781 net2272_write(ep->dev, DMAREQ, net2272_kick_dma()
785 (ep->dev->dma_eot_polarity << EOT_POLARITY) | net2272_kick_dma()
786 (ep->dev->dma_dack_polarity << DACK_POLARITY) | net2272_kick_dma()
787 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) | net2272_kick_dma()
788 ((ep->num >> 1) << DMA_ENDPOINT_SELECT)); net2272_kick_dma()
795 net2272_ep_write(ep, EP_IRQENB, 0); net2272_kick_dma()
797 net2272_start_dma(ep->dev); net2272_kick_dma()
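
net2272_kick_dma() assumes req->req.dma already holds a bus address: the line-wrapped calls in net2272_queue() and net2272_done() above are, in the full source, usb_gadget_map_request() and usb_gadget_unmap_request(), which bracket every DMA transfer. A reduced sketch of that pairing (my_* names and the elided engine programming are placeholders):

#include <linux/usb/gadget.h>

static int my_start_dma(struct usb_gadget *gadget, struct usb_request *req,
			int is_in)
{
	int ret = usb_gadget_map_request(gadget, req, is_in);

	if (ret)
		return ret;	/* callers typically fall back to PIO */
	/* ... program the DMA engine with req->dma ... */
	return 0;
}

static void my_finish_dma(struct usb_gadget *gadget, struct usb_request *req,
			  int is_in)
{
	usb_gadget_unmap_request(gadget, req, is_in);	/* after completion */
}
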
829 struct net2272_ep *ep; net2272_queue() local
839 ep = container_of(_ep, struct net2272_ep, ep); net2272_queue()
840 if (!_ep || (!ep->desc && ep->num != 0)) net2272_queue()
842 dev = ep->dev; net2272_queue()
847 if (use_dma && ep->dma) { net2272_queue()
849 ep->is_in); net2272_queue()
864 if (list_empty(&ep->queue) && !ep->stopped) { net2272_queue()
866 if (ep->num == 0 && _req->length == 0) { net2272_queue()
867 net2272_done(ep, req, 0); net2272_queue()
868 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name); net2272_queue()
873 s = net2272_ep_read(ep, EP_STAT0); net2272_queue()
877 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) { net2272_queue()
883 status = net2272_read_fifo(ep, req); net2272_queue()
885 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS)); net2272_queue()
891 status = net2272_kick_dma(ep, req); net2272_queue()
899 if (ep->is_in) net2272_queue()
900 status = net2272_write_fifo(ep, req); net2272_queue()
902 s = net2272_ep_read(ep, EP_STAT0); net2272_queue()
904 status = net2272_read_fifo(ep, req); net2272_queue()
915 list_add_tail(&req->queue, &ep->queue); net2272_queue()
917 if (likely(!list_empty(&ep->queue))) net2272_queue()
918 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); net2272_queue()
927 net2272_dequeue_all(struct net2272_ep *ep) net2272_dequeue_all() argument
932 ep->stopped = 1; net2272_dequeue_all()
934 while (!list_empty(&ep->queue)) { net2272_dequeue_all()
935 req = list_entry(ep->queue.next, net2272_dequeue_all()
938 net2272_done(ep, req, -ESHUTDOWN); net2272_dequeue_all()
946 struct net2272_ep *ep; net2272_dequeue() local
951 ep = container_of(_ep, struct net2272_ep, ep); net2272_dequeue()
952 if (!_ep || (!ep->desc && ep->num != 0) || !_req) net2272_dequeue()
955 spin_lock_irqsave(&ep->dev->lock, flags); net2272_dequeue()
956 stopped = ep->stopped; net2272_dequeue()
957 ep->stopped = 1; net2272_dequeue()
960 list_for_each_entry(req, &ep->queue, queue) { net2272_dequeue()
965 spin_unlock_irqrestore(&ep->dev->lock, flags); net2272_dequeue()
970 if (ep->queue.next == &req->queue) { net2272_dequeue()
971 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name); net2272_dequeue()
972 net2272_done(ep, req, -ECONNRESET); net2272_dequeue()
975 ep->stopped = stopped; net2272_dequeue()
977 spin_unlock_irqrestore(&ep->dev->lock, flags); net2272_dequeue()
986 struct net2272_ep *ep; net2272_set_halt_and_wedge() local
990 ep = container_of(_ep, struct net2272_ep, ep); net2272_set_halt_and_wedge()
991 if (!_ep || (!ep->desc && ep->num != 0)) net2272_set_halt_and_wedge()
993 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) net2272_set_halt_and_wedge()
995 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc)) net2272_set_halt_and_wedge()
998 spin_lock_irqsave(&ep->dev->lock, flags); net2272_set_halt_and_wedge()
999 if (!list_empty(&ep->queue)) net2272_set_halt_and_wedge()
1001 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) net2272_set_halt_and_wedge()
1004 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name, net2272_set_halt_and_wedge()
1009 if (ep->num == 0) net2272_set_halt_and_wedge()
1010 ep->dev->protocol_stall = 1; net2272_set_halt_and_wedge()
1012 set_halt(ep); net2272_set_halt_and_wedge()
1014 ep->wedged = 1; net2272_set_halt_and_wedge()
1016 clear_halt(ep); net2272_set_halt_and_wedge()
1017 ep->wedged = 0; net2272_set_halt_and_wedge()
1020 spin_unlock_irqrestore(&ep->dev->lock, flags); net2272_set_halt_and_wedge()
1042 struct net2272_ep *ep; net2272_fifo_status() local
1045 ep = container_of(_ep, struct net2272_ep, ep); net2272_fifo_status()
1046 if (!_ep || (!ep->desc && ep->num != 0)) net2272_fifo_status()
1048 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) net2272_fifo_status()
1051 avail = net2272_ep_read(ep, EP_AVAIL1) << 8; net2272_fifo_status()
1052 avail |= net2272_ep_read(ep, EP_AVAIL0); net2272_fifo_status()
1053 if (avail > ep->fifo_size) net2272_fifo_status()
1055 if (ep->is_in) net2272_fifo_status()
1056 avail = ep->fifo_size - avail; net2272_fifo_status()
1063 struct net2272_ep *ep; net2272_fifo_flush() local
1065 ep = container_of(_ep, struct net2272_ep, ep); net2272_fifo_flush()
1066 if (!_ep || (!ep->desc && ep->num != 0)) net2272_fifo_flush()
1068 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) net2272_fifo_flush()
1071 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); net2272_fifo_flush()
1248 struct net2272_ep *ep; registers_show() local
1250 ep = &dev->ep[i]; registers_show()
1251 if (i && !ep->desc) registers_show()
1254 t1 = net2272_ep_read(ep, EP_CFG); registers_show()
1255 t2 = net2272_ep_read(ep, EP_RSPSET); registers_show()
1259 ep->ep.name, t1, t2, registers_show()
1268 net2272_ep_read(ep, EP_IRQENB)); registers_show()
1274 "(ep%d%s-%s)%s\n", registers_show()
1275 net2272_ep_read(ep, EP_STAT0), registers_show()
1276 net2272_ep_read(ep, EP_STAT1), registers_show()
1277 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), registers_show()
1279 ep->is_in ? "in" : "out", registers_show()
1281 ep->stopped ? "*" : ""); registers_show()
1287 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | registers_show()
1288 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | registers_show()
1289 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); registers_show()
1293 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; registers_show()
1294 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; registers_show()
1322 /* always ep-a, ep-c ... maybe not ep-b */ net2272_set_fifo_mode()
1323 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); net2272_set_fifo_mode()
1327 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); net2272_set_fifo_mode()
1328 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; net2272_set_fifo_mode()
1331 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); net2272_set_fifo_mode()
1332 dev->ep[1].fifo_size = 1024; net2272_set_fifo_mode()
1333 dev->ep[2].fifo_size = 512; net2272_set_fifo_mode()
1336 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); net2272_set_fifo_mode()
1337 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; net2272_set_fifo_mode()
1340 dev->ep[1].fifo_size = 1024; net2272_set_fifo_mode()
1344 /* ep-c is always 2 512 byte buffers */ net2272_set_fifo_mode()
1345 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); net2272_set_fifo_mode()
1346 dev->ep[3].fifo_size = 512; net2272_set_fifo_mode()
1377 /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping net2272_usb_reset()
1392 struct net2272_ep *ep = &dev->ep[i]; net2272_usb_reinit() local
1394 ep->ep.name = ep_name[i]; net2272_usb_reinit()
1395 ep->dev = dev; net2272_usb_reinit()
1396 ep->num = i; net2272_usb_reinit()
1397 ep->not_empty = 0; net2272_usb_reinit()
1399 if (use_dma && ep->num == dma_ep) net2272_usb_reinit()
1400 ep->dma = 1; net2272_usb_reinit()
1403 ep->fifo_size = 512; net2272_usb_reinit()
1405 ep->fifo_size = 64; net2272_usb_reinit()
1406 net2272_ep_reset(ep); net2272_usb_reinit()
1409 ep->ep.caps.type_control = true; net2272_usb_reinit()
1411 ep->ep.caps.type_iso = true; net2272_usb_reinit()
1412 ep->ep.caps.type_bulk = true; net2272_usb_reinit()
1413 ep->ep.caps.type_int = true; net2272_usb_reinit()
1416 ep->ep.caps.dir_in = true; net2272_usb_reinit()
1417 ep->ep.caps.dir_out = true; net2272_usb_reinit()
1419 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); net2272_usb_reinit()
1421 dev->gadget.ep0 = &dev->ep[0].ep; net2272_usb_reinit()
1422 dev->ep[0].stopped = 0; net2272_usb_reinit()
1429 struct net2272_ep *ep0 = &dev->ep[0]; net2272_ep0_start()
1470 dev->ep[i].irqs = 0; net2272_start()
1498 net2272_dequeue_all(&dev->ep[i]); stop_activity()
1527 /* handle ep-a/ep-b dma completions */
1529 net2272_handle_dma(struct net2272_ep *ep) net2272_handle_dma() argument
1535 if (!list_empty(&ep->queue)) net2272_handle_dma()
1536 req = list_entry(ep->queue.next, net2272_handle_dma()
1541 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); net2272_handle_dma()
1544 net2272_write(ep->dev, DMAREQ, net2272_handle_dma()
1548 | (ep->dev->dma_eot_polarity << EOT_POLARITY) net2272_handle_dma()
1549 | (ep->dev->dma_dack_polarity << DACK_POLARITY) net2272_handle_dma()
1550 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY) net2272_handle_dma()
1551 | (ep->dma << DMA_ENDPOINT_SELECT)); net2272_handle_dma()
1553 ep->dev->dma_busy = 0; net2272_handle_dma()
1555 net2272_ep_write(ep, EP_IRQENB, net2272_handle_dma()
1558 | net2272_ep_read(ep, EP_IRQENB)); net2272_handle_dma()
1561 if (ep->is_in) { net2272_handle_dma()
1563 if ((req->req.length % ep->ep.maxpacket != 0) || net2272_handle_dma()
1565 set_fifo_bytecount(ep, 0); net2272_handle_dma()
1567 net2272_done(ep, req, 0); net2272_handle_dma()
1568 if (!list_empty(&ep->queue)) { net2272_handle_dma()
1569 req = list_entry(ep->queue.next, net2272_handle_dma()
1571 status = net2272_kick_dma(ep, req); net2272_handle_dma()
1573 net2272_pio_advance(ep); net2272_handle_dma()
1579 if (net2272_read(ep->dev, IRQSTAT0) & net2272_handle_dma()
1582 net2272_cancel_dma(ep->dev); net2272_handle_dma()
1590 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16) net2272_handle_dma()
1591 | (net2272_ep_read(ep, EP_TRANSFER1) << 8) net2272_handle_dma()
1592 | (net2272_ep_read(ep, EP_TRANSFER0)); net2272_handle_dma()
1594 if (ep->not_empty) net2272_handle_dma()
1600 net2272_pio_advance(ep); net2272_handle_dma()
1607 net2272_handle_ep(struct net2272_ep *ep) net2272_handle_ep() argument
1612 if (!list_empty(&ep->queue)) net2272_handle_ep()
1613 req = list_entry(ep->queue.next, net2272_handle_ep()
1619 stat0 = net2272_ep_read(ep, EP_STAT0); net2272_handle_ep()
1620 stat1 = net2272_ep_read(ep, EP_STAT1); net2272_handle_ep()
1621 ep->irqs++; net2272_handle_ep()
1623 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", net2272_handle_ep()
1624 ep->ep.name, stat0, stat1, req ? &req->req : NULL); net2272_handle_ep()
1626 net2272_ep_write(ep, EP_STAT0, stat0 & net2272_handle_ep()
1629 net2272_ep_write(ep, EP_STAT1, stat1); net2272_handle_ep()
1635 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT))) net2272_handle_ep()
1636 net2272_pio_advance(ep); net2272_handle_ep()
1639 net2272_pio_advance(ep); net2272_handle_ep()
1645 struct net2272_ep *ep; net2272_get_ep_by_addr() local
1648 return &dev->ep[0]; net2272_get_ep_by_addr()
1650 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { net2272_get_ep_by_addr()
1653 if (!ep->desc) net2272_get_ep_by_addr()
1655 bEndpointAddress = ep->desc->bEndpointAddress; net2272_get_ep_by_addr()
1659 return ep; net2272_get_ep_by_addr()
1729 struct net2272_ep *ep; net2272_handle_stat0_irqs() local
1750 ep = &dev->ep[0]; net2272_handle_stat0_irqs()
1751 ep->irqs++; net2272_handle_stat0_irqs()
1755 while (!list_empty(&ep->queue)) { net2272_handle_stat0_irqs()
1756 req = list_entry(ep->queue.next, net2272_handle_stat0_irqs()
1758 net2272_done(ep, req, net2272_handle_stat0_irqs()
1761 ep->stopped = 0; net2272_handle_stat0_irqs()
1763 net2272_ep_write(ep, EP_STAT0, net2272_handle_stat0_irqs()
1769 net2272_ep_write(ep, EP_STAT1, net2272_handle_stat0_irqs()
1815 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; net2272_handle_stat0_irqs()
1816 if (ep->is_in) { net2272_handle_stat0_irqs()
1820 stop_out_naking(ep); net2272_handle_stat0_irqs()
1825 net2272_ep_write(ep, EP_IRQENB, scratch); net2272_handle_stat0_irqs()
1845 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); net2272_handle_stat0_irqs()
1847 set_fifo_bytecount(&dev->ep[0], 0); net2272_handle_stat0_irqs()
1848 allow_status(ep); net2272_handle_stat0_irqs()
1850 ep->ep.name, status); net2272_handle_stat0_irqs()
1859 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); net2272_handle_stat0_irqs()
1861 set_fifo_bytecount(&dev->ep[0], 0); net2272_handle_stat0_irqs()
1862 allow_status(ep); net2272_handle_stat0_irqs()
1870 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); net2272_handle_stat0_irqs()
1872 set_fifo_bytecount(&dev->ep[0], 0); net2272_handle_stat0_irqs()
1873 allow_status(ep); net2272_handle_stat0_irqs()
1893 ep->ep.name); net2272_handle_stat0_irqs()
1895 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name); net2272_handle_stat0_irqs()
1898 allow_status(ep); net2272_handle_stat0_irqs()
1907 allow_status(ep); net2272_handle_stat0_irqs()
1919 allow_status(ep); net2272_handle_stat0_irqs()
1920 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name); net2272_handle_stat0_irqs()
1925 allow_status(ep); net2272_handle_stat0_irqs()
1934 net2272_ep_read(ep, EP_CFG)); net2272_handle_stat0_irqs()
1955 ep = &dev->ep[num]; net2272_handle_stat0_irqs()
1956 net2272_handle_dma(ep); net2272_handle_stat0_irqs()
1972 ep = &dev->ep[num]; net2272_handle_stat0_irqs()
1973 net2272_handle_ep(ep); net2272_handle_stat0_irqs()
2095 net2272_handle_dma(&dev->ep[2]); net2272_irq()
2097 net2272_handle_dma(&dev->ep[1]); net2272_irq()
fotg210-udc.c
31 static void fotg210_disable_fifo_int(struct fotg210_ep *ep) fotg210_disable_fifo_int() argument
33 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1); fotg210_disable_fifo_int()
35 if (ep->dir_in) fotg210_disable_fifo_int()
36 value |= DMISGR1_MF_IN_INT(ep->epnum - 1); fotg210_disable_fifo_int()
38 value |= DMISGR1_MF_OUTSPK_INT(ep->epnum - 1); fotg210_disable_fifo_int()
39 iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1); fotg210_disable_fifo_int()
42 static void fotg210_enable_fifo_int(struct fotg210_ep *ep) fotg210_enable_fifo_int() argument
44 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1); fotg210_enable_fifo_int()
46 if (ep->dir_in) fotg210_enable_fifo_int()
47 value &= ~DMISGR1_MF_IN_INT(ep->epnum - 1); fotg210_enable_fifo_int()
49 value &= ~DMISGR1_MF_OUTSPK_INT(ep->epnum - 1); fotg210_enable_fifo_int()
50 iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1); fotg210_enable_fifo_int()
61 static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req, fotg210_done() argument
67 if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN) fotg210_done()
72 spin_unlock(&ep->fotg210->lock); fotg210_done()
73 usb_gadget_giveback_request(&ep->ep, &req->req); fotg210_done()
74 spin_lock(&ep->fotg210->lock); fotg210_done()
76 if (ep->epnum) { fotg210_done()
77 if (list_empty(&ep->queue)) fotg210_done()
78 fotg210_disable_fifo_int(ep); fotg210_done()
80 fotg210_set_cxdone(ep->fotg210); fotg210_done()
84 static void fotg210_fifo_ep_mapping(struct fotg210_ep *ep, u32 epnum, fotg210_fifo_ep_mapping() argument
87 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_fifo_ep_mapping()
90 /* Driver should map an ep to a fifo and then map the fifo fotg210_fifo_ep_mapping()
91 * to the ep. What a brain-damaged design! fotg210_fifo_ep_mapping()
94 /* map a fifo to an ep */ fotg210_fifo_ep_mapping()
100 /* map the ep to the fifo */ fotg210_fifo_ep_mapping()
112 static void fotg210_set_fifo_dir(struct fotg210_ep *ep, u32 epnum, u32 dir_in) fotg210_set_fifo_dir() argument
114 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_set_fifo_dir()
122 static void fotg210_set_tfrtype(struct fotg210_ep *ep, u32 epnum, u32 type) fotg210_set_tfrtype() argument
124 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_set_tfrtype()
132 static void fotg210_set_mps(struct fotg210_ep *ep, u32 epnum, u32 mps, fotg210_set_mps() argument
135 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_set_mps()
145 static int fotg210_config_ep(struct fotg210_ep *ep, fotg210_config_ep() argument
148 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_config_ep()
150 fotg210_set_fifo_dir(ep, ep->epnum, ep->dir_in); fotg210_config_ep()
151 fotg210_set_tfrtype(ep, ep->epnum, ep->type); fotg210_config_ep()
152 fotg210_set_mps(ep, ep->epnum, ep->ep.maxpacket, ep->dir_in); fotg210_config_ep()
153 fotg210_fifo_ep_mapping(ep, ep->epnum, ep->dir_in); fotg210_config_ep()
155 fotg210->ep[ep->epnum] = ep; fotg210_config_ep()
163 struct fotg210_ep *ep; fotg210_ep_enable() local
165 ep = container_of(_ep, struct fotg210_ep, ep); fotg210_ep_enable()
167 ep->desc = desc; fotg210_ep_enable()
168 ep->epnum = usb_endpoint_num(desc); fotg210_ep_enable()
169 ep->type = usb_endpoint_type(desc); fotg210_ep_enable()
170 ep->dir_in = usb_endpoint_dir_in(desc); fotg210_ep_enable()
171 ep->ep.maxpacket = usb_endpoint_maxp(desc); fotg210_ep_enable()
173 return fotg210_config_ep(ep, desc); fotg210_ep_enable()
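
fotg210_ep_enable() above derives the endpoint number, type, direction and maxpacket purely from the descriptor via the ch9 helpers. For concreteness, a descriptor like the following (illustrative values, not taken from this driver) yields usb_endpoint_num() == 1, a bulk type, dir_in true and maxpacket 512:

#include <asm/byteorder.h>
#include <linux/usb/ch9.h>

static struct usb_endpoint_descriptor my_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN | 1,	/* IN endpoint 1 */
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(512),	/* high-speed bulk */
};
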
178 struct fotg210_ep *ep = fotg210->ep[epnum]; fotg210_reset_tseq() local
182 reg = (ep->dir_in) ? fotg210_reset_tseq()
199 static int fotg210_ep_release(struct fotg210_ep *ep) fotg210_ep_release() argument
201 if (!ep->epnum) fotg210_ep_release()
203 ep->epnum = 0; fotg210_ep_release()
204 ep->stall = 0; fotg210_ep_release()
205 ep->wedged = 0; fotg210_ep_release()
207 fotg210_reset_tseq(ep->fotg210, ep->epnum); fotg210_ep_release()
214 struct fotg210_ep *ep; fotg210_ep_disable() local
220 ep = container_of(_ep, struct fotg210_ep, ep); fotg210_ep_disable()
222 while (!list_empty(&ep->queue)) { fotg210_ep_disable()
223 req = list_entry(ep->queue.next, fotg210_ep_disable()
225 spin_lock_irqsave(&ep->fotg210->lock, flags); fotg210_ep_disable()
226 fotg210_done(ep, req, -ECONNRESET); fotg210_ep_disable()
227 spin_unlock_irqrestore(&ep->fotg210->lock, flags); fotg210_ep_disable()
230 return fotg210_ep_release(ep); fotg210_ep_disable()
256 static void fotg210_enable_dma(struct fotg210_ep *ep, fotg210_enable_dma() argument
260 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_enable_dma()
265 value |= DMACPSR1_DMA_LEN(len) | DMACPSR1_DMA_TYPE(ep->dir_in); fotg210_enable_dma()
270 if (ep->epnum) fotg210_enable_dma()
271 value |= DMATFNR_ACC_FN(ep->epnum - 1); fotg210_enable_dma()
290 static void fotg210_disable_dma(struct fotg210_ep *ep) fotg210_disable_dma() argument
292 iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR); fotg210_disable_dma()
295 static void fotg210_wait_dma_done(struct fotg210_ep *ep) fotg210_wait_dma_done() argument
300 value = ioread32(ep->fotg210->reg + FOTG210_DISGR2); fotg210_wait_dma_done()
307 iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2); fotg210_wait_dma_done()
311 value = ioread32(ep->fotg210->reg + FOTG210_DMACPSR1); fotg210_wait_dma_done()
313 iowrite32(value, ep->fotg210->reg + FOTG210_DMACPSR1); fotg210_wait_dma_done()
316 if (ep->epnum) { fotg210_wait_dma_done()
317 value = ioread32(ep->fotg210->reg + fotg210_wait_dma_done()
318 FOTG210_FIBCR(ep->epnum - 1)); fotg210_wait_dma_done()
320 iowrite32(value, ep->fotg210->reg + fotg210_wait_dma_done()
321 FOTG210_FIBCR(ep->epnum - 1)); fotg210_wait_dma_done()
323 value = ioread32(ep->fotg210->reg + FOTG210_DCFESR); fotg210_wait_dma_done()
325 iowrite32(value, ep->fotg210->reg + FOTG210_DCFESR); fotg210_wait_dma_done()
329 static void fotg210_start_dma(struct fotg210_ep *ep, fotg210_start_dma() argument
336 if (ep->epnum) { fotg210_start_dma()
337 if (ep->dir_in) { fotg210_start_dma()
342 length = ioread32(ep->fotg210->reg + fotg210_start_dma()
343 FOTG210_FIBCR(ep->epnum - 1)); fotg210_start_dma()
348 if (req->req.length - req->req.actual > ep->ep.maxpacket) fotg210_start_dma()
349 length = ep->ep.maxpacket; fotg210_start_dma()
355 ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); fotg210_start_dma()
363 ep->dir_in ? DMA_TO_DEVICE : fotg210_start_dma()
366 fotg210_enable_dma(ep, d, length); fotg210_start_dma()
369 fotg210_wait_dma_done(ep); fotg210_start_dma()
371 fotg210_disable_dma(ep); fotg210_start_dma()
379 static void fotg210_ep0_queue(struct fotg210_ep *ep, fotg210_ep0_queue() argument
383 fotg210_done(ep, req, 0); fotg210_ep0_queue()
386 if (ep->dir_in) { /* if IN */ fotg210_ep0_queue()
387 fotg210_start_dma(ep, req); fotg210_ep0_queue()
389 (req->req.actual < ep->ep.maxpacket)) fotg210_ep0_queue()
390 fotg210_done(ep, req, 0); fotg210_ep0_queue()
392 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0); fotg210_ep0_queue()
395 iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0); fotg210_ep0_queue()
402 struct fotg210_ep *ep; fotg210_ep_queue() local
407 ep = container_of(_ep, struct fotg210_ep, ep); fotg210_ep_queue()
410 if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN) fotg210_ep_queue()
413 spin_lock_irqsave(&ep->fotg210->lock, flags); fotg210_ep_queue()
415 if (list_empty(&ep->queue)) fotg210_ep_queue()
418 list_add_tail(&req->queue, &ep->queue); fotg210_ep_queue()
423 if (!ep->epnum) /* ep0 */ fotg210_ep_queue()
424 fotg210_ep0_queue(ep, req); fotg210_ep_queue()
425 else if (request && !ep->stall) fotg210_ep_queue()
426 fotg210_enable_fifo_int(ep); fotg210_ep_queue()
428 spin_unlock_irqrestore(&ep->fotg210->lock, flags); fotg210_ep_queue()
435 struct fotg210_ep *ep; fotg210_ep_dequeue() local
439 ep = container_of(_ep, struct fotg210_ep, ep); fotg210_ep_dequeue()
442 spin_lock_irqsave(&ep->fotg210->lock, flags); fotg210_ep_dequeue()
443 if (!list_empty(&ep->queue)) fotg210_ep_dequeue()
444 fotg210_done(ep, req, -ECONNRESET); fotg210_ep_dequeue()
445 spin_unlock_irqrestore(&ep->fotg210->lock, flags); fotg210_ep_dequeue()
450 static void fotg210_set_epnstall(struct fotg210_ep *ep) fotg210_set_epnstall() argument
452 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_set_epnstall()
457 if (ep->dir_in) { fotg210_set_epnstall()
460 } while (!(value & DCFESR_FIFO_EMPTY(ep->epnum - 1))); fotg210_set_epnstall()
463 reg = (ep->dir_in) ? fotg210_set_epnstall()
464 fotg210->reg + FOTG210_INEPMPSR(ep->epnum) : fotg210_set_epnstall()
465 fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum); fotg210_set_epnstall()
471 static void fotg210_clear_epnstall(struct fotg210_ep *ep) fotg210_clear_epnstall() argument
473 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_clear_epnstall()
477 reg = (ep->dir_in) ? fotg210_clear_epnstall()
478 fotg210->reg + FOTG210_INEPMPSR(ep->epnum) : fotg210_clear_epnstall()
479 fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum); fotg210_clear_epnstall()
487 struct fotg210_ep *ep; fotg210_set_halt_and_wedge() local
492 ep = container_of(_ep, struct fotg210_ep, ep); fotg210_set_halt_and_wedge()
494 fotg210 = ep->fotg210; fotg210_set_halt_and_wedge()
496 spin_lock_irqsave(&ep->fotg210->lock, flags); fotg210_set_halt_and_wedge()
499 fotg210_set_epnstall(ep); fotg210_set_halt_and_wedge()
500 ep->stall = 1; fotg210_set_halt_and_wedge()
502 ep->wedged = 1; fotg210_set_halt_and_wedge()
504 fotg210_reset_tseq(fotg210, ep->epnum); fotg210_set_halt_and_wedge()
505 fotg210_clear_epnstall(ep); fotg210_set_halt_and_wedge()
506 ep->stall = 0; fotg210_set_halt_and_wedge()
507 ep->wedged = 0; fotg210_set_halt_and_wedge()
508 if (!list_empty(&ep->queue)) fotg210_set_halt_and_wedge()
509 fotg210_enable_fifo_int(ep); fotg210_set_halt_and_wedge()
512 spin_unlock_irqrestore(&ep->fotg210->lock, flags); fotg210_set_halt_and_wedge()
661 fotg210_set_epnstall(fotg210->ep[epnum]); fotg210_set_feature()
676 struct fotg210_ep *ep = fotg210_clear_feature() local
677 fotg210->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK]; fotg210_clear_feature()
688 if (ep->wedged) { fotg210_clear_feature()
692 if (ep->stall) fotg210_clear_feature()
693 fotg210_set_halt_and_wedge(&ep->ep, 0, 0); fotg210_clear_feature()
703 static int fotg210_is_epnstall(struct fotg210_ep *ep) fotg210_is_epnstall() argument
705 struct fotg210_udc *fotg210 = ep->fotg210; fotg210_is_epnstall()
709 reg = (ep->dir_in) ? fotg210_is_epnstall()
710 fotg210->reg + FOTG210_INEPMPSR(ep->epnum) : fotg210_is_epnstall()
711 fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum); fotg210_is_epnstall()
732 fotg210_is_epnstall(fotg210->ep[epnum]) fotg210_get_status()
759 fotg210->ep[0]->dir_in = ctrl->bRequestType & USB_DIR_IN; fotg210_setup_packet()
799 struct fotg210_ep *ep = fotg210->ep[0]; fotg210_ep0out() local
801 if (!list_empty(&ep->queue) && !ep->dir_in) { fotg210_ep0out()
804 req = list_first_entry(&ep->queue, fotg210_ep0out()
808 fotg210_start_dma(ep, req); fotg210_ep0out()
810 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fotg210_ep0out()
811 fotg210_done(ep, req, 0); fotg210_ep0out()
819 struct fotg210_ep *ep = fotg210->ep[0]; fotg210_ep0in() local
821 if ((!list_empty(&ep->queue)) && (ep->dir_in)) { fotg210_ep0in()
824 req = list_entry(ep->queue.next, fotg210_ep0in()
828 fotg210_start_dma(ep, req); fotg210_ep0in()
830 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fotg210_ep0in()
831 fotg210_done(ep, req, 0); fotg210_ep0in()
845 static void fotg210_in_fifo_handler(struct fotg210_ep *ep) fotg210_in_fifo_handler() argument
847 struct fotg210_request *req = list_entry(ep->queue.next, fotg210_in_fifo_handler()
851 fotg210_start_dma(ep, req); fotg210_in_fifo_handler()
852 fotg210_done(ep, req, 0); fotg210_in_fifo_handler()
855 static void fotg210_out_fifo_handler(struct fotg210_ep *ep) fotg210_out_fifo_handler() argument
857 struct fotg210_request *req = list_entry(ep->queue.next, fotg210_out_fifo_handler()
860 fotg210_start_dma(ep, req); fotg210_out_fifo_handler()
864 req->req.actual < ep->ep.maxpacket) fotg210_out_fifo_handler()
865 fotg210_done(ep, req, 0); fotg210_out_fifo_handler()
985 fotg210_in_fifo_handler(fotg210->ep[fifo + 1]); fotg210_irq()
989 fotg210_out_fifo_handler(fotg210->ep[fifo + 1]); fotg210_irq()
1074 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); fotg210_udc_remove()
1111 fotg210->ep[i] = _ep[i]; fotg210_udc_probe()
1134 struct fotg210_ep *ep = fotg210->ep[i]; fotg210_udc_probe() local
1137 INIT_LIST_HEAD(&fotg210->ep[i]->ep.ep_list); fotg210_udc_probe()
1138 list_add_tail(&fotg210->ep[i]->ep.ep_list, fotg210_udc_probe()
1141 ep->fotg210 = fotg210; fotg210_udc_probe()
1142 INIT_LIST_HEAD(&ep->queue); fotg210_udc_probe()
1143 ep->ep.name = fotg210_ep_name[i]; fotg210_udc_probe()
1144 ep->ep.ops = &fotg210_ep_ops; fotg210_udc_probe()
1145 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); fotg210_udc_probe()
1148 ep->ep.caps.type_control = true; fotg210_udc_probe()
1150 ep->ep.caps.type_iso = true; fotg210_udc_probe()
1151 ep->ep.caps.type_bulk = true; fotg210_udc_probe()
1152 ep->ep.caps.type_int = true; fotg210_udc_probe()
1155 ep->ep.caps.dir_in = true; fotg210_udc_probe()
1156 ep->ep.caps.dir_out = true; fotg210_udc_probe()
1158 usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40); fotg210_udc_probe()
1159 fotg210->gadget.ep0 = &fotg210->ep[0]->ep; fotg210_udc_probe()
1162 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, fotg210_udc_probe()
1190 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); fotg210_udc_probe()
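Editor's note: the fotg210 hits above outline the request-queue path that most UDC drivers share — take the controller spinlock, append the request, then either start ep0 directly or arm the data endpoint's FIFO interrupt so the IRQ handler drains the queue (see lines 402-428 and 845-865). A minimal sketch of that shape, assuming hypothetical sketch_* types and register-kick helpers (not the fotg210 API):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_udc { spinlock_t lock; };
struct sketch_ep {
	struct sketch_udc *udc;
	struct list_head queue;		/* pending sketch_req's */
	unsigned epnum;
	unsigned stall:1;
};
struct sketch_req { struct list_head queue; };

/* Placeholder hardware kicks; a real driver writes FIFO/IRQ registers here. */
static void sketch_ep0_start(struct sketch_ep *ep, struct sketch_req *req) { }
static void sketch_enable_fifo_int(struct sketch_ep *ep) { }

static int sketch_ep_queue(struct sketch_ep *ep, struct sketch_req *req)
{
	unsigned long flags;
	bool first;

	spin_lock_irqsave(&ep->udc->lock, flags);
	first = list_empty(&ep->queue);	/* only the head entry needs a kick */
	list_add_tail(&req->queue, &ep->queue);
	if (!ep->epnum)			/* ep0: start the control transfer now */
		sketch_ep0_start(ep, req);
	else if (first && !ep->stall)	/* data ep: arm the FIFO interrupt */
		sketch_enable_fifo_int(ep);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}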
H A Damd5536udc.c70 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
247 DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts)); print_regs()
248 DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk)); print_regs()
285 /* mask all ep interrupts */ udc_mask_unused_interrupts()
330 static int udc_set_txfifo_addr(struct udc_ep *ep) udc_set_txfifo_addr() argument
336 if (!ep || !(ep->in)) udc_set_txfifo_addr()
339 dev = ep->dev; udc_set_txfifo_addr()
340 ep->txfifo = dev->txfifo; udc_set_txfifo_addr()
342 /* traverse ep's */ udc_set_txfifo_addr()
343 for (i = 0; i < ep->num; i++) { udc_set_txfifo_addr()
344 if (dev->ep[i].regs) { udc_set_txfifo_addr()
346 tmp = readl(&dev->ep[i].regs->bufin_framenum); udc_set_txfifo_addr()
348 ep->txfifo += tmp; udc_set_txfifo_addr()
357 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num) UDC_QUEUE_CNAK() argument
359 if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) { UDC_QUEUE_CNAK()
360 DBG(ep->dev, "NAK could not be cleared for ep%d\n", num); UDC_QUEUE_CNAK()
362 ep->naking = 1; UDC_QUEUE_CNAK()
372 struct udc_ep *ep; udc_ep_enable() local
385 ep = container_of(usbep, struct udc_ep, ep); udc_ep_enable()
386 dev = ep->dev; udc_ep_enable()
388 DBG(dev, "udc_ep_enable() ep %d\n", ep->num); udc_ep_enable()
394 ep->ep.desc = desc; udc_ep_enable()
396 ep->halted = 0; udc_ep_enable()
399 tmp = readl(&dev->ep[ep->num].regs->ctl); udc_ep_enable()
401 writel(tmp, &dev->ep[ep->num].regs->ctl); udc_ep_enable()
405 tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt); udc_ep_enable()
407 ep->ep.maxpacket = maxpacket; udc_ep_enable()
408 writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt); udc_ep_enable()
410 /* IN ep */ udc_ep_enable()
411 if (ep->in) { udc_ep_enable()
413 /* ep ix in UDC CSR register space */ udc_ep_enable()
414 udc_csr_epix = ep->num; udc_ep_enable()
417 tmp = readl(&dev->ep[ep->num].regs->bufin_framenum); udc_ep_enable()
424 writel(tmp, &dev->ep[ep->num].regs->bufin_framenum); udc_ep_enable()
427 udc_set_txfifo_addr(ep); udc_ep_enable()
430 tmp = readl(&ep->regs->ctl); udc_ep_enable()
432 writel(tmp, &ep->regs->ctl); udc_ep_enable()
434 /* OUT ep */ udc_ep_enable()
436 /* ep ix in UDC CSR register space */ udc_ep_enable()
437 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; udc_ep_enable()
440 tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); udc_ep_enable()
443 writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); udc_ep_enable()
445 if (use_dma && !ep->in) { udc_ep_enable()
447 ep->bna_dummy_req = udc_alloc_bna_dummy(ep); udc_ep_enable()
448 ep->bna_occurred = 0; udc_ep_enable()
451 if (ep->num != UDC_EP0OUT_IX) udc_ep_enable()
455 /* set ep values */ udc_ep_enable()
459 /* ep number */ udc_ep_enable()
461 /* ep direction */ udc_ep_enable()
462 tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR); udc_ep_enable()
463 /* ep type */ udc_ep_enable()
465 /* ep config */ udc_ep_enable()
466 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG); udc_ep_enable()
467 /* ep interface */ udc_ep_enable()
468 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF); udc_ep_enable()
469 /* ep alt */ udc_ep_enable()
470 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT); udc_ep_enable()
474 /* enable ep irq */ udc_ep_enable()
476 tmp &= AMD_UNMASK_BIT(ep->num); udc_ep_enable()
483 if (!use_dma || ep->in) { udc_ep_enable()
484 tmp = readl(&ep->regs->ctl); udc_ep_enable()
486 writel(tmp, &ep->regs->ctl); udc_ep_enable()
487 ep->naking = 0; udc_ep_enable()
488 UDC_QUEUE_CNAK(ep, ep->num); udc_ep_enable()
498 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep) ep_init() argument
502 VDBG(ep->dev, "ep-%d reset\n", ep->num); ep_init()
503 ep->ep.desc = NULL; ep_init()
504 ep->ep.ops = &udc_ep_ops; ep_init()
505 INIT_LIST_HEAD(&ep->queue); ep_init()
507 usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0); ep_init()
509 tmp = readl(&ep->regs->ctl); ep_init()
511 writel(tmp, &ep->regs->ctl); ep_init()
512 ep->naking = 1; ep_init()
516 tmp |= AMD_BIT(ep->num); ep_init()
519 if (ep->in) { ep_init()
521 tmp = readl(&ep->regs->ctl); ep_init()
523 writel(tmp, &ep->regs->ctl); ep_init()
525 tmp = readl(&ep->regs->sts); ep_init()
527 writel(tmp, &ep->regs->sts); ep_init()
530 tmp = readl(&ep->regs->ctl); ep_init()
532 writel(tmp, &ep->regs->ctl); ep_init()
536 writel(0, &ep->regs->desptr); ep_init()
542 struct udc_ep *ep = NULL; udc_ep_disable() local
548 ep = container_of(usbep, struct udc_ep, ep); udc_ep_disable()
549 if (usbep->name == ep0_string || !ep->ep.desc) udc_ep_disable()
552 DBG(ep->dev, "Disable ep-%d\n", ep->num); udc_ep_disable()
554 spin_lock_irqsave(&ep->dev->lock, iflags); udc_ep_disable()
555 udc_free_request(&ep->ep, &ep->bna_dummy_req->req); udc_ep_disable()
556 empty_req_queue(ep); udc_ep_disable()
557 ep_init(ep->dev->regs, ep); udc_ep_disable()
558 spin_unlock_irqrestore(&ep->dev->lock, iflags); udc_ep_disable()
569 struct udc_ep *ep; udc_alloc_request() local
574 ep = container_of(usbep, struct udc_ep, ep); udc_alloc_request()
576 VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num); udc_alloc_request()
584 if (ep->dma) { udc_alloc_request()
586 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, udc_alloc_request()
593 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, " udc_alloc_request()
638 struct udc_ep *ep; udc_free_request() local
644 ep = container_of(usbep, struct udc_ep, ep); udc_free_request()
646 VDBG(ep->dev, "free_req req=%p\n", req); udc_free_request()
649 VDBG(ep->dev, "req->td_data=%p\n", req->td_data); udc_free_request()
653 udc_free_dma_chain(ep->dev, req); udc_free_request()
655 pci_pool_free(ep->dev->data_requests, req->td_data, udc_free_request()
682 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep) udc_alloc_bna_dummy() argument
688 _req = udc_alloc_request(&ep->ep, GFP_ATOMIC); udc_alloc_bna_dummy()
691 ep->bna_dummy_req = req; udc_alloc_bna_dummy()
699 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req) udc_txfifo_write() argument
707 if (!req || !ep) udc_txfifo_write()
716 bytes = ep->ep.maxpacket; udc_txfifo_write()
722 writel(*(buf + i), ep->txfifo); udc_txfifo_write()
727 ep->txfifo); udc_txfifo_write()
731 writel(0, &ep->regs->confirm); udc_txfifo_write()
772 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req) udc_rxfifo_read() argument
780 bytes = readl(&ep->regs->sts); udc_rxfifo_read()
786 if ((buf_space % ep->ep.maxpacket) != 0) { udc_rxfifo_read()
787 DBG(ep->dev, udc_rxfifo_read()
789 ep->ep.name, bytes, buf_space); udc_rxfifo_read()
797 if (((bytes % ep->ep.maxpacket) != 0) || (!bytes) udc_rxfifo_read()
802 VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes); udc_rxfifo_read()
803 udc_rxfifo_read_bytes(ep->dev, buf, bytes); udc_rxfifo_read()
810 struct udc_ep *ep, udc_create_dma_chain()
824 VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n", udc_create_dma_chain()
829 if (!ep->in) udc_create_dma_chain()
833 len = req->req.length / ep->ep.maxpacket; udc_create_dma_chain()
834 if (req->req.length % ep->ep.maxpacket) udc_create_dma_chain()
840 udc_free_dma_chain(ep->dev, req); udc_create_dma_chain()
850 td = pci_pool_alloc(ep->dev->data_requests, udc_create_dma_chain()
888 if (ep->in) { udc_create_dma_chain()
892 ep->ep.maxpacket, udc_create_dma_chain()
906 if (ep->in) { udc_create_dma_chain()
926 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp) prep_dma() argument
931 VDBG(ep->dev, "prep_dma\n"); prep_dma()
932 VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n", prep_dma()
933 ep->num, req->td_data); prep_dma()
944 retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); prep_dma()
947 DBG(ep->dev, "Out of DMA memory\n"); prep_dma()
950 if (ep->in) { prep_dma()
951 if (req->req.length == ep->ep.maxpacket) { prep_dma()
955 ep->ep.maxpacket, prep_dma()
963 if (ep->in) { prep_dma()
964 VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d " prep_dma()
965 "maxpacket=%d ep%d\n", prep_dma()
967 ep->ep.maxpacket, ep->num); prep_dma()
972 if (!use_dma_ppb || req->req.length < ep->ep.maxpacket prep_dma()
973 || ep->num == UDC_EP0OUT_IX prep_dma()
974 || ep->num == UDC_EP0IN_IX) { prep_dma()
992 VDBG(ep->dev, "OUT set host ready\n"); prep_dma()
1001 if (ep->naking) { prep_dma()
1002 tmp = readl(&ep->regs->ctl); prep_dma()
1004 writel(tmp, &ep->regs->ctl); prep_dma()
1005 ep->naking = 0; prep_dma()
1006 UDC_QUEUE_CNAK(ep, ep->num); prep_dma()
1016 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
1017 __releases(ep->dev->lock)
1018 __acquires(ep->dev->lock)
1023 VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
1025 dev = ep->dev;
1027 if (ep->dma)
1028 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
1030 halted = ep->halted;
1031 ep->halted = 1;
1037 /* remove from ep queue */
1040 VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
1041 &req->req, req->req.length, ep->ep.name, sts);
1044 usb_gadget_giveback_request(&ep->ep, &req->req);
1046 ep->halted = halted;
1109 struct udc_ep *ep; udc_queue() local
1121 ep = container_of(usbep, struct udc_ep, ep); udc_queue()
1122 if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) udc_queue()
1125 VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in); udc_queue()
1126 dev = ep->dev; udc_queue()
1132 if (ep->dma) { udc_queue()
1134 retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in); udc_queue()
1149 if (list_empty(&ep->queue)) { udc_queue()
1153 complete_req(ep, req, 0); udc_queue()
1154 VDBG(dev, "%s: zlp\n", ep->ep.name); udc_queue()
1168 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); udc_queue()
1170 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); udc_queue()
1171 dev->ep[UDC_EP0IN_IX].naking = 0; udc_queue()
1172 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], udc_queue()
1178 if (ep->dma) { udc_queue()
1179 retval = prep_dma(ep, req, GFP_ATOMIC); udc_queue()
1183 if (ep->in) { udc_queue()
1192 if (!ep->in) { udc_queue()
1208 if (ep->bna_occurred) { udc_queue()
1210 memcpy(ep->bna_dummy_req->td_data, udc_queue()
1216 writel(req->td_phys, &ep->regs->desptr); udc_queue()
1219 if (ep->naking) { udc_queue()
1220 tmp = readl(&ep->regs->ctl); udc_queue()
1222 writel(tmp, &ep->regs->ctl); udc_queue()
1223 ep->naking = 0; udc_queue()
1224 UDC_QUEUE_CNAK(ep, ep->num); udc_queue()
1227 if (ep->in) { udc_queue()
1228 /* enable ep irq */ udc_queue()
1230 tmp &= AMD_UNMASK_BIT(ep->num); udc_queue()
1233 } else if (ep->in) { udc_queue()
1234 /* enable ep irq */ udc_queue()
1236 tmp &= AMD_UNMASK_BIT(ep->num); udc_queue()
1240 } else if (ep->dma) { udc_queue()
1243 * prep_dma not used for OUT ep's, this is not possible udc_queue()
1246 if (ep->in) { udc_queue()
1247 retval = prep_dma(ep, req, GFP_ATOMIC); udc_queue()
1253 /* add request to ep queue */ udc_queue()
1256 list_add_tail(&req->queue, &ep->queue); udc_queue()
1263 if (ep->num != UDC_EP0OUT_IX) udc_queue()
1267 if (!ep->in) { udc_queue()
1275 if (udc_rxfifo_read(ep, req)) { udc_queue()
1277 complete_req(ep, req, 0); udc_queue()
1291 static void empty_req_queue(struct udc_ep *ep) empty_req_queue() argument
1295 ep->halted = 1; empty_req_queue()
1296 while (!list_empty(&ep->queue)) { empty_req_queue()
1297 req = list_entry(ep->queue.next, empty_req_queue()
1300 complete_req(ep, req, -ESHUTDOWN); empty_req_queue()
1307 struct udc_ep *ep; udc_dequeue() local
1312 ep = container_of(usbep, struct udc_ep, ep); udc_dequeue()
1313 if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0 udc_dequeue()
1314 && ep->num != UDC_EP0OUT_IX))) udc_dequeue()
1319 spin_lock_irqsave(&ep->dev->lock, iflags); udc_dequeue()
1320 halted = ep->halted; udc_dequeue()
1321 ep->halted = 1; udc_dequeue()
1323 if (ep->queue.next == &req->queue) { udc_dequeue()
1324 if (ep->dma && req->dma_going) { udc_dequeue()
1325 if (ep->in) udc_dequeue()
1326 ep->cancel_transfer = 1; udc_dequeue()
1341 ep->cancel_transfer = 1; udc_dequeue()
1343 udc_init_bna_dummy(ep->req); udc_dequeue()
1344 writel(ep->bna_dummy_req->td_phys, udc_dequeue()
1345 &ep->regs->desptr); udc_dequeue()
1351 complete_req(ep, req, -ECONNRESET); udc_dequeue()
1352 ep->halted = halted; udc_dequeue()
1354 spin_unlock_irqrestore(&ep->dev->lock, iflags); udc_dequeue()
1362 struct udc_ep *ep; udc_set_halt() local
1372 ep = container_of(usbep, struct udc_ep, ep); udc_set_halt()
1373 if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) udc_set_halt()
1375 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) udc_set_halt()
1381 if (ep->num == 0) udc_set_halt()
1382 ep->dev->stall_ep0in = 1; udc_set_halt()
1388 tmp = readl(&ep->regs->ctl); udc_set_halt()
1390 writel(tmp, &ep->regs->ctl); udc_set_halt()
1391 ep->halted = 1; udc_set_halt()
1399 DBG(ep->dev, "start polltimer\n"); udc_set_halt()
1405 /* ep is halted by set_halt() before */ udc_set_halt()
1406 if (ep->halted) { udc_set_halt()
1407 tmp = readl(&ep->regs->ctl); udc_set_halt()
1412 writel(tmp, &ep->regs->ctl); udc_set_halt()
1413 ep->halted = 0; udc_set_halt()
1414 UDC_QUEUE_CNAK(ep, ep->num); udc_set_halt()
1491 /* make gadget ep lists */ make_ep_lists()
1493 list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list, make_ep_lists()
1495 list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list, make_ep_lists()
1497 list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list, make_ep_lists()
1501 dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE; make_ep_lists()
1503 dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE; make_ep_lists()
1505 dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf; make_ep_lists()
1506 dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE; make_ep_lists()
1577 struct udc_ep *ep; udc_setup_endpoints() local
1591 /* set basic ep parameters */ udc_setup_endpoints()
1593 ep = &dev->ep[tmp]; udc_setup_endpoints()
1594 ep->dev = dev; udc_setup_endpoints()
1595 ep->ep.name = ep_info[tmp].name; udc_setup_endpoints()
1596 ep->ep.caps = ep_info[tmp].caps; udc_setup_endpoints()
1597 ep->num = tmp; udc_setup_endpoints()
1599 ep->txfifo = dev->txfifo; udc_setup_endpoints()
1603 ep->fifo_depth = UDC_TXFIFO_SIZE; udc_setup_endpoints()
1604 ep->in = 1; udc_setup_endpoints()
1606 ep->fifo_depth = UDC_RXFIFO_SIZE; udc_setup_endpoints()
1607 ep->in = 0; udc_setup_endpoints()
1610 ep->regs = &dev->ep_regs[tmp]; udc_setup_endpoints()
1612 * ep will be reset only if ep was not enabled before to avoid udc_setup_endpoints()
1613 * disabling ep interrupts when ENUM interrupt occurs but ep is udc_setup_endpoints()
1616 if (!ep->ep.desc) udc_setup_endpoints()
1617 ep_init(dev->regs, ep); udc_setup_endpoints()
1621 * ep->dma is not really used, just to indicate that udc_setup_endpoints()
1625 ep->dma = &dev->regs->ctl; udc_setup_endpoints()
1631 reg = readl(&dev->ep[tmp].regs->ctl); udc_setup_endpoints()
1633 writel(reg, &dev->ep[tmp].regs->ctl); udc_setup_endpoints()
1634 dev->ep[tmp].naking = 1; udc_setup_endpoints()
1641 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep, udc_setup_endpoints()
1643 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep, udc_setup_endpoints()
1646 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep, udc_setup_endpoints()
1648 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep, udc_setup_endpoints()
1656 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; udc_setup_endpoints()
1657 dev->ep[UDC_EP0IN_IX].halted = 0; udc_setup_endpoints()
1719 empty_req_queue(&dev->ep[tmp]); udc_tasklet_disconnect()
1725 &dev->ep[UDC_EP0IN_IX]); udc_tasklet_disconnect()
1755 * ep int. status reset udc_soft_reset()
1819 static void udc_handle_halt_state(struct udc_ep *ep) udc_handle_halt_state() argument
1823 if (ep->halted == 1) { udc_handle_halt_state()
1824 tmp = readl(&ep->regs->ctl); udc_handle_halt_state()
1835 DBG(ep->dev, "ep %d: set STALL again\n", ep->num); udc_handle_halt_state()
1837 writel(tmp, &ep->regs->ctl);*/ udc_handle_halt_state()
1841 writel(tmp, &ep->regs->ctl); udc_handle_halt_state()
1842 ep->halted = 0; udc_handle_halt_state()
1843 UDC_QUEUE_CNAK(ep, ep->num); udc_handle_halt_state()
1851 struct udc_ep *ep; udc_pollstall_timer_function() local
1859 ep = &udc->ep[UDC_EPIN_IX]; udc_pollstall_timer_function()
1860 udc_handle_halt_state(ep); udc_pollstall_timer_function()
1861 if (ep->halted) udc_pollstall_timer_function()
1864 ep = &udc->ep[UDC_EPOUT_IX]; udc_pollstall_timer_function()
1865 udc_handle_halt_state(ep); udc_pollstall_timer_function()
1866 if (ep->halted) udc_pollstall_timer_function()
1890 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); activate_control_endpoints()
1892 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); activate_control_endpoints()
1895 dev->ep[UDC_EP0IN_IX].in = 1; activate_control_endpoints()
1896 dev->ep[UDC_EP0OUT_IX].in = 0; activate_control_endpoints()
1899 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); activate_control_endpoints()
1906 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); activate_control_endpoints()
1909 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); activate_control_endpoints()
1916 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); activate_control_endpoints()
1919 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); activate_control_endpoints()
1926 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); activate_control_endpoints()
1939 dev->ep[UDC_EP0OUT_IX].td->status |= activate_control_endpoints()
1942 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma, activate_control_endpoints()
1943 &dev->ep[UDC_EP0OUT_IX].regs->subptr); activate_control_endpoints()
1944 writel(dev->ep[UDC_EP0OUT_IX].td_phys, activate_control_endpoints()
1945 &dev->ep[UDC_EP0OUT_IX].regs->desptr); activate_control_endpoints()
1967 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); activate_control_endpoints()
1969 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); activate_control_endpoints()
1970 dev->ep[UDC_EP0IN_IX].naking = 0; activate_control_endpoints()
1971 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); activate_control_endpoints()
1974 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); activate_control_endpoints()
1976 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); activate_control_endpoints()
1977 dev->ep[UDC_EP0OUT_IX].naking = 0; activate_control_endpoints()
1978 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); activate_control_endpoints()
2006 dev->ep[UDC_EP0OUT_IX].ep.driver_data = amd5536_udc_start()
2007 dev->ep[UDC_EP0IN_IX].ep.driver_data; amd5536_udc_start()
2034 empty_req_queue(&dev->ep[tmp]);
2071 DBG(dev, "CNAK pending for ep%d\n", tmp); udc_process_cnak_queue()
2073 reg = readl(&dev->ep[tmp].regs->ctl); udc_process_cnak_queue()
2075 writel(reg, &dev->ep[tmp].regs->ctl); udc_process_cnak_queue()
2076 dev->ep[tmp].naking = 0; udc_process_cnak_queue()
2077 UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num); udc_process_cnak_queue()
2082 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX); udc_process_cnak_queue()
2084 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); udc_process_cnak_queue()
2086 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl); udc_process_cnak_queue()
2087 dev->ep[UDC_EP0OUT_IX].naking = 0; udc_process_cnak_queue()
2088 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], udc_process_cnak_queue()
2089 dev->ep[UDC_EP0OUT_IX].num); udc_process_cnak_queue()
2125 struct udc_ep *ep; udc_data_out_isr() local
2131 VDBG(dev, "ep%d irq\n", ep_ix); udc_data_out_isr()
2132 ep = &dev->ep[ep_ix]; udc_data_out_isr()
2134 tmp = readl(&ep->regs->sts); udc_data_out_isr()
2138 DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n", udc_data_out_isr()
2139 ep->num, readl(&ep->regs->desptr)); udc_data_out_isr()
2141 writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts); udc_data_out_isr()
2142 if (!ep->cancel_transfer) udc_data_out_isr()
2143 ep->bna_occurred = 1; udc_data_out_isr()
2145 ep->cancel_transfer = 0; udc_data_out_isr()
2152 dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num); udc_data_out_isr()
2155 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); udc_data_out_isr()
2160 if (!list_empty(&ep->queue)) { udc_data_out_isr()
2163 req = list_entry(ep->queue.next, udc_data_out_isr()
2174 if (req && udc_rxfifo_read(ep, req)) { udc_data_out_isr()
2178 complete_req(ep, req, 0); udc_data_out_isr()
2180 if (!list_empty(&ep->queue) && !ep->halted) { udc_data_out_isr()
2181 req = list_entry(ep->queue.next, udc_data_out_isr()
2188 } else if (!ep->cancel_transfer && req) { udc_data_out_isr()
2201 if (ep->bna_occurred) { udc_data_out_isr()
2203 memcpy(req->td_data, ep->bna_dummy_req->td_data, udc_data_out_isr()
2205 ep->bna_occurred = 0; udc_data_out_isr()
2206 udc_init_bna_dummy(ep->req); udc_data_out_isr()
2244 if ((tmp % ep->ep.maxpacket) != 0) { udc_data_out_isr()
2246 ep->ep.name, count, tmp); udc_data_out_isr()
2254 complete_req(ep, req, 0); udc_data_out_isr()
2257 if (!list_empty(&ep->queue) && !ep->halted) { udc_data_out_isr()
2258 req = list_entry(ep->queue.next, udc_data_out_isr()
2269 if (prep_dma(ep, req, GFP_ATOMIC) != 0) udc_data_out_isr()
2273 &ep->regs->desptr); udc_data_out_isr()
2283 if (ep->bna_dummy_req) { udc_data_out_isr()
2285 writel(ep->bna_dummy_req->td_phys, udc_data_out_isr()
2286 &ep->regs->desptr); udc_data_out_isr()
2287 ep->bna_occurred = 0; udc_data_out_isr()
2304 if (ep->num != UDC_EP0OUT_IX) udc_data_out_isr()
2316 } else if (ep->cancel_transfer) { udc_data_out_isr()
2318 ep->cancel_transfer = 0; udc_data_out_isr()
2328 /* clear OUT bits in ep status */ udc_data_out_isr()
2329 writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts); udc_data_out_isr()
2340 struct udc_ep *ep; udc_data_in_isr() local
2346 ep = &dev->ep[ep_ix]; udc_data_in_isr()
2348 epsts = readl(&ep->regs->sts); udc_data_in_isr()
2353 "BNA ep%din occurred - DESPTR = %08lx\n", udc_data_in_isr()
2354 ep->num, udc_data_in_isr()
2355 (unsigned long) readl(&ep->regs->desptr)); udc_data_in_isr()
2358 writel(epsts, &ep->regs->sts); udc_data_in_isr()
2366 "HE ep%dn occurred - DESPTR = %08lx\n", udc_data_in_isr()
2367 ep->num, (unsigned long) readl(&ep->regs->desptr)); udc_data_in_isr()
2370 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); udc_data_in_isr()
2379 if (!ep->cancel_transfer && !list_empty(&ep->queue)) { udc_data_in_isr()
2380 req = list_entry(ep->queue.next, udc_data_in_isr()
2402 complete_req(ep, req, 0); udc_data_in_isr()
2405 if (list_empty(&ep->queue)) { udc_data_in_isr()
2408 tmp |= AMD_BIT(ep->num); udc_data_in_isr()
2413 ep->cancel_transfer = 0; udc_data_in_isr()
2423 if (!list_empty(&ep->queue)) { udc_data_in_isr()
2425 req = list_entry(ep->queue.next, udc_data_in_isr()
2430 udc_txfifo_write(ep, &req->req); udc_data_in_isr()
2432 if (len > ep->ep.maxpacket) udc_data_in_isr()
2433 len = ep->ep.maxpacket; udc_data_in_isr()
2436 || (len != ep->ep.maxpacket)) { udc_data_in_isr()
2438 complete_req(ep, req, 0); udc_data_in_isr()
2453 ep->ep.maxpacket) { udc_data_in_isr()
2460 writel(req->td_phys, &ep->regs->desptr); udc_data_in_isr()
2470 tmp = readl(&ep->regs->ctl); udc_data_in_isr()
2472 writel(tmp, &ep->regs->ctl); udc_data_in_isr()
2476 } else if (!use_dma && ep->in) { udc_data_in_isr()
2480 tmp |= AMD_BIT(ep->num); udc_data_in_isr()
2486 writel(epsts, &ep->regs->sts); udc_data_in_isr()
2503 struct udc_ep *ep; variable in typeref:struct:udc_ep
2506 ep = &dev->ep[UDC_EP0OUT_IX];
2511 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2516 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2517 ep->bna_occurred = 1;
2530 ep->dev->stall_ep0in = 0;
2534 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2536 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2537 dev->ep[UDC_EP0IN_IX].naking = 1;
2541 /* clear OUT bits in ep status */
2543 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2546 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2548 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2550 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2559 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2564 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2569 if (ep->bna_dummy_req) {
2571 writel(ep->bna_dummy_req->td_phys,
2572 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2573 ep->bna_occurred = 0;
2577 dev->ep[UDC_EP0OUT_IX].naking = 1;
2604 ep_tmp = &udc->ep[UDC_EPIN_IX];
2605 udc_set_halt(&ep_tmp->ep, 0);
2606 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2607 udc_set_halt(&ep_tmp->ep, 0);
2616 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2622 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2623 dev->ep[UDC_EP0IN_IX].naking = 0;
2624 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2629 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2636 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2638 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2639 dev->ep[UDC_EP0OUT_IX].naking = 0;
2640 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2644 /* clear OUT bits in ep status */
2646 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2651 /* clear OUT bits in ep status */
2652 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2657 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2661 dev->ep[UDC_EP0OUT_IX].td->status =
2663 dev->ep[UDC_EP0OUT_IX].td->status,
2674 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2675 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2682 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2692 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2714 struct udc_ep *ep; udc_control_in_isr() local
2718 ep = &dev->ep[UDC_EP0IN_IX]; udc_control_in_isr()
2723 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts); udc_control_in_isr()
2731 &dev->ep[UDC_EP0IN_IX].regs->sts); udc_control_in_isr()
2737 if (ep->dma) { udc_control_in_isr()
2740 &dev->ep[UDC_EP0IN_IX].regs->sts); udc_control_in_isr()
2745 tmp = readl(&ep->regs->ctl); udc_control_in_isr()
2747 writel(tmp, &ep->regs->ctl); udc_control_in_isr()
2749 if (!list_empty(&ep->queue)) { udc_control_in_isr()
2751 req = list_entry(ep->queue.next, udc_control_in_isr()
2754 if (ep->dma) { udc_control_in_isr()
2756 writel(req->td_phys, &ep->regs->desptr); udc_control_in_isr()
2766 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); udc_control_in_isr()
2769 &dev->ep[UDC_EP0IN_IX].regs->ctl); udc_control_in_isr()
2775 complete_req(ep, req, 0); udc_control_in_isr()
2779 udc_txfifo_write(ep, &req->req); udc_control_in_isr()
2783 if (len > ep->ep.maxpacket) udc_control_in_isr()
2784 len = ep->ep.maxpacket; udc_control_in_isr()
2788 || (len != ep->ep.maxpacket)) { udc_control_in_isr()
2790 complete_req(ep, req, 0); udc_control_in_isr()
2796 ep->halted = 0; udc_control_in_isr()
2798 if (!ep->dma) { udc_control_in_isr()
2801 &dev->ep[UDC_EP0IN_IX].regs->sts); udc_control_in_isr()
2817 struct udc_ep *ep; variable in typeref:struct:udc_ep
2839 ep = &dev->ep[i];
2840 if (ep->in) {
2842 /* ep ix in UDC CSR register space */
2843 udc_csr_epix = ep->num;
2846 /* OUT ep */
2848 /* ep ix in UDC CSR register space */
2849 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2853 /* ep cfg */
2854 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2860 ep->halted = 0;
2861 tmp = readl(&ep->regs->ctl);
2863 writel(tmp, &ep->regs->ctl);
2892 ep = &dev->ep[i];
2893 if (ep->in) {
2895 /* ep ix in UDC CSR register space */
2896 udc_csr_epix = ep->num;
2899 /* OUT ep */
2901 /* ep ix in UDC CSR register space */
2902 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2906 /* set ep values */
2908 /* ep interface */
2909 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2912 /* ep alt */
2913 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2919 ep->halted = 0;
2920 tmp = readl(&ep->regs->ctl);
2922 writel(tmp, &ep->regs->ctl);
2954 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2955 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3003 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3004 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3011 /* init ep 0 */
3050 /* check for ep irq */ udc_irq()
3060 * iterate ep's udc_irq()
3070 /* irq for out ep ? */ udc_irq()
3121 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td, free_dma_pools()
3122 dev->ep[UDC_EP0OUT_IX].td_phys); free_dma_pools()
3123 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp, free_dma_pools()
3124 dev->ep[UDC_EP0OUT_IX].td_stp_dma); free_dma_pools()
3179 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl; init_dma_pools()
3191 &dev->ep[UDC_EP0OUT_IX].td_stp_dma); init_dma_pools()
3196 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp; init_dma_pools()
3200 &dev->ep[UDC_EP0OUT_IX].td_phys); init_dma_pools()
3205 dev->ep[UDC_EP0OUT_IX].td = td_data; init_dma_pools()
3209 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp, init_dma_pools()
3210 dev->ep[UDC_EP0OUT_IX].td_stp_dma); init_dma_pools()
3342 /* ep registers base */ udc_pci_probe()
809 udc_create_dma_chain( struct udc_ep *ep, struct udc_request *req, unsigned long buf_len, gfp_t gfp_flags ) udc_create_dma_chain() argument
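Editor's note: the amd5536udc hits above cluster around request completion. As the complete_req() fragment at lines 1016-1046 shows, the request is unlinked and the endpoint marked halted before the device lock is dropped around the gadget's completion callback, because the callback is allowed to requeue. A sketch of that idiom under hypothetical sk_* types (not the driver's real structures):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct sk_req {
	struct list_head queue;
	int status;
	void (*complete)(struct sk_req *req);	/* gadget completion callback */
};

struct sk_ep {
	spinlock_t *lock;		/* controller lock, held on entry */
	struct list_head queue;
	unsigned halted:1;
};

static void sk_complete_req(struct sk_ep *ep, struct sk_req *req, int sts)
{
	unsigned halted = ep->halted;

	list_del_init(&req->queue);		/* unlink from the ep queue */
	if (req->status == -EINPROGRESS)	/* don't clobber a dequeue status */
		req->status = sts;

	ep->halted = 1;			/* block reentrant queueing */
	spin_unlock(ep->lock);		/* callback runs without the lock */
	req->complete(req);
	spin_lock(ep->lock);
	ep->halted = halted;
}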
H A Datmel_usba_udc.c38 struct usba_ep *ep = inode->i_private; queue_dbg_open() local
47 spin_lock_irq(&ep->udc->lock); queue_dbg_open()
48 list_for_each_entry(req, &ep->queue, queue) { queue_dbg_open()
54 spin_unlock_irq(&ep->udc->lock); queue_dbg_open()
60 spin_unlock_irq(&ep->udc->lock); list_for_each_entry_safe()
204 struct usba_ep *ep) usba_ep_init_debugfs()
208 ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root); usba_ep_init_debugfs()
211 ep->debugfs_dir = ep_root; usba_ep_init_debugfs()
213 ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root, usba_ep_init_debugfs()
214 ep, &queue_dbg_fops); usba_ep_init_debugfs()
215 if (!ep->debugfs_queue) usba_ep_init_debugfs()
218 if (ep->can_dma) { usba_ep_init_debugfs()
219 ep->debugfs_dma_status usba_ep_init_debugfs()
221 &ep->last_dma_status); usba_ep_init_debugfs()
222 if (!ep->debugfs_dma_status) usba_ep_init_debugfs()
225 if (ep_is_control(ep)) { usba_ep_init_debugfs()
226 ep->debugfs_state usba_ep_init_debugfs()
228 &ep->state); usba_ep_init_debugfs()
229 if (!ep->debugfs_state) usba_ep_init_debugfs()
236 if (ep->can_dma) usba_ep_init_debugfs()
237 debugfs_remove(ep->debugfs_dma_status); usba_ep_init_debugfs()
239 debugfs_remove(ep->debugfs_queue); usba_ep_init_debugfs()
243 dev_err(&ep->udc->pdev->dev, usba_ep_init_debugfs()
244 "failed to create debugfs directory for %s\n", ep->ep.name); usba_ep_init_debugfs()
247 static void usba_ep_cleanup_debugfs(struct usba_ep *ep) usba_ep_cleanup_debugfs() argument
249 debugfs_remove(ep->debugfs_queue); usba_ep_cleanup_debugfs()
250 debugfs_remove(ep->debugfs_dma_status); usba_ep_cleanup_debugfs()
251 debugfs_remove(ep->debugfs_state); usba_ep_cleanup_debugfs()
252 debugfs_remove(ep->debugfs_dir); usba_ep_cleanup_debugfs()
253 ep->debugfs_dma_status = NULL; usba_ep_cleanup_debugfs()
254 ep->debugfs_dir = NULL; usba_ep_cleanup_debugfs()
300 struct usba_ep *ep) usba_ep_init_debugfs()
305 static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep) usba_ep_cleanup_debugfs() argument
358 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req) next_fifo_transaction() argument
364 if (transaction_len > ep->ep.maxpacket) { next_fifo_transaction()
365 transaction_len = ep->ep.maxpacket; next_fifo_transaction()
367 } else if (transaction_len == ep->ep.maxpacket && req->req.zero) next_fifo_transaction()
371 ep->ep.name, req, transaction_len, next_fifo_transaction()
374 memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len); next_fifo_transaction()
375 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); next_fifo_transaction()
379 static void submit_request(struct usba_ep *ep, struct usba_request *req) submit_request() argument
382 ep->ep.name, req, req->req.length); submit_request()
389 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); submit_request()
394 usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET); submit_request()
396 usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET); submit_request()
398 usba_dma_writel(ep, ADDRESS, req->req.dma); submit_request()
399 usba_dma_writel(ep, CONTROL, req->ctrl); submit_request()
401 next_fifo_transaction(ep, req); submit_request()
403 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); submit_request()
404 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); submit_request()
406 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); submit_request()
407 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); submit_request()
412 static void submit_next_request(struct usba_ep *ep) submit_next_request() argument
416 if (list_empty(&ep->queue)) { submit_next_request()
417 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY); submit_next_request()
421 req = list_entry(ep->queue.next, struct usba_request, queue); submit_next_request()
423 submit_request(ep, req); submit_next_request()
426 static void send_status(struct usba_udc *udc, struct usba_ep *ep) send_status() argument
428 ep->state = STATUS_STAGE_IN; send_status()
429 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); send_status()
430 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); send_status()
433 static void receive_data(struct usba_ep *ep) receive_data() argument
435 struct usba_udc *udc = ep->udc; receive_data()
441 status = usba_ep_readl(ep, STA); receive_data()
447 if (list_empty(&ep->queue)) { receive_data()
448 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); receive_data()
451 req = list_entry(ep->queue.next, receive_data()
464 ep->fifo, bytecount); receive_data()
467 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); receive_data()
470 DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name); receive_data()
473 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); receive_data()
475 usb_gadget_giveback_request(&ep->ep, &req->req); receive_data()
479 status = usba_ep_readl(ep, STA); receive_data()
482 if (is_complete && ep_is_control(ep)) { receive_data()
483 send_status(udc, ep); receive_data()
490 request_complete(struct usba_ep *ep, struct usba_request *req, int status) request_complete() argument
492 struct usba_udc *udc = ep->udc; request_complete()
500 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in); request_complete()
504 ep->ep.name, req, req->req.status, req->req.actual); request_complete()
507 usb_gadget_giveback_request(&ep->ep, &req->req); request_complete()
512 request_complete_list(struct usba_ep *ep, struct list_head *list, int status) request_complete_list() argument
518 request_complete(ep, req, status); list_for_each_entry_safe()
525 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_enable() local
526 struct usba_udc *udc = ep->udc; usba_ep_enable()
530 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); usba_ep_enable()
534 if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index) usba_ep_enable()
535 || ep->index == 0 usba_ep_enable()
538 || maxpacket > ep->fifo_size) { usba_ep_enable()
543 ep->is_isoc = 0; usba_ep_enable()
544 ep->is_in = 0; usba_ep_enable()
553 ep->ep.name, ept_cfg, maxpacket); usba_ep_enable()
556 ep->is_in = 1; usba_ep_enable()
566 if (!ep->can_isoc) { usba_ep_enable()
568 ep->ep.name); usba_ep_enable()
580 ep->is_isoc = 1; usba_ep_enable()
586 if (nr_trans > 1 && ep->nr_banks == 3) usba_ep_enable()
602 spin_lock_irqsave(&ep->udc->lock, flags); usba_ep_enable()
604 ep->ep.desc = desc; usba_ep_enable()
605 ep->ep.maxpacket = maxpacket; usba_ep_enable()
607 usba_ep_writel(ep, CFG, ept_cfg); usba_ep_enable()
608 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); usba_ep_enable()
610 if (ep->can_dma) { usba_ep_enable()
614 USBA_BF(EPT_INT, 1 << ep->index) | usba_ep_enable()
615 USBA_BF(DMA_INT, 1 << ep->index)); usba_ep_enable()
617 usba_ep_writel(ep, CTL_ENB, ctrl); usba_ep_enable()
620 USBA_BF(EPT_INT, 1 << ep->index)); usba_ep_enable()
625 DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index, usba_ep_enable()
626 (unsigned long)usba_ep_readl(ep, CFG)); usba_ep_enable()
635 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_disable() local
636 struct usba_udc *udc = ep->udc; usba_ep_disable()
640 DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name); usba_ep_disable()
644 if (!ep->ep.desc) { usba_ep_disable()
652 ep->ep.name); usba_ep_disable()
655 ep->ep.desc = NULL; usba_ep_disable()
657 list_splice_init(&ep->queue, &req_list); usba_ep_disable()
658 if (ep->can_dma) { usba_ep_disable()
659 usba_dma_writel(ep, CONTROL, 0); usba_ep_disable()
660 usba_dma_writel(ep, ADDRESS, 0); usba_ep_disable()
661 usba_dma_readl(ep, STATUS); usba_ep_disable()
663 usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE); usba_ep_disable()
665 ~USBA_BF(EPT_INT, 1 << ep->index)); usba_ep_disable()
667 request_complete_list(ep, &req_list, -ESHUTDOWN); usba_ep_disable()
700 static int queue_dma(struct usba_udc *udc, struct usba_ep *ep, queue_dma() argument
707 ep->ep.name, req->req.length, &req->req.dma, queue_dma()
718 ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in); queue_dma()
727 if (!ep->is_in) queue_dma()
737 if (ep->ep.desc) { queue_dma()
738 if (list_empty(&ep->queue)) queue_dma()
739 submit_request(ep, req); queue_dma()
741 list_add_tail(&req->queue, &ep->queue); queue_dma()
753 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_queue() local
754 struct usba_udc *udc = ep->udc; usba_ep_queue()
759 ep->ep.name, req, _req->length); usba_ep_queue()
762 !ep->ep.desc) usba_ep_queue()
772 if (ep->can_dma) usba_ep_queue()
773 return queue_dma(udc, ep, req, gfp_flags); usba_ep_queue()
778 if (ep->ep.desc) { usba_ep_queue()
779 list_add_tail(&req->queue, &ep->queue); usba_ep_queue()
781 if ((!ep_is_control(ep) && ep->is_in) || usba_ep_queue()
782 (ep_is_control(ep) usba_ep_queue()
783 && (ep->state == DATA_STAGE_IN usba_ep_queue()
784 || ep->state == STATUS_STAGE_IN))) usba_ep_queue()
785 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); usba_ep_queue()
787 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); usba_ep_queue()
796 usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status) usba_update_req() argument
801 static int stop_dma(struct usba_ep *ep, u32 *pstatus) stop_dma() argument
810 usba_dma_writel(ep, CONTROL, 0); stop_dma()
814 status = usba_dma_readl(ep, STATUS); stop_dma()
824 dev_err(&ep->udc->pdev->dev, stop_dma()
826 ep->ep.name); stop_dma()
835 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_dequeue() local
836 struct usba_udc *udc = ep->udc; usba_ep_dequeue()
842 ep->ep.name, req); usba_ep_dequeue()
846 list_for_each_entry(req, &ep->queue, queue) { usba_ep_dequeue()
861 if (ep->queue.next == &req->queue) { usba_ep_dequeue()
862 status = usba_dma_readl(ep, STATUS); usba_ep_dequeue()
864 stop_dma(ep, &status); usba_ep_dequeue()
867 ep->last_dma_status = status; usba_ep_dequeue()
870 usba_writel(udc, EPT_RST, 1 << ep->index); usba_ep_dequeue()
872 usba_update_req(ep, req, status); usba_ep_dequeue()
882 request_complete(ep, req, -ECONNRESET); usba_ep_dequeue()
885 submit_next_request(ep); usba_ep_dequeue()
893 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_set_halt() local
894 struct usba_udc *udc = ep->udc; usba_ep_set_halt()
898 DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name, usba_ep_set_halt()
901 if (!ep->ep.desc) { usba_ep_set_halt()
902 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n", usba_ep_set_halt()
903 ep->ep.name); usba_ep_set_halt()
906 if (ep->is_isoc) { usba_ep_set_halt()
907 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n", usba_ep_set_halt()
908 ep->ep.name); usba_ep_set_halt()
918 if (!list_empty(&ep->queue) usba_ep_set_halt()
919 || ((value && ep->is_in && (usba_ep_readl(ep, STA) usba_ep_set_halt()
924 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); usba_ep_set_halt()
926 usba_ep_writel(ep, CLR_STA, usba_ep_set_halt()
928 usba_ep_readl(ep, STA); usba_ep_set_halt()
938 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_fifo_status() local
940 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); usba_ep_fifo_status()
945 struct usba_ep *ep = to_usba_ep(_ep); usba_ep_fifo_flush() local
946 struct usba_udc *udc = ep->udc; usba_ep_fifo_flush()
948 usba_writel(udc, EPT_RST, 1 << ep->index); usba_ep_fifo_flush()
1038 struct usba_ep *ep; reset_all_endpoints() local
1043 ep = to_usba_ep(udc->gadget.ep0); reset_all_endpoints()
1044 list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) { reset_all_endpoints()
1046 request_complete(ep, req, -ECONNRESET); reset_all_endpoints()
1055 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { reset_all_endpoints()
1056 if (ep->ep.desc) { reset_all_endpoints()
1058 usba_ep_disable(&ep->ep); reset_all_endpoints()
1066 struct usba_ep *ep; get_ep_by_addr() local
1071 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) { get_ep_by_addr()
1074 if (!ep->ep.desc) get_ep_by_addr()
1076 bEndpointAddress = ep->ep.desc->bEndpointAddress; get_ep_by_addr()
1081 return ep; get_ep_by_addr()
1088 static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep) set_protocol_stall() argument
1090 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); set_protocol_stall()
1091 ep->state = WAIT_FOR_SETUP; set_protocol_stall()
1094 static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep) is_stalled() argument
1096 if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL) is_stalled()
1128 struct usba_ep *ep; do_test_mode() local
1153 ep = &udc->usba_ep[0]; do_test_mode()
1156 usba_ep_writel(ep, CFG, do_test_mode()
1161 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) { do_test_mode()
1162 set_protocol_stall(udc, ep); do_test_mode()
1165 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); do_test_mode()
1171 ep = &udc->usba_ep[0]; do_test_mode()
1172 usba_ep_writel(ep, CFG, do_test_mode()
1177 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) { do_test_mode()
1178 set_protocol_stall(udc, ep); do_test_mode()
1181 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); do_test_mode()
1183 memcpy_toio(ep->fifo, test_packet_buffer, do_test_mode()
1185 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); do_test_mode()
1219 static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep, handle_ep0_setup() argument
1250 ep->state = DATA_STAGE_IN; handle_ep0_setup()
1251 usba_io_writew(status, ep->fifo); handle_ep0_setup()
1252 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY); handle_ep0_setup()
1282 send_status(udc, ep); handle_ep0_setup()
1289 send_status(udc, ep); handle_ep0_setup()
1290 ep->state = STATUS_STAGE_TEST; handle_ep0_setup()
1313 send_status(udc, ep); handle_ep0_setup()
1322 send_status(udc, ep); handle_ep0_setup()
1323 ep->state = STATUS_STAGE_ADDR; handle_ep0_setup()
1338 ep->ep.name, crq->bRequestType, crq->bRequest, handle_ep0_setup()
1341 set_protocol_stall(udc, ep); handle_ep0_setup()
1345 static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep) usba_control_irq() argument
1352 epstatus = usba_ep_readl(ep, STA); usba_control_irq()
1353 epctrl = usba_ep_readl(ep, CTL); usba_control_irq()
1356 ep->ep.name, ep->state, epstatus, epctrl); usba_control_irq()
1359 if (!list_empty(&ep->queue)) usba_control_irq()
1360 req = list_entry(ep->queue.next, usba_control_irq()
1365 next_fifo_transaction(ep, req); usba_control_irq()
1367 submit_request(ep, req); usba_control_irq()
1370 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); usba_control_irq()
1371 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); usba_control_irq()
1376 usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE); usba_control_irq()
1378 switch (ep->state) { usba_control_irq()
1380 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY); usba_control_irq()
1381 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_control_irq()
1382 ep->state = STATUS_STAGE_OUT; usba_control_irq()
1388 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_control_irq()
1389 ep->state = WAIT_FOR_SETUP; usba_control_irq()
1394 request_complete(ep, req, 0); usba_control_irq()
1395 submit_next_request(ep); usba_control_irq()
1397 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_control_irq()
1398 ep->state = WAIT_FOR_SETUP; usba_control_irq()
1401 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_control_irq()
1402 ep->state = WAIT_FOR_SETUP; usba_control_irq()
1404 set_protocol_stall(udc, ep); usba_control_irq()
1409 ep->ep.name, ep->state); usba_control_irq()
1410 set_protocol_stall(udc, ep); usba_control_irq()
1417 switch (ep->state) { usba_control_irq()
1419 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); usba_control_irq()
1420 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); usba_control_irq()
1424 request_complete(ep, req, 0); usba_control_irq()
1426 ep->state = WAIT_FOR_SETUP; usba_control_irq()
1430 receive_data(ep); usba_control_irq()
1434 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); usba_control_irq()
1435 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); usba_control_irq()
1438 ep->ep.name, ep->state); usba_control_irq()
1439 set_protocol_stall(udc, ep); usba_control_irq()
1453 if (ep->state != WAIT_FOR_SETUP) { usba_control_irq()
1466 if (ep->state == STATUS_STAGE_OUT usba_control_irq()
1467 || ep->state == STATUS_STAGE_IN) { usba_control_irq()
1468 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY); usba_control_irq()
1474 request_complete(ep, req, status); usba_control_irq()
1478 pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); usba_control_irq()
1483 set_protocol_stall(udc, ep); usba_control_irq()
1487 DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo); usba_control_irq()
1488 memcpy_fromio(crq.data, ep->fifo, sizeof(crq)); usba_control_irq()
1492 usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP); usba_control_irq()
1495 ep->state, crq.crq.bRequestType, usba_control_irq()
1505 ep->state = DATA_STAGE_IN; usba_control_irq()
1508 ep->state = DATA_STAGE_OUT; usba_control_irq()
1510 ep->state = STATUS_STAGE_IN; usba_control_irq()
1514 if (ep->index == 0) usba_control_irq()
1515 ret = handle_ep0_setup(udc, ep, &crq.crq); usba_control_irq()
1524 le16_to_cpu(crq.crq.wLength), ep->state, ret); usba_control_irq()
1528 set_protocol_stall(udc, ep); usba_control_irq()
1533 static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep) usba_ep_irq() argument
1539 epstatus = usba_ep_readl(ep, STA); usba_ep_irq()
1540 epctrl = usba_ep_readl(ep, CTL); usba_ep_irq()
1542 DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus); usba_ep_irq()
1545 DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name); usba_ep_irq()
1547 if (list_empty(&ep->queue)) { usba_ep_irq()
1549 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); usba_ep_irq()
1553 req = list_entry(ep->queue.next, struct usba_request, queue); usba_ep_irq()
1557 usba_ep_writel(ep, SET_STA, usba_ep_irq()
1559 usba_ep_writel(ep, CTL_DIS, usba_ep_irq()
1562 submit_next_request(ep); usba_ep_irq()
1563 request_complete(ep, req, 0); usba_ep_irq()
1566 next_fifo_transaction(ep, req); usba_ep_irq()
1568 submit_request(ep, req); usba_ep_irq()
1572 submit_next_request(ep); usba_ep_irq()
1573 request_complete(ep, req, 0); usba_ep_irq()
1577 epstatus = usba_ep_readl(ep, STA); usba_ep_irq()
1578 epctrl = usba_ep_readl(ep, CTL); usba_ep_irq()
1581 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); usba_ep_irq()
1582 receive_data(ep); usba_ep_irq()
1586 static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep) usba_dma_irq() argument
1591 status = usba_dma_readl(ep, STATUS); usba_dma_irq()
1592 control = usba_dma_readl(ep, CONTROL); usba_dma_irq()
1594 ep->last_dma_status = status; usba_dma_irq()
1612 if (list_empty(&ep->queue)) usba_dma_irq()
1617 req = list_entry(ep->queue.next, struct usba_request, queue); usba_dma_irq()
1618 usba_update_req(ep, req, status); usba_dma_irq()
1621 submit_next_request(ep); usba_dma_irq()
1622 request_complete(ep, req, 0); usba_dma_irq()
1716 ep0->ep.desc = &usba_ep0_desc; usba_udc_irq()
1934 struct usba_ep *eps, *ep; atmel_udc_of_init() local
1957 udc->gadget.ep0 = &eps[0].ep; atmel_udc_of_init()
1959 INIT_LIST_HEAD(&eps[0].ep.ep_list); atmel_udc_of_init()
1964 ep = &eps[i]; atmel_udc_of_init()
1971 ep->index = val; atmel_udc_of_init()
1978 ep->fifo_size = val; atmel_udc_of_init()
1985 ep->nr_banks = val; atmel_udc_of_init()
1987 ep->can_dma = of_property_read_bool(pp, "atmel,can-dma"); atmel_udc_of_init()
1988 ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc"); atmel_udc_of_init()
1995 ep->ep.name = name; atmel_udc_of_init()
1997 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); atmel_udc_of_init()
1998 ep->dma_regs = udc->regs + USBA_DMA_BASE(i); atmel_udc_of_init()
1999 ep->fifo = udc->fifo + USBA_FIFO_BASE(i); atmel_udc_of_init()
2000 ep->ep.ops = &usba_ep_ops; atmel_udc_of_init()
2001 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size); atmel_udc_of_init()
2002 ep->udc = udc; atmel_udc_of_init()
2003 INIT_LIST_HEAD(&ep->queue); atmel_udc_of_init()
2005 if (ep->index == 0) { atmel_udc_of_init()
2006 ep->ep.caps.type_control = true; atmel_udc_of_init()
2008 ep->ep.caps.type_iso = ep->can_isoc; atmel_udc_of_init()
2009 ep->ep.caps.type_bulk = true; atmel_udc_of_init()
2010 ep->ep.caps.type_int = true; atmel_udc_of_init()
2013 ep->ep.caps.dir_in = true; atmel_udc_of_init()
2014 ep->ep.caps.dir_out = true; atmel_udc_of_init()
2017 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); atmel_udc_of_init()
2055 udc->gadget.ep0 = &eps[0].ep; usba_udc_pdata()
2061 INIT_LIST_HEAD(&eps[0].ep.ep_list); usba_udc_pdata()
2064 struct usba_ep *ep = &eps[i]; usba_udc_pdata() local
2066 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); usba_udc_pdata()
2067 ep->dma_regs = udc->regs + USBA_DMA_BASE(i); usba_udc_pdata()
2068 ep->fifo = udc->fifo + USBA_FIFO_BASE(i); usba_udc_pdata()
2069 ep->ep.ops = &usba_ep_ops; usba_udc_pdata()
2070 ep->ep.name = pdata->ep[i].name; usba_udc_pdata()
2071 ep->fifo_size = pdata->ep[i].fifo_size; usba_udc_pdata()
2072 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size); usba_udc_pdata()
2073 ep->udc = udc; usba_udc_pdata()
2074 INIT_LIST_HEAD(&ep->queue); usba_udc_pdata()
2075 ep->nr_banks = pdata->ep[i].nr_banks; usba_udc_pdata()
2076 ep->index = pdata->ep[i].index; usba_udc_pdata()
2077 ep->can_dma = pdata->ep[i].can_dma; usba_udc_pdata()
2078 ep->can_isoc = pdata->ep[i].can_isoc; usba_udc_pdata()
2081 ep->ep.caps.type_control = true; usba_udc_pdata()
2083 ep->ep.caps.type_iso = ep->can_isoc; usba_udc_pdata()
2084 ep->ep.caps.type_bulk = true; usba_udc_pdata()
2085 ep->ep.caps.type_int = true; usba_udc_pdata()
2088 ep->ep.caps.dir_in = true; usba_udc_pdata()
2089 ep->ep.caps.dir_out = true; usba_udc_pdata()
2092 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); usba_udc_pdata()
203 usba_ep_init_debugfs(struct usba_udc *udc, struct usba_ep *ep) usba_ep_init_debugfs() argument
299 usba_ep_init_debugfs(struct usba_udc *udc, struct usba_ep *ep) usba_ep_init_debugfs() argument
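Editor's note: the atmel_usba hits above show two transmit paths, DMA (queue_dma) and PIO, where next_fifo_transaction() at lines 358-375 copies at most one maxpacket of data into the endpoint FIFO per interrupt. A stripped-down sketch of the PIO step, assuming a hypothetical sk_fifo_ep type and omitting the zero-length/short-packet bookkeeping:

#include <linux/io.h>
#include <linux/usb/gadget.h>

struct sk_fifo_ep {
	void __iomem *fifo;		/* memory-mapped endpoint FIFO window */
	unsigned int maxpacket;
};

static void sk_fifo_tx_one(struct sk_fifo_ep *ep, struct usb_request *req)
{
	unsigned int len = req->length - req->actual;

	if (len > ep->maxpacket)
		len = ep->maxpacket;	/* at most one packet per kick */

	memcpy_toio(ep->fifo, req->buf + req->actual, len);
	req->actual += len;
	/* a real driver now sets TX_PK_RDY so the controller ships the packet */
}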
H A Dfusb300_udc.c36 static void done(struct fusb300_ep *ep, struct fusb300_request *req,
58 static void fusb300_ep_setting(struct fusb300_ep *ep, fusb300_ep_setting() argument
61 ep->epnum = info.epnum; fusb300_ep_setting()
62 ep->type = info.type; fusb300_ep_setting()
65 static int fusb300_ep_release(struct fusb300_ep *ep) fusb300_ep_release() argument
67 if (!ep->epnum) fusb300_ep_release()
69 ep->epnum = 0; fusb300_ep_release()
70 ep->stall = 0; fusb300_ep_release()
71 ep->wedged = 0; fusb300_ep_release()
76 u32 ep) fusb300_set_fifo_entry()
78 u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); fusb300_set_fifo_entry()
82 iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); fusb300_set_fifo_entry()
86 u8 ep) fusb300_set_start_entry()
88 u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); fusb300_set_start_entry()
93 iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); fusb300_set_start_entry()
146 u8 ep) fusb300_set_ep_active()
148 u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); fusb300_set_ep_active()
151 iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); fusb300_set_ep_active()
200 static int config_ep(struct fusb300_ep *ep, config_ep() argument
203 struct fusb300 *fusb300 = ep->fusb300; config_ep()
206 ep->ep.desc = desc; config_ep()
228 fusb300_ep_setting(ep, info); config_ep()
230 fusb300->ep[info.epnum] = ep; config_ep()
238 struct fusb300_ep *ep; fusb300_enable() local
240 ep = container_of(_ep, struct fusb300_ep, ep); fusb300_enable()
242 if (ep->fusb300->reenum) { fusb300_enable()
243 ep->fusb300->fifo_entry_num = 0; fusb300_enable()
244 ep->fusb300->addrofs = 0; fusb300_enable()
245 ep->fusb300->reenum = 0; fusb300_enable()
248 return config_ep(ep, desc); fusb300_enable()
253 struct fusb300_ep *ep; fusb300_disable() local
257 ep = container_of(_ep, struct fusb300_ep, ep); fusb300_disable()
259 BUG_ON(!ep); fusb300_disable()
261 while (!list_empty(&ep->queue)) { fusb300_disable()
262 req = list_entry(ep->queue.next, struct fusb300_request, queue); fusb300_disable()
263 spin_lock_irqsave(&ep->fusb300->lock, flags); fusb300_disable()
264 done(ep, req, -ECONNRESET); fusb300_disable()
265 spin_unlock_irqrestore(&ep->fusb300->lock, flags); fusb300_disable()
268 return fusb300_ep_release(ep); fusb300_disable()
292 static int enable_fifo_int(struct fusb300_ep *ep) enable_fifo_int() argument
294 struct fusb300 *fusb300 = ep->fusb300; enable_fifo_int()
296 if (ep->epnum) { enable_fifo_int()
298 FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); enable_fifo_int()
307 static int disable_fifo_int(struct fusb300_ep *ep) disable_fifo_int() argument
309 struct fusb300 *fusb300 = ep->fusb300; disable_fifo_int()
311 if (ep->epnum) { disable_fifo_int()
313 FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); disable_fifo_int()
333 static void fusb300_wrcxf(struct fusb300_ep *ep, fusb300_wrcxf() argument
339 struct fusb300 *fusb300 = ep->fusb300; fusb300_wrcxf()
385 static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep) fusb300_set_epnstall() argument
387 fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep), fusb300_set_epnstall()
391 static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep) fusb300_clear_epnstall() argument
393 u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); fusb300_clear_epnstall()
396 printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep); fusb300_clear_epnstall()
398 iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); fusb300_clear_epnstall()
402 static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req) ep0_queue() argument
404 if (ep->fusb300->ep0_dir) { /* if IN */ ep0_queue()
406 fusb300_wrcxf(ep, req); ep0_queue()
411 (req->req.actual < ep->ep.maxpacket)) ep0_queue()
412 done(ep, req, 0); ep0_queue()
415 done(ep, req, 0); ep0_queue()
417 fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1, ep0_queue()
425 struct fusb300_ep *ep; fusb300_queue() local
430 ep = container_of(_ep, struct fusb300_ep, ep); fusb300_queue()
433 if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) fusb300_queue()
436 spin_lock_irqsave(&ep->fusb300->lock, flags); fusb300_queue()
438 if (list_empty(&ep->queue)) fusb300_queue()
441 list_add_tail(&req->queue, &ep->queue); fusb300_queue()
446 if (ep->ep.desc == NULL) /* ep0 */ fusb300_queue()
447 ep0_queue(ep, req); fusb300_queue()
448 else if (request && !ep->stall) fusb300_queue()
449 enable_fifo_int(ep); fusb300_queue()
451 spin_unlock_irqrestore(&ep->fusb300->lock, flags); fusb300_queue()
458 struct fusb300_ep *ep; fusb300_dequeue() local
462 ep = container_of(_ep, struct fusb300_ep, ep); fusb300_dequeue()
465 spin_lock_irqsave(&ep->fusb300->lock, flags); fusb300_dequeue()
466 if (!list_empty(&ep->queue)) fusb300_dequeue()
467 done(ep, req, -ECONNRESET); fusb300_dequeue()
468 spin_unlock_irqrestore(&ep->fusb300->lock, flags); fusb300_dequeue()
475 struct fusb300_ep *ep; fusb300_set_halt_and_wedge() local
480 ep = container_of(_ep, struct fusb300_ep, ep); fusb300_set_halt_and_wedge()
482 fusb300 = ep->fusb300; fusb300_set_halt_and_wedge()
484 spin_lock_irqsave(&ep->fusb300->lock, flags); fusb300_set_halt_and_wedge()
486 if (!list_empty(&ep->queue)) { fusb300_set_halt_and_wedge()
492 fusb300_set_epnstall(fusb300, ep->epnum); fusb300_set_halt_and_wedge()
493 ep->stall = 1; fusb300_set_halt_and_wedge()
495 ep->wedged = 1; fusb300_set_halt_and_wedge()
497 fusb300_clear_epnstall(fusb300, ep->epnum); fusb300_set_halt_and_wedge()
498 ep->stall = 0; fusb300_set_halt_and_wedge()
499 ep->wedged = 0; fusb300_set_halt_and_wedge()
503 spin_unlock_irqrestore(&ep->fusb300->lock, flags); fusb300_set_halt_and_wedge()
603 static void fusb300_rdfifo(struct fusb300_ep *ep, fusb300_rdfifo() argument
610 struct fusb300 *fusb300 = ep->fusb300; fusb300_rdfifo()
620 FUSB300_OFFSET_EPPORT(ep->epnum)); fusb300_rdfifo()
631 FUSB300_OFFSET_EPPORT(ep->epnum)); fusb300_rdfifo()
636 FUSB300_OFFSET_EPPORT(ep->epnum)); fusb300_rdfifo()
642 FUSB300_OFFSET_EPPORT(ep->epnum)); fusb300_rdfifo()
660 static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep) fusb300_get_epnstall() argument
663 u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); fusb300_get_epnstall()
690 u8 ep; variable
702 ep = w_index & USB_ENDPOINT_NUMBER_MASK;
703 if (ep) {
704 if (fusb300_get_epnstall(fusb300, ep))
728 u8 ep; set_feature() local
740 ep = w_index & USB_ENDPOINT_NUMBER_MASK; set_feature()
741 if (ep) set_feature()
742 fusb300_set_epnstall(fusb300, ep); set_feature()
754 static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep) fusb300_clear_seqnum() argument
756 fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep), fusb300_clear_seqnum()
762 struct fusb300_ep *ep = clear_feature() local
763 fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK]; clear_feature()
774 if (ep->wedged) { clear_feature()
778 if (ep->stall) { clear_feature()
779 ep->stall = 0; clear_feature()
780 fusb300_clear_seqnum(fusb300, ep->epnum); clear_feature()
781 fusb300_clear_epnstall(fusb300, ep->epnum); clear_feature()
782 if (!list_empty(&ep->queue)) clear_feature()
783 enable_fifo_int(ep); clear_feature()
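
fusb300_set_halt_and_wedge() and clear_feature() together implement the usual halt/wedge contract: a wedged endpoint stays stalled even when the host sends CLEAR_FEATURE(ENDPOINT_HALT); only the gadget driver may un-wedge it. A compact sketch of that state machine (the flags mirror ep->stall/ep->wedged above; everything else is invented for the demo):

#include <stdio.h>

struct ep_state { int stall; int wedged; };

static void set_halt_and_wedge(struct ep_state *ep, int halt, int wedge)
{
    if (halt) {
        ep->stall = 1;
        if (wedge)
            ep->wedged = 1;
    } else {
        ep->stall = 0;
        ep->wedged = 0;                   /* driver-initiated clear un-wedges */
    }
}

/* Host-side CLEAR_FEATURE(ENDPOINT_HALT): refused while wedged. */
static void host_clear_halt(struct ep_state *ep)
{
    if (ep->wedged)
        return;                           /* keep stalling, as clear_feature() does */
    ep->stall = 0;
}

int main(void)
{
    struct ep_state ep = { 0, 0 };
    set_halt_and_wedge(&ep, 1, 1);        /* usb_ep_set_wedge() path */
    host_clear_halt(&ep);
    printf("stall=%d wedged=%d\n", ep.stall, ep.wedged);  /* still 1 1 */
    return 0;
}
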
867 static void done(struct fusb300_ep *ep, struct fusb300_request *req, done() argument
873 if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) done()
878 spin_unlock(&ep->fusb300->lock); done()
879 usb_gadget_giveback_request(&ep->ep, &req->req); done()
880 spin_lock(&ep->fusb300->lock); done()
882 if (ep->epnum) { done()
883 disable_fifo_int(ep); done()
884 if (!list_empty(&ep->queue)) done()
885 enable_fifo_int(ep); done()
887 fusb300_set_cxdone(ep->fusb300); done()
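
done() drops fusb300->lock around usb_gadget_giveback_request() and retakes it afterwards: the completion callback runs in the gadget function driver and may call back into the queue path, which takes the same lock. A userspace sketch of that unlock-callback-relock shape (a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void completion_cb(void)
{
    /* May legally re-enter the driver and take 'lock' itself. */
    pthread_mutex_lock(&lock);
    puts("callback re-entered driver");
    pthread_mutex_unlock(&lock);
}

static void give_back(void)
{
    pthread_mutex_lock(&lock);
    /* ...unlink request from ep->queue under the lock... */
    pthread_mutex_unlock(&lock);          /* drop before calling out */
    completion_cb();
    pthread_mutex_lock(&lock);            /* retake to finish housekeeping */
    /* ...re-arm FIFO interrupts if the queue is non-empty... */
    pthread_mutex_unlock(&lock);
}

int main(void) { give_back(); return 0; }
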
890 static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d, fusb300_fill_idma_prdtbl() argument
898 reg = ioread32(ep->fusb300->reg + fusb300_fill_idma_prdtbl()
899 FUSB300_OFFSET_EPPRD_W0(ep->epnum)); fusb300_fill_idma_prdtbl()
903 iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum)); fusb300_fill_idma_prdtbl()
907 iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum)); fusb300_fill_idma_prdtbl()
909 iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum)); fusb300_fill_idma_prdtbl()
911 fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY, fusb300_fill_idma_prdtbl()
912 FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum)); fusb300_fill_idma_prdtbl()
915 static void fusb300_wait_idma_finished(struct fusb300_ep *ep) fusb300_wait_idma_finished() argument
920 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1); fusb300_wait_idma_finished()
927 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0); fusb300_wait_idma_finished()
928 reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum); fusb300_wait_idma_finished()
931 fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0, fusb300_wait_idma_finished()
932 FUSB300_IGR0_EPn_PRD_INT(ep->epnum)); fusb300_wait_idma_finished()
936 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGER0); fusb300_wait_idma_finished()
937 reg &= ~FUSB300_IGER0_EEPn_PRD_INT(ep->epnum); fusb300_wait_idma_finished()
938 iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_IGER0); fusb300_wait_idma_finished()
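
fusb300_wait_idma_finished() spins on IGR0 until the endpoint's PRD interrupt bit appears, acknowledges it, then masks the source in IGER0. A sketch of that poll/ack/mask sequence against fake registers, with a bounded spin added for safety (the real loop polls indefinitely, and on real hardware the ack is a write-one-to-clear rather than the plain bit clear used here):

#include <stdint.h>
#include <stdio.h>

static uint32_t igr0, iger0;              /* fake status / enable registers */

static int wait_prd_done(uint32_t bit, unsigned spins)
{
    while (spins--) {
        if (igr0 & bit) {
            igr0 &= ~bit;                 /* ack the latched status */
            iger0 &= ~bit;                /* mask the interrupt source */
            return 0;
        }
    }
    return -1;                            /* timed out */
}

int main(void)
{
    iger0 = igr0 = 1u << 5;               /* pretend EP5's PRD completed */
    int ret = wait_prd_done(1u << 5, 1000);
    printf("wait -> %d, iger0=0x%x\n", ret, (unsigned)iger0);
    return 0;
}
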
941 static void fusb300_set_idma(struct fusb300_ep *ep, fusb300_set_idma() argument
946 ret = usb_gadget_map_request(&ep->fusb300->gadget, fusb300_set_idma()
951 fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0, fusb300_set_idma()
952 FUSB300_IGER0_EEPn_PRD_INT(ep->epnum)); fusb300_set_idma()
954 fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length); fusb300_set_idma()
956 fusb300_wait_idma_finished(ep); fusb300_set_idma()
958 usb_gadget_unmap_request(&ep->fusb300->gadget, fusb300_set_idma()
962 static void in_ep_fifo_handler(struct fusb300_ep *ep) in_ep_fifo_handler() argument
964 struct fusb300_request *req = list_entry(ep->queue.next, in_ep_fifo_handler()
968 fusb300_set_idma(ep, req); in_ep_fifo_handler()
969 done(ep, req, 0); in_ep_fifo_handler()
972 static void out_ep_fifo_handler(struct fusb300_ep *ep) out_ep_fifo_handler() argument
974 struct fusb300 *fusb300 = ep->fusb300; out_ep_fifo_handler()
975 struct fusb300_request *req = list_entry(ep->queue.next, out_ep_fifo_handler()
977 u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); out_ep_fifo_handler()
980 fusb300_rdfifo(ep, req, length); out_ep_fifo_handler()
983 if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket)) out_ep_fifo_handler()
984 done(ep, req, 0); out_ep_fifo_handler()
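
out_ep_fifo_handler() finishes a request either when the expected byte count has arrived or when the last packet was short, i.e. smaller than wMaxPacketSize, the standard end-of-transfer marker for OUT endpoints. The predicate in isolation:

#include <stdio.h>

/* True when an OUT transfer is complete: all bytes in, or a short packet. */
static int out_xfer_done(unsigned expected, unsigned actual,
                         unsigned last_pkt_len, unsigned maxpacket)
{
    return actual == expected || last_pkt_len < maxpacket;
}

int main(void)
{
    printf("%d\n", out_xfer_done(1024, 512, 0, 512));   /* ZLP: done */
    printf("%d\n", out_xfer_done(1024, 512, 512, 512)); /* full pkt: not done */
    return 0;
}
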
1011 struct fusb300_ep *ep = fusb300->ep[0]; fusb300_ep0out() local
1014 if (!list_empty(&ep->queue)) { fusb300_ep0out()
1017 req = list_first_entry(&ep->queue, fusb300_ep0out()
1020 fusb300_rdcxf(ep->fusb300, req->req.buf, fusb300_ep0out()
1022 done(ep, req, 0); fusb300_ep0out()
1033 struct fusb300_ep *ep = fusb300->ep[0]; fusb300_ep0in() local
1035 if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) { fusb300_ep0in()
1036 req = list_entry(ep->queue.next, fusb300_ep0in()
1039 fusb300_wrcxf(ep, req); fusb300_ep0in()
1040 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fusb300_ep0in()
1041 done(ep, req, 0); fusb300_ep0in()
1247 in_ep_fifo_handler(fusb300->ep[i]); fusb300_irq()
1249 out_ep_fifo_handler(fusb300->ep[i]); fusb300_irq()
1353 fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); fusb300_remove()
1411 fusb300->ep[i] = _ep[i]; fusb300_probe()
1441 struct fusb300_ep *ep = fusb300->ep[i]; fusb300_probe() local
1444 INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list); fusb300_probe()
1445 list_add_tail(&fusb300->ep[i]->ep.ep_list, fusb300_probe()
1448 ep->fusb300 = fusb300; fusb300_probe()
1449 INIT_LIST_HEAD(&ep->queue); fusb300_probe()
1450 ep->ep.name = fusb300_ep_name[i]; fusb300_probe()
1451 ep->ep.ops = &fusb300_ep_ops; fusb300_probe()
1452 usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE); fusb300_probe()
1455 ep->ep.caps.type_control = true; fusb300_probe()
1457 ep->ep.caps.type_iso = true; fusb300_probe()
1458 ep->ep.caps.type_bulk = true; fusb300_probe()
1459 ep->ep.caps.type_int = true; fusb300_probe()
1462 ep->ep.caps.dir_in = true; fusb300_probe()
1463 ep->ep.caps.dir_out = true; fusb300_probe()
1465 usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE); fusb300_probe()
1466 fusb300->ep[0]->epnum = 0; fusb300_probe()
1467 fusb300->gadget.ep0 = &fusb300->ep[0]->ep; fusb300_probe()
1470 fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, fusb300_probe()
1487 fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); fusb300_probe()
1495 fusb300_free_request(&fusb300->ep[0]->ep, fusb300_probe()
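
fusb300_probe() shows the registration boilerplate shared by all four drivers in these results: loop over the endpoint array, wire each ep into gadget.ep_list, set name/ops/maxpacket/capabilities, then expose ep0 separately as gadget.ep0 with its own packet limit. A condensed sketch of that shape (the types, counts, and limits are illustrative, not the driver's):

#include <stdio.h>

#define NUM_EPS 6

struct sketch_ep {
    const char *name;
    unsigned maxpacket_limit;
    int is_ep0;
};

static struct sketch_ep eps[NUM_EPS];
static struct sketch_ep *gadget_ep0;
static const char *ep_names[NUM_EPS] = {
    "ep0", "ep1", "ep2", "ep3", "ep4", "ep5"
};

static void probe_init_eps(void)
{
    for (int i = 0; i < NUM_EPS; i++) {
        eps[i].name = ep_names[i];
        eps[i].maxpacket_limit = 512;     /* HS bulk limit, as above */
        /* the real code also sets ops, caps and links into gadget.ep_list */
    }
    eps[0].maxpacket_limit = 64;          /* control ep gets its own limit */
    eps[0].is_ep0 = 1;
    gadget_ep0 = &eps[0];                 /* exposed separately, not on ep_list */
}

int main(void)
{
    probe_init_eps();
    printf("%s limit=%u\n", gadget_ep0->name, gadget_ep0->maxpacket_limit);
    return 0;
}
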
H A Dm66592-udc.c38 static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
39 static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
43 static void transfer_complete(struct m66592_ep *ep,
102 INIT_LIST_HEAD(&m66592->ep[0].queue);
211 struct m66592_ep *ep = m66592->pipenum2ep[pipenum]; pipe_change() local
214 if (ep->use_dma) pipe_change()
217 m66592_mdfy(m66592, pipenum, M66592_CURPIPE, ep->fifosel); pipe_change()
226 m66592_bset(m66592, mbw, ep->fifosel); pipe_change()
303 static void pipe_initialize(struct m66592_ep *ep) pipe_initialize() argument
305 struct m66592 *m66592 = ep->m66592; pipe_initialize()
308 m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel); pipe_initialize()
310 m66592_write(m66592, M66592_ACLRM, ep->pipectr); pipe_initialize()
311 m66592_write(m66592, 0, ep->pipectr); pipe_initialize()
312 m66592_write(m66592, M66592_SQCLR, ep->pipectr); pipe_initialize()
313 if (ep->use_dma) { pipe_initialize()
314 m66592_mdfy(m66592, ep->pipenum, M66592_CURPIPE, ep->fifosel); pipe_initialize()
323 m66592_bset(m66592, mbw, ep->fifosel); pipe_initialize()
327 static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, m66592_ep_setting() argument
334 ep->use_dma = 1; m66592_ep_setting()
335 ep->fifoaddr = M66592_D0FIFO; m66592_ep_setting()
336 ep->fifosel = M66592_D0FIFOSEL; m66592_ep_setting()
337 ep->fifoctr = M66592_D0FIFOCTR; m66592_ep_setting()
338 ep->fifotrn = M66592_D0FIFOTRN; m66592_ep_setting()
341 ep->use_dma = 1; m66592_ep_setting()
342 ep->fifoaddr = M66592_D1FIFO; m66592_ep_setting()
343 ep->fifosel = M66592_D1FIFOSEL; m66592_ep_setting()
344 ep->fifoctr = M66592_D1FIFOCTR; m66592_ep_setting()
345 ep->fifotrn = M66592_D1FIFOTRN; m66592_ep_setting()
347 ep->use_dma = 0; m66592_ep_setting()
348 ep->fifoaddr = M66592_CFIFO; m66592_ep_setting()
349 ep->fifosel = M66592_CFIFOSEL; m66592_ep_setting()
350 ep->fifoctr = M66592_CFIFOCTR; m66592_ep_setting()
351 ep->fifotrn = 0; m66592_ep_setting()
354 ep->use_dma = 0; m66592_ep_setting()
355 ep->fifoaddr = M66592_CFIFO; m66592_ep_setting()
356 ep->fifosel = M66592_CFIFOSEL; m66592_ep_setting()
357 ep->fifoctr = M66592_CFIFOCTR; m66592_ep_setting()
358 ep->fifotrn = 0; m66592_ep_setting()
361 ep->pipectr = get_pipectr_addr(pipenum); m66592_ep_setting()
362 ep->pipenum = pipenum; m66592_ep_setting()
363 ep->ep.maxpacket = usb_endpoint_maxp(desc); m66592_ep_setting()
364 m66592->pipenum2ep[pipenum] = ep; m66592_ep_setting()
365 m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep; m66592_ep_setting()
366 INIT_LIST_HEAD(&ep->queue); m66592_ep_setting()
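
m66592_ep_setting() binds each pipe to one of three FIFO ports: D0FIFO or D1FIFO when the pipe will use DMA, otherwise the shared CFIFO with use_dma left off. A simplified sketch of that selection (the arbitration inputs here, busy flags, are an assumption for the demo; the real driver decides from the pipe number and its DMA bookkeeping):

#include <stdio.h>

enum fifo_port { D0FIFO, D1FIFO, CFIFO };

struct pipe_cfg { enum fifo_port port; int use_dma; };

/* Pick a FIFO for a new pipe: DMA-capable ports first, CFIFO as fallback. */
static struct pipe_cfg pick_fifo(int want_dma, int d0_busy, int d1_busy)
{
    if (want_dma && !d0_busy)
        return (struct pipe_cfg){ D0FIFO, 1 };
    if (want_dma && !d1_busy)
        return (struct pipe_cfg){ D1FIFO, 1 };
    return (struct pipe_cfg){ CFIFO, 0 }; /* PIO via the common FIFO */
}

int main(void)
{
    struct pipe_cfg c = pick_fifo(1, 1, 0);
    printf("port=%d use_dma=%d\n", c.port, c.use_dma);
    return 0;
}
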
369 static void m66592_ep_release(struct m66592_ep *ep) m66592_ep_release() argument
371 struct m66592 *m66592 = ep->m66592; m66592_ep_release()
372 u16 pipenum = ep->pipenum; m66592_ep_release()
377 if (ep->use_dma) m66592_ep_release()
379 ep->pipenum = 0; m66592_ep_release()
380 ep->busy = 0; m66592_ep_release()
381 ep->use_dma = 0; m66592_ep_release()
384 static int alloc_pipe_config(struct m66592_ep *ep, alloc_pipe_config() argument
387 struct m66592 *m66592 = ep->m66592; alloc_pipe_config()
393 ep->ep.desc = desc; alloc_pipe_config()
395 BUG_ON(ep->pipenum); alloc_pipe_config()
437 ep->type = info.type; alloc_pipe_config()
457 m66592_ep_setting(m66592, ep, desc, info.pipe, dma); alloc_pipe_config()
458 pipe_initialize(ep); alloc_pipe_config()
463 static int free_pipe_config(struct m66592_ep *ep) free_pipe_config() argument
465 struct m66592 *m66592 = ep->m66592; free_pipe_config()
468 info.pipe = ep->pipenum; free_pipe_config()
469 info.type = ep->type; free_pipe_config()
471 m66592_ep_release(ep); free_pipe_config()
492 m66592->ep[0].internal_ccpl = ccpl; control_end()
497 static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req) start_ep0_write() argument
499 struct m66592 *m66592 = ep->m66592; start_ep0_write()
501 pipe_change(m66592, ep->pipenum); start_ep0_write()
505 m66592_write(m66592, M66592_BCLR, ep->fifoctr); start_ep0_write()
507 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); start_ep0_write()
509 transfer_complete(ep, req, 0); start_ep0_write()
512 irq_ep0_write(ep, req); start_ep0_write()
516 static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req) start_packet_write() argument
518 struct m66592 *m66592 = ep->m66592; start_packet_write()
521 pipe_change(m66592, ep->pipenum); start_packet_write()
522 disable_irq_empty(m66592, ep->pipenum); start_packet_write()
523 pipe_start(m66592, ep->pipenum); start_packet_write()
525 tmp = m66592_read(m66592, ep->fifoctr); start_packet_write()
527 pipe_irq_enable(m66592, ep->pipenum); start_packet_write()
529 irq_packet_write(ep, req); start_packet_write()
532 static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req) start_packet_read() argument
534 struct m66592 *m66592 = ep->m66592; start_packet_read()
535 u16 pipenum = ep->pipenum; start_packet_read()
537 if (ep->pipenum == 0) { start_packet_read()
541 m66592_write(m66592, M66592_BCLR, ep->fifoctr); start_packet_read()
545 if (ep->use_dma) { start_packet_read()
546 m66592_bset(m66592, M66592_TRCLR, ep->fifosel); start_packet_read()
548 m66592_bset(m66592, M66592_TRENB, ep->fifosel); start_packet_read()
550 (req->req.length + ep->ep.maxpacket - 1) start_packet_read()
551 / ep->ep.maxpacket, start_packet_read()
552 ep->fifotrn); start_packet_read()
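
start_packet_read() programs the transaction counter (FIFOTRN) with the number of max-packet transactions needed for the request, computed with the usual ceiling-division idiom:

#include <stdio.h>

/* ceil(len / maxp) without floating point: (len + maxp - 1) / maxp */
static unsigned packets_needed(unsigned len, unsigned maxp)
{
    return (len + maxp - 1) / maxp;
}

int main(void)
{
    printf("%u %u %u\n",
           packets_needed(512, 512),      /* 1 */
           packets_needed(513, 512),      /* 2 */
           packets_needed(0, 512));       /* 0 */
    return 0;
}
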
559 static void start_packet(struct m66592_ep *ep, struct m66592_request *req) start_packet() argument
561 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) start_packet()
562 start_packet_write(ep, req); start_packet()
564 start_packet_read(ep, req); start_packet()
567 static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) start_ep0() argument
571 ctsq = m66592_read(ep->m66592, M66592_INTSTS0) & M66592_CTSQ; start_ep0()
575 start_ep0_write(ep, req); start_ep0()
578 start_packet_read(ep, req); start_ep0()
582 control_end(ep->m66592, 0); start_ep0()
708 static void transfer_complete(struct m66592_ep *ep,
715 if (unlikely(ep->pipenum == 0)) {
716 if (ep->internal_ccpl) {
717 ep->internal_ccpl = 0;
723 if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
728 if (!list_empty(&ep->queue))
731 spin_unlock(&ep->m66592->lock);
732 usb_gadget_giveback_request(&ep->ep, &req->req);
733 spin_lock(&ep->m66592->lock);
736 req = list_entry(ep->queue.next, struct m66592_request, queue);
737 if (ep->ep.desc)
738 start_packet(ep, req);
742 static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) irq_ep0_write() argument
749 u16 pipenum = ep->pipenum; irq_ep0_write()
750 struct m66592 *m66592 = ep->m66592; irq_ep0_write()
753 m66592_bset(m66592, M66592_ISEL, ep->fifosel); irq_ep0_write()
757 tmp = m66592_read(m66592, ep->fifoctr); irq_ep0_write()
774 m66592_write_fifo(m66592, ep, buf, size); irq_ep0_write()
775 if ((size == 0) || ((size % ep->ep.maxpacket) != 0)) irq_ep0_write()
776 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); irq_ep0_write()
784 || (size % ep->ep.maxpacket) irq_ep0_write()
795 static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req) irq_packet_write() argument
801 u16 pipenum = ep->pipenum; irq_packet_write()
802 struct m66592 *m66592 = ep->m66592; irq_packet_write()
805 tmp = m66592_read(m66592, ep->fifoctr); irq_packet_write()
820 m66592_write_fifo(m66592, ep, buf, size); irq_packet_write()
822 || ((size % ep->ep.maxpacket) != 0) irq_packet_write()
823 || ((bufsize != ep->ep.maxpacket) irq_packet_write()
825 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); irq_packet_write()
832 || (size % ep->ep.maxpacket) irq_packet_write()
842 static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req) irq_packet_read() argument
848 u16 pipenum = ep->pipenum; irq_packet_read()
849 struct m66592 *m66592 = ep->m66592; irq_packet_read()
853 tmp = m66592_read(m66592, ep->fifoctr); irq_packet_read()
878 || (size % ep->ep.maxpacket) irq_packet_read()
888 m66592_write(m66592, M66592_BCLR, ep->fifoctr); irq_packet_read()
890 m66592_read_fifo(m66592, ep->fifoaddr, buf, size); irq_packet_read()
893 if ((ep->pipenum != 0) && finish) irq_packet_read()
894 transfer_complete(ep, req, 0); irq_packet_read()
901 struct m66592_ep *ep; irq_pipe_ready() local
909 ep = &m66592->ep[0]; irq_pipe_ready()
910 req = list_entry(ep->queue.next, struct m66592_request, queue); irq_pipe_ready()
911 irq_packet_read(ep, req); irq_pipe_ready()
917 ep = m66592->pipenum2ep[pipenum]; irq_pipe_ready()
918 req = list_entry(ep->queue.next, irq_pipe_ready()
920 if (ep->ep.desc->bEndpointAddress & USB_DIR_IN) irq_pipe_ready()
921 irq_packet_write(ep, req); irq_pipe_ready()
923 irq_packet_read(ep, req); irq_pipe_ready()
934 struct m66592_ep *ep; irq_pipe_empty() local
940 ep = &m66592->ep[0]; irq_pipe_empty()
941 req = list_entry(ep->queue.next, struct m66592_request, queue); irq_pipe_empty()
942 irq_ep0_write(ep, req); irq_pipe_empty()
953 ep = m66592->pipenum2ep[pipenum]; irq_pipe_empty()
954 req = list_entry(ep->queue.next, irq_pipe_empty()
957 if (!list_empty(&ep->queue)) irq_pipe_empty()
958 transfer_complete(ep, req, 0); irq_pipe_empty()
969 struct m66592_ep *ep; variable in typeref:struct:m66592_ep
982 ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
983 pid = control_reg_get_pid(m66592, ep->pipenum);
1013 struct m66592_ep *ep; clear_feature() local
1017 ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; clear_feature()
1018 pipe_stop(m66592, ep->pipenum); clear_feature()
1019 control_reg_sqclr(m66592, ep->pipenum); clear_feature()
1023 req = list_entry(ep->queue.next, clear_feature()
1025 if (ep->busy) { clear_feature()
1026 ep->busy = 0; clear_feature()
1027 if (list_empty(&ep->queue)) clear_feature()
1029 start_packet(ep, req); clear_feature()
1030 } else if (!list_empty(&ep->queue)) clear_feature()
1031 pipe_start(m66592, ep->pipenum); clear_feature()
1071 struct m66592_ep *ep; set_feature() local
1074 ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; set_feature()
1075 pipe_stall(m66592, ep->pipenum); set_feature()
1169 struct m66592_ep *ep; variable in typeref:struct:m66592_ep
1171 ep = &m66592->ep[0];
1172 req = list_entry(ep->queue.next, struct m66592_request, queue);
1173 transfer_complete(ep, req, 0);
1309 struct m66592_ep *ep; m66592_enable() local
1311 ep = container_of(_ep, struct m66592_ep, ep); m66592_enable()
1312 return alloc_pipe_config(ep, desc); m66592_enable()
1317 struct m66592_ep *ep; m66592_disable() local
1321 ep = container_of(_ep, struct m66592_ep, ep); m66592_disable()
1322 BUG_ON(!ep); m66592_disable()
1324 while (!list_empty(&ep->queue)) { m66592_disable()
1325 req = list_entry(ep->queue.next, struct m66592_request, queue); m66592_disable()
1326 spin_lock_irqsave(&ep->m66592->lock, flags); m66592_disable()
1327 transfer_complete(ep, req, -ECONNRESET); m66592_disable()
1328 spin_unlock_irqrestore(&ep->m66592->lock, flags); m66592_disable()
1331 pipe_irq_disable(ep->m66592, ep->pipenum); m66592_disable()
1332 return free_pipe_config(ep); m66592_disable()
1360 struct m66592_ep *ep; m66592_queue() local
1365 ep = container_of(_ep, struct m66592_ep, ep); m66592_queue()
1368 if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN) m66592_queue()
1371 spin_lock_irqsave(&ep->m66592->lock, flags); m66592_queue()
1373 if (list_empty(&ep->queue)) m66592_queue()
1376 list_add_tail(&req->queue, &ep->queue); m66592_queue()
1380 if (ep->ep.desc == NULL) /* control */ m66592_queue()
1381 start_ep0(ep, req); m66592_queue()
1383 if (request && !ep->busy) m66592_queue()
1384 start_packet(ep, req); m66592_queue()
1387 spin_unlock_irqrestore(&ep->m66592->lock, flags); m66592_queue()
1394 struct m66592_ep *ep; m66592_dequeue() local
1398 ep = container_of(_ep, struct m66592_ep, ep); m66592_dequeue()
1401 spin_lock_irqsave(&ep->m66592->lock, flags); m66592_dequeue()
1402 if (!list_empty(&ep->queue)) m66592_dequeue()
1403 transfer_complete(ep, req, -ECONNRESET); m66592_dequeue()
1404 spin_unlock_irqrestore(&ep->m66592->lock, flags); m66592_dequeue()
1411 struct m66592_ep *ep; m66592_set_halt() local
1416 ep = container_of(_ep, struct m66592_ep, ep); m66592_set_halt()
1417 req = list_entry(ep->queue.next, struct m66592_request, queue); m66592_set_halt()
1419 spin_lock_irqsave(&ep->m66592->lock, flags); m66592_set_halt()
1420 if (!list_empty(&ep->queue)) { m66592_set_halt()
1425 ep->busy = 1; m66592_set_halt()
1426 pipe_stall(ep->m66592, ep->pipenum); m66592_set_halt()
1428 ep->busy = 0; m66592_set_halt()
1429 pipe_stop(ep->m66592, ep->pipenum); m66592_set_halt()
1433 spin_unlock_irqrestore(&ep->m66592->lock, flags); m66592_set_halt()
1439 struct m66592_ep *ep; m66592_fifo_flush() local
1442 ep = container_of(_ep, struct m66592_ep, ep); m66592_fifo_flush()
1443 spin_lock_irqsave(&ep->m66592->lock, flags); m66592_fifo_flush()
1444 if (list_empty(&ep->queue) && !ep->busy) { m66592_fifo_flush()
1445 pipe_stop(ep->m66592, ep->pipenum); m66592_fifo_flush()
1446 m66592_bclr(ep->m66592, M66592_BCLR, ep->fifoctr); m66592_fifo_flush()
1448 spin_unlock_irqrestore(&ep->m66592->lock, flags); m66592_fifo_flush()
1540 m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); m66592_remove()
1549 static void nop_completion(struct usb_ep *ep, struct usb_request *r) nop_completion() argument
1632 m66592->gadget.ep0 = &m66592->ep[0].ep; m66592_probe()
1635 struct m66592_ep *ep = &m66592->ep[i]; m66592_probe() local
1638 INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list); m66592_probe()
1639 list_add_tail(&m66592->ep[i].ep.ep_list, m66592_probe()
1642 ep->m66592 = m66592; m66592_probe()
1643 INIT_LIST_HEAD(&ep->queue); m66592_probe()
1644 ep->ep.name = m66592_ep_name[i]; m66592_probe()
1645 ep->ep.ops = &m66592_ep_ops; m66592_probe()
1646 usb_ep_set_maxpacket_limit(&ep->ep, 512); m66592_probe()
1649 ep->ep.caps.type_control = true; m66592_probe()
1651 ep->ep.caps.type_iso = true; m66592_probe()
1652 ep->ep.caps.type_bulk = true; m66592_probe()
1653 ep->ep.caps.type_int = true; m66592_probe()
1656 ep->ep.caps.dir_in = true; m66592_probe()
1657 ep->ep.caps.dir_out = true; m66592_probe()
1659 usb_ep_set_maxpacket_limit(&m66592->ep[0].ep, 64); m66592_probe()
1660 m66592->ep[0].pipenum = 0; m66592_probe()
1661 m66592->ep[0].fifoaddr = M66592_CFIFO; m66592_probe()
1662 m66592->ep[0].fifosel = M66592_CFIFOSEL; m66592_probe()
1663 m66592->ep[0].fifoctr = M66592_CFIFOCTR; m66592_probe()
1664 m66592->ep[0].fifotrn = 0; m66592_probe()
1665 m66592->ep[0].pipectr = get_pipectr_addr(0); m66592_probe()
1666 m66592->pipenum2ep[0] = &m66592->ep[0]; m66592_probe()
1667 m66592->epaddr2ep[0] = &m66592->ep[0]; m66592_probe()
1669 m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); m66592_probe()
1686 m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); m66592_probe()
1698 m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); m66592_probe()
H A Dpch_udc.c179 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
186 #define UDC_EPIN_IDX(ep) (ep * 2)
187 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
276 * @ep: embedded usb endpoint
282 * @offset_addr: offset address of ep register
283 * @desc: for this ep
291 struct usb_ep ep; member in struct:pch_udc_ep
326 * @ep: array of endpoints
355 struct pch_udc_ep ep[PCH_UDC_EP_NUM]; member in struct:pch_udc_dev
397 * @req: embedded ep request
448 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg) pch_udc_ep_readl() argument
450 return ioread32(ep->dev->base_addr + ep->offset_addr + reg); pch_udc_ep_readl()
453 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep, pch_udc_ep_writel() argument
456 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg); pch_udc_ep_writel()
459 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep, pch_udc_ep_bit_set() argument
463 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg); pch_udc_ep_bit_set()
466 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep, pch_udc_ep_bit_clr() argument
470 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg); pch_udc_ep_bit_clr()
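
pch_udc_ep_readl()/pch_udc_ep_writel() and the bit helpers built on them all address registers relative to ep->offset_addr, so one set of accessors serves every endpoint's register window. A sketch of windowed access over a fake BAR (the window size and layout constants are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define EP_REG_WORDS 8                    /* assumed per-endpoint window size */
static uint32_t fake_bar[16 * EP_REG_WORDS];  /* stand-in for dev->base_addr */

struct win_ep { unsigned offset; };       /* like pch_udc_ep.offset_addr */

static uint32_t ep_readl(struct win_ep *ep, unsigned reg)
{
    return fake_bar[ep->offset + reg];
}

static void ep_writel(struct win_ep *ep, uint32_t v, unsigned reg)
{
    fake_bar[ep->offset + reg] = v;
}

/* Bit set in one endpoint's window, as pch_udc_ep_bit_set() does above. */
static void ep_bit_set(struct win_ep *ep, unsigned reg, uint32_t mask)
{
    ep_writel(ep, ep_readl(ep, reg) | mask, reg);
}

int main(void)
{
    struct win_ep ep3 = { .offset = 3 * EP_REG_WORDS };
    ep_bit_set(&ep3, 0, 1u << 7);
    printf("0x%08x\n", (unsigned)ep_readl(&ep3, 0));
    return 0;
}
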
495 pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val, unsigned int ep) pch_udc_write_csr() argument
498 unsigned long reg = PCH_UDC_CSR(ep); pch_udc_write_csr()
512 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep) pch_udc_read_csr() argument
514 unsigned long reg = PCH_UDC_CSR(ep); pch_udc_read_csr()
634 * @ep: Reference to structure of type pch_udc_ep_regs
636 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep) pch_udc_ep_set_stall() argument
638 if (ep->in) { pch_udc_ep_set_stall()
639 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F); pch_udc_ep_set_stall()
640 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S); pch_udc_ep_set_stall()
642 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S); pch_udc_ep_set_stall()
648 * @ep: Reference to structure of type pch_udc_ep_regs
650 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep) pch_udc_ep_clear_stall() argument
653 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S); pch_udc_ep_clear_stall()
655 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK); pch_udc_ep_clear_stall()
660 * @ep: Reference to structure of type pch_udc_ep_regs
663 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep, pch_udc_ep_set_trfr_type() argument
666 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) & pch_udc_ep_set_trfr_type()
672 * @ep: Reference to structure of type pch_udc_ep_regs
675 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep, pch_udc_ep_set_bufsz() argument
680 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR); pch_udc_ep_set_bufsz()
682 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR); pch_udc_ep_set_bufsz()
684 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR); pch_udc_ep_set_bufsz()
686 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR); pch_udc_ep_set_bufsz()
692 * @ep: Reference to structure of type pch_udc_ep_regs
695 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size) pch_udc_ep_set_maxpkt() argument
697 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR); pch_udc_ep_set_maxpkt()
699 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR); pch_udc_ep_set_maxpkt()
704 * @ep: Reference to structure of type pch_udc_ep_regs
707 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr) pch_udc_ep_set_subptr() argument
709 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR); pch_udc_ep_set_subptr()
714 * @ep: Reference to structure of type pch_udc_ep_regs
717 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr) pch_udc_ep_set_ddptr() argument
719 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR); pch_udc_ep_set_ddptr()
724 * @ep: Reference to structure of type pch_udc_ep_regs
726 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep) pch_udc_ep_set_pd() argument
728 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P); pch_udc_ep_set_pd()
733 * @ep: Reference to structure of type pch_udc_ep_regs
735 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep) pch_udc_ep_set_rrdy() argument
737 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY); pch_udc_ep_set_rrdy()
742 * @ep: Reference to structure of type pch_udc_ep_regs
744 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep) pch_udc_ep_clear_rrdy() argument
746 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY); pch_udc_ep_clear_rrdy()
889 * @ep: Reference to structure of type pch_udc_ep_regs
892 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep) pch_udc_read_ep_control() argument
894 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR); pch_udc_read_ep_control()
899 * @ep: Reference to structure of type pch_udc_ep_regs
902 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep) pch_udc_clear_ep_control() argument
904 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR); pch_udc_clear_ep_control()
909 * @ep: Reference to structure of type pch_udc_ep_regs
912 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep) pch_udc_read_ep_status() argument
914 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR); pch_udc_read_ep_status()
919 * @ep: Reference to structure of type pch_udc_ep_regs
922 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep, pch_udc_clear_ep_status() argument
925 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR); pch_udc_clear_ep_status()
931 * @ep: Reference to structure of type pch_udc_ep_regs
933 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep) pch_udc_ep_set_nak() argument
935 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK); pch_udc_ep_set_nak()
941 * @ep: reference to structure of type pch_udc_ep_regs
943 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep) pch_udc_ep_clear_nak() argument
946 struct pch_udc_dev *dev = ep->dev; pch_udc_ep_clear_nak()
948 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK)) pch_udc_ep_clear_nak()
950 if (!ep->in) { pch_udc_ep_clear_nak()
952 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) && pch_udc_ep_clear_nak()
960 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) { pch_udc_ep_clear_nak()
961 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK); pch_udc_ep_clear_nak()
965 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n", pch_udc_ep_clear_nak()
966 __func__, ep->num, (ep->in ? "in" : "out")); pch_udc_ep_clear_nak()
971 * @ep: reference to structure of type pch_udc_ep_regs
976 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir) pch_udc_ep_fifo_flush() argument
978 if (dir) { /* IN ep */ pch_udc_ep_fifo_flush()
979 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F); pch_udc_ep_fifo_flush()
989 static void pch_udc_ep_enable(struct pch_udc_ep *ep, pch_udc_ep_enable() argument
996 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes); pch_udc_ep_enable()
997 if (ep->in) pch_udc_ep_enable()
1001 pch_udc_ep_set_bufsz(ep, buff_size, ep->in); pch_udc_ep_enable()
1002 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc)); pch_udc_ep_enable()
1003 pch_udc_ep_set_nak(ep); pch_udc_ep_enable()
1004 pch_udc_ep_fifo_flush(ep, ep->in); pch_udc_ep_enable()
1006 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT | pch_udc_ep_enable()
1014 if (ep->in) pch_udc_ep_enable()
1015 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num)); pch_udc_ep_enable()
1017 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num)); pch_udc_ep_enable()
1024 static void pch_udc_ep_disable(struct pch_udc_ep *ep) pch_udc_ep_disable() argument
1026 if (ep->in) { pch_udc_ep_disable()
1028 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR); pch_udc_ep_disable()
1030 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR); pch_udc_ep_disable()
1031 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN); pch_udc_ep_disable()
1034 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR); pch_udc_ep_disable()
1037 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR); pch_udc_ep_disable()
1044 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep) pch_udc_wait_ep_stall() argument
1049 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count) pch_udc_wait_ep_stall()
1052 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__); pch_udc_wait_ep_stall()
1076 /* mask and clear all ep interrupts */ pch_udc_init()
1102 /* mask all ep interrupts */ pch_udc_exit()
1442 * @ep: Reference to the endpoint structure
1446 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1452 unsigned halted = ep->halted;
1462 dev = ep->dev;
1465 if (ep->in)
1475 if (ep->in)
1490 ep->halted = 1;
1492 if (!ep->in)
1493 pch_udc_ep_clear_rrdy(ep); variable
1494 usb_gadget_giveback_request(&ep->ep, &req->req);
1496 ep->halted = halted;
1501 * @ep: Reference to the endpoint structure
1503 static void empty_req_queue(struct pch_udc_ep *ep) empty_req_queue() argument
1507 ep->halted = 1; empty_req_queue()
1508 while (!list_empty(&ep->queue)) { empty_req_queue()
1509 req = list_entry(ep->queue.next, struct pch_udc_request, queue); empty_req_queue()
1510 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */ empty_req_queue()
1546 * @ep: Reference to the endpoint structure
1555 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep, pch_udc_create_dma_chain() argument
1566 pch_udc_free_dma_chain(ep->dev, req); pch_udc_create_dma_chain()
1579 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags, pch_udc_create_dma_chain()
1597 pch_udc_free_dma_chain(ep->dev, req); pch_udc_create_dma_chain()
1606 * @ep: Reference to the endpoint structure
1614 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req, prepare_dma() argument
1620 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); prepare_dma()
1625 if (ep->in) prepare_dma()
1634 * @ep: Reference to the endpoint structure
1637 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req) process_zlp() argument
1639 struct pch_udc_dev *dev = ep->dev; process_zlp()
1642 complete_req(ep, req, 0); process_zlp()
1653 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX])); process_zlp()
1660 * @ep: Reference to the endpoint structure
1663 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep, pch_udc_start_rxrequest() argument
1668 pch_udc_clear_dma(ep->dev, DMA_DIR_RX); pch_udc_start_rxrequest()
1679 pch_udc_ep_set_ddptr(ep, req->td_data_phys); pch_udc_start_rxrequest()
1681 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num); pch_udc_start_rxrequest()
1682 pch_udc_set_dma(ep->dev, DMA_DIR_RX); pch_udc_start_rxrequest()
1683 pch_udc_ep_clear_nak(ep); pch_udc_start_rxrequest()
1684 pch_udc_ep_set_rrdy(ep); pch_udc_start_rxrequest()
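
pch_udc_start_rxrequest() kicks an OUT transfer in a fixed order: stop RX DMA, publish the descriptor chain pointer, unmask the endpoint's interrupt, re-enable RX DMA, clear NAK, and finally set RRDY so the hardware may receive. A skeleton of that sequence with stubbed steps (the bodies print instead of touching hardware):

#include <stdio.h>

static void clear_dma(void)  { puts("1: stop RX DMA"); }
static void set_ddptr(void)  { puts("2: write descriptor pointer"); }
static void enable_irq(void) { puts("3: unmask OUT-ep interrupt"); }
static void set_dma(void)    { puts("4: restart RX DMA"); }
static void clear_nak(void)  { puts("5: stop NAKing the host"); }
static void set_rrdy(void)   { puts("6: receive ready"); }

/* Order matters: the descriptor must be visible and DMA armed before the
 * endpoint stops NAKing, or the first packet could race the setup. */
static void start_rxrequest(void)
{
    clear_dma();
    set_ddptr();
    enable_irq();
    set_dma();
    clear_nak();
    set_rrdy();
}

int main(void) { start_rxrequest(); return 0; }
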
1701 struct pch_udc_ep *ep; pch_udc_pcd_ep_enable() local
1709 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_ep_enable()
1710 dev = ep->dev; pch_udc_pcd_ep_enable()
1714 ep->ep.desc = desc; pch_udc_pcd_ep_enable()
1715 ep->halted = 0; pch_udc_pcd_ep_enable()
1716 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc); pch_udc_pcd_ep_enable()
1717 ep->ep.maxpacket = usb_endpoint_maxp(desc); pch_udc_pcd_ep_enable()
1718 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_pcd_ep_enable()
1734 struct pch_udc_ep *ep; pch_udc_pcd_ep_disable() local
1741 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_ep_disable()
1742 dev = ep->dev; pch_udc_pcd_ep_disable()
1743 if ((usbep->name == ep0_string) || !ep->ep.desc) pch_udc_pcd_ep_disable()
1746 spin_lock_irqsave(&ep->dev->lock, iflags); pch_udc_pcd_ep_disable()
1747 empty_req_queue(ep); pch_udc_pcd_ep_disable()
1748 ep->halted = 1; pch_udc_pcd_ep_disable()
1749 pch_udc_ep_disable(ep); pch_udc_pcd_ep_disable()
1750 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_pcd_ep_disable()
1751 ep->ep.desc = NULL; pch_udc_pcd_ep_disable()
1752 INIT_LIST_HEAD(&ep->queue); pch_udc_pcd_ep_disable()
1753 spin_unlock_irqrestore(&ep->dev->lock, iflags); pch_udc_pcd_ep_disable()
1771 struct pch_udc_ep *ep; pch_udc_alloc_request() local
1777 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_alloc_request()
1778 dev = ep->dev; pch_udc_alloc_request()
1785 if (!ep->dev->dma_addr) pch_udc_alloc_request()
1788 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, pch_udc_alloc_request()
1812 struct pch_udc_ep *ep; pch_udc_free_request() local
1818 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_free_request()
1820 dev = ep->dev; pch_udc_free_request()
1826 pch_udc_free_dma_chain(ep->dev, req); pch_udc_free_request()
1827 pci_pool_free(ep->dev->data_requests, req->td_data, pch_udc_free_request()
1848 struct pch_udc_ep *ep; pch_udc_pcd_queue() local
1855 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_queue()
1856 dev = ep->dev; pch_udc_pcd_queue()
1857 if (!ep->ep.desc && ep->num) pch_udc_pcd_queue()
1869 if (ep->in) pch_udc_pcd_queue()
1885 if (ep->in) { pch_udc_pcd_queue()
1900 retval = prepare_dma(ep, req, GFP_ATOMIC); pch_udc_pcd_queue()
1907 if (list_empty(&ep->queue) && !ep->halted) { pch_udc_pcd_queue()
1910 process_zlp(ep, req); pch_udc_pcd_queue()
1914 if (!ep->in) { pch_udc_pcd_queue()
1915 pch_udc_start_rxrequest(ep, req); pch_udc_pcd_queue()
1922 pch_udc_wait_ep_stall(ep); pch_udc_pcd_queue()
1923 pch_udc_ep_clear_nak(ep); pch_udc_pcd_queue()
1924 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num)); pch_udc_pcd_queue()
1927 /* Now add this request to the ep's pending requests */ pch_udc_pcd_queue()
1929 list_add_tail(&req->queue, &ep->queue); pch_udc_pcd_queue()
1949 struct pch_udc_ep *ep; pch_udc_pcd_dequeue() local
1955 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_dequeue()
1956 dev = ep->dev; pch_udc_pcd_dequeue()
1957 if (!usbep || !usbreq || (!ep->ep.desc && ep->num)) pch_udc_pcd_dequeue()
1960 spin_lock_irqsave(&ep->dev->lock, flags); pch_udc_pcd_dequeue()
1962 list_for_each_entry(req, &ep->queue, queue) { pch_udc_pcd_dequeue()
1964 pch_udc_ep_set_nak(ep); pch_udc_pcd_dequeue()
1966 complete_req(ep, req, -ECONNRESET); pch_udc_pcd_dequeue()
1971 spin_unlock_irqrestore(&ep->dev->lock, flags); pch_udc_pcd_dequeue()
1987 struct pch_udc_ep *ep; pch_udc_pcd_set_halt() local
1994 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_set_halt()
1995 dev = ep->dev; pch_udc_pcd_set_halt()
1996 if (!ep->ep.desc && !ep->num) pch_udc_pcd_set_halt()
1998 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN)) pch_udc_pcd_set_halt()
2001 if (list_empty(&ep->queue)) { pch_udc_pcd_set_halt()
2003 if (ep->num == PCH_UDC_EP0) pch_udc_pcd_set_halt()
2004 ep->dev->stall = 1; pch_udc_pcd_set_halt()
2005 pch_udc_ep_set_stall(ep); pch_udc_pcd_set_halt()
2006 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_pcd_set_halt()
2007 PCH_UDC_EPINT(ep->in, pch_udc_pcd_set_halt()
2008 ep->num)); pch_udc_pcd_set_halt()
2010 pch_udc_ep_clear_stall(ep); pch_udc_pcd_set_halt()
2032 struct pch_udc_ep *ep; pch_udc_pcd_set_wedge() local
2039 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_set_wedge()
2040 dev = ep->dev; pch_udc_pcd_set_wedge()
2041 if (!ep->ep.desc && !ep->num) pch_udc_pcd_set_wedge()
2043 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN)) pch_udc_pcd_set_wedge()
2046 if (!list_empty(&ep->queue)) { pch_udc_pcd_set_wedge()
2049 if (ep->num == PCH_UDC_EP0) pch_udc_pcd_set_wedge()
2050 ep->dev->stall = 1; pch_udc_pcd_set_wedge()
2051 pch_udc_ep_set_stall(ep); pch_udc_pcd_set_wedge()
2052 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_pcd_set_wedge()
2053 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_pcd_set_wedge()
2054 ep->dev->prot_stall = 1; pch_udc_pcd_set_wedge()
2067 struct pch_udc_ep *ep; pch_udc_pcd_fifo_flush() local
2072 ep = container_of(usbep, struct pch_udc_ep, ep); pch_udc_pcd_fifo_flush()
2073 if (ep->ep.desc || !ep->num) pch_udc_pcd_fifo_flush()
2074 pch_udc_ep_fifo_flush(ep, ep->in); pch_udc_pcd_fifo_flush()
2108 * @ep: Reference to the endpoint structure
2110 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep) pch_udc_start_next_txrequest() argument
2115 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P) pch_udc_start_next_txrequest()
2118 if (list_empty(&ep->queue)) pch_udc_start_next_txrequest()
2122 req = list_entry(ep->queue.next, struct pch_udc_request, queue); pch_udc_start_next_txrequest()
2127 pch_udc_wait_ep_stall(ep); pch_udc_start_next_txrequest()
2129 pch_udc_ep_set_ddptr(ep, 0); pch_udc_start_next_txrequest()
2138 pch_udc_ep_set_ddptr(ep, req->td_data_phys); pch_udc_start_next_txrequest()
2139 pch_udc_set_dma(ep->dev, DMA_DIR_TX); pch_udc_start_next_txrequest()
2140 pch_udc_ep_set_pd(ep); pch_udc_start_next_txrequest()
2141 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_start_next_txrequest()
2142 pch_udc_ep_clear_nak(ep); pch_udc_start_next_txrequest()
2147 * @ep: Reference to the endpoint structure
2149 static void pch_udc_complete_transfer(struct pch_udc_ep *ep) pch_udc_complete_transfer() argument
2152 struct pch_udc_dev *dev = ep->dev; pch_udc_complete_transfer()
2154 if (list_empty(&ep->queue)) pch_udc_complete_transfer()
2156 req = list_entry(ep->queue.next, struct pch_udc_request, queue); pch_udc_complete_transfer()
2165 (int)(ep->epsts)); pch_udc_complete_transfer()
2172 complete_req(ep, req, 0); pch_udc_complete_transfer()
2174 if (!list_empty(&ep->queue)) { pch_udc_complete_transfer()
2175 pch_udc_wait_ep_stall(ep); pch_udc_complete_transfer()
2176 pch_udc_ep_clear_nak(ep); pch_udc_complete_transfer()
2177 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_complete_transfer()
2178 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_complete_transfer()
2180 pch_udc_disable_ep_interrupts(ep->dev, pch_udc_complete_transfer()
2181 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_complete_transfer()
2187 * @ep: Reference to the endpoint structure
2189 static void pch_udc_complete_receiver(struct pch_udc_ep *ep) pch_udc_complete_receiver() argument
2192 struct pch_udc_dev *dev = ep->dev; pch_udc_complete_receiver()
2197 if (list_empty(&ep->queue)) pch_udc_complete_receiver()
2200 req = list_entry(ep->queue.next, struct pch_udc_request, queue); pch_udc_complete_receiver()
2201 pch_udc_clear_dma(ep->dev, DMA_DIR_RX); pch_udc_complete_receiver()
2202 pch_udc_ep_set_ddptr(ep, 0); pch_udc_complete_receiver()
2214 (int)(ep->epsts)); pch_udc_complete_receiver()
2237 complete_req(ep, req, 0); pch_udc_complete_receiver()
2239 if (!list_empty(&ep->queue)) { pch_udc_complete_receiver()
2240 req = list_entry(ep->queue.next, struct pch_udc_request, queue); pch_udc_complete_receiver()
2241 pch_udc_start_rxrequest(ep, req); pch_udc_complete_receiver()
2254 struct pch_udc_ep *ep; pch_udc_svc_data_in() local
2256 ep = &dev->ep[UDC_EPIN_IDX(ep_num)]; pch_udc_svc_data_in()
2257 epsts = ep->epsts; pch_udc_svc_data_in()
2258 ep->epsts = 0; pch_udc_svc_data_in()
2269 pch_udc_ep_set_stall(ep); pch_udc_svc_data_in()
2270 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_svc_data_in()
2271 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_svc_data_in()
2275 pch_udc_ep_clear_stall(ep); pch_udc_svc_data_in()
2277 pch_udc_ep_set_stall(ep); pch_udc_svc_data_in()
2278 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_svc_data_in()
2279 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_svc_data_in()
2283 pch_udc_complete_transfer(ep); pch_udc_svc_data_in()
2287 pch_udc_start_next_txrequest(ep); pch_udc_svc_data_in()
2298 struct pch_udc_ep *ep; pch_udc_svc_data_out() local
2301 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)]; pch_udc_svc_data_out()
2302 epsts = ep->epsts; pch_udc_svc_data_out()
2303 ep->epsts = 0; pch_udc_svc_data_out()
2305 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) { pch_udc_svc_data_out()
2307 req = list_entry(ep->queue.next, struct pch_udc_request, pch_udc_svc_data_out()
2312 pch_udc_start_rxrequest(ep, req); pch_udc_svc_data_out()
2319 pch_udc_ep_set_stall(ep); pch_udc_svc_data_out()
2320 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_svc_data_out()
2321 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_svc_data_out()
2325 pch_udc_ep_clear_stall(ep); pch_udc_svc_data_out()
2327 pch_udc_ep_set_stall(ep); pch_udc_svc_data_out()
2328 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_svc_data_out()
2329 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_svc_data_out()
2334 if (ep->dev->prot_stall == 1) { pch_udc_svc_data_out()
2335 pch_udc_ep_set_stall(ep); pch_udc_svc_data_out()
2336 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_svc_data_out()
2337 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_svc_data_out()
2339 pch_udc_complete_receiver(ep); pch_udc_svc_data_out()
2342 if (list_empty(&ep->queue)) pch_udc_svc_data_out()
2353 struct pch_udc_ep *ep; pch_udc_svc_control_in() local
2356 ep = &dev->ep[UDC_EP0IN_IDX]; pch_udc_svc_control_in()
2357 ep_out = &dev->ep[UDC_EP0OUT_IDX]; pch_udc_svc_control_in()
2358 epsts = ep->epsts; pch_udc_svc_control_in()
2359 ep->epsts = 0; pch_udc_svc_control_in()
2370 pch_udc_complete_transfer(ep); pch_udc_svc_control_in()
2382 pch_udc_start_next_txrequest(ep); pch_udc_svc_control_in()
2396 struct pch_udc_ep *ep; variable in typeref:struct:pch_udc_ep
2398 ep = &dev->ep[UDC_EP0OUT_IDX];
2399 stat = ep->epsts;
2400 ep->epsts = 0;
2406 dev->ep[UDC_EP0IN_IDX].halted = 0;
2407 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2408 dev->setup_data = ep->td_stp->request;
2409 pch_udc_init_setup_buff(ep->td_stp);
2411 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2412 dev->ep[UDC_EP0IN_IDX].in);
2414 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2416 dev->gadget.ep0 = &ep->ep;
2428 ep->td_data->status = (ep->td_data->status &
2431 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2436 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2441 pch_udc_ep_clear_nak(ep); variable
2445 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2446 pch_udc_enable_ep_interrupts(ep->dev,
2447 PCH_UDC_EPINT(ep->in, ep->num));
2456 pch_udc_ep_set_ddptr(ep, 0);
2457 if (!list_empty(&ep->queue)) {
2458 ep->epsts = stat;
2463 pch_udc_ep_set_rrdy(ep); variable
2475 struct pch_udc_ep *ep; pch_udc_postsvc_epinters() local
2478 ep = &dev->ep[UDC_EPIN_IDX(ep_num)]; pch_udc_postsvc_epinters()
2479 if (!list_empty(&ep->queue)) { pch_udc_postsvc_epinters()
2480 req = list_entry(ep->queue.next, struct pch_udc_request, queue); pch_udc_postsvc_epinters()
2481 pch_udc_enable_ep_interrupts(ep->dev, pch_udc_postsvc_epinters()
2482 PCH_UDC_EPINT(ep->in, ep->num)); pch_udc_postsvc_epinters()
2483 pch_udc_ep_clear_nak(ep); pch_udc_postsvc_epinters()
2495 struct pch_udc_ep *ep; pch_udc_read_all_epstatus() local
2500 ep = &dev->ep[UDC_EPIN_IDX(i)]; pch_udc_read_all_epstatus()
2501 ep->epsts = pch_udc_read_ep_status(ep); pch_udc_read_all_epstatus()
2502 pch_udc_clear_ep_status(ep, ep->epsts); pch_udc_read_all_epstatus()
2506 ep = &dev->ep[UDC_EPOUT_IDX(i)]; pch_udc_read_all_epstatus()
2507 ep->epsts = pch_udc_read_ep_status(ep); pch_udc_read_all_epstatus()
2508 pch_udc_clear_ep_status(ep, ep->epsts); pch_udc_read_all_epstatus()
2520 struct pch_udc_ep *ep; pch_udc_activate_control_ep() local
2524 ep = &dev->ep[UDC_EP0IN_IDX]; pch_udc_activate_control_ep()
2525 pch_udc_clear_ep_control(ep); pch_udc_activate_control_ep()
2526 pch_udc_ep_fifo_flush(ep, ep->in); pch_udc_activate_control_ep()
2527 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in); pch_udc_activate_control_ep()
2528 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE); pch_udc_activate_control_ep()
2530 ep->td_data = NULL; pch_udc_activate_control_ep()
2531 ep->td_stp = NULL; pch_udc_activate_control_ep()
2532 ep->td_data_phys = 0; pch_udc_activate_control_ep()
2533 ep->td_stp_phys = 0; pch_udc_activate_control_ep()
2536 ep = &dev->ep[UDC_EP0OUT_IDX]; pch_udc_activate_control_ep()
2537 pch_udc_clear_ep_control(ep); pch_udc_activate_control_ep()
2538 pch_udc_ep_fifo_flush(ep, ep->in); pch_udc_activate_control_ep()
2539 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in); pch_udc_activate_control_ep()
2540 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE); pch_udc_activate_control_ep()
2542 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX); pch_udc_activate_control_ep()
2545 pch_udc_init_setup_buff(ep->td_stp); pch_udc_activate_control_ep()
2547 pch_udc_ep_set_subptr(ep, ep->td_stp_phys); pch_udc_activate_control_ep()
2549 pch_udc_ep_set_ddptr(ep, ep->td_data_phys); pch_udc_activate_control_ep()
2552 ep->td_data->status = PCH_UDC_DMA_LAST; pch_udc_activate_control_ep()
2553 ep->td_data->dataptr = dev->dma_addr; pch_udc_activate_control_ep()
2554 ep->td_data->next = ep->td_data_phys; pch_udc_activate_control_ep()
2556 pch_udc_ep_clear_nak(ep); pch_udc_activate_control_ep()
2566 struct pch_udc_ep *ep; pch_udc_svc_ur_interrupt() local
2577 ep = &dev->ep[i]; pch_udc_svc_ur_interrupt()
2578 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK); pch_udc_svc_ur_interrupt()
2579 pch_udc_clear_ep_control(ep); pch_udc_svc_ur_interrupt()
2580 pch_udc_ep_set_ddptr(ep, 0); pch_udc_svc_ur_interrupt()
2581 pch_udc_write_csr(ep->dev, 0x00, i); pch_udc_svc_ur_interrupt()
2588 /* disable ep to empty req queue. Skip the control EP's */ pch_udc_svc_ur_interrupt()
2590 ep = &dev->ep[i]; pch_udc_svc_ur_interrupt()
2591 pch_udc_ep_set_nak(ep); pch_udc_svc_ur_interrupt()
2592 pch_udc_ep_fifo_flush(ep, ep->in); pch_udc_svc_ur_interrupt()
2594 empty_req_queue(ep); pch_udc_svc_ur_interrupt()
2634 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX])); pch_udc_svc_enum_interrupt()
2674 pch_udc_ep_clear_stall(&(dev->ep[i])); pch_udc_svc_intf_interrupt()
2675 dev->ep[i].halted = 0; pch_udc_svc_intf_interrupt()
2709 pch_udc_ep_clear_stall(&(dev->ep[i])); pch_udc_svc_cfg_interrupt()
2710 dev->ep[i].halted = 0; pch_udc_svc_cfg_interrupt()
2808 /* Clear ep interrupts */ pch_udc_isr()
2889 memset(dev->ep, 0, sizeof dev->ep); pch_udc_pcd_reinit()
2891 struct pch_udc_ep *ep = &dev->ep[i]; pch_udc_pcd_reinit() local
2892 ep->dev = dev; pch_udc_pcd_reinit()
2893 ep->halted = 1; pch_udc_pcd_reinit()
2894 ep->num = i / 2; pch_udc_pcd_reinit()
2895 ep->in = ~i & 1; pch_udc_pcd_reinit()
2896 ep->ep.name = ep_string[i]; pch_udc_pcd_reinit()
2897 ep->ep.ops = &pch_udc_ep_ops; pch_udc_pcd_reinit()
2898 if (ep->in) { pch_udc_pcd_reinit()
2899 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT; pch_udc_pcd_reinit()
2900 ep->ep.caps.dir_in = true; pch_udc_pcd_reinit()
2902 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) * pch_udc_pcd_reinit()
2904 ep->ep.caps.dir_out = true; pch_udc_pcd_reinit()
2907 ep->ep.caps.type_control = true; pch_udc_pcd_reinit()
2909 ep->ep.caps.type_iso = true; pch_udc_pcd_reinit()
2910 ep->ep.caps.type_bulk = true; pch_udc_pcd_reinit()
2911 ep->ep.caps.type_int = true; pch_udc_pcd_reinit()
2913 /* need to set ep->ep.maxpacket and set Default Configuration?*/ pch_udc_pcd_reinit()
2914 usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE); pch_udc_pcd_reinit()
2915 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); pch_udc_pcd_reinit()
2916 INIT_LIST_HEAD(&ep->queue); pch_udc_pcd_reinit()
2918 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE); pch_udc_pcd_reinit()
2919 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE); pch_udc_pcd_reinit()
2922 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list); pch_udc_pcd_reinit()
2923 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list); pch_udc_pcd_reinit()
2925 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep; pch_udc_pcd_reinit()
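
pch_udc_pcd_reinit() fills dev->ep[] with IN endpoints at even slots and OUT endpoints at odd slots, matching the UDC_EPIN_IDX/UDC_EPOUT_IDX macros near the top of the file, and derives num/in back from the slot index with i / 2 and ~i & 1. The mapping in isolation:

#include <stdio.h>

static unsigned epin_idx(unsigned num)  { return num * 2; }
static unsigned epout_idx(unsigned num) { return num * 2 + 1; }

int main(void)
{
    for (unsigned i = 0; i < 6; i++) {
        unsigned num = i / 2;
        unsigned in  = ~i & 1;            /* even slot => IN endpoint */
        printf("slot %u: ep%u %s (round-trip %u)\n", i, num,
               in ? "in" : "out",
               in ? epin_idx(num) : epout_idx(num));
    }
    return 0;
}
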
2972 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys); init_dma_pools()
2978 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp; init_dma_pools()
2982 &dev->ep[UDC_EP0OUT_IDX].td_data_phys); init_dma_pools()
2988 dev->ep[UDC_EP0OUT_IDX].td_data = td_data; init_dma_pools()
2989 dev->ep[UDC_EP0IN_IDX].td_stp = NULL; init_dma_pools()
2990 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0; init_dma_pools()
2991 dev->ep[UDC_EP0IN_IDX].td_data = NULL; init_dma_pools()
2992 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0; init_dma_pools()
3065 if (dev->ep[UDC_EP0OUT_IDX].td_stp) { pch_udc_remove()
3067 dev->ep[UDC_EP0OUT_IDX].td_stp, pch_udc_remove()
3068 dev->ep[UDC_EP0OUT_IDX].td_stp_phys); pch_udc_remove()
3070 if (dev->ep[UDC_EP0OUT_IDX].td_data) { pch_udc_remove()
3072 dev->ep[UDC_EP0OUT_IDX].td_data, pch_udc_remove()
3073 dev->ep[UDC_EP0OUT_IDX].td_data_phys); pch_udc_remove()
H A Dgr_udc.c95 static void gr_dbgprint_request(const char *str, struct gr_ep *ep, gr_dbgprint_request() argument
98 int buflen = ep->is_in ? req->req.length : req->req.actual; gr_dbgprint_request()
102 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen, gr_dbgprint_request()
116 static void gr_dbgprint_request(const char *str, struct gr_ep *ep, gr_dbgprint_request() argument
129 static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep) gr_seq_ep_show() argument
131 u32 epctrl = gr_read32(&ep->regs->epctrl); gr_seq_ep_show()
132 u32 epstat = gr_read32(&ep->regs->epstat); gr_seq_ep_show()
136 seq_printf(seq, "%s:\n", ep->ep.name); gr_seq_ep_show()
141 seq_printf(seq, " dma_start = %d\n", ep->dma_start); gr_seq_ep_show()
142 seq_printf(seq, " stopped = %d\n", ep->stopped); gr_seq_ep_show()
143 seq_printf(seq, " wedged = %d\n", ep->wedged); gr_seq_ep_show()
144 seq_printf(seq, " callback = %d\n", ep->callback); gr_seq_ep_show()
145 seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket); gr_seq_ep_show()
146 seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit); gr_seq_ep_show()
147 seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer); gr_seq_ep_show()
161 if (list_empty(&ep->queue)) { gr_seq_ep_show()
167 list_for_each_entry(req, &ep->queue, queue) { gr_seq_ep_show()
192 struct gr_ep *ep; gr_seq_show() local
205 list_for_each_entry(ep, &dev->ep_list, ep_list) gr_seq_show()
206 gr_seq_ep_show(seq, ep); gr_seq_show()
251 static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags) gr_alloc_dma_desc() argument
256 dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr); gr_alloc_dma_desc()
258 dev_err(ep->dev->dev, "Could not allocate from DMA pool\n"); gr_alloc_dma_desc()
303 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
317 dev = ep->dev;
318 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
321 if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
326 * divisible by ep->ep.maxpacket and the last descriptor was
331 memcpy(buftail, ep->tailbuf, req->oddlen);
335 dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
336 ep->ep.name);
337 gr_dbgprint_request("OVFL", ep, req);
343 if (ep->is_in)
344 gr_dbgprint_request("SENT", ep, req);
346 gr_dbgprint_request("RECV", ep, req);
349 /* Prevent changes to ep->queue during callback */
350 ep->callback = 1;
360 usb_gadget_giveback_request(&ep->ep, &req->req);
364 ep->callback = 0;
381 * Starts DMA for endpoint ep if there are requests in the queue.
383 * Must be called with dev->lock held and with !ep->stopped.
385 static void gr_start_dma(struct gr_ep *ep) gr_start_dma() argument
390 if (list_empty(&ep->queue)) { gr_start_dma()
391 ep->dma_start = 0; gr_start_dma()
395 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_start_dma()
402 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly gr_start_dma()
406 if (!ep->is_in && req->oddlen) gr_start_dma()
407 req->last_desc->data = ep->tailbuf_paddr; gr_start_dma()
412 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr); gr_start_dma()
415 dmactrl = gr_read32(&ep->regs->dmactrl); gr_start_dma()
416 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA); gr_start_dma()
418 ep->dma_start = 1; gr_start_dma()
422 * Finishes the first request in the ep's queue and, if available, starts the
425 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
427 static void gr_dma_advance(struct gr_ep *ep, int status) gr_dma_advance() argument
431 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_dma_advance()
432 gr_finish_request(ep, req, status); gr_dma_advance()
433 gr_start_dma(ep); /* Regardless of ep->dma_start */ gr_dma_advance()
442 static void gr_abort_dma(struct gr_ep *ep) gr_abort_dma() argument
446 dmactrl = gr_read32(&ep->regs->dmactrl); gr_abort_dma()
447 gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD); gr_abort_dma()
457 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req, gr_add_dma_desc() argument
462 desc = gr_alloc_dma_desc(ep, gfp_flags); gr_add_dma_desc()
467 if (ep->is_in) gr_add_dma_desc()
496 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req, gr_setup_out_desc_list() argument
508 u16 size = min(bytes_left, ep->bytes_per_buffer); gr_setup_out_desc_list()
510 if (size < ep->bytes_per_buffer) { gr_setup_out_desc_list()
516 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); gr_setup_out_desc_list()
529 gr_free_dma_desc_chain(ep->dev, req); gr_setup_out_desc_list()
540 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
549 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req, gr_setup_in_desc_list() argument
561 u16 size = min(bytes_left, ep->bytes_per_buffer); gr_setup_in_desc_list()
563 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); gr_setup_in_desc_list()
574 * multiples of ep->ep.maxpacket. gr_setup_in_desc_list()
576 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) { gr_setup_in_desc_list()
577 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags); gr_setup_in_desc_list()
591 gr_free_dma_desc_chain(ep->dev, req); gr_setup_in_desc_list()
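The zero-length-packet branch above only fires when a function driver opts in; a minimal function-driver-side sketch using the standard gadget request API (buffer, endpoint, and callback names are hypothetical):

	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);

	req->buf      = buf;
	req->length   = 1024;	/* exact multiple of maxpacket (e.g. 512) */
	req->zero     = 1;	/* ask the UDC to append a terminating ZLP */
	req->complete = my_complete;
	if (usb_ep_queue(ep, req, GFP_KERNEL))
		usb_ep_free_request(ep, req);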
597 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags) gr_queue() argument
599 struct gr_udc *dev = ep->dev; gr_queue()
602 if (unlikely(!ep->ep.desc && ep->num != 0)) { gr_queue()
603 dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name); gr_queue()
610 ep->ep.name, req->req.buf, list_empty(&req->queue)); gr_queue()
626 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in); gr_queue()
632 if (ep->is_in) gr_queue()
633 ret = gr_setup_in_desc_list(ep, req, gfp_flags); gr_queue()
635 ret = gr_setup_out_desc_list(ep, req, gfp_flags); gr_queue()
641 list_add_tail(&req->queue, &ep->queue); gr_queue()
644 if (!ep->dma_start && likely(!ep->stopped)) gr_queue()
645 gr_start_dma(ep); gr_queue()
655 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req, gr_queue_int() argument
658 if (ep->is_in) gr_queue_int()
659 gr_dbgprint_request("RESP", ep, req); gr_queue_int()
661 return gr_queue(ep, req, gfp_flags); gr_queue_int()
672 static void gr_ep_nuke(struct gr_ep *ep) gr_ep_nuke() argument
676 ep->stopped = 1; gr_ep_nuke()
677 ep->dma_start = 0; gr_ep_nuke()
678 gr_abort_dma(ep); gr_ep_nuke()
680 while (!list_empty(&ep->queue)) { gr_ep_nuke()
681 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_ep_nuke()
682 gr_finish_request(ep, req, -ESHUTDOWN); gr_ep_nuke()
691 static void gr_ep_reset(struct gr_ep *ep) gr_ep_reset() argument
693 gr_write32(&ep->regs->epctrl, 0); gr_ep_reset()
694 gr_write32(&ep->regs->dmactrl, 0); gr_ep_reset()
696 ep->ep.maxpacket = MAX_CTRL_PL_SIZE; gr_ep_reset()
697 ep->ep.desc = NULL; gr_ep_reset()
698 ep->stopped = 1; gr_ep_reset()
699 ep->dma_start = 0; gr_ep_reset()
724 static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost) gr_ep_halt_wedge() argument
729 if (ep->num && !ep->ep.desc) gr_ep_halt_wedge()
732 if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) gr_ep_halt_wedge()
736 if (!ep->num) { gr_ep_halt_wedge()
739 gr_control_stall(ep->dev); gr_ep_halt_wedge()
740 dev_dbg(ep->dev->dev, "EP: stall ep0\n"); gr_ep_halt_wedge()
746 dev_dbg(ep->dev->dev, "EP: %s halt %s\n", gr_ep_halt_wedge()
747 (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name); gr_ep_halt_wedge()
749 epctrl = gr_read32(&ep->regs->epctrl); gr_ep_halt_wedge()
752 gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH); gr_ep_halt_wedge()
753 ep->stopped = 1; gr_ep_halt_wedge()
755 ep->wedged = 1; gr_ep_halt_wedge()
757 gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH); gr_ep_halt_wedge()
758 ep->stopped = 0; gr_ep_halt_wedge()
759 ep->wedged = 0; gr_ep_halt_wedge()
762 if (!ep->dma_start) gr_ep_halt_wedge()
763 gr_start_dma(ep); gr_ep_halt_wedge()
797 struct gr_ep *ep; gr_stop_activity() local
799 list_for_each_entry(ep, &dev->ep_list, ep_list) gr_stop_activity()
800 gr_ep_nuke(ep); gr_stop_activity()
814 struct gr_ep *ep; gr_ep0_testmode_complete() local
818 ep = container_of(_ep, struct gr_ep, ep); gr_ep0_testmode_complete()
819 dev = ep->dev; gr_ep0_testmode_complete()
841 void (*complete)(struct usb_ep *ep, gr_ep0_respond()
1002 struct gr_ep *ep; gr_endpoint_request() local
1014 ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]); gr_endpoint_request()
1018 halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH; gr_endpoint_request()
1024 status = gr_ep_halt_wedge(ep, 1, 0, 1); gr_endpoint_request()
1034 if (ep->wedged) gr_endpoint_request()
1036 status = gr_ep_halt_wedge(ep, 0, 0, 1); gr_endpoint_request()
1255 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1257 static int gr_handle_in_ep(struct gr_ep *ep) gr_handle_in_ep() argument
1261 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_handle_in_ep()
1268 if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) gr_handle_in_ep()
1272 gr_dma_advance(ep, 0); gr_handle_in_ep()
1280 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
1282 static int gr_handle_out_ep(struct gr_ep *ep) gr_handle_out_ep() argument
1288 struct gr_udc *dev = ep->dev; gr_handle_out_ep()
1290 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_handle_out_ep()
1304 if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) { gr_handle_out_ep()
1307 if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) { gr_handle_out_ep()
1318 gr_dma_advance(ep, 0); gr_handle_out_ep()
1324 ep_dmactrl = gr_read32(&ep->regs->dmactrl); gr_handle_out_ep()
1325 gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA); gr_handle_out_ep()
1419 struct gr_ep *ep; gr_irq_handler() local
1430 * Check IN ep interrupts. We check these before the OUT eps because gr_irq_handler()
1435 ep = &dev->epi[i]; gr_irq_handler()
1436 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue)) gr_irq_handler()
1437 handled = gr_handle_in_ep(ep) || handled; gr_irq_handler()
1440 /* Check OUT ep interrupts */ gr_irq_handler()
1442 ep = &dev->epo[i]; gr_irq_handler()
1443 if (!ep->stopped && !ep->callback && !list_empty(&ep->queue)) gr_irq_handler()
1444 handled = gr_handle_out_ep(ep) || handled; gr_irq_handler()
1455 list_for_each_entry(ep, &dev->ep_list, ep_list) { gr_irq_handler()
1456 if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) { gr_irq_handler()
1459 ep->ep.name); gr_irq_handler()
1483 /* USB ep ops */
1490 struct gr_ep *ep; gr_ep_enable() local
1497 ep = container_of(_ep, struct gr_ep, ep); gr_ep_enable()
1501 dev = ep->dev; gr_ep_enable()
1504 if (ep == &dev->epo[0] || ep == &dev->epi[0]) gr_ep_enable()
1511 epctrl = gr_read32(&ep->regs->epctrl); gr_ep_enable()
1516 if (!ep->is_in != !usb_endpoint_dir_in(desc)) gr_ep_enable()
1519 /* Check ep num */ gr_ep_enable()
1520 if ((!ep->is_in && ep->num >= dev->nepo) || gr_ep_enable()
1521 (ep->is_in && ep->num >= dev->nepi)) gr_ep_enable()
1534 ep->ep.name); gr_ep_enable()
1561 } else if (max > ep->ep.maxpacket_limit) { gr_ep_enable()
1563 max, ep->ep.maxpacket_limit); gr_ep_enable()
1567 spin_lock(&ep->dev->lock); gr_ep_enable()
1569 if (!ep->stopped) { gr_ep_enable()
1570 spin_unlock(&ep->dev->lock); gr_ep_enable()
1574 ep->stopped = 0; gr_ep_enable()
1575 ep->wedged = 0; gr_ep_enable()
1576 ep->ep.desc = desc; gr_ep_enable()
1577 ep->ep.maxpacket = max; gr_ep_enable()
1578 ep->dma_start = 0; gr_ep_enable()
1586 ep->bytes_per_buffer = (nt + 1) * max; gr_ep_enable()
1587 } else if (ep->is_in) { gr_ep_enable()
1593 ep->bytes_per_buffer = (buffer_size / max) * max; gr_ep_enable()
1599 ep->bytes_per_buffer = max; gr_ep_enable()
1606 if (ep->is_in) gr_ep_enable()
1608 gr_write32(&ep->regs->epctrl, epctrl); gr_ep_enable()
1610 gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI); gr_ep_enable()
1612 spin_unlock(&ep->dev->lock); gr_ep_enable()
1614 dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n", gr_ep_enable()
1615 ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer); gr_ep_enable()
1622 struct gr_ep *ep; gr_ep_disable() local
1626 ep = container_of(_ep, struct gr_ep, ep); gr_ep_disable()
1627 if (!_ep || !ep->ep.desc) gr_ep_disable()
1630 dev = ep->dev; gr_ep_disable()
1633 if (ep == &dev->epo[0] || ep == &dev->epi[0]) gr_ep_disable()
1639 dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name); gr_ep_disable()
1643 gr_ep_nuke(ep); gr_ep_disable()
1644 gr_ep_reset(ep); gr_ep_disable()
1645 ep->ep.desc = NULL; gr_ep_disable()
1675 struct gr_ep *ep; gr_queue_ext() local
1683 ep = container_of(_ep, struct gr_ep, ep); gr_queue_ext()
1685 dev = ep->dev; gr_queue_ext()
1687 spin_lock(&ep->dev->lock); gr_queue_ext()
1695 if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) { gr_queue_ext()
1696 ep = &dev->epo[0]; gr_queue_ext()
1697 ep->ep.driver_data = dev->epi[0].ep.driver_data; gr_queue_ext()
1700 if (ep->is_in) gr_queue_ext()
1701 gr_dbgprint_request("EXTERN", ep, req); gr_queue_ext()
1703 ret = gr_queue(ep, req, GFP_ATOMIC); gr_queue_ext()
1705 spin_unlock(&ep->dev->lock); gr_queue_ext()
1714 struct gr_ep *ep; gr_dequeue() local
1719 ep = container_of(_ep, struct gr_ep, ep); gr_dequeue()
1720 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0)) gr_dequeue()
1722 dev = ep->dev; gr_dequeue()
1733 list_for_each_entry(req, &ep->queue, queue) { gr_dequeue()
1742 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) { gr_dequeue()
1744 gr_abort_dma(ep); gr_dequeue()
1745 if (ep->stopped) gr_dequeue()
1746 gr_finish_request(ep, req, -ECONNRESET); gr_dequeue()
1748 gr_dma_advance(ep, -ECONNRESET); gr_dequeue()
1751 gr_finish_request(ep, req, -ECONNRESET); gr_dequeue()
1766 struct gr_ep *ep; gr_set_halt_wedge() local
1770 ep = container_of(_ep, struct gr_ep, ep); gr_set_halt_wedge()
1772 spin_lock(&ep->dev->lock); gr_set_halt_wedge()
1775 if (halt && ep->is_in && !list_empty(&ep->queue)) { gr_set_halt_wedge()
1780 ret = gr_ep_halt_wedge(ep, halt, wedge, 0); gr_set_halt_wedge()
1783 spin_unlock(&ep->dev->lock); gr_set_halt_wedge()
1806 struct gr_ep *ep; gr_fifo_status() local
1812 ep = container_of(_ep, struct gr_ep, ep); gr_fifo_status()
1814 epstat = gr_read32(&ep->regs->epstat); gr_fifo_status()
1828 struct gr_ep *ep; gr_fifo_flush() local
1833 ep = container_of(_ep, struct gr_ep, ep); gr_fifo_flush()
1834 dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name); gr_fifo_flush()
1836 spin_lock(&ep->dev->lock); gr_fifo_flush()
1838 epctrl = gr_read32(&ep->regs->epctrl); gr_fifo_flush()
1840 gr_write32(&ep->regs->epctrl, epctrl); gr_fifo_flush()
1842 spin_unlock(&ep->dev->lock); gr_fifo_flush()
1980 struct gr_ep *ep; gr_ep_init() local
1986 ep = &dev->epi[num]; gr_ep_init()
1987 ep->ep.name = inames[num]; gr_ep_init()
1988 ep->regs = &dev->regs->epi[num]; gr_ep_init()
1990 ep = &dev->epo[num]; gr_ep_init()
1991 ep->ep.name = onames[num]; gr_ep_init()
1992 ep->regs = &dev->regs->epo[num]; gr_ep_init()
1995 gr_ep_reset(ep); gr_ep_init()
1996 ep->num = num; gr_ep_init()
1997 ep->is_in = is_in; gr_ep_init()
1998 ep->dev = dev; gr_ep_init()
1999 ep->ep.ops = &gr_ep_ops; gr_ep_init()
2000 INIT_LIST_HEAD(&ep->queue); gr_ep_init()
2003 _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); gr_ep_init()
2019 usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE); gr_ep_init()
2020 ep->bytes_per_buffer = MAX_CTRL_PL_SIZE; gr_ep_init()
2022 ep->ep.caps.type_control = true; gr_ep_init()
2024 usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit); gr_ep_init()
2025 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); gr_ep_init()
2027 ep->ep.caps.type_iso = true; gr_ep_init()
2028 ep->ep.caps.type_bulk = true; gr_ep_init()
2029 ep->ep.caps.type_int = true; gr_ep_init()
2031 list_add_tail(&ep->ep_list, &dev->ep_list); gr_ep_init()
2034 ep->ep.caps.dir_in = true; gr_ep_init()
2036 ep->ep.caps.dir_out = true; gr_ep_init()
2038 ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit, gr_ep_init()
2039 &ep->tailbuf_paddr, GFP_ATOMIC); gr_ep_init()
2040 if (!ep->tailbuf) gr_ep_init()
2060 dev->gadget.ep0 = &dev->epi[0].ep; gr_udc_init()
2097 struct gr_ep *ep; gr_ep_remove() local
2100 ep = &dev->epi[num]; gr_ep_remove()
2102 ep = &dev->epo[num]; gr_ep_remove()
2104 if (ep->tailbuf) gr_ep_remove()
2105 dma_free_coherent(dev->dev, ep->ep.maxpacket_limit, gr_ep_remove()
2106 ep->tailbuf, ep->tailbuf_paddr); gr_ep_remove()
2123 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req); gr_remove()
2124 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req); gr_remove()
840 gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length, void (*complete)(struct usb_ep *ep, struct usb_request *req)) gr_ep0_respond() argument
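Completed requests are handed back through usb_gadget_giveback_request() (see gr_finish_request() above), which runs the function driver's completion callback; a hedged sketch of such a callback (name hypothetical):

	static void my_complete(struct usb_ep *ep, struct usb_request *req)
	{
		if (req->status)	/* e.g. -ESHUTDOWN from gr_ep_nuke() */
			pr_debug("%s: request failed: %d\n", ep->name, req->status);
		else
			pr_debug("%s: %u/%u bytes transferred\n", ep->name,
				 req->actual, req->length);
	}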
H A Dnet2280.c90 EP_INFO("ep-a",
92 EP_INFO("ep-b",
94 EP_INFO("ep-c",
96 EP_INFO("ep-d",
98 EP_INFO("ep-e",
100 EP_INFO("ep-f",
102 EP_INFO("ep-g",
104 EP_INFO("ep-h",
129 /* mode 0 == ep-{a,b,c,d} 1K fifo each
130 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
131 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
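The mode comment above can be restated as data; an illustrative table only (the driver's set_fifo_mode(), further down, encodes the same mapping imperatively; 0 marks an endpoint unavailable in that mode):

	static const u16 fifo_size_by_mode[3][4] = {
		/*      ep-a  ep-b  ep-c  ep-d */
		[0] = { 1024, 1024, 1024, 1024 },
		[1] = { 2048, 2048,    0,    0 },
		[2] = { 2048, 1024, 1024,    0 },
	};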
165 static void ep_clear_seqnum(struct net2280_ep *ep);
171 static inline void enable_pciirqenb(struct net2280_ep *ep) enable_pciirqenb() argument
173 u32 tmp = readl(&ep->dev->regs->pciirqenb0); enable_pciirqenb()
175 if (ep->dev->quirks & PLX_LEGACY) enable_pciirqenb()
176 tmp |= BIT(ep->num); enable_pciirqenb()
178 tmp |= BIT(ep_bit[ep->num]); enable_pciirqenb()
179 writel(tmp, &ep->dev->regs->pciirqenb0); enable_pciirqenb()
188 struct net2280_ep *ep; net2280_enable() local
196 ep = container_of(_ep, struct net2280_ep, ep); net2280_enable()
197 if (!_ep || !desc || ep->desc || _ep->name == ep0name || net2280_enable()
202 dev = ep->dev; net2280_enable()
219 ep->is_in = !!usb_endpoint_dir_in(desc); net2280_enable()
220 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) { net2280_enable()
226 /* sanity check ep-e/ep-f since their fifos are small */ net2280_enable()
228 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) { net2280_enable()
235 ep->desc = desc; net2280_enable()
238 ep->stopped = 0; net2280_enable()
239 ep->wedged = 0; net2280_enable()
240 ep->out_overflow = 0; net2280_enable()
243 set_max_speed(ep, max); net2280_enable()
246 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); net2280_enable()
249 tmp = readl(&ep->cfg->ep_cfg); net2280_enable()
250 /* If USB ep number doesn't match hardware ep number */ net2280_enable()
256 if (ep->is_in) net2280_enable()
268 &ep->regs->ep_rsp); net2280_enable()
279 ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC); net2280_enable()
287 ep->is_in = (tmp & USB_DIR_IN) != 0; net2280_enable()
290 if (dev->enhanced_mode && ep->is_in) { net2280_enable()
296 tmp |= (ep->is_in << ENDPOINT_DIRECTION); net2280_enable()
302 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE); net2280_enable()
309 if (!ep->is_in) net2280_enable()
310 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); net2280_enable()
316 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); net2280_enable()
320 ep_clear_seqnum(ep); net2280_enable()
321 writel(tmp, &ep->cfg->ep_cfg); net2280_enable()
324 if (!ep->dma) { /* pio, per-packet */ net2280_enable()
325 enable_pciirqenb(ep); net2280_enable()
330 tmp |= readl(&ep->regs->ep_irqenb); net2280_enable()
331 writel(tmp, &ep->regs->ep_irqenb); net2280_enable()
333 tmp = BIT((8 + ep->num)); /* completion */ net2280_enable()
343 writel(tmp, &ep->regs->ep_irqenb); net2280_enable()
345 enable_pciirqenb(ep); net2280_enable()
350 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n", net2280_enable()
353 ep->dma ? "dma" : "pio", max); net2280_enable()
360 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret); net2280_enable()
384 struct net2280_ep *ep) ep_reset_228x()
388 ep->desc = NULL; ep_reset_228x()
389 INIT_LIST_HEAD(&ep->queue); ep_reset_228x()
391 usb_ep_set_maxpacket_limit(&ep->ep, ~0); ep_reset_228x()
392 ep->ep.ops = &net2280_ep_ops; ep_reset_228x()
395 if (ep->dma) { ep_reset_228x()
396 writel(0, &ep->dma->dmactl); ep_reset_228x()
400 &ep->dma->dmastat); ep_reset_228x()
403 tmp &= ~BIT(ep->num); ep_reset_228x()
407 tmp &= ~BIT((8 + ep->num)); /* completion */ ep_reset_228x()
410 writel(0, &ep->regs->ep_irqenb); ep_reset_228x()
415 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) { ep_reset_228x()
428 if (ep->num != 0) { ep_reset_228x()
432 writel(tmp, &ep->regs->ep_rsp); ep_reset_228x()
435 if (ep->dev->quirks & PLX_2280) ep_reset_228x()
454 &ep->regs->ep_stat); ep_reset_228x()
460 struct net2280_ep *ep) ep_reset_338x()
464 ep->desc = NULL; ep_reset_338x()
465 INIT_LIST_HEAD(&ep->queue); ep_reset_338x()
467 usb_ep_set_maxpacket_limit(&ep->ep, ~0); ep_reset_338x()
468 ep->ep.ops = &net2280_ep_ops; ep_reset_338x()
471 if (ep->dma) { ep_reset_338x()
472 writel(0, &ep->dma->dmactl); ep_reset_338x()
478 &ep->dma->dmastat); ep_reset_338x()
480 dmastat = readl(&ep->dma->dmastat); ep_reset_338x()
482 ep_warn(ep->dev, "The dmastat return = %x!!\n", ep_reset_338x()
484 writel(0x5a, &ep->dma->dmastat); ep_reset_338x()
488 tmp &= ~BIT(ep_bit[ep->num]); ep_reset_338x()
491 if (ep->num < 5) { ep_reset_338x()
493 tmp &= ~BIT((8 + ep->num)); /* completion */ ep_reset_338x()
497 writel(0, &ep->regs->ep_irqenb); ep_reset_338x()
505 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat); ep_reset_338x()
507 tmp = readl(&ep->cfg->ep_cfg); ep_reset_338x()
508 if (ep->is_in) ep_reset_338x()
512 writel(tmp, &ep->cfg->ep_cfg); ep_reset_338x()
519 struct net2280_ep *ep; net2280_disable() local
522 ep = container_of(_ep, struct net2280_ep, ep); net2280_disable()
523 if (!_ep || !ep->desc || _ep->name == ep0name) { net2280_disable()
524 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); net2280_disable()
527 spin_lock_irqsave(&ep->dev->lock, flags); net2280_disable()
528 nuke(ep); net2280_disable()
530 if (ep->dev->quirks & PLX_SUPERSPEED) net2280_disable()
531 ep_reset_338x(ep->dev->regs, ep); net2280_disable()
533 ep_reset_228x(ep->dev->regs, ep); net2280_disable()
535 ep_vdbg(ep->dev, "disabled %s %s\n", net2280_disable()
536 ep->dma ? "dma" : "pio", _ep->name); net2280_disable()
539 (void)readl(&ep->cfg->ep_cfg); net2280_disable()
541 if (!ep->dma && ep->num >= 1 && ep->num <= 4) net2280_disable()
542 ep->dma = &ep->dev->dma[ep->num - 1]; net2280_disable()
544 spin_unlock_irqrestore(&ep->dev->lock, flags); net2280_disable()
553 struct net2280_ep *ep; net2280_alloc_request() local
557 pr_err("%s: Invalid ep\n", __func__); net2280_alloc_request()
560 ep = container_of(_ep, struct net2280_ep, ep); net2280_alloc_request()
569 if (ep->dma) { net2280_alloc_request()
572 td = pci_pool_alloc(ep->dev->requests, gfp_flags, net2280_alloc_request()
587 struct net2280_ep *ep; net2280_free_request() local
590 ep = container_of(_ep, struct net2280_ep, ep); net2280_free_request()
592 dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n", net2280_free_request()
Wait, before this edit check the original typo line below.
600 pci_pool_free(ep->dev->requests, req->td, req->td_dma); net2280_free_request()
609 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
611 * one packet. ep-a..ep-d should use dma instead.
613 static void write_fifo(struct net2280_ep *ep, struct usb_request *req) write_fifo() argument
615 struct net2280_ep_regs __iomem *regs = ep->regs; write_fifo()
632 count = ep->ep.maxpacket; write_fifo()
636 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", write_fifo()
637 ep->ep.name, count, write_fifo()
638 (count != ep->ep.maxpacket) ? " (short)" : "", write_fifo()
656 if (count || total < ep->ep.maxpacket) { write_fifo()
659 set_fifo_bytecount(ep, count & 0x03); write_fifo()
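The NOTE above write_fifo() is why ep-a..ep-d carry DMA: only endpoint numbers 1..4 own one of the four DMA engines, so pio's multi-packet fifo case is avoided there; a sketch mirroring the channel assignment visible in net2280_disable() and usb_reinit_228x():

	if (ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];	/* dma-capable */
	else
		ep->dma = NULL;				/* ep0, ep-e, ep-f: pio */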
673 static void out_flush(struct net2280_ep *ep) out_flush() argument
678 statp = &ep->regs->ep_stat; out_flush()
682 ep_dbg(ep->dev, "%s %s %08x !NAK\n", out_flush()
683 ep->ep.name, __func__, tmp); out_flush()
684 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); out_flush()
696 ep->dev->gadget.speed == USB_SPEED_FULL) { out_flush()
710 * for ep-a..ep-d this will read multiple packets out when they
713 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req) read_fifo() argument
715 struct net2280_ep_regs __iomem *regs = ep->regs; read_fifo()
723 if (ep->dev->chiprev == 0x0100 && read_fifo()
724 ep->dev->gadget.speed == USB_SPEED_FULL) { read_fifo()
726 tmp = readl(&ep->regs->ep_stat); read_fifo()
730 start_out_naking(ep); read_fifo()
743 tmp = readl(&ep->regs->ep_stat); read_fifo()
753 if ((tmp % ep->ep.maxpacket) != 0) { read_fifo()
754 ep_err(ep->dev, read_fifo()
756 ep->ep.name, count, tmp); read_fifo()
767 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0); read_fifo()
769 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", read_fifo()
770 ep->ep.name, count, is_short ? " (short)" : "", read_fifo()
790 out_flush(ep); read_fifo()
792 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); read_fifo()
793 (void) readl(&ep->regs->ep_rsp); read_fifo()
801 static void fill_dma_desc(struct net2280_ep *ep, fill_dma_desc() argument
812 if (ep->is_in) fill_dma_desc()
814 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) || fill_dma_desc()
815 !(ep->dev->quirks & PLX_2280)) fill_dma_desc()
826 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */ fill_dma_desc()
853 static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) start_queue() argument
855 struct net2280_dma_regs __iomem *dma = ep->dma; start_queue()
856 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION); start_queue()
858 if (!(ep->dev->quirks & PLX_2280)) start_queue()
865 if (ep->dev->quirks & PLX_SUPERSPEED) start_queue()
870 (void) readl(&ep->dev->pci->pcimstctl); start_queue()
874 if (!ep->is_in) start_queue()
875 stop_out_naking(ep); start_queue()
878 static void start_dma(struct net2280_ep *ep, struct net2280_request *req) start_dma() argument
881 struct net2280_dma_regs __iomem *dma = ep->dma; start_dma()
887 writel(0, &ep->dma->dmactl); start_dma()
890 if (!ep->is_in && (readl(&ep->regs->ep_stat) & start_dma()
893 &ep->regs->ep_stat); start_dma()
895 tmp = readl(&ep->regs->ep_avail); start_dma()
922 if (ep->is_in) { start_dma()
923 if (likely((req->req.length % ep->ep.maxpacket) || start_dma()
926 ep->in_fifo_validate = 1; start_dma()
928 ep->in_fifo_validate = 0; start_dma()
932 req->td->dmadesc = cpu_to_le32 (ep->td_dma); start_dma()
933 fill_dma_desc(ep, req, 1); start_dma()
937 start_queue(ep, tmp, req->td_dma); start_dma()
941 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid) queue_dma() argument
947 end = ep->dummy; queue_dma()
948 ep->dummy = req->td; queue_dma()
951 tmp = ep->td_dma; queue_dma()
952 ep->td_dma = req->td_dma; queue_dma()
955 end->dmadesc = cpu_to_le32 (ep->td_dma); queue_dma()
957 fill_dma_desc(ep, req, valid); queue_dma()
961 done(struct net2280_ep *ep, struct net2280_request *req, int status) done() argument
964 unsigned stopped = ep->stopped; done()
973 dev = ep->dev; done()
974 if (ep->dma) done()
975 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); done()
979 ep->ep.name, &req->req, status, done()
983 ep->stopped = 1; done()
985 usb_gadget_giveback_request(&ep->ep, &req->req); done()
987 ep->stopped = stopped; done()
996 struct net2280_ep *ep; net2280_queue() local
1004 ep = container_of(_ep, struct net2280_ep, ep); net2280_queue()
1005 if (!_ep || (!ep->desc && ep->num != 0)) { net2280_queue()
1006 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); net2280_queue()
1019 dev = ep->dev; net2280_queue()
1026 if (ep->dma && _req->length == 0) { net2280_queue()
1032 if (ep->dma) { net2280_queue()
1034 ep->is_in); net2280_queue()
1048 if (list_empty(&ep->queue) && !ep->stopped && net2280_queue()
1049 !((dev->quirks & PLX_SUPERSPEED) && ep->dma && net2280_queue()
1050 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) { net2280_queue()
1053 if (ep->dma) net2280_queue()
1054 start_dma(ep, req); net2280_queue()
1057 if (ep->num == 0 && _req->length == 0) { net2280_queue()
1058 allow_status(ep); net2280_queue()
1059 done(ep, req, 0); net2280_queue()
1060 ep_vdbg(dev, "%s status ack\n", ep->ep.name); net2280_queue()
1065 if (ep->is_in) net2280_queue()
1066 write_fifo(ep, _req); net2280_queue()
1067 else if (list_empty(&ep->queue)) { net2280_queue()
1071 s = readl(&ep->regs->ep_stat); net2280_queue()
1079 if (read_fifo(ep, req) && net2280_queue()
1080 ep->num == 0) { net2280_queue()
1081 done(ep, req, 0); net2280_queue()
1082 allow_status(ep); net2280_queue()
1085 } else if (read_fifo(ep, req) && net2280_queue()
1086 ep->num != 0) { net2280_queue()
1087 done(ep, req, 0); net2280_queue()
1090 s = readl(&ep->regs->ep_stat); net2280_queue()
1096 &ep->regs->ep_rsp); net2280_queue()
1100 } else if (ep->dma) { net2280_queue()
1103 if (ep->is_in) { net2280_queue()
1110 (req->req.length % ep->ep.maxpacket)); net2280_queue()
1111 if (expect != ep->in_fifo_validate) net2280_queue()
1114 queue_dma(ep, req, valid); net2280_queue()
1118 ep->responded = 1; net2280_queue()
1120 list_add_tail(&req->queue, &ep->queue); net2280_queue()
1128 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret); net2280_queue()
1133 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount, dma_done() argument
1137 done(ep, req, status); dma_done()
1140 static void scan_dma_completions(struct net2280_ep *ep) scan_dma_completions() argument
1145 while (!list_empty(&ep->queue)) { scan_dma_completions()
1149 req = list_entry(ep->queue.next, scan_dma_completions()
1164 tmp = readl(&ep->dma->dmacount); scan_dma_completions()
1168 dma_done(ep, req, tmp, 0); scan_dma_completions()
1170 } else if (!ep->is_in && scan_dma_completions()
1171 (req->req.length % ep->ep.maxpacket) && scan_dma_completions()
1172 !(ep->dev->quirks & PLX_SUPERSPEED)) { scan_dma_completions()
1174 tmp = readl(&ep->regs->ep_stat); scan_dma_completions()
1180 ep_warn(ep->dev, "%s lost packet sync!\n", scan_dma_completions()
1181 ep->ep.name); scan_dma_completions()
1184 tmp = readl(&ep->regs->ep_avail); scan_dma_completions()
1187 ep->out_overflow = 1; scan_dma_completions()
1188 ep_dbg(ep->dev, scan_dma_completions()
1190 ep->ep.name, tmp, scan_dma_completions()
1196 dma_done(ep, req, tmp, 0); scan_dma_completions()
1200 static void restart_dma(struct net2280_ep *ep) restart_dma() argument
1204 if (ep->stopped) restart_dma()
1206 req = list_entry(ep->queue.next, struct net2280_request, queue); restart_dma()
1208 start_dma(ep, req); restart_dma()
1211 static void abort_dma(struct net2280_ep *ep) abort_dma() argument
1214 if (likely(!list_empty(&ep->queue))) { abort_dma()
1216 writel(BIT(DMA_ABORT), &ep->dma->dmastat); abort_dma()
1217 spin_stop_dma(ep->dma); abort_dma()
1219 stop_dma(ep->dma); abort_dma()
1220 scan_dma_completions(ep); abort_dma()
1224 static void nuke(struct net2280_ep *ep) nuke() argument
1229 ep->stopped = 1; nuke()
1230 if (ep->dma) nuke()
1231 abort_dma(ep); nuke()
1232 while (!list_empty(&ep->queue)) { nuke()
1233 req = list_entry(ep->queue.next, nuke()
1236 done(ep, req, -ESHUTDOWN); nuke()
1243 struct net2280_ep *ep; net2280_dequeue() local
1249 ep = container_of(_ep, struct net2280_ep, ep); net2280_dequeue()
1250 if (!_ep || (!ep->desc && ep->num != 0) || !_req) { net2280_dequeue()
1251 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n", net2280_dequeue()
1256 spin_lock_irqsave(&ep->dev->lock, flags); net2280_dequeue()
1257 stopped = ep->stopped; net2280_dequeue()
1261 ep->stopped = 1; net2280_dequeue()
1262 if (ep->dma) { net2280_dequeue()
1263 dmactl = readl(&ep->dma->dmactl); net2280_dequeue()
1265 stop_dma(ep->dma); net2280_dequeue()
1266 scan_dma_completions(ep); net2280_dequeue()
1270 list_for_each_entry(req, &ep->queue, queue) { net2280_dequeue()
1275 spin_unlock_irqrestore(&ep->dev->lock, flags); net2280_dequeue()
1276 dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", net2280_dequeue()
1282 if (ep->queue.next == &req->queue) { net2280_dequeue()
1283 if (ep->dma) { net2280_dequeue()
1284 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name); net2280_dequeue()
1286 abort_dma(ep); net2280_dequeue()
1287 if (likely(ep->queue.next == &req->queue)) { net2280_dequeue()
1290 dma_done(ep, req, net2280_dequeue()
1291 readl(&ep->dma->dmacount), net2280_dequeue()
1295 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name); net2280_dequeue()
1296 done(ep, req, -ECONNRESET); net2280_dequeue()
1302 done(ep, req, -ECONNRESET); net2280_dequeue()
1303 ep->stopped = stopped; net2280_dequeue()
1305 if (ep->dma) { net2280_dequeue()
1307 if (list_empty(&ep->queue)) net2280_dequeue()
1308 stop_dma(ep->dma); net2280_dequeue()
1309 else if (!ep->stopped) { net2280_dequeue()
1312 writel(dmactl, &ep->dma->dmactl); net2280_dequeue()
1314 start_dma(ep, list_entry(ep->queue.next, net2280_dequeue()
1319 spin_unlock_irqrestore(&ep->dev->lock, flags); net2280_dequeue()
1330 struct net2280_ep *ep; net2280_set_halt_and_wedge() local
1334 ep = container_of(_ep, struct net2280_ep, ep); net2280_set_halt_and_wedge()
1335 if (!_ep || (!ep->desc && ep->num != 0)) { net2280_set_halt_and_wedge()
1336 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); net2280_set_halt_and_wedge()
1339 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { net2280_set_halt_and_wedge()
1343 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03) net2280_set_halt_and_wedge()
1349 spin_lock_irqsave(&ep->dev->lock, flags); net2280_set_halt_and_wedge()
1350 if (!list_empty(&ep->queue)) { net2280_set_halt_and_wedge()
1353 } else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) { net2280_set_halt_and_wedge()
1357 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name, net2280_set_halt_and_wedge()
1362 if (ep->num == 0) net2280_set_halt_and_wedge()
1363 ep->dev->protocol_stall = 1; net2280_set_halt_and_wedge()
1365 set_halt(ep); net2280_set_halt_and_wedge()
1367 ep->wedged = 1; net2280_set_halt_and_wedge()
1369 clear_halt(ep); net2280_set_halt_and_wedge()
1370 if (ep->dev->quirks & PLX_SUPERSPEED && net2280_set_halt_and_wedge()
1371 !list_empty(&ep->queue) && ep->td_dma) net2280_set_halt_and_wedge()
1372 restart_dma(ep); net2280_set_halt_and_wedge()
1373 ep->wedged = 0; net2280_set_halt_and_wedge()
1375 (void) readl(&ep->regs->ep_rsp); net2280_set_halt_and_wedge()
1377 spin_unlock_irqrestore(&ep->dev->lock, flags); net2280_set_halt_and_wedge()
1382 spin_unlock_irqrestore(&ep->dev->lock, flags); net2280_set_halt_and_wedge()
1384 dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval); net2280_set_halt_and_wedge()
1396 pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep); net2280_set_wedge()
1404 struct net2280_ep *ep; net2280_fifo_status() local
1407 ep = container_of(_ep, struct net2280_ep, ep); net2280_fifo_status()
1408 if (!_ep || (!ep->desc && ep->num != 0)) { net2280_fifo_status()
1409 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); net2280_fifo_status()
1412 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { net2280_fifo_status()
1413 dev_err(&ep->dev->pdev->dev, net2280_fifo_status()
1415 __func__, ep->dev->driver, ep->dev->gadget.speed); net2280_fifo_status()
1419 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1); net2280_fifo_status()
1420 if (avail > ep->fifo_size) { net2280_fifo_status()
1421 dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__); net2280_fifo_status()
1424 if (ep->is_in) net2280_fifo_status()
1425 avail = ep->fifo_size - avail; net2280_fifo_status()
1431 struct net2280_ep *ep; net2280_fifo_flush() local
1433 ep = container_of(_ep, struct net2280_ep, ep); net2280_fifo_flush()
1434 if (!_ep || (!ep->desc && ep->num != 0)) { net2280_fifo_flush()
1435 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep); net2280_fifo_flush()
1438 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) { net2280_fifo_flush()
1439 dev_err(&ep->dev->pdev->dev, net2280_fifo_flush()
1441 __func__, ep->dev->driver, ep->dev->gadget.speed); net2280_fifo_flush()
1445 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat); net2280_fifo_flush()
1446 (void) readl(&ep->regs->ep_rsp); net2280_fifo_flush()
1558 struct usb_ep *ep; net2280_match_ep() local
1561 /* ep-e, ep-f are PIO with only 64 byte fifos */ net2280_match_ep()
1562 ep = gadget_find_ep_by_name(_gadget, "ep-e"); net2280_match_ep()
1563 if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp)) net2280_match_ep()
1564 return ep; net2280_match_ep()
1565 ep = gadget_find_ep_by_name(_gadget, "ep-f"); net2280_match_ep()
1566 if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp)) net2280_match_ep()
1567 return ep; net2280_match_ep()
1571 snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc), net2280_match_ep()
1573 ep = gadget_find_ep_by_name(_gadget, name); net2280_match_ep()
1574 if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp)) net2280_match_ep()
1575 return ep; net2280_match_ep()
1681 struct net2280_ep *ep; registers_show() local
1683 ep = &dev->ep[i]; registers_show()
1684 if (i && !ep->desc) registers_show()
1687 t1 = readl(&ep->cfg->ep_cfg); registers_show()
1688 t2 = readl(&ep->regs->ep_rsp) & 0xff; registers_show()
1692 ep->ep.name, t1, t2, registers_show()
1709 readl(&ep->regs->ep_irqenb)); registers_show()
1715 "(ep%d%s-%s)%s\n", registers_show()
1716 readl(&ep->regs->ep_stat), registers_show()
1717 readl(&ep->regs->ep_avail), registers_show()
1720 ep->stopped ? "*" : ""); registers_show()
1724 if (!ep->dma) registers_show()
1730 readl(&ep->dma->dmactl), registers_show()
1731 readl(&ep->dma->dmastat), registers_show()
1732 readl(&ep->dma->dmacount), registers_show()
1733 readl(&ep->dma->dmaaddr), registers_show()
1734 readl(&ep->dma->dmadesc)); registers_show()
1747 struct net2280_ep *ep; registers_show() local
1749 ep = &dev->ep[i]; registers_show()
1750 if (i && !ep->irqs) registers_show()
1752 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs); registers_show()
1782 struct net2280_ep *ep = &dev->ep[i]; queues_show() local
1789 d = ep->desc; queues_show()
1794 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n", queues_show()
1795 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, queues_show()
1799 ep->dma ? "dma" : "pio", ep->fifo_size queues_show()
1803 ep->is_in ? "in" : "out"); queues_show()
1809 if (list_empty(&ep->queue)) { queues_show()
1817 list_for_each_entry(req, &ep->queue, queue) { queues_show()
1818 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) queues_show()
1824 readl(&ep->dma->dmacount)); queues_show()
1835 if (ep->dma) { queues_show()
1878 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */ set_fifo_mode()
1880 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1881 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1884 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1885 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1886 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; set_fifo_mode()
1889 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048; set_fifo_mode()
1892 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1893 dev->ep[1].fifo_size = 2048; set_fifo_mode()
1894 dev->ep[2].fifo_size = 1024; set_fifo_mode()
1897 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */ set_fifo_mode()
1898 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1899 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list); set_fifo_mode()
1907 * returning ep regs back to normal. defect7374_disable_data_eps()
1909 struct net2280_ep *ep; defect7374_disable_data_eps() local
1915 ep = &dev->ep[i]; defect7374_disable_data_eps()
1916 writel(i, &ep->cfg->ep_cfg); defect7374_disable_data_eps()
1967 writel(tmp, &dev->ep[i].cfg->ep_cfg); defect7374_enable_data_eps_zero()
2039 struct net2280_ep *ep = &dev->ep[tmp + 1]; usb_reset_228x() local
2040 if (ep->dma) usb_reset_228x()
2041 abort_dma(ep); usb_reset_228x()
2077 struct net2280_ep *ep = &dev->ep[tmp + 1]; usb_reset_338x() local
2080 if (ep->dma) { usb_reset_338x()
2081 abort_dma(ep); usb_reset_338x()
2102 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */ usb_reset_338x()
2106 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list); usb_reset_338x()
2123 struct net2280_ep *ep = &dev->ep[tmp]; usb_reinit_228x() local
2125 ep->ep.name = ep_info_dft[tmp].name; usb_reinit_228x()
2126 ep->ep.caps = ep_info_dft[tmp].caps; usb_reinit_228x()
2127 ep->dev = dev; usb_reinit_228x()
2128 ep->num = tmp; usb_reinit_228x()
2131 ep->fifo_size = 1024; usb_reinit_228x()
2132 ep->dma = &dev->dma[tmp - 1]; usb_reinit_228x()
2134 ep->fifo_size = 64; usb_reinit_228x()
2135 ep->regs = &dev->epregs[tmp]; usb_reinit_228x()
2136 ep->cfg = &dev->epregs[tmp]; usb_reinit_228x()
2137 ep_reset_228x(dev->regs, ep); usb_reinit_228x()
2139 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); usb_reinit_228x()
2140 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64); usb_reinit_228x()
2141 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64); usb_reinit_228x()
2143 dev->gadget.ep0 = &dev->ep[0].ep; usb_reinit_228x()
2144 dev->ep[0].stopped = 0; usb_reinit_228x()
2164 struct net2280_ep *ep = &dev->ep[i]; usb_reinit_338x() local
2166 ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name : usb_reinit_338x()
2168 ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps : usb_reinit_338x()
2170 ep->dev = dev; usb_reinit_338x()
2171 ep->num = i; usb_reinit_338x()
2174 ep->dma = &dev->dma[i - 1]; usb_reinit_338x()
2177 ep->cfg = &dev->epregs[ne[i]]; usb_reinit_338x()
2183 writel(ne[i], &ep->cfg->ep_cfg); usb_reinit_338x()
2184 ep->regs = (struct net2280_ep_regs __iomem *) usb_reinit_338x()
2188 ep->cfg = &dev->epregs[i]; usb_reinit_338x()
2189 ep->regs = &dev->epregs[i]; usb_reinit_338x()
2192 ep->fifo_size = (i != 0) ? 2048 : 512; usb_reinit_338x()
2194 ep_reset_338x(dev->regs, ep); usb_reinit_338x()
2196 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512); usb_reinit_338x()
2198 dev->gadget.ep0 = &dev->ep[0].ep; usb_reinit_338x()
2199 dev->ep[0].stopped = 0; usb_reinit_338x()
2379 dev->ep[i].irqs = 0; net2280_start()
2425 nuke(&dev->ep[i]); stop_activity()
2460 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2464 static void handle_ep_small(struct net2280_ep *ep) handle_ep_small() argument
2471 if (!list_empty(&ep->queue)) handle_ep_small()
2472 req = list_entry(ep->queue.next, handle_ep_small()
2478 t = readl(&ep->regs->ep_stat); handle_ep_small()
2479 ep->irqs++; handle_ep_small()
2481 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n", handle_ep_small()
2482 ep->ep.name, t, req ? &req->req : NULL); handle_ep_small()
2484 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) handle_ep_small()
2485 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat); handle_ep_small()
2488 writel(t, &ep->regs->ep_stat); handle_ep_small()
2500 if (unlikely(ep->num == 0)) { handle_ep_small()
2501 if (ep->is_in) { handle_ep_small()
2504 if (ep->dev->protocol_stall) { handle_ep_small()
2505 ep->stopped = 1; handle_ep_small()
2506 set_halt(ep); handle_ep_small()
2509 allow_status(ep); handle_ep_small()
2513 if (ep->dev->protocol_stall) { handle_ep_small()
2514 ep->stopped = 1; handle_ep_small()
2515 set_halt(ep); handle_ep_small()
2517 } else if (ep->responded && handle_ep_small()
2518 !req && !ep->stopped) handle_ep_small()
2519 write_fifo(ep, NULL); handle_ep_small()
2524 if (ep->dev->protocol_stall) { handle_ep_small()
2525 ep->stopped = 1; handle_ep_small()
2526 set_halt(ep); handle_ep_small()
2533 (ep->responded && !req)) { handle_ep_small()
2534 ep->dev->protocol_stall = 1; handle_ep_small()
2535 set_halt(ep); handle_ep_small()
2536 ep->stopped = 1; handle_ep_small()
2538 done(ep, req, -EOVERFLOW); handle_ep_small()
2548 if (likely(ep->dma)) { handle_ep_small()
2551 int stopped = ep->stopped; handle_ep_small()
2557 ep->stopped = 1; handle_ep_small()
2558 for (count = 0; ; t = readl(&ep->regs->ep_stat)) { handle_ep_small()
2563 scan_dma_completions(ep); handle_ep_small()
2564 if (unlikely(list_empty(&ep->queue) || handle_ep_small()
2565 ep->out_overflow)) { handle_ep_small()
2569 req = list_entry(ep->queue.next, handle_ep_small()
2576 count = readl(&ep->dma->dmacount); handle_ep_small()
2578 if (readl(&ep->dma->dmadesc) handle_ep_small()
2586 /* stop DMA, leave ep NAKing */ handle_ep_small()
2587 writel(BIT(DMA_ABORT), &ep->dma->dmastat); handle_ep_small()
2588 spin_stop_dma(ep->dma); handle_ep_small()
2592 t = readl(&ep->regs->ep_avail); handle_ep_small()
2593 dma_done(ep, req, count, handle_ep_small()
2594 (ep->out_overflow || t) handle_ep_small()
2599 if (unlikely(ep->out_overflow || handle_ep_small()
2600 (ep->dev->chiprev == 0x0100 && handle_ep_small()
2601 ep->dev->gadget.speed handle_ep_small()
2603 out_flush(ep); handle_ep_small()
2604 ep->out_overflow = 0; handle_ep_small()
2608 ep->stopped = stopped; handle_ep_small()
2609 if (!list_empty(&ep->queue)) handle_ep_small()
2610 restart_dma(ep); handle_ep_small()
2612 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n", handle_ep_small()
2613 ep->ep.name, t); handle_ep_small()
2618 if (read_fifo(ep, req) && ep->num != 0) handle_ep_small()
2626 if (len > ep->ep.maxpacket) handle_ep_small()
2627 len = ep->ep.maxpacket; handle_ep_small()
2633 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) handle_ep_small()
2643 done(ep, req, 0); handle_ep_small()
2646 if (ep->num == 0) { handle_ep_small()
2651 if (!ep->stopped) handle_ep_small()
2652 allow_status(ep); handle_ep_small()
2655 if (!list_empty(&ep->queue) && !ep->stopped) handle_ep_small()
2656 req = list_entry(ep->queue.next, handle_ep_small()
2660 if (req && !ep->is_in) handle_ep_small()
2661 stop_out_naking(ep); handle_ep_small()
2668 if (req && !ep->stopped) { handle_ep_small()
2672 write_fifo(ep, &req->req); handle_ep_small()
2678 struct net2280_ep *ep; get_ep_by_addr() local
2681 return &dev->ep[0]; get_ep_by_addr()
2682 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { get_ep_by_addr()
2685 if (!ep->desc) get_ep_by_addr()
2687 bEndpointAddress = ep->desc->bEndpointAddress; get_ep_by_addr()
2691 return ep; get_ep_by_addr()
2770 static void ep_clear_seqnum(struct net2280_ep *ep) ep_clear_seqnum() argument
2772 struct net2280 *dev = ep->dev; ep_clear_seqnum()
2777 val |= ep_pl[ep->num]; ep_clear_seqnum()
2786 struct net2280_ep *ep, struct usb_ctrlrequest r) handle_stat0_irqs_superspeed()
2811 set_fifo_bytecount(ep, sizeof(status)); handle_stat0_irqs_superspeed()
2813 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2823 set_fifo_bytecount(ep, sizeof(status)); handle_stat0_irqs_superspeed()
2825 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2843 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2851 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2859 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2871 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2882 ep_vdbg(dev, "%s clear halt\n", e->ep.name); handle_stat0_irqs_superspeed()
2891 allow_status(ep); handle_stat0_irqs_superspeed()
2892 ep->stopped = 1; handle_stat0_irqs_superspeed()
2909 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2917 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2925 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2937 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2946 ep->stopped = 1; handle_stat0_irqs_superspeed()
2947 if (ep->num == 0) handle_stat0_irqs_superspeed()
2948 ep->dev->protocol_stall = 1; handle_stat0_irqs_superspeed()
2950 if (ep->dma) handle_stat0_irqs_superspeed()
2951 abort_dma(ep); handle_stat0_irqs_superspeed()
2952 set_halt(ep); handle_stat0_irqs_superspeed()
2954 allow_status_338x(ep); handle_stat0_irqs_superspeed()
2968 readl(&ep->cfg->ep_cfg)); handle_stat0_irqs_superspeed()
2970 ep->responded = 0; handle_stat0_irqs_superspeed()
2981 set_halt(ep); handle_stat0_irqs_superspeed()
3009 handle_ep_small(&dev->ep[index]); usb338x_handle_ep_intr()
3015 struct net2280_ep *ep; handle_stat0_irqs() local
3037 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, handle_stat0_irqs()
3041 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, handle_stat0_irqs()
3045 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, handle_stat0_irqs()
3053 ep = &dev->ep[0]; handle_stat0_irqs()
3054 ep->irqs++; handle_stat0_irqs()
3058 while (!list_empty(&ep->queue)) { handle_stat0_irqs()
3059 req = list_entry(ep->queue.next, handle_stat0_irqs()
3061 done(ep, req, (req->req.actual == req->req.length) handle_stat0_irqs()
3064 ep->stopped = 0; handle_stat0_irqs()
3067 if (ep->dev->quirks & PLX_2280) handle_stat0_irqs()
3085 &ep->regs->ep_stat); handle_stat0_irqs()
3111 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; handle_stat0_irqs()
3112 if (ep->is_in) { handle_stat0_irqs()
3116 stop_out_naking(ep); handle_stat0_irqs()
3126 ep->responded = 1; handle_stat0_irqs()
3129 handle_stat0_irqs_superspeed(dev, ep, u.r); handle_stat0_irqs()
3152 set_fifo_bytecount(ep, w_length); handle_stat0_irqs()
3154 allow_status(ep); handle_stat0_irqs()
3155 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status); handle_stat0_irqs()
3172 ep->ep.name); handle_stat0_irqs()
3174 ep_vdbg(dev, "%s clear halt\n", e->ep.name); handle_stat0_irqs()
3176 if ((ep->dev->quirks & PLX_SUPERSPEED) && handle_stat0_irqs()
3180 allow_status(ep); handle_stat0_irqs()
3195 if (e->ep.name == ep0name) handle_stat0_irqs()
3200 allow_status(ep); handle_stat0_irqs()
3201 ep_vdbg(dev, "%s set halt\n", ep->ep.name); handle_stat0_irqs()
3211 readl(&ep->cfg->ep_cfg)); handle_stat0_irqs()
3212 ep->responded = 0; handle_stat0_irqs()
3259 ep = &dev->ep[num]; handle_stat0_irqs()
3260 handle_ep_small(ep); handle_stat0_irqs()
3281 struct net2280_ep *ep; variable in typeref:struct:net2280_ep
3379 /* DMA status, for ep-{a,b,c,d} */
3391 ep = &dev->ep[num + 1];
3392 dma = ep->dma;
3397 /* clear ep's dma status */
3404 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3410 ep_dbg(ep->dev, "%s no xact done? %08x\n",
3411 ep->ep.name, tmp);
3414 stop_dma(ep->dma);
3426 scan_dma_completions(ep); variable
3429 if (!list_empty(&ep->queue)) {
3431 restart_dma(ep); variable
3433 ep->irqs++;
3507 if (!dev->ep[i].dummy) net2280_remove()
3509 pci_pool_free(dev->requests, dev->ep[i].dummy, net2280_remove()
3510 dev->ep[i].td_dma); net2280_remove()
3667 &dev->ep[i].td_dma); net2280_probe()
3675 dev->ep[i].dummy = td; net2280_probe()
383 ep_reset_228x(struct net2280_regs __iomem *regs, struct net2280_ep *ep) ep_reset_228x() argument
459 ep_reset_338x(struct net2280_regs __iomem *regs, struct net2280_ep *ep) ep_reset_338x() argument
2785 handle_stat0_irqs_superspeed(struct net2280 *dev, struct net2280_ep *ep, struct usb_ctrlrequest r) handle_stat0_irqs_superspeed() argument
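From the function-driver side, cancelling an in-flight request lands in net2280_dequeue() above, which aborts DMA when the request heads the queue and completes it with -ECONNRESET; a minimal sketch (endpoint and request pointers hypothetical):

	if (usb_ep_dequeue(ep, req))
		pr_debug("%s: request already completed\n", ep->name);
	/* otherwise req->complete() ran with req->status == -ECONNRESET */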
H A Domap_udc.c122 static void use_ep(struct omap_ep *ep, u16 select) use_ep() argument
124 u16 num = ep->bEndpointAddress & 0x0f; use_ep()
126 if (ep->bEndpointAddress & USB_DIR_IN) use_ep()
142 static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
149 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); omap_ep_enable() local
157 || ep->bEndpointAddress != desc->bEndpointAddress omap_ep_enable()
158 || ep->maxpacket < usb_endpoint_maxp(desc)) { omap_ep_enable()
159 DBG("%s, bad ep or descriptor\n", __func__); omap_ep_enable()
164 && maxp != ep->maxpacket) omap_ep_enable()
165 || usb_endpoint_maxp(desc) > ep->maxpacket omap_ep_enable()
187 if (ep->bmAttributes != desc->bmAttributes omap_ep_enable()
188 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK omap_ep_enable()
194 udc = ep->udc; omap_ep_enable()
202 ep->ep.desc = desc; omap_ep_enable()
203 ep->irqs = 0; omap_ep_enable()
204 ep->stopped = 0; omap_ep_enable()
205 ep->ep.maxpacket = maxp; omap_ep_enable()
208 ep->dma_channel = 0; omap_ep_enable()
209 ep->has_dma = 0; omap_ep_enable()
210 ep->lch = -1; omap_ep_enable()
211 use_ep(ep, UDC_EP_SEL); omap_ep_enable()
213 ep->ackwait = 0; omap_ep_enable()
216 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) omap_ep_enable()
217 list_add(&ep->iso, &udc->iso); omap_ep_enable()
222 dma_channel_claim(ep, 0); omap_ep_enable()
226 && !ep->has_dma omap_ep_enable()
227 && !(ep->bEndpointAddress & USB_DIR_IN)) { omap_ep_enable()
229 ep->ackwait = 1 + ep->double_buf; omap_ep_enable()
241 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); omap_ep_disable() local
244 if (!_ep || !ep->ep.desc) { omap_ep_disable()
246 _ep ? ep->ep.name : NULL); omap_ep_disable()
250 spin_lock_irqsave(&ep->udc->lock, flags); omap_ep_disable()
251 ep->ep.desc = NULL; omap_ep_disable()
252 nuke(ep, -ESHUTDOWN); omap_ep_disable()
253 ep->ep.maxpacket = ep->maxpacket; omap_ep_disable()
254 ep->has_dma = 0; omap_ep_disable()
256 list_del_init(&ep->iso); omap_ep_disable()
257 del_timer(&ep->timer); omap_ep_disable()
259 spin_unlock_irqrestore(&ep->udc->lock, flags); omap_ep_disable()
268 omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) omap_alloc_request() argument
282 omap_free_request(struct usb_ep *ep, struct usb_request *_req) omap_free_request() argument
292 done(struct omap_ep *ep, struct omap_req *req, int status) done() argument
294 struct omap_udc *udc = ep->udc; done()
295 unsigned stopped = ep->stopped; done()
304 if (use_dma && ep->has_dma) done()
306 (ep->bEndpointAddress & USB_DIR_IN)); done()
312 ep->ep.name, &req->req, status, done()
316 ep->stopped = 1; done()
317 spin_unlock(&ep->udc->lock); done()
318 usb_gadget_giveback_request(&ep->ep, &req->req); done()
319 spin_lock(&ep->udc->lock); done()
320 ep->stopped = stopped; done()
358 static int write_fifo(struct omap_ep *ep, struct omap_req *req) write_fifo() argument
373 count = ep->ep.maxpacket; write_fifo()
376 ep->ackwait = 1; write_fifo()
379 if (count != ep->ep.maxpacket) write_fifo()
392 done(ep, req, 0); write_fifo()
420 static int read_fifo(struct omap_ep *ep, struct omap_req *req) read_fifo() argument
434 if (!ep->double_buf) read_fifo()
436 ep->fnf = 1; read_fifo()
442 avail = ep->ep.maxpacket; read_fifo()
445 ep->fnf = ep->double_buf; read_fifo()
450 if (count < ep->ep.maxpacket) { read_fifo()
464 if (!ep->bEndpointAddress) read_fifo()
467 done(ep, req, 0); read_fifo()
475 static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start) dma_src_len() argument
485 end = omap_get_dma_src_pos(ep->lch); dma_src_len()
486 if (end == ep->dma_counter) dma_src_len()
495 static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) dma_dest_len() argument
499 end = omap_get_dma_dst_pos(ep->lch); dma_dest_len()
500 if (end == ep->dma_counter) dma_dest_len()
517 static void next_in_dma(struct omap_ep *ep, struct omap_req *req) next_in_dma() argument
528 || (cpu_is_omap15xx() && length < ep->maxpacket)) { next_in_dma()
530 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8, next_in_dma()
533 length = min(length / ep->maxpacket, next_in_dma()
536 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, next_in_dma()
537 ep->ep.maxpacket >> 1, length, sync_mode, next_in_dma()
539 length *= ep->maxpacket; next_in_dma()
541 omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF, next_in_dma()
545 omap_start_dma(ep->lch); next_in_dma()
546 ep->dma_counter = omap_get_dma_src_pos(ep->lch); next_in_dma()
548 w |= UDC_TX_DONE_IE(ep->dma_channel); next_in_dma()
550 omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel)); next_in_dma()
554 static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status) finish_in_dma() argument
566 && (req->req.actual % ep->maxpacket) == 0) finish_in_dma()
569 req->req.actual += dma_src_len(ep, req->req.dma finish_in_dma()
573 omap_stop_dma(ep->lch); finish_in_dma()
575 w &= ~UDC_TX_DONE_IE(ep->dma_channel); finish_in_dma()
577 done(ep, req, status); finish_in_dma()
580 static void next_out_dma(struct omap_ep *ep, struct omap_req *req) next_out_dma() argument
587 packets /= ep->ep.maxpacket; next_out_dma()
589 req->dma_bytes = packets * ep->ep.maxpacket; next_out_dma()
590 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, next_out_dma()
591 ep->ep.maxpacket >> 1, packets, next_out_dma()
594 omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF, next_out_dma()
597 ep->dma_counter = omap_get_dma_dst_pos(ep->lch); next_out_dma()
599 omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel)); next_out_dma()
601 w |= UDC_RX_EOT_IE(ep->dma_channel); next_out_dma()
603 omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM); next_out_dma()
606 omap_start_dma(ep->lch); next_out_dma()
610 finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one) finish_out_dma() argument
615 ep->dma_counter = (u16) (req->req.dma + req->req.actual); finish_out_dma()
616 count = dma_dest_len(ep, req->req.dma + req->req.actual); finish_out_dma()
624 omap_stop_dma(ep->lch); finish_out_dma()
632 w &= ~UDC_RX_EOT_IE(ep->dma_channel); finish_out_dma()
634 done(ep, req, status); finish_out_dma()
640 struct omap_ep *ep; dma_irq() local
645 ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)]; dma_irq()
646 ep->irqs++; dma_irq()
648 if (!list_empty(&ep->queue)) { dma_irq()
649 req = container_of(ep->queue.next, dma_irq()
651 finish_in_dma(ep, req, 0); dma_irq()
655 if (!list_empty(&ep->queue)) { dma_irq()
656 req = container_of(ep->queue.next, dma_irq()
658 next_in_dma(ep, req); dma_irq()
664 ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)]; dma_irq()
665 ep->irqs++; dma_irq()
667 if (!list_empty(&ep->queue)) { dma_irq()
668 req = container_of(ep->queue.next, dma_irq()
670 finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB); dma_irq()
674 if (!list_empty(&ep->queue)) { dma_irq()
675 req = container_of(ep->queue.next, dma_irq()
677 next_out_dma(ep, req); dma_irq()
682 ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)]; dma_irq()
683 ep->irqs++; dma_irq()
685 VDBG("%s, RX_CNT irq?\n", ep->ep.name); dma_irq()
692 struct omap_ep *ep = data; dma_error() local
696 ERR("%s dma error, lch %d status %02x\n", ep->ep.name, lch, ch_status); dma_error()
701 static void dma_channel_claim(struct omap_ep *ep, unsigned channel) dma_channel_claim() argument
707 is_in = ep->bEndpointAddress & USB_DIR_IN; dma_channel_claim()
714 ep->dma_channel = 0; dma_channel_claim()
715 ep->lch = -1; dma_channel_claim()
728 reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1)); dma_channel_claim()
729 ep->dma_channel = channel; dma_channel_claim()
734 ep->ep.name, dma_error, ep, &ep->lch); dma_channel_claim()
738 omap_set_dma_src_burst_mode(ep->lch, dma_channel_claim()
740 omap_set_dma_src_data_pack(ep->lch, 1); dma_channel_claim()
742 omap_set_dma_dest_params(ep->lch, dma_channel_claim()
751 ep->ep.name, dma_error, ep, &ep->lch); dma_channel_claim()
755 omap_set_dma_src_params(ep->lch, dma_channel_claim()
761 omap_set_dma_dest_burst_mode(ep->lch, dma_channel_claim()
763 omap_set_dma_dest_data_pack(ep->lch, 1); dma_channel_claim()
767 ep->dma_channel = 0; dma_channel_claim()
769 ep->has_dma = 1; dma_channel_claim()
770 omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ); dma_channel_claim()
774 omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P); dma_channel_claim()
779 restart = !ep->stopped && !list_empty(&ep->queue); dma_channel_claim()
782 DBG("%s no dma channel: %d%s\n", ep->ep.name, status, dma_channel_claim()
785 DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name, dma_channel_claim()
787 ep->dma_channel - 1, ep->lch, dma_channel_claim()
792 req = container_of(ep->queue.next, struct omap_req, queue); dma_channel_claim()
793 if (ep->has_dma) dma_channel_claim()
794 (is_in ? next_in_dma : next_out_dma)(ep, req); dma_channel_claim()
796 use_ep(ep, UDC_EP_SEL); dma_channel_claim()
797 (is_in ? write_fifo : read_fifo)(ep, req); dma_channel_claim()
801 ep->ackwait = 1 + ep->double_buf; dma_channel_claim()
808 static void dma_channel_release(struct omap_ep *ep) dma_channel_release() argument
810 int shift = 4 * (ep->dma_channel - 1); dma_channel_release()
816 if (!list_empty(&ep->queue)) dma_channel_release()
817 req = container_of(ep->queue.next, struct omap_req, queue); dma_channel_release()
821 active = omap_get_dma_active_status(ep->lch); dma_channel_release()
823 DBG("%s release %s %cxdma%d %p\n", ep->ep.name, dma_channel_release()
825 (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r', dma_channel_release()
826 ep->dma_channel - 1, req); dma_channel_release()
833 if (ep->bEndpointAddress & USB_DIR_IN) { dma_channel_release()
838 finish_in_dma(ep, req, -ECONNRESET); dma_channel_release()
841 use_ep(ep, UDC_EP_SEL); dma_channel_release()
855 finish_out_dma(ep, req, -ECONNRESET, 0); dma_channel_release()
857 omap_free_dma(ep->lch); dma_channel_release()
858 ep->dma_channel = 0; dma_channel_release()
859 ep->lch = -1; dma_channel_release()
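The release/re-claim pairing above is also this driver's transfer-abort idiom (see omap_ep_dequeue() below); spelled out with comments, under the listing's own names:

	int channel = ep->dma_channel;

	dma_channel_release(ep);	/* stops DMA, finishes head request */
	dma_channel_claim(ep, channel);	/* same channel back for the rest */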
869 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); omap_ep_queue() local
881 if (!_ep || (!ep->ep.desc && ep->bEndpointAddress)) { omap_ep_queue()
882 DBG("%s, bad ep\n", __func__); omap_ep_queue()
885 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { omap_ep_queue()
886 if (req->req.length > ep->ep.maxpacket) omap_ep_queue()
895 && ep->has_dma omap_ep_queue()
896 && ep->bEndpointAddress != 0 omap_ep_queue()
897 && (ep->bEndpointAddress & USB_DIR_IN) == 0 omap_ep_queue()
898 && (req->req.length % ep->ep.maxpacket) != 0) { omap_ep_queue()
903 udc = ep->udc; omap_ep_queue()
907 if (use_dma && ep->has_dma) omap_ep_queue()
909 (ep->bEndpointAddress & USB_DIR_IN)); omap_ep_queue()
912 ep->ep.name, _req, _req->length, _req->buf); omap_ep_queue()
926 } else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) { omap_ep_queue()
929 if (ep->bEndpointAddress == 0) { omap_ep_queue()
930 if (!udc->ep0_pending || !list_empty(&ep->queue)) { omap_ep_queue()
965 done(ep, req, 0); omap_ep_queue()
978 is_in = ep->bEndpointAddress & USB_DIR_IN; omap_ep_queue()
979 if (!ep->has_dma) omap_ep_queue()
980 use_ep(ep, UDC_EP_SEL); omap_ep_queue()
984 if (ep->has_dma) omap_ep_queue()
985 (is_in ? next_in_dma : next_out_dma)(ep, req); omap_ep_queue()
987 if ((is_in ? write_fifo : read_fifo)(ep, req) == 1) omap_ep_queue()
992 ep->ackwait = 1 + ep->double_buf; omap_ep_queue()
1001 list_add_tail(&req->queue, &ep->queue); omap_ep_queue()
1009 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); omap_ep_dequeue() local
1016 spin_lock_irqsave(&ep->udc->lock, flags); omap_ep_dequeue()
1019 list_for_each_entry(req, &ep->queue, queue) { omap_ep_dequeue()
1024 spin_unlock_irqrestore(&ep->udc->lock, flags); omap_ep_dequeue()
1028 if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) { omap_ep_dequeue()
1029 int channel = ep->dma_channel; omap_ep_dequeue()
1034 dma_channel_release(ep); omap_ep_dequeue()
1035 dma_channel_claim(ep, channel); omap_ep_dequeue()
1037 done(ep, req, -ECONNRESET); omap_ep_dequeue()
1038 spin_unlock_irqrestore(&ep->udc->lock, flags); omap_ep_dequeue()
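
omap_ep_dequeue() above walks the endpoint queue for the matching request, unlinks it, and completes it with -ECONNRESET (releasing and reclaiming the DMA channel first when the request is the active head). A simplified userspace model of the walk and status plumbing only; locking and the DMA step are omitted, and the plain singly linked list is a stand-in:

    #include <errno.h>
    #include <stdio.h>

    struct req {
        void *tag;              /* stands in for the usb_request pointer */
        struct req *next;
        int status;
    };

    struct ep { struct req *head; };

    static int ep_dequeue(struct ep *e, void *tag)
    {
        struct req **pp;

        for (pp = &e->head; *pp; pp = &(*pp)->next) {
            if ((*pp)->tag == tag) {
                struct req *r = *pp;
                *pp = r->next;              /* unlink from the queue */
                r->status = -ECONNRESET;    /* like done(ep, req, -ECONNRESET) */
                return 0;
            }
        }
        return -EINVAL;                     /* request was not queued here */
    }

    int main(void)
    {
        struct req a = { .tag = (void *)0x1 }, b = { .tag = (void *)0x2 };
        struct ep e = { .head = &a };
        a.next = &b;

        printf("dequeue b: %d, status %d\n", ep_dequeue(&e, b.tag), b.status);
        printf("dequeue b again: %d\n", ep_dequeue(&e, b.tag));
        return 0;
    }
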
1046 struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); omap_ep_set_halt() local
1050 spin_lock_irqsave(&ep->udc->lock, flags); omap_ep_set_halt()
1053 if (ep->bEndpointAddress == 0) { omap_ep_set_halt()
1054 if (!ep->udc->ep0_pending) omap_ep_set_halt()
1057 if (ep->udc->ep0_set_config) { omap_ep_set_halt()
1062 ep->udc->ep0_pending = 0; omap_ep_set_halt()
1068 } else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->ep.desc) { omap_ep_set_halt()
1071 if ((ep->bEndpointAddress & USB_DIR_IN) omap_ep_set_halt()
1072 && !list_empty(&ep->queue)) { omap_ep_set_halt()
1080 if (use_dma && ep->dma_channel omap_ep_set_halt()
1081 && !list_empty(&ep->queue)) { omap_ep_set_halt()
1082 channel = ep->dma_channel; omap_ep_set_halt()
1083 dma_channel_release(ep); omap_ep_set_halt()
1087 use_ep(ep, UDC_EP_SEL); omap_ep_set_halt()
1096 dma_channel_claim(ep, channel); omap_ep_set_halt()
1098 use_ep(ep, 0); omap_ep_set_halt()
1099 omap_writew(ep->udc->clr_halt, UDC_CTRL); omap_ep_set_halt()
1100 ep->ackwait = 0; omap_ep_set_halt()
1101 if (!(ep->bEndpointAddress & USB_DIR_IN)) { omap_ep_set_halt()
1103 ep->ackwait = 1 + ep->double_buf; omap_ep_set_halt()
1108 VDBG("%s %s halt stat %d\n", ep->ep.name, omap_ep_set_halt()
1111 spin_unlock_irqrestore(&ep->udc->lock, flags); omap_ep_set_halt()
1331 static void nuke(struct omap_ep *ep, int status) nuke() argument
1335 ep->stopped = 1; nuke()
1337 if (use_dma && ep->dma_channel) nuke()
1338 dma_channel_release(ep); nuke()
1340 use_ep(ep, 0); nuke()
1342 if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC) nuke()
1345 while (!list_empty(&ep->queue)) { nuke()
1346 req = list_entry(ep->queue.next, struct omap_req, queue); nuke()
1347 done(ep, req, status); nuke()
1354 struct omap_ep *ep; udc_quiesce() local
1357 nuke(&udc->ep[0], -ESHUTDOWN); udc_quiesce()
1358 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) udc_quiesce()
1359 nuke(ep, -ESHUTDOWN); udc_quiesce()
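
udc_quiesce() above completes every endpoint's pending requests with -ESHUTDOWN via nuke(). A toy sketch of that shape, assuming a simple pending count in place of the real request list:

    #include <errno.h>
    #include <stdio.h>

    #define NUM_EPS 3

    struct ep { int pending; };

    static void nuke(struct ep *e, int status)
    {
        while (e->pending > 0) {    /* like "while (!list_empty(&ep->queue))" */
            e->pending--;
            printf("completed request, status %d\n", status);
        }
    }

    int main(void)
    {
        struct ep eps[NUM_EPS] = { { 1 }, { 2 }, { 0 } };

        for (int i = 0; i < NUM_EPS; i++)
            nuke(&eps[i], -ESHUTDOWN);
        return 0;
    }
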
1395 struct omap_ep *ep0 = &udc->ep[0]; ep0_irq()
1515 struct omap_ep *ep; ep0_irq() local
1543 /* udc needs to know when ep != 0 is valid */ ep0_irq()
1569 ep = &udc->ep[w_index & 0xf]; ep0_irq()
1570 if (ep != ep0) { ep0_irq()
1572 ep += 16; ep0_irq()
1573 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC ep0_irq()
1574 || !ep->ep.desc) ep0_irq()
1576 use_ep(ep, 0); ep0_irq()
1578 ep->ackwait = 0; ep0_irq()
1579 if (!(ep->bEndpointAddress & USB_DIR_IN)) { ep0_irq()
1581 ep->ackwait = 1 + ep->double_buf; ep0_irq()
1589 VDBG("%s halt cleared by host\n", ep->name); ep0_irq()
1598 ep = &udc->ep[w_index & 0xf]; ep0_irq()
1600 ep += 16; ep0_irq()
1601 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC ep0_irq()
1602 || ep == ep0 || !ep->ep.desc) ep0_irq()
1604 if (use_dma && ep->has_dma) { ep0_irq()
1608 DBG("%s host set_halt, NYET\n", ep->name); ep0_irq()
1611 use_ep(ep, 0); ep0_irq()
1615 VDBG("%s halted by host\n", ep->name); ep0_irq()
1634 ep = &udc->ep[w_index & 0xf]; ep0_irq()
1636 ep += 16; ep0_irq()
1637 if (!ep->ep.desc) ep0_irq()
1641 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) ep0_irq()
1645 ERR("%s status, can't report\n", ep->ep.name); ep0_irq()
1863 struct omap_ep *ep = (void *) _ep; pio_out_timer() local
1867 spin_lock_irqsave(&ep->udc->lock, flags); pio_out_timer()
1868 if (!list_empty(&ep->queue) && ep->ackwait) { pio_out_timer()
1869 use_ep(ep, UDC_EP_SEL); pio_out_timer()
1873 || (ep->double_buf && HALF_FULL(stat_flg)))) { pio_out_timer()
1876 VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg); pio_out_timer()
1877 req = container_of(ep->queue.next, pio_out_timer()
1879 (void) read_fifo(ep, req); pio_out_timer()
1880 omap_writew(ep->bEndpointAddress, UDC_EP_NUM); pio_out_timer()
1882 ep->ackwait = 1 + ep->double_buf; pio_out_timer()
1886 mod_timer(&ep->timer, PIO_OUT_TIMEOUT); pio_out_timer()
1887 spin_unlock_irqrestore(&ep->udc->lock, flags); pio_out_timer()
1894 struct omap_ep *ep; omap_udc_pio_irq() local
1909 ep = &udc->ep[epnum]; omap_udc_pio_irq()
1910 ep->irqs++; omap_udc_pio_irq()
1913 ep->fnf = 0; omap_udc_pio_irq()
1915 ep->ackwait--; omap_udc_pio_irq()
1916 if (!list_empty(&ep->queue)) { omap_udc_pio_irq()
1918 req = container_of(ep->queue.next, omap_udc_pio_irq()
1920 stat = read_fifo(ep, req); omap_udc_pio_irq()
1921 if (!ep->double_buf) omap_udc_pio_irq()
1922 ep->fnf = 1; omap_udc_pio_irq()
1933 if (ep->fnf) { omap_udc_pio_irq()
1935 ep->ackwait = 1 + ep->double_buf; omap_udc_pio_irq()
1937 mod_timer(&ep->timer, PIO_OUT_TIMEOUT); omap_udc_pio_irq()
1945 ep = &udc->ep[16 + epnum]; omap_udc_pio_irq()
1946 ep->irqs++; omap_udc_pio_irq()
1950 ep->ackwait = 0; omap_udc_pio_irq()
1951 if (!list_empty(&ep->queue)) { omap_udc_pio_irq()
1952 req = container_of(ep->queue.next, omap_udc_pio_irq()
1954 (void) write_fifo(ep, req); omap_udc_pio_irq()
1972 struct omap_ep *ep; omap_udc_iso_irq() local
1979 list_for_each_entry(ep, &udc->iso, iso) { omap_udc_iso_irq()
1983 if (ep->has_dma || list_empty(&ep->queue)) omap_udc_iso_irq()
1985 req = list_entry(ep->queue.next, struct omap_req, queue); omap_udc_iso_irq()
1987 use_ep(ep, UDC_EP_SEL); omap_udc_iso_irq()
1993 if (ep->bEndpointAddress & USB_DIR_IN) { omap_udc_iso_irq()
1995 /* done(ep, req, -EPROTO) */; omap_udc_iso_irq()
1997 write_fifo(ep, req); omap_udc_iso_irq()
2009 /* done(ep, req, status) */; omap_udc_iso_irq()
2011 read_fifo(ep, req); omap_udc_iso_irq()
2016 ep->irqs++; omap_udc_iso_irq()
2017 if (!list_empty(&ep->queue)) omap_udc_iso_irq()
2049 struct omap_ep *ep; omap_udc_start() local
2055 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { omap_udc_start()
2056 ep->irqs = 0; omap_udc_start()
2057 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) omap_udc_start()
2059 use_ep(ep, 0); omap_udc_start()
2063 udc->ep[0].irqs = 0; omap_udc_start()
2144 static void proc_ep_show(struct seq_file *s, struct omap_ep *ep) proc_ep_show() argument
2150 use_ep(ep, 0); proc_ep_show()
2152 if (use_dma && ep->has_dma) proc_ep_show()
2154 (ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r', proc_ep_show()
2155 ep->dma_channel - 1, ep->lch); proc_ep_show()
2162 ep->name, buf, proc_ep_show()
2163 ep->double_buf ? "dbuf " : "", proc_ep_show()
2165 switch (ep->ackwait) { proc_ep_show()
2179 ep->irqs, stat_flg, proc_ep_show()
2194 if (list_empty(&ep->queue)) proc_ep_show()
2197 list_for_each_entry(req, &ep->queue, queue) { proc_ep_show()
2201 length += ((ep->bEndpointAddress & USB_DIR_IN) proc_ep_show()
2203 (ep, req->req.dma + length); proc_ep_show()
2300 struct omap_ep *ep; proc_udc_show() local
2426 proc_ep_show(s, &udc->ep[0]); proc_udc_show()
2428 list_for_each_entry(ep, &udc->gadget.ep_list, proc_udc_show()
2429 ep.ep_list) { proc_udc_show()
2430 if (ep->ep.desc) proc_udc_show()
2431 proc_ep_show(s, ep); proc_udc_show()
2483 struct omap_ep *ep; omap_ep_setup() local
2487 ep = &udc->ep[addr & 0xf]; omap_ep_setup()
2489 ep += 16; omap_ep_setup()
2491 /* in case of ep init table bugs */ omap_ep_setup()
2492 BUG_ON(ep->name[0]); omap_ep_setup()
2549 init_timer(&ep->timer); omap_ep_setup()
2550 ep->timer.function = pio_out_timer; omap_ep_setup()
2551 ep->timer.data = (unsigned long) ep; omap_ep_setup()
2573 BUG_ON(strlen(name) >= sizeof ep->name); omap_ep_setup()
2574 strlcpy(ep->name, name, sizeof ep->name); omap_ep_setup()
2575 INIT_LIST_HEAD(&ep->queue); omap_ep_setup()
2576 INIT_LIST_HEAD(&ep->iso); omap_ep_setup()
2577 ep->bEndpointAddress = addr; omap_ep_setup()
2578 ep->bmAttributes = type; omap_ep_setup()
2579 ep->double_buf = dbuf; omap_ep_setup()
2580 ep->udc = udc; omap_ep_setup()
2584 ep->ep.caps.type_control = true; omap_ep_setup()
2585 ep->ep.caps.dir_in = true; omap_ep_setup()
2586 ep->ep.caps.dir_out = true; omap_ep_setup()
2589 ep->ep.caps.type_iso = true; omap_ep_setup()
2592 ep->ep.caps.type_bulk = true; omap_ep_setup()
2595 ep->ep.caps.type_int = true; omap_ep_setup()
2600 ep->ep.caps.dir_in = true; omap_ep_setup()
2602 ep->ep.caps.dir_out = true; omap_ep_setup()
2604 ep->ep.name = ep->name; omap_ep_setup()
2605 ep->ep.ops = &omap_ep_ops; omap_ep_setup()
2606 ep->maxpacket = maxp; omap_ep_setup()
2607 usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket); omap_ep_setup()
2608 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); omap_ep_setup()
2643 udc->gadget.ep0 = &udc->ep[0].ep; omap_udc_setup()
2654 list_del_init(&udc->ep[0].ep.ep_list); omap_udc_setup()
H A Dpxa27x_udc.c82 static void handle_ep(struct pxa_ep *ep);
141 struct pxa_ep *ep; queues_dbg_show() local
150 ep = &udc->pxa_ep[i]; queues_dbg_show()
151 maxpkt = ep->fifo_size; queues_dbg_show()
153 EPNAME(ep), maxpkt, "pio"); queues_dbg_show()
155 if (list_empty(&ep->queue)) { queues_dbg_show()
160 list_for_each_entry(req, &ep->queue, queue) { queues_dbg_show()
173 struct pxa_ep *ep; eps_dbg_show() local
180 ep = &udc->pxa_ep[0]; eps_dbg_show()
181 tmp = udc_ep_readl(ep, UDCCSR); eps_dbg_show()
192 ep = &udc->pxa_ep[i]; eps_dbg_show()
193 tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR); eps_dbg_show()
195 EPNAME(ep), eps_dbg_show()
196 ep->stats.in_bytes, ep->stats.in_ops, eps_dbg_show()
197 ep->stats.out_bytes, ep->stats.out_ops, eps_dbg_show()
198 ep->stats.irqs, eps_dbg_show()
199 tmp, udc_ep_readl(ep, UDCCSR), eps_dbg_show()
200 udc_ep_readl(ep, UDCBCR)); eps_dbg_show()
306 * @ep: pxa endpoint
313 static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep, is_match_usb_pxa() argument
316 if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr) is_match_usb_pxa()
318 if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in) is_match_usb_pxa()
320 if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type) is_match_usb_pxa()
322 if ((ep->config != config) || (ep->interface != interface) is_match_usb_pxa()
323 || (ep->alternate != altsetting)) is_match_usb_pxa()
357 struct pxa_ep *ep; find_pxa_ep() local
366 ep = &udc->pxa_ep[i]; find_pxa_ep()
367 if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt)) find_pxa_ep()
368 return ep; find_pxa_ep()
397 * @ep: udc endpoint
399 static void pio_irq_enable(struct pxa_ep *ep) pio_irq_enable() argument
401 struct pxa_udc *udc = ep->dev; pio_irq_enable()
402 int index = EPIDX(ep); pio_irq_enable()
414 * @ep: udc endpoint
416 static void pio_irq_disable(struct pxa_ep *ep) pio_irq_disable() argument
418 struct pxa_udc *udc = ep->dev; pio_irq_disable()
419 int index = EPIDX(ep); pio_irq_disable()
467 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask) ep_write_UDCCSR() argument
469 if (is_ep0(ep)) ep_write_UDCCSR()
471 udc_ep_writel(ep, UDCCSR, mask); ep_write_UDCCSR()
476 * @ep: udc endpoint
480 static int ep_count_bytes_remain(struct pxa_ep *ep) ep_count_bytes_remain() argument
482 if (ep->dir_in) ep_count_bytes_remain()
484 return udc_ep_readl(ep, UDCBCR) & 0x3ff; ep_count_bytes_remain()
488 * ep_is_empty - checks if ep has a byte ready for reading
489 * @ep: udc endpoint
495 * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
497 static int ep_is_empty(struct pxa_ep *ep) ep_is_empty() argument
501 if (!is_ep0(ep) && ep->dir_in) ep_is_empty()
503 if (is_ep0(ep)) ep_is_empty()
504 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE); ep_is_empty()
506 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE); ep_is_empty()
511 * ep_is_full - checks if ep has room to write bytes
512 * @ep: udc endpoint
517 * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
519 static int ep_is_full(struct pxa_ep *ep) ep_is_full() argument
521 if (is_ep0(ep)) ep_is_full()
522 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR); ep_is_full()
523 if (!ep->dir_in) ep_is_full()
525 return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF)); ep_is_full()
530 * @ep: pxa endpoint
532 * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
534 static int epout_has_pkt(struct pxa_ep *ep) epout_has_pkt() argument
536 if (!is_ep0(ep) && ep->dir_in) epout_has_pkt()
538 if (is_ep0(ep)) epout_has_pkt()
539 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC); epout_has_pkt()
540 return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC); epout_has_pkt()
550 struct pxa_ep *ep = &udc->pxa_ep[0]; set_ep0state() local
554 ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname, set_ep0state()
555 EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR), set_ep0state()
556 udc_ep_readl(ep, UDCBCR)); set_ep0state()
569 * inc_ep_stats_reqs - Update ep stats counts
570 * @ep: physical endpoint
572 * @is_in: ep direction (USB_DIR_IN or 0)
575 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in) inc_ep_stats_reqs() argument
578 ep->stats.in_ops++; inc_ep_stats_reqs()
580 ep->stats.out_ops++; inc_ep_stats_reqs()
584 * inc_ep_stats_bytes - Update ep stats counts
585 * @ep: physical endpoint
587 * @is_in: ep direction (USB_DIR_IN or 0)
589 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in) inc_ep_stats_bytes() argument
592 ep->stats.in_bytes += count; inc_ep_stats_bytes()
594 ep->stats.out_bytes += count; inc_ep_stats_bytes()
599 * @ep: pxa27x physical endpoint
601 * Find the physical pxa27x ep, and set up its UDCCR pxa_ep_setup()
603 static void pxa_ep_setup(struct pxa_ep *ep) pxa_ep_setup() argument
607 new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN) pxa_ep_setup()
608 | ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN) pxa_ep_setup()
609 | ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN) pxa_ep_setup()
610 | ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN) pxa_ep_setup()
611 | ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET) pxa_ep_setup()
612 | ((ep->dir_in) ? UDCCONR_ED : 0) pxa_ep_setup()
613 | ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS) pxa_ep_setup()
616 udc_ep_writel(ep, UDCCR, new_udccr); pxa_ep_setup()
678 * @ep: usb endpoint
681 * Context: ep->lock held
686 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req) ep_add_request() argument
690 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, ep_add_request()
691 req->req.length, udc_ep_readl(ep, UDCCSR)); ep_add_request()
694 list_add_tail(&req->queue, &ep->queue); ep_add_request()
695 pio_irq_enable(ep); ep_add_request()
700 * @ep: usb endpoint
703 * Context: ep->lock held
709 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req) ep_del_request() argument
713 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, ep_del_request()
714 req->req.length, udc_ep_readl(ep, UDCCSR)); ep_del_request()
718 if (!is_ep0(ep) && list_empty(&ep->queue)) ep_del_request()
719 pio_irq_disable(ep); ep_del_request()
724 * @ep: pxa physical endpoint
729 * Context: ep->lock held if flags not NULL, else ep->lock released
733 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status, req_done() argument
738 ep_del_request(ep, req); req_done()
745 ep_dbg(ep, "complete req %p stat %d len %u/%u\n", req_done()
750 spin_unlock_irqrestore(&ep->lock, *pflags); req_done()
755 spin_lock_irqsave(&ep->lock, *pflags); req_done()
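
The req_done() context rules above (ep->lock held on entry, dropped around the gadget completion callback, re-taken before returning) can be modelled in userspace with a pthread mutex standing in for the spinlock; this is a sketch of the contract, not the driver:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ep_lock = PTHREAD_MUTEX_INITIALIZER;

    static void complete_cb(void)
    {
        /* runs unlocked, so it may call back into the driver and
         * take ep_lock itself (e.g. to queue a new request) */
        printf("completion callback runs unlocked\n");
    }

    static void req_done_locked(void)
    {
        /* caller holds ep_lock, mirroring req_done(..., pflags != NULL) */
        pthread_mutex_unlock(&ep_lock);
        complete_cb();
        pthread_mutex_lock(&ep_lock);
    }

    int main(void)
    {
        pthread_mutex_lock(&ep_lock);
        req_done_locked();
        pthread_mutex_unlock(&ep_lock);
        return 0;
    }
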
760 * @ep: physical endpoint
764 * Context: ep->lock held or released (see req_done())
768 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, ep_end_out_req() argument
771 inc_ep_stats_reqs(ep, !USB_DIR_IN); ep_end_out_req()
772 req_done(ep, req, 0, pflags); ep_end_out_req()
777 * @ep: physical endpoint
781 * Context: ep->lock held or released (see req_done())
786 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, ep0_end_out_req() argument
789 set_ep0state(ep->dev, OUT_STATUS_STAGE); ep0_end_out_req()
790 ep_end_out_req(ep, req, pflags); ep0_end_out_req()
791 ep0_idle(ep->dev); ep0_end_out_req()
796 * @ep: physical endpoint
800 * Context: ep->lock held or released (see req_done())
804 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, ep_end_in_req() argument
807 inc_ep_stats_reqs(ep, USB_DIR_IN); ep_end_in_req()
808 req_done(ep, req, 0, pflags); ep_end_in_req()
813 * @ep: physical endpoint
817 * Context: ep->lock held or released (see req_done())
822 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, ep0_end_in_req() argument
825 set_ep0state(ep->dev, IN_STATUS_STAGE); ep0_end_in_req()
826 ep_end_in_req(ep, req, pflags); ep0_end_in_req()
831 * @ep: pxa endpoint
834 * Context: ep->lock released
839 static void nuke(struct pxa_ep *ep, int status) nuke() argument
844 spin_lock_irqsave(&ep->lock, flags); nuke()
845 while (!list_empty(&ep->queue)) { nuke()
846 req = list_entry(ep->queue.next, struct pxa27x_request, queue); nuke()
847 req_done(ep, req, status, &flags); nuke()
849 spin_unlock_irqrestore(&ep->lock, flags); nuke()
854 * @ep: pxa physical endpoint
863 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req) read_packet() argument
868 bytes_ep = ep_count_bytes_remain(ep); read_packet()
874 if (likely(!ep_is_empty(ep))) read_packet()
880 *buf++ = udc_ep_readl(ep, UDCDR); read_packet()
883 ep_write_UDCCSR(ep, UDCCSR_PC); read_packet()
890 * @ep: pxa physical endpoint
900 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req, write_packet() argument
916 udc_ep_writel(ep, UDCDR, *buf++); write_packet()
920 udc_ep_writeb(ep, UDCDR, *buf_8++); write_packet()
922 ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain, write_packet()
923 udc_ep_readl(ep, UDCCSR)); write_packet()
930 * @ep: pxa physical endpoint
942 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req) read_fifo() argument
946 while (epout_has_pkt(ep)) { read_fifo()
947 count = read_packet(ep, req); read_fifo()
948 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); read_fifo()
950 is_short = (count < ep->fifo_size); read_fifo()
951 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", read_fifo()
952 udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "", read_fifo()
967 * @ep: pxa physical endpoint
977 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req) write_fifo() argument
983 max = ep->fifo_size; write_fifo()
987 udccsr = udc_ep_readl(ep, UDCCSR); write_fifo()
989 ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n", write_fifo()
991 ep_write_UDCCSR(ep, UDCCSR_PC); write_fifo()
994 ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n", write_fifo()
996 ep_write_UDCCSR(ep, UDCCSR_TRN); write_fifo()
999 count = write_packet(ep, req, max); write_fifo()
1000 inc_ep_stats_bytes(ep, count, USB_DIR_IN); write_fifo()
1014 is_short = unlikely(max < ep->fifo_size); write_fifo()
1018 ep_write_UDCCSR(ep, UDCCSR_SP); write_fifo()
1025 } while (!ep_is_full(ep)); write_fifo()
1027 ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n", write_fifo()
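
write_fifo() above loops filling max-size packets until the fifo fills or the request is exhausted, flagging a trailing short packet (the UDCCSR_SP write). A toy model of that loop; zero-length-packet handling and the fifo-full exit are simplified away:

    #include <stdio.h>

    #define FIFO_SIZE 8

    static int write_fifo_model(int total)
    {
        int sent = 0;

        while (sent < total) {
            int count = total - sent;

            if (count > FIFO_SIZE)
                count = FIFO_SIZE;
            sent += count;
            if (count < FIFO_SIZE)      /* short packet => transfer ends */
                printf("short packet (%d bytes), set SP bit\n", count);
            else
                printf("full packet (%d bytes)\n", count);
        }
        return sent;
    }

    int main(void)
    {
        write_fifo_model(19);   /* 8 + 8 + short 3 */
        return 0;
    }
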
1036 * @ep: control endpoint
1045 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) read_ep0_fifo() argument
1049 while (epout_has_pkt(ep)) { read_ep0_fifo()
1050 count = read_packet(ep, req); read_ep0_fifo()
1051 ep_write_UDCCSR(ep, UDCCSR0_OPC); read_ep0_fifo()
1052 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); read_ep0_fifo()
1054 is_short = (count < ep->fifo_size); read_ep0_fifo()
1055 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", read_ep0_fifo()
1056 udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "", read_ep0_fifo()
1070 * @ep: control endpoint
1083 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) write_ep0_fifo() argument
1088 count = write_packet(ep, req, EP0_FIFO_SIZE); write_ep0_fifo()
1089 inc_ep_stats_bytes(ep, count, USB_DIR_IN); write_ep0_fifo()
1096 ep_write_UDCCSR(ep, UDCCSR0_IPR); write_ep0_fifo()
1098 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n", write_ep0_fifo()
1101 &req->req, udc_ep_readl(ep, UDCCSR)); write_ep0_fifo()
1122 struct pxa_ep *ep; pxa_ep_queue() local
1141 ep = udc_usb_ep->pxa_ep; pxa_ep_queue()
1142 if (unlikely(!ep)) pxa_ep_queue()
1145 dev = ep->dev; pxa_ep_queue()
1147 ep_dbg(ep, "bogus device state\n"); pxa_ep_queue()
1154 if (unlikely(EPXFERTYPE_is_ISO(ep) pxa_ep_queue()
1155 && req->req.length > ep->fifo_size)) pxa_ep_queue()
1158 spin_lock_irqsave(&ep->lock, flags); pxa_ep_queue()
1159 recursion_detected = ep->in_handle_ep; pxa_ep_queue()
1161 is_first_req = list_empty(&ep->queue); pxa_ep_queue()
1162 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", pxa_ep_queue()
1166 if (!ep->enabled) { pxa_ep_queue()
1173 ep_err(ep, "refusing to queue req %p (already queued)\n", req); pxa_ep_queue()
1181 ep_add_request(ep, req); pxa_ep_queue()
1182 spin_unlock_irqrestore(&ep->lock, flags); pxa_ep_queue()
1184 if (is_ep0(ep)) { pxa_ep_queue()
1188 ep_end_in_req(ep, req, NULL); pxa_ep_queue()
1190 ep_err(ep, "got a request of %d bytes while" pxa_ep_queue()
1193 ep_del_request(ep, req); pxa_ep_queue()
1196 ep0_idle(ep->dev); pxa_ep_queue()
1199 if (!ep_is_full(ep)) pxa_ep_queue()
1200 if (write_ep0_fifo(ep, req)) pxa_ep_queue()
1201 ep0_end_in_req(ep, req, NULL); pxa_ep_queue()
1204 if ((length == 0) || !epout_has_pkt(ep)) pxa_ep_queue()
1205 if (read_ep0_fifo(ep, req)) pxa_ep_queue()
1206 ep0_end_out_req(ep, req, NULL); pxa_ep_queue()
1209 ep_err(ep, "odd state %s to send me a request\n", pxa_ep_queue()
1210 EP0_STNAME(ep->dev)); pxa_ep_queue()
1211 ep_del_request(ep, req); pxa_ep_queue()
1217 handle_ep(ep); pxa_ep_queue()
1223 spin_unlock_irqrestore(&ep->lock, flags); pxa_ep_queue()
1236 struct pxa_ep *ep; pxa_ep_dequeue() local
1245 ep = udc_usb_ep->pxa_ep; pxa_ep_dequeue()
1246 if (!ep || is_ep0(ep)) pxa_ep_dequeue()
1249 spin_lock_irqsave(&ep->lock, flags); pxa_ep_dequeue()
1252 list_for_each_entry(req, &ep->queue, queue) { pxa_ep_dequeue()
1259 spin_unlock_irqrestore(&ep->lock, flags); pxa_ep_dequeue()
1261 req_done(ep, req, -ECONNRESET, NULL); pxa_ep_dequeue()
1274 struct pxa_ep *ep; pxa_ep_set_halt() local
1283 ep = udc_usb_ep->pxa_ep; pxa_ep_set_halt()
1284 if (!ep || is_ep0(ep)) pxa_ep_set_halt()
1294 ep_dbg(ep, "only host can clear halt\n"); pxa_ep_set_halt()
1298 spin_lock_irqsave(&ep->lock, flags); pxa_ep_set_halt()
1301 if (ep->dir_in && (ep_is_full(ep) || !list_empty(&ep->queue))) pxa_ep_set_halt()
1306 ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF); pxa_ep_set_halt()
1307 if (is_ep0(ep)) pxa_ep_set_halt()
1308 set_ep0state(ep->dev, STALL); pxa_ep_set_halt()
1311 spin_unlock_irqrestore(&ep->lock, flags); pxa_ep_set_halt()
1323 struct pxa_ep *ep; pxa_ep_fifo_status() local
1329 ep = udc_usb_ep->pxa_ep; pxa_ep_fifo_status()
1330 if (!ep || is_ep0(ep)) pxa_ep_fifo_status()
1333 if (ep->dir_in) pxa_ep_fifo_status()
1335 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep)) pxa_ep_fifo_status()
1338 return ep_count_bytes_remain(ep) + 1; pxa_ep_fifo_status()
1349 struct pxa_ep *ep; pxa_ep_fifo_flush() local
1356 ep = udc_usb_ep->pxa_ep; pxa_ep_fifo_flush()
1357 if (!ep || is_ep0(ep)) pxa_ep_fifo_flush()
1360 spin_lock_irqsave(&ep->lock, flags); pxa_ep_fifo_flush()
1362 if (unlikely(!list_empty(&ep->queue))) pxa_ep_fifo_flush()
1363 ep_dbg(ep, "called while queue list not empty\n"); pxa_ep_fifo_flush()
1364 ep_dbg(ep, "called\n"); pxa_ep_fifo_flush()
1367 if (!ep->dir_in) { pxa_ep_fifo_flush()
1368 while (!ep_is_empty(ep)) pxa_ep_fifo_flush()
1369 udc_ep_readl(ep, UDCDR); pxa_ep_fifo_flush()
1372 ep_write_UDCCSR(ep, pxa_ep_fifo_flush()
1374 | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST)); pxa_ep_fifo_flush()
1377 spin_unlock_irqrestore(&ep->lock, flags); pxa_ep_fifo_flush()
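
pxa_ep_fifo_flush() above drains an OUT fifo by reading UDCDR until ep_is_empty() reports the fifo empty. A mock-register sketch of that drain loop:

    #include <stdio.h>

    static int fifo_level = 5;      /* pretend 5 bytes are pending */

    static int ep_is_empty(void)      { return fifo_level == 0; }
    static unsigned udc_read_dr(void) { fifo_level--; return 0xAB; }

    static void fifo_flush_out(void)
    {
        while (!ep_is_empty())
            (void)udc_read_dr();    /* read and drop, like the UDCDR reads */
        printf("fifo drained\n");
    }

    int main(void)
    {
        fifo_flush_out();
        return 0;
    }
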
1385 * Nothing much to do here, as ep configuration is done once and for all
1393 struct pxa_ep *ep; pxa_ep_enable() local
1402 ep = udc_usb_ep->pxa_ep; pxa_ep_enable()
1403 ep_warn(ep, "usb_ep %s already enabled, doing nothing\n", pxa_ep_enable()
1406 ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep); pxa_ep_enable()
1409 if (!ep || is_ep0(ep)) { pxa_ep_enable()
1411 "unable to match pxa_ep for ep %s\n", pxa_ep_enable()
1417 || (ep->type != usb_endpoint_type(desc))) { pxa_ep_enable()
1418 ep_err(ep, "type mismatch\n"); pxa_ep_enable()
1422 if (ep->fifo_size < usb_endpoint_maxp(desc)) { pxa_ep_enable()
1423 ep_err(ep, "bad maxpacket\n"); pxa_ep_enable()
1427 udc_usb_ep->pxa_ep = ep; pxa_ep_enable()
1428 udc = ep->dev; pxa_ep_enable()
1431 ep_err(ep, "bogus device state\n"); pxa_ep_enable()
1435 ep->enabled = 1; pxa_ep_enable()
1440 ep_dbg(ep, "enabled\n"); pxa_ep_enable()
1454 struct pxa_ep *ep; pxa_ep_disable() local
1461 ep = udc_usb_ep->pxa_ep; pxa_ep_disable()
1462 if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) pxa_ep_disable()
1465 ep->enabled = 0; pxa_ep_disable()
1466 nuke(ep, -ESHUTDOWN); pxa_ep_disable()
1471 ep_dbg(ep, "disabled\n"); pxa_ep_disable()
1707 struct pxa_ep *ep; udc_init_data() local
1718 ep = &dev->pxa_ep[i]; udc_init_data()
1720 ep->enabled = is_ep0(ep); udc_init_data()
1721 INIT_LIST_HEAD(&ep->queue); udc_init_data()
1722 spin_lock_init(&ep->lock); udc_init_data()
1869 struct pxa_ep *ep = &udc->pxa_ep[0]; handle_ep0_ctrl_req() local
1878 nuke(ep, -EPROTO); handle_ep0_ctrl_req()
1879 spin_lock_irqsave(&ep->lock, flags); handle_ep0_ctrl_req()
1887 if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0)) handle_ep0_ctrl_req()
1888 ep_write_UDCCSR(ep, UDCCSR0_OPC); handle_ep0_ctrl_req()
1892 if (unlikely(ep_is_empty(ep))) handle_ep0_ctrl_req()
1894 u.word[i] = udc_ep_readl(ep, UDCDR); handle_ep0_ctrl_req()
1897 have_extrabytes = !ep_is_empty(ep); handle_ep0_ctrl_req()
1898 while (!ep_is_empty(ep)) { handle_ep0_ctrl_req()
1899 i = udc_ep_readl(ep, UDCDR); handle_ep0_ctrl_req()
1900 ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i); handle_ep0_ctrl_req()
1903 ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n", handle_ep0_ctrl_req()
1916 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); handle_ep0_ctrl_req()
1918 spin_unlock_irqrestore(&ep->lock, flags); handle_ep0_ctrl_req()
1920 spin_lock_irqsave(&ep->lock, flags); handle_ep0_ctrl_req()
1924 spin_unlock_irqrestore(&ep->lock, flags); handle_ep0_ctrl_req()
1927 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", handle_ep0_ctrl_req()
1928 udc_ep_readl(ep, UDCCSR), i); handle_ep0_ctrl_req()
1929 ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF); handle_ep0_ctrl_req()
1940 * Context : when in_interrupt() or with ep->lock held
1985 struct pxa_ep *ep = &udc->pxa_ep[0]; handle_ep0() local
1989 if (!list_empty(&ep->queue)) handle_ep0()
1990 req = list_entry(ep->queue.next, struct pxa27x_request, queue); handle_ep0()
1992 udccsr0 = udc_ep_readl(ep, UDCCSR); handle_ep0()
1993 ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n", handle_ep0()
1994 EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR), handle_ep0()
1998 ep_dbg(ep, "clearing stall status\n"); handle_ep0()
1999 nuke(ep, -EPIPE); handle_ep0()
2000 ep_write_UDCCSR(ep, UDCCSR0_SST); handle_ep0()
2005 nuke(ep, 0); handle_ep0()
2024 if (epout_has_pkt(ep)) handle_ep0()
2025 ep_write_UDCCSR(ep, UDCCSR0_OPC); handle_ep0()
2026 if (req && !ep_is_full(ep)) handle_ep0()
2027 completed = write_ep0_fifo(ep, req); handle_ep0()
2029 ep0_end_in_req(ep, req, NULL); handle_ep0()
2032 if (epout_has_pkt(ep) && req) handle_ep0()
2033 completed = read_ep0_fifo(ep, req); handle_ep0()
2035 ep0_end_out_req(ep, req, NULL); handle_ep0()
2038 ep_write_UDCCSR(ep, UDCCSR0_FST); handle_ep0()
2051 ep_warn(ep, "should never get in %s state here!!!\n", handle_ep0()
2052 EP0_STNAME(ep->dev)); handle_ep0()
2060 * @ep: pxa physical endpoint
2065 * Is always called when in_interrupt() and with ep->lock released.
2067 static void handle_ep(struct pxa_ep *ep) handle_ep() argument
2072 int is_in = ep->dir_in; handle_ep()
2076 spin_lock_irqsave(&ep->lock, flags); handle_ep()
2077 if (ep->in_handle_ep) handle_ep()
2079 ep->in_handle_ep = 1; handle_ep()
2083 udccsr = udc_ep_readl(ep, UDCCSR); handle_ep()
2085 if (likely(!list_empty(&ep->queue))) handle_ep()
2086 req = list_entry(ep->queue.next, handle_ep()
2091 ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n", handle_ep()
2095 udc_ep_writel(ep, UDCCSR, handle_ep()
2101 if (likely(!ep_is_full(ep))) handle_ep()
2102 completed = write_fifo(ep, req); handle_ep()
2104 if (likely(epout_has_pkt(ep))) handle_ep()
2105 completed = read_fifo(ep, req); handle_ep()
2110 ep_end_in_req(ep, req, &flags); handle_ep()
2112 ep_end_out_req(ep, req, &flags); handle_ep()
2116 ep->in_handle_ep = 0; handle_ep()
2118 spin_unlock_irqrestore(&ep->lock, flags); handle_ep()
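
handle_ep() above guards itself with ep->in_handle_ep because a completion callback may queue a new request and re-enter the handler. A stripped-down model of that guard; direct recursion here simulates the re-entry that really happens through the queue path:

    #include <stdio.h>

    struct ep { int in_handle_ep; };

    static void handle_ep_model(struct ep *e, int depth)
    {
        if (e->in_handle_ep) {          /* already running: bail out */
            printf("recursion blocked at depth %d\n", depth);
            return;
        }
        e->in_handle_ep = 1;
        if (depth < 2)
            handle_ep_model(e, depth + 1);  /* simulated re-entry */
        e->in_handle_ep = 0;
    }

    int main(void)
    {
        struct ep e = { 0 };
        handle_ep_model(&e, 0);
        return 0;
    }
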
2189 struct pxa_ep *ep; irq_handle_data() local
2209 ep = &udc->pxa_ep[i]; irq_handle_data()
2210 ep->stats.irqs++; irq_handle_data()
2211 handle_ep(ep); irq_handle_data()
2222 ep = &udc->pxa_ep[i]; irq_handle_data()
2223 ep->stats.irqs++; irq_handle_data()
2224 handle_ep(ep); irq_handle_data()
2291 struct pxa_ep *ep = &udc->pxa_ep[0]; irq_udc_reset() local
2304 nuke(ep, -EPROTO); irq_udc_reset()
2305 ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC); irq_udc_reset()
2530 struct pxa_ep *ep; pxa_udc_suspend() local
2532 ep = &udc->pxa_ep[0]; pxa_udc_suspend()
2533 udc->udccsr0 = udc_ep_readl(ep, UDCCSR); pxa_udc_suspend()
2555 struct pxa_ep *ep; pxa_udc_resume() local
2557 ep = &udc->pxa_ep[0]; pxa_udc_resume()
2558 udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME)); pxa_udc_resume()
H A Dudc-xilinx.c111 #define to_xusb_ep(ep) container_of((ep), struct xusb_ep, ep_usb)
118 * @ep: pointer to xusb_endpoint structure
123 struct xusb_ep *ep; member in struct:xusb_req
167 * @ep: an array of endpoint structures
184 struct xusb_ep ep[8]; member in struct:xusb_udc
265 struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO]; xudc_wrstatus()
277 * @ep: pointer to the usb device endpoint structure.
283 static void xudc_epconfig(struct xusb_ep *ep, struct xusb_udc *udc) xudc_epconfig() argument
291 epcfgreg = ((ep->is_in << 29) | (ep->is_iso << 28) | xudc_epconfig()
292 (ep->ep_usb.maxpacket << 15) | (ep->rambase)); xudc_epconfig()
293 udc->write_fn(udc->addr, ep->offset, epcfgreg); xudc_epconfig()
296 udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF0COUNT_OFFSET, xudc_epconfig()
297 ep->buffer0count); xudc_epconfig()
298 udc->write_fn(udc->addr, ep->offset + XUSB_EP_BUF1COUNT_OFFSET, xudc_epconfig()
299 ep->buffer1count); xudc_epconfig()
300 if (ep->buffer0ready) xudc_epconfig()
302 1 << ep->epnumber); xudc_epconfig()
303 if (ep->buffer1ready) xudc_epconfig()
305 1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT)); xudc_epconfig()
310 * @ep: pointer to the usb device endpoint structure.
320 static int xudc_start_dma(struct xusb_ep *ep, dma_addr_t src, xudc_start_dma() argument
323 struct xusb_udc *udc = ep->udc; xudc_start_dma()
370 * @ep: pointer to the usb device endpoint structure.
380 static int xudc_dma_send(struct xusb_ep *ep, struct xusb_req *req, xudc_dma_send() argument
386 struct xusb_udc *udc = ep->udc; xudc_dma_send()
392 if (!ep->curbufnum && !ep->buffer0ready) { xudc_dma_send()
394 eprambase = (u32 __force *)(udc->addr + ep->rambase); xudc_dma_send()
396 udc->write_fn(udc->addr, ep->offset + xudc_dma_send()
399 XUSB_DMA_BRR_CTRL | (1 << ep->epnumber)); xudc_dma_send()
400 ep->buffer0ready = 1; xudc_dma_send()
401 ep->curbufnum = 1; xudc_dma_send()
402 } else if (ep->curbufnum && !ep->buffer1ready) { xudc_dma_send()
404 eprambase = (u32 __force *)(udc->addr + ep->rambase + xudc_dma_send()
405 ep->ep_usb.maxpacket); xudc_dma_send()
407 udc->write_fn(udc->addr, ep->offset + xudc_dma_send()
410 XUSB_DMA_BRR_CTRL | (1 << (ep->epnumber + xudc_dma_send()
412 ep->buffer1ready = 1; xudc_dma_send()
413 ep->curbufnum = 0; xudc_dma_send()
419 return xudc_start_dma(ep, src, dst, length); xudc_dma_send()
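
xudc_dma_send() above alternates between two hardware buffers, tracked by curbufnum and the per-buffer buffer0ready/buffer1ready flags. A hypothetical model of the ping-pong selection; the both-busy branch is elided in the listing, so reporting it as -EAGAIN here is an assumption:

    #include <errno.h>
    #include <stdio.h>

    struct ep { int curbufnum, buf_ready[2]; };

    static int submit(struct ep *e)
    {
        int b = e->curbufnum;

        if (e->buf_ready[b])
            return -EAGAIN;     /* assumed: both buffers still owned by hw */
        e->buf_ready[b] = 1;    /* hand buffer b to the controller */
        e->curbufnum = !b;      /* next submit targets the other buffer */
        printf("submitted buffer %d\n", b);
        return 0;
    }

    int main(void)
    {
        struct ep e = { 0 };

        submit(&e);                             /* buffer 0 */
        submit(&e);                             /* buffer 1 */
        printf("third: %d\n", submit(&e));      /* busy until an irq clears a flag */
        return 0;
    }
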
424 * @ep: pointer to the usb device endpoint structure.
434 static int xudc_dma_receive(struct xusb_ep *ep, struct xusb_req *req, xudc_dma_receive() argument
440 struct xusb_udc *udc = ep->udc; xudc_dma_receive()
443 if (!ep->curbufnum && !ep->buffer0ready) { xudc_dma_receive()
445 eprambase = (u32 __force *)(udc->addr + ep->rambase); xudc_dma_receive()
449 (1 << ep->epnumber)); xudc_dma_receive()
450 ep->buffer0ready = 1; xudc_dma_receive()
451 ep->curbufnum = 1; xudc_dma_receive()
452 } else if (ep->curbufnum && !ep->buffer1ready) { xudc_dma_receive()
455 ep->rambase + ep->ep_usb.maxpacket); xudc_dma_receive()
459 (1 << (ep->epnumber + xudc_dma_receive()
461 ep->buffer1ready = 1; xudc_dma_receive()
462 ep->curbufnum = 0; xudc_dma_receive()
468 return xudc_start_dma(ep, src, dst, length); xudc_dma_receive()
473 * @ep: pointer to the usb endpoint configuration structure.
483 static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, xudc_eptxrx() argument
489 struct xusb_udc *udc = ep->udc; xudc_eptxrx()
493 if (ep->is_in) xudc_eptxrx()
494 rc = xudc_dma_send(ep, req, bufferptr, bufferlen); xudc_eptxrx()
496 rc = xudc_dma_receive(ep, req, bufferptr, bufferlen); xudc_eptxrx()
500 if (!ep->curbufnum && !ep->buffer0ready) { xudc_eptxrx()
502 eprambase = (u32 __force *)(udc->addr + ep->rambase); xudc_eptxrx()
503 if (ep->is_in) { xudc_eptxrx()
505 udc->write_fn(udc->addr, ep->offset + xudc_eptxrx()
514 1 << ep->epnumber); xudc_eptxrx()
515 ep->buffer0ready = 1; xudc_eptxrx()
516 ep->curbufnum = 1; xudc_eptxrx()
517 } else if (ep->curbufnum && !ep->buffer1ready) { xudc_eptxrx()
519 eprambase = (u32 __force *)(udc->addr + ep->rambase + xudc_eptxrx()
520 ep->ep_usb.maxpacket); xudc_eptxrx()
521 if (ep->is_in) { xudc_eptxrx()
523 udc->write_fn(udc->addr, ep->offset + xudc_eptxrx()
532 1 << (ep->epnumber + XUSB_STATUS_EP_BUFF2_SHIFT)); xudc_eptxrx()
533 ep->buffer1ready = 1; xudc_eptxrx()
534 ep->curbufnum = 0; xudc_eptxrx()
544 * @ep: pointer to the usb device endpoint structure.
551 static void xudc_done(struct xusb_ep *ep, struct xusb_req *req, int status) xudc_done() argument
553 struct xusb_udc *udc = ep->udc; xudc_done()
564 ep->ep_usb.name, req, status); xudc_done()
566 if (udc->dma_enabled && ep->epnumber && req->usb_req.length) xudc_done()
568 ep->is_in); xudc_done()
572 req->usb_req.complete(&ep->ep_usb, &req->usb_req); xudc_done()
579 * @ep: pointer to the usb device endpoint structure.
586 static int xudc_read_fifo(struct xusb_ep *ep, struct xusb_req *req) xudc_read_fifo() argument
594 struct xusb_udc *udc = ep->udc; xudc_read_fifo()
596 if (ep->buffer0ready && ep->buffer1ready) { xudc_read_fifo()
601 if (ep->curbufnum) xudc_read_fifo()
606 count = udc->read_fn(udc->addr + ep->offset + bufoffset); xudc_read_fifo()
608 if (!ep->buffer0ready && !ep->buffer1ready) xudc_read_fifo()
614 is_short = count < ep->ep_usb.maxpacket; xudc_read_fifo()
624 ep->ep_usb.name, count); xudc_read_fifo()
626 xudc_done(ep, req, -EOVERFLOW); xudc_read_fifo()
630 ret = xudc_eptxrx(ep, req, buf, count); xudc_read_fifo()
635 ep->ep_usb.name, count, is_short ? "/S" : "", req, xudc_read_fifo()
645 xudc_done(ep, req, 0); xudc_read_fifo()
659 xudc_done(ep, req, -ECONNRESET); xudc_read_fifo()
669 * @ep: pointer to the usb device endpoint structure.
676 static int xudc_write_fifo(struct xusb_ep *ep, struct xusb_req *req) xudc_write_fifo() argument
682 struct xusb_udc *udc = ep->udc; xudc_write_fifo()
686 max = le16_to_cpu(ep->desc->wMaxPacketSize); xudc_write_fifo()
692 ret = xudc_eptxrx(ep, req, buf, length); xudc_write_fifo()
706 __func__, ep->ep_usb.name, length, is_last ? "/L" : "", xudc_write_fifo()
711 xudc_done(ep, req, 0); xudc_write_fifo()
721 xudc_done(ep, req, -ECONNRESET); xudc_write_fifo()
731 * @ep: pointer to the usb device endpoint structure.
734 static void xudc_nuke(struct xusb_ep *ep, int status) xudc_nuke() argument
738 while (!list_empty(&ep->queue)) { xudc_nuke()
739 req = list_first_entry(&ep->queue, struct xusb_req, queue); xudc_nuke()
740 xudc_done(ep, req, status); xudc_nuke()
753 struct xusb_ep *ep = to_xusb_ep(_ep); xudc_ep_set_halt() local
758 if (!_ep || (!ep->desc && ep->epnumber)) { xudc_ep_set_halt()
759 pr_debug("%s: bad ep or descriptor\n", __func__); xudc_ep_set_halt()
762 udc = ep->udc; xudc_ep_set_halt()
764 if (ep->is_in && (!list_empty(&ep->queue)) && value) { xudc_ep_set_halt()
769 if (ep->buffer0ready || ep->buffer1ready) { xudc_ep_set_halt()
778 epcfgreg = udc->read_fn(udc->addr + ep->offset); xudc_ep_set_halt()
780 udc->write_fn(udc->addr, ep->offset, epcfgreg); xudc_ep_set_halt()
783 epcfgreg = udc->read_fn(udc->addr + ep->offset); xudc_ep_set_halt()
785 udc->write_fn(udc->addr, ep->offset, epcfgreg); xudc_ep_set_halt()
786 if (ep->epnumber) { xudc_ep_set_halt()
788 epcfgreg = udc->read_fn(ep->udc->addr + ep->offset); xudc_ep_set_halt()
790 udc->write_fn(udc->addr, ep->offset, epcfgreg); xudc_ep_set_halt()
800 * @ep: pointer to the xusb endpoint structure.
805 static int __xudc_ep_enable(struct xusb_ep *ep, __xudc_ep_enable() argument
808 struct xusb_udc *udc = ep->udc; __xudc_ep_enable()
814 ep->is_in = ((desc->bEndpointAddress & USB_DIR_IN) != 0); __xudc_ep_enable()
816 ep->epnumber = (desc->bEndpointAddress & 0x0f); __xudc_ep_enable()
817 ep->desc = desc; __xudc_ep_enable()
818 ep->ep_usb.desc = desc; __xudc_ep_enable()
820 ep->ep_usb.maxpacket = maxpacket = le16_to_cpu(desc->wMaxPacketSize); __xudc_ep_enable()
826 ep->is_iso = 0; __xudc_ep_enable()
830 ep->is_iso = 0; __xudc_ep_enable()
838 ep->is_iso = 0; __xudc_ep_enable()
847 ep->is_iso = 1; __xudc_ep_enable()
851 ep->buffer0ready = 0; __xudc_ep_enable()
852 ep->buffer1ready = 0; __xudc_ep_enable()
853 ep->curbufnum = 0; __xudc_ep_enable()
854 ep->rambase = rambase[ep->epnumber]; __xudc_ep_enable()
855 xudc_epconfig(ep, udc); __xudc_ep_enable()
858 ep->epnumber, maxpacket); __xudc_ep_enable()
861 epcfg = udc->read_fn(udc->addr + ep->offset); __xudc_ep_enable()
863 udc->write_fn(udc->addr, ep->offset, epcfg); __xudc_ep_enable()
864 if (ep->epnumber) __xudc_ep_enable()
865 ep->rambase <<= 2; __xudc_ep_enable()
869 ier |= (XUSB_STATUS_INTR_BUFF_COMP_SHIFT_MASK << ep->epnumber); __xudc_ep_enable()
873 if (ep->epnumber && !ep->is_in) { __xudc_ep_enable()
875 1 << ep->epnumber); __xudc_ep_enable()
876 ep->buffer0ready = 1; __xudc_ep_enable()
878 (1 << (ep->epnumber + __xudc_ep_enable()
880 ep->buffer1ready = 1; __xudc_ep_enable()
896 struct xusb_ep *ep; xudc_ep_enable() local
902 pr_debug("%s: bad ep or descriptor\n", __func__); xudc_ep_enable()
906 ep = to_xusb_ep(_ep); xudc_ep_enable()
907 udc = ep->udc; xudc_ep_enable()
915 ret = __xudc_ep_enable(ep, desc); xudc_ep_enable()
929 struct xusb_ep *ep; xudc_ep_disable() local
935 pr_debug("%s: invalid ep\n", __func__); xudc_ep_disable()
939 ep = to_xusb_ep(_ep); xudc_ep_disable()
940 udc = ep->udc; xudc_ep_disable()
944 xudc_nuke(ep, -ESHUTDOWN); xudc_ep_disable()
947 ep->desc = NULL; xudc_ep_disable()
948 ep->ep_usb.desc = NULL; xudc_ep_disable()
950 dev_dbg(udc->dev, "USB Ep %d disable\n", ep->epnumber); xudc_ep_disable()
952 epcfg = udc->read_fn(udc->addr + ep->offset); xudc_ep_disable()
954 udc->write_fn(udc->addr, ep->offset, epcfg); xudc_ep_disable()
970 struct xusb_ep *ep = to_xusb_ep(_ep); xudc_ep_alloc_request() local
974 udc = ep->udc; xudc_ep_alloc_request()
981 req->ep = ep; xudc_ep_alloc_request()
1084 struct xusb_ep *ep = to_xusb_ep(_ep); xudc_ep_queue() local
1085 struct xusb_udc *udc = ep->udc; xudc_ep_queue()
1089 if (!ep->desc) { xudc_ep_queue()
1091 __func__, ep->name); xudc_ep_queue()
1107 ep->is_in); xudc_ep_queue()
1109 dev_dbg(udc->dev, "gadget_map failed ep%d\n", xudc_ep_queue()
1110 ep->epnumber); xudc_ep_queue()
1116 if (list_empty(&ep->queue)) { xudc_ep_queue()
1117 if (ep->is_in) { xudc_ep_queue()
1119 if (!xudc_write_fifo(ep, req)) xudc_ep_queue()
1123 if (!xudc_read_fifo(ep, req)) xudc_ep_queue()
1129 list_add_tail(&req->queue, &ep->queue); xudc_ep_queue()
1144 struct xusb_ep *ep = to_xusb_ep(_ep); xudc_ep_dequeue() local
1146 struct xusb_udc *udc = ep->udc; xudc_ep_dequeue()
1151 list_for_each_entry(req, &ep->queue, queue) { xudc_ep_dequeue()
1156 spin_unlock_irqrestore(&ep->udc->lock, flags); xudc_ep_dequeue()
1159 xudc_done(ep, req, -ECONNRESET); xudc_ep_dequeue()
1167 * @ep: pointer to the usb endpoint structure.
1174 static int xudc_ep0_enable(struct usb_ep *ep, xudc_ep0_enable() argument
1182 * @ep: pointer to the usb endpoint structure.
1188 static int xudc_ep0_disable(struct usb_ep *ep) xudc_ep0_disable() argument
1310 struct xusb_ep *ep = &udc->ep[ep_number]; xudc_eps_init() local
1313 list_add_tail(&ep->ep_usb.ep_list, xudc_eps_init()
1315 usb_ep_set_maxpacket_limit(&ep->ep_usb, xudc_eps_init()
1317 snprintf(ep->name, EPNAME_SIZE, "ep%d", ep_number); xudc_eps_init()
1318 ep->ep_usb.name = ep->name; xudc_eps_init()
1319 ep->ep_usb.ops = &xusb_ep_ops; xudc_eps_init()
1321 ep->ep_usb.caps.type_iso = true; xudc_eps_init()
1322 ep->ep_usb.caps.type_bulk = true; xudc_eps_init()
1323 ep->ep_usb.caps.type_int = true; xudc_eps_init()
1325 ep->ep_usb.name = ep0name; xudc_eps_init()
1326 usb_ep_set_maxpacket_limit(&ep->ep_usb, EP0_MAX_PACKET); xudc_eps_init()
1327 ep->ep_usb.ops = &xusb_ep0_ops; xudc_eps_init()
1329 ep->ep_usb.caps.type_control = true; xudc_eps_init()
1332 ep->ep_usb.caps.dir_in = true; xudc_eps_init()
1333 ep->ep_usb.caps.dir_out = true; xudc_eps_init()
1335 ep->udc = udc; xudc_eps_init()
1336 ep->epnumber = ep_number; xudc_eps_init()
1337 ep->desc = NULL; xudc_eps_init()
1342 ep->offset = XUSB_EP0_CONFIG_OFFSET + (ep_number * 0x10); xudc_eps_init()
1343 ep->is_in = 0; xudc_eps_init()
1344 ep->is_iso = 0; xudc_eps_init()
1345 ep->maxpacket = 0; xudc_eps_init()
1346 xudc_epconfig(ep, udc); xudc_eps_init()
1349 INIT_LIST_HEAD(&ep->queue); xudc_eps_init()
1360 struct xusb_ep *ep; xudc_stop_activity() local
1363 ep = &udc->ep[i]; xudc_stop_activity()
1364 xudc_nuke(ep, -ESHUTDOWN); xudc_stop_activity()
1379 struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO]; xudc_start()
1450 struct xusb_ep *ep; xudc_clear_stall_all_ep() local
1455 ep = &udc->ep[i]; xudc_clear_stall_all_ep()
1456 epcfgreg = udc->read_fn(udc->addr + ep->offset); xudc_clear_stall_all_ep()
1458 udc->write_fn(udc->addr, ep->offset, epcfgreg); xudc_clear_stall_all_ep()
1459 if (ep->epnumber) { xudc_clear_stall_all_ep()
1461 epcfgreg = udc->read_fn(udc->addr + ep->offset); xudc_clear_stall_all_ep()
1463 udc->write_fn(udc->addr, ep->offset, epcfgreg); xudc_clear_stall_all_ep()
1569 struct xusb_ep *ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO]; xudc_ep0_stall()
1584 struct xusb_ep *ep0 = &udc->ep[0]; xudc_setaddress()
1605 struct xusb_ep *ep0 = &udc->ep[0]; xudc_getstatus()
1625 target_ep = &udc->ep[epnum]; xudc_getstatus()
1660 struct xusb_ep *ep0 = &udc->ep[0]; xudc_set_clear_feature()
1692 target_ep = &udc->ep[endpoint]; xudc_set_clear_feature()
1746 struct xusb_ep *ep0 = &udc->ep[0]; xudc_handle_setup()
1812 struct xusb_ep *ep0 = &udc->ep[0]; xudc_ep0_out()
1861 struct xusb_ep *ep0 = &udc->ep[0]; xudc_ep0_in()
1959 struct xusb_ep *ep; xudc_nonctrl_ep_handler() local
1961 ep = &udc->ep[epnum]; xudc_nonctrl_ep_handler()
1964 ep->buffer0ready = 0; xudc_nonctrl_ep_handler()
1966 ep->buffer1ready = 0; xudc_nonctrl_ep_handler()
1968 if (list_empty(&ep->queue)) xudc_nonctrl_ep_handler()
1971 req = list_first_entry(&ep->queue, struct xusb_req, queue); xudc_nonctrl_ep_handler()
1973 if (ep->is_in) xudc_nonctrl_ep_handler()
1974 xudc_write_fifo(ep, req); xudc_nonctrl_ep_handler()
1976 xudc_read_fifo(ep, req); xudc_nonctrl_ep_handler()
2104 udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb; xudc_probe()
2122 ep0 = &udc->ep[0]; xudc_probe()
H A Dpxa25x_udc.c127 static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
215 struct pxa25x_ep *ep; pxa25x_ep_enable() local
218 ep = container_of (_ep, struct pxa25x_ep, ep); pxa25x_ep_enable()
221 || ep->bEndpointAddress != desc->bEndpointAddress pxa25x_ep_enable()
222 || ep->fifo_size < usb_endpoint_maxp (desc)) { pxa25x_ep_enable()
223 DMSG("%s, bad ep or descriptor\n", __func__); pxa25x_ep_enable()
228 if (ep->bmAttributes != desc->bmAttributes pxa25x_ep_enable()
229 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK pxa25x_ep_enable()
244 dev = ep->dev; pxa25x_ep_enable()
250 ep->ep.desc = desc; pxa25x_ep_enable()
251 ep->stopped = 0; pxa25x_ep_enable()
252 ep->pio_irqs = 0; pxa25x_ep_enable()
253 ep->ep.maxpacket = usb_endpoint_maxp (desc); pxa25x_ep_enable()
266 struct pxa25x_ep *ep; pxa25x_ep_disable() local
269 ep = container_of (_ep, struct pxa25x_ep, ep); pxa25x_ep_disable()
270 if (!_ep || !ep->ep.desc) { pxa25x_ep_disable()
272 _ep ? ep->ep.name : NULL); pxa25x_ep_disable()
277 nuke (ep, -ESHUTDOWN); pxa25x_ep_disable()
282 ep->ep.desc = NULL; pxa25x_ep_disable()
283 ep->stopped = 1; pxa25x_ep_disable()
332 static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status) done() argument
334 unsigned stopped = ep->stopped; done()
345 ep->ep.name, &req->req, status, done()
349 ep->stopped = 1; done()
350 usb_gadget_giveback_request(&ep->ep, &req->req); done()
351 ep->stopped = stopped; done()
386 write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) write_fifo() argument
390 max = usb_endpoint_maxp(ep->ep.desc); write_fifo()
395 count = write_packet(ep->reg_uddr, req, max); write_fifo()
407 is_short = unlikely (max < ep->fifo_size); write_fifo()
411 ep->ep.name, count, write_fifo()
419 *ep->reg_udccs = UDCCS_BI_TPC; write_fifo()
421 *ep->reg_udccs = UDCCS_BI_TSP; write_fifo()
425 done (ep, req, 0); write_fifo()
426 if (list_empty(&ep->queue)) write_fifo()
427 pio_irq_disable (ep->bEndpointAddress); write_fifo()
435 } while (*ep->reg_udccs & UDCCS_BI_TFS); write_fifo()
453 write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) write_ep0_fifo() argument
459 ep->dev->stats.write.bytes += count; write_ep0_fifo()
468 if (ep->dev->req_pending) write_ep0_fifo()
469 ep0start(ep->dev, UDCCS0_IPR, "short IN"); write_ep0_fifo()
474 done (ep, req, 0); write_ep0_fifo()
475 ep0_idle(ep->dev); write_ep0_fifo()
498 } else if (ep->dev->req_pending) write_ep0_fifo()
499 ep0start(ep->dev, 0, "IN"); write_ep0_fifo()
513 read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) read_fifo() argument
524 udccs = *ep->reg_udccs; read_fifo()
533 count = 1 + (0x0ff & *ep->reg_ubcr); read_fifo()
537 is_short = (count < ep->ep.maxpacket); read_fifo()
539 ep->ep.name, udccs, count, read_fifo()
543 u8 byte = (u8) *ep->reg_uddr; read_fifo()
552 ep->ep.name, count); read_fifo()
559 *ep->reg_udccs = UDCCS_BO_RPC; read_fifo()
563 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { read_fifo()
572 done (ep, req, 0); read_fifo()
573 if (list_empty(&ep->queue)) read_fifo()
574 pio_irq_disable (ep->bEndpointAddress); read_fifo()
590 read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) read_ep0_fifo() argument
607 DMSG("%s overflow\n", ep->ep.name); read_ep0_fifo()
632 struct pxa25x_ep *ep; pxa25x_ep_queue() local
643 ep = container_of(_ep, struct pxa25x_ep, ep); pxa25x_ep_queue()
644 if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) { pxa25x_ep_queue()
645 DMSG("%s, bad ep\n", __func__); pxa25x_ep_queue()
649 dev = ep->dev; pxa25x_ep_queue()
659 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC pxa25x_ep_queue()
660 && req->req.length > usb_endpoint_maxp(ep->ep.desc))) pxa25x_ep_queue()
672 if (list_empty(&ep->queue) && !ep->stopped) { pxa25x_ep_queue()
673 if (ep->ep.desc == NULL/* ep0 */) { pxa25x_ep_queue()
679 if (write_ep0_fifo(ep, req)) pxa25x_ep_queue()
692 done(ep, req, 0); pxa25x_ep_queue()
700 && read_ep0_fifo(ep, req))) { pxa25x_ep_queue()
702 done(ep, req, 0); pxa25x_ep_queue()
713 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) { pxa25x_ep_queue()
714 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0 pxa25x_ep_queue()
715 && write_fifo(ep, req)) pxa25x_ep_queue()
717 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0 pxa25x_ep_queue()
718 && read_fifo(ep, req)) { pxa25x_ep_queue()
722 if (likely(req && ep->ep.desc)) pxa25x_ep_queue()
723 pio_irq_enable(ep->bEndpointAddress); pxa25x_ep_queue()
728 list_add_tail(&req->queue, &ep->queue); pxa25x_ep_queue()
738 static void nuke(struct pxa25x_ep *ep, int status) nuke() argument
743 while (!list_empty(&ep->queue)) { nuke()
744 req = list_entry(ep->queue.next, nuke()
747 done(ep, req, status); nuke()
749 if (ep->ep.desc) nuke()
750 pio_irq_disable (ep->bEndpointAddress); nuke()
757 struct pxa25x_ep *ep; pxa25x_ep_dequeue() local
761 ep = container_of(_ep, struct pxa25x_ep, ep); pxa25x_ep_dequeue()
762 if (!_ep || ep->ep.name == ep0name) pxa25x_ep_dequeue()
768 list_for_each_entry (req, &ep->queue, queue) { pxa25x_ep_dequeue()
777 done(ep, req, -ECONNRESET); pxa25x_ep_dequeue()
787 struct pxa25x_ep *ep; pxa25x_ep_set_halt() local
790 ep = container_of(_ep, struct pxa25x_ep, ep); pxa25x_ep_set_halt()
792 || (!ep->ep.desc && ep->ep.name != ep0name)) pxa25x_ep_set_halt()
793 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { pxa25x_ep_set_halt()
794 DMSG("%s, bad ep\n", __func__); pxa25x_ep_set_halt()
809 if ((ep->bEndpointAddress & USB_DIR_IN) != 0 pxa25x_ep_set_halt()
810 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0 pxa25x_ep_set_halt()
811 || !list_empty(&ep->queue))) { pxa25x_ep_set_halt()
817 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF; pxa25x_ep_set_halt()
820 if (!ep->ep.desc) { pxa25x_ep_set_halt()
821 start_watchdog(ep->dev); pxa25x_ep_set_halt()
822 ep->dev->req_pending = 0; pxa25x_ep_set_halt()
823 ep->dev->ep0state = EP0_STALL; pxa25x_ep_set_halt()
829 if (*ep->reg_udccs & UDCCS_BI_SST) pxa25x_ep_set_halt()
842 struct pxa25x_ep *ep; pxa25x_ep_fifo_status() local
844 ep = container_of(_ep, struct pxa25x_ep, ep); pxa25x_ep_fifo_status()
846 DMSG("%s, bad ep\n", __func__); pxa25x_ep_fifo_status()
850 if ((ep->bEndpointAddress & USB_DIR_IN) != 0) pxa25x_ep_fifo_status()
852 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN pxa25x_ep_fifo_status()
853 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0) pxa25x_ep_fifo_status()
856 return (*ep->reg_ubcr & 0xfff) + 1; pxa25x_ep_fifo_status()
861 struct pxa25x_ep *ep; pxa25x_ep_fifo_flush() local
863 ep = container_of(_ep, struct pxa25x_ep, ep); pxa25x_ep_fifo_flush()
864 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) { pxa25x_ep_fifo_flush()
865 DMSG("%s, bad ep\n", __func__); pxa25x_ep_fifo_flush()
872 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) { pxa25x_ep_fifo_flush()
873 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0) pxa25x_ep_fifo_flush()
874 (void) *ep->reg_uddr; pxa25x_ep_fifo_flush()
879 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR pxa25x_ep_fifo_flush()
880 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC pxa25x_ep_fifo_flush()
1081 struct pxa25x_ep *ep = &dev->ep [i]; udc_seq_show() local
1087 desc = ep->ep.desc; udc_seq_show()
1090 tmp = *dev->ep [i].reg_udccs; udc_seq_show()
1093 ep->ep.name, usb_endpoint_maxp(desc), udc_seq_show()
1094 "pio", tmp, ep->pio_irqs); udc_seq_show()
1099 ep->pio_irqs); udc_seq_show()
1101 if (list_empty(&ep->queue)) { udc_seq_show()
1105 list_for_each_entry(req, &ep->queue, queue) { udc_seq_show()
1183 struct pxa25x_ep *ep = &dev->ep[i]; udc_reinit() local
1186 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list); udc_reinit()
1188 ep->ep.desc = NULL; udc_reinit()
1189 ep->stopped = 0; udc_reinit()
1190 INIT_LIST_HEAD (&ep->queue); udc_reinit()
1191 ep->pio_irqs = 0; udc_reinit()
1192 usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket); udc_reinit()
1294 struct pxa25x_ep *ep = &dev->ep[i]; reset_gadget() local
1296 ep->stopped = 1; reset_gadget()
1297 nuke(ep, -ESHUTDOWN); reset_gadget()
1321 struct pxa25x_ep *ep = &dev->ep[i]; stop_activity() local
1323 ep->stopped = 1; stop_activity()
1324 nuke(ep, -ESHUTDOWN); stop_activity()
1402 nuke(&dev->ep[i], -ECONNABORTED); clear_ep_state()
1423 struct pxa25x_ep *ep = &dev->ep [0]; handle_ep0() local
1431 if (list_empty(&ep->queue)) handle_ep0()
1434 req = list_entry(ep->queue.next, struct pxa25x_request, queue); handle_ep0()
1438 nuke(ep, -EPIPE); handle_ep0()
1446 nuke(ep, 0); handle_ep0()
1461 nuke (ep, -EPROTO); handle_ep0()
1509 * - ep reset doesn't include halt(?). handle_ep0()
1578 nuke(ep, -EPROTO); handle_ep0()
1603 done(ep, req, 0); handle_ep0()
1608 (void) write_ep0_fifo(ep, req); handle_ep0()
1616 if (read_ep0_fifo(ep, req)) handle_ep0()
1617 done(ep, req, 0); handle_ep0()
1623 done(ep, req, 0); handle_ep0()
1629 done(ep, req, 0); handle_ep0()
1644 static void handle_ep(struct pxa25x_ep *ep) handle_ep() argument
1647 int is_in = ep->bEndpointAddress & USB_DIR_IN; handle_ep()
1653 if (likely (!list_empty(&ep->queue))) handle_ep()
1654 req = list_entry(ep->queue.next, handle_ep()
1661 udccs = *ep->reg_udccs; handle_ep()
1664 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) handle_ep()
1668 *ep->reg_udccs = tmp; handle_ep()
1670 completed = write_fifo(ep, req); handle_ep()
1673 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK)) handle_ep()
1679 *ep->reg_udccs = tmp; handle_ep()
1683 completed = read_fifo(ep, req); handle_ep()
1685 pio_irq_disable (ep->bEndpointAddress); handle_ep()
1687 ep->pio_irqs++; handle_ep()
1767 dev->ep[0].pio_irqs++; pxa25x_udc_irq()
1777 handle_ep(&dev->ep[i]); pxa25x_udc_irq()
1783 handle_ep(&dev->ep[i+8]); pxa25x_udc_irq()
1811 .ep0 = &memory.ep[0].ep,
1820 .ep[0] = {
1821 .ep = {
1834 .ep[1] = {
1835 .ep = {
1849 .ep[2] = {
1850 .ep = {
1866 .ep[3] = {
1867 .ep = {
1881 .ep[4] = {
1882 .ep = {
1897 .ep[5] = {
1898 .ep = {
1913 .ep[6] = {
1914 .ep = {
1928 .ep[7] = {
1929 .ep = {
1944 .ep[8] = {
1945 .ep = {
1959 .ep[9] = {
1960 .ep = {
1975 .ep[10] = {
1976 .ep = {
1991 .ep[11] = {
1992 .ep = {
2006 .ep[12] = {
2007 .ep = {
2022 .ep[13] = {
2023 .ep = {
2037 .ep[14] = {
2038 .ep = {
2053 .ep[15] = {
2054 .ep = {
2130 /* A0/A1 "not released"; ep 13, 15 unusable */ pxa25x_udc_probe()
H A Dat91_udc.c109 static void proc_ep_show(struct seq_file *s, struct at91_ep *ep) proc_ep_show() argument
118 struct at91_udc *udc = ep->udc; proc_ep_show()
122 csr = __raw_readl(ep->creg); proc_ep_show()
128 ep->ep.name, ep->ep.maxpacket, proc_ep_show()
129 ep->is_in ? "in" : "out", proc_ep_show()
130 ep->is_iso ? " iso" : "", proc_ep_show()
131 ep->is_pingpong proc_ep_show()
132 ? (ep->fifo_bank ? "pong" : "ping") proc_ep_show()
134 ep->stopped ? " stopped" : ""); proc_ep_show()
154 if (list_empty (&ep->queue)) proc_ep_show()
157 else list_for_each_entry (req, &ep->queue, queue) { proc_ep_show()
181 seq_printf(s, " ep%d", i); proc_irq_show()
189 struct at91_ep *ep; proc_udc_show() local
232 proc_ep_show(s, &udc->ep[0]); proc_udc_show()
233 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) { proc_udc_show()
234 if (ep->ep.desc) proc_udc_show()
235 proc_ep_show(s, ep); proc_udc_show()
275 static void done(struct at91_ep *ep, struct at91_request *req, int status) done() argument
277 unsigned stopped = ep->stopped; done()
278 struct at91_udc *udc = ep->udc; done()
286 VDBG("%s done %p, status %d\n", ep->ep.name, req, status); done()
288 ep->stopped = 1; done()
290 usb_gadget_giveback_request(&ep->ep, &req->req); done()
292 ep->stopped = stopped; done()
295 if (list_empty(&ep->queue) && ep->int_mask != (1 << 0)) done()
296 at91_udp_write(udc, AT91_UDP_IDR, ep->int_mask); done()
326 static int read_fifo (struct at91_ep *ep, struct at91_request *req) read_fifo() argument
328 u32 __iomem *creg = ep->creg; read_fifo()
329 u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0)); read_fifo()
347 if (count > ep->ep.maxpacket) read_fifo()
348 count = ep->ep.maxpacket; read_fifo()
350 DBG("%s buffer overflow\n", ep->ep.name); read_fifo()
358 if (ep->is_pingpong) { read_fifo()
359 if (ep->fifo_bank == 0) { read_fifo()
361 ep->fifo_bank = 1; read_fifo()
364 ep->fifo_bank = 0; read_fifo()
371 is_done = (count < ep->ep.maxpacket); read_fifo()
375 PACKET("%s %p out/%d%s\n", ep->ep.name, &req->req, count, read_fifo()
383 done(ep, req, 0); read_fifo()
384 else if (ep->is_pingpong) { read_fifo()
401 static int write_fifo(struct at91_ep *ep, struct at91_request *req) write_fifo() argument
403 u32 __iomem *creg = ep->creg; write_fifo()
405 u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0)); write_fifo()
435 if (ep->ep.maxpacket < total) { write_fifo()
436 count = ep->ep.maxpacket; write_fifo()
440 is_last = (count < ep->ep.maxpacket) || !req->req.zero; write_fifo()
462 PACKET("%s %p in/%d%s\n", ep->ep.name, &req->req, count, write_fifo()
465 done(ep, req, 0); write_fifo()
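
In the at91 write_fifo() above, "is_last = (count < ep->ep.maxpacket) || !req->req.zero" decides whether a final full-length packet still needs a trailing zero-length packet. A small truth-table sketch of that rule (the function name and remaining-bytes parameter are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_last_packet(int count, int maxpacket, int remaining, bool zero)
    {
        if (remaining > count)
            return false;       /* more data still queued after this packet */
        return (count < maxpacket) || !zero;    /* else a ZLP must follow */
    }

    int main(void)
    {
        printf("%d\n", is_last_packet(64, 64, 64, false));  /* 1: done */
        printf("%d\n", is_last_packet(64, 64, 64, true));   /* 0: ZLP follows */
        printf("%d\n", is_last_packet(10, 64, 10, true));   /* 1: short ends it */
        return 0;
    }
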
469 static void nuke(struct at91_ep *ep, int status) nuke() argument
474 ep->stopped = 1; nuke()
475 if (list_empty(&ep->queue)) nuke()
478 VDBG("%s %s\n", __func__, ep->ep.name); nuke()
479 while (!list_empty(&ep->queue)) { nuke()
480 req = list_entry(ep->queue.next, struct at91_request, queue); nuke()
481 done(ep, req, status); nuke()
490 struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); at91_ep_enable() local
496 if (!_ep || !ep at91_ep_enable()
500 || maxpacket > ep->maxpacket) { at91_ep_enable()
501 DBG("bad ep or descriptor\n"); at91_ep_enable()
505 udc = ep->udc; at91_ep_enable()
532 if (!ep->is_pingpong) { at91_ep_enable()
543 ep->is_in = usb_endpoint_dir_in(desc); at91_ep_enable()
544 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC); at91_ep_enable()
545 ep->stopped = 0; at91_ep_enable()
546 if (ep->is_in) at91_ep_enable()
550 __raw_writel(tmp, ep->creg); at91_ep_enable()
552 ep->ep.maxpacket = maxpacket; at91_ep_enable()
558 at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_ep_enable()
567 struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); at91_ep_disable() local
568 struct at91_udc *udc = ep->udc; at91_ep_disable()
571 if (ep == &ep->udc->ep[0]) at91_ep_disable()
576 nuke(ep, -ESHUTDOWN); at91_ep_disable()
579 ep->ep.desc = NULL; at91_ep_disable()
580 ep->ep.maxpacket = ep->maxpacket; at91_ep_disable()
583 if (ep->udc->clocked) { at91_ep_disable()
584 at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_ep_disable()
586 __raw_writel(0, ep->creg); at91_ep_disable()
624 struct at91_ep *ep; at91_ep_queue() local
630 ep = container_of(_ep, struct at91_ep, ep); at91_ep_queue()
638 if (!_ep || (!ep->ep.desc && ep->ep.name != ep0name)) { at91_ep_queue()
639 DBG("invalid ep\n"); at91_ep_queue()
643 udc = ep->udc; at91_ep_queue()
656 if (list_empty(&ep->queue) && !ep->stopped) { at91_ep_queue()
667 is_ep0 = (ep->ep.name == ep0name); at91_ep_queue()
690 tmp = __raw_readl(ep->creg); at91_ep_queue()
693 __raw_writel(tmp, ep->creg); at91_ep_queue()
699 if (ep->is_in) at91_ep_queue()
700 status = write_fifo(ep, req); at91_ep_queue()
702 status = read_fifo(ep, req); at91_ep_queue()
712 list_add_tail (&req->queue, &ep->queue); at91_ep_queue()
713 at91_udp_write(udc, AT91_UDP_IER, ep->int_mask); at91_ep_queue()
722 struct at91_ep *ep; at91_ep_dequeue() local
727 ep = container_of(_ep, struct at91_ep, ep); at91_ep_dequeue()
728 if (!_ep || ep->ep.name == ep0name) at91_ep_dequeue()
731 udc = ep->udc; at91_ep_dequeue()
736 list_for_each_entry (req, &ep->queue, queue) { at91_ep_dequeue()
745 done(ep, req, -ECONNRESET); at91_ep_dequeue()
752 struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); at91_ep_set_halt() local
753 struct at91_udc *udc = ep->udc; at91_ep_set_halt()
759 if (!_ep || ep->is_iso || !ep->udc->clocked) at91_ep_set_halt()
762 creg = ep->creg; at91_ep_set_halt()
772 if (ep->is_in && (!list_empty(&ep->queue) || (csr >> 16) != 0)) at91_ep_set_halt()
779 VDBG("halt %s\n", ep->ep.name); at91_ep_set_halt()
781 at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); at91_ep_set_halt()
850 struct at91_ep *ep = &udc->ep[i]; udc_reinit() local
853 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); udc_reinit()
854 ep->ep.desc = NULL; udc_reinit()
855 ep->stopped = 0; udc_reinit()
856 ep->fifo_bank = 0; udc_reinit()
857 usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket); udc_reinit()
858 ep->creg = (void __iomem *) udc->udp_baseaddr + AT91_UDP_CSR(i); udc_reinit()
860 INIT_LIST_HEAD(&ep->queue); udc_reinit()
875 struct at91_ep *ep = &udc->ep[i]; reset_gadget() local
877 ep->stopped = 1; reset_gadget()
878 nuke(ep, -ESHUTDOWN); reset_gadget()
900 struct at91_ep *ep = &udc->ep[i]; stop_activity() local
901 ep->stopped = 1; stop_activity()
902 nuke(ep, -ESHUTDOWN); stop_activity()
1020 static int handle_ep(struct at91_ep *ep) handle_ep() argument
1023 u32 __iomem *creg = ep->creg; handle_ep()
1026 if (!list_empty(&ep->queue)) handle_ep()
1027 req = list_entry(ep->queue.next, handle_ep()
1032 if (ep->is_in) { handle_ep()
1039 return write_fifo(ep, req); handle_ep()
1044 if (ep->is_iso && req) handle_ep()
1052 return read_fifo(ep, req); handle_ep()
1062 static void handle_setup(struct at91_udc *udc, struct at91_ep *ep, u32 csr) handle_setup() argument
1064 u32 __iomem *creg = ep->creg; handle_setup()
1065 u8 __iomem *dreg = ep->creg + (AT91_UDP_FDR(0) - AT91_UDP_CSR(0)); handle_setup()
1078 ep->is_in = 1; handle_setup()
1081 ep->is_in = 0; handle_setup()
1093 ep->stopped = 0; handle_setup()
1191 ep = &udc->ep[tmp]; handle_setup()
1192 if (tmp >= NUM_ENDPOINTS || (tmp && !ep->ep.desc)) handle_setup()
1197 if (!ep->is_in) handle_setup()
1199 } else if (ep->is_in) handle_setup()
1202 PACKET("get %s status\n", ep->ep.name); handle_setup()
1203 if (__raw_readl(ep->creg) & AT91_UDP_FORCESTALL) handle_setup()
1214 ep = &udc->ep[tmp]; handle_setup()
1217 if (!ep->ep.desc || ep->is_iso) handle_setup()
1220 if (!ep->is_in) handle_setup()
1222 } else if (ep->is_in) handle_setup()
1225 tmp = __raw_readl(ep->creg); handle_setup()
1228 __raw_writel(tmp, ep->creg); handle_setup()
1233 ep = &udc->ep[tmp]; handle_setup()
1238 if (!ep->ep.desc || ep->is_iso) handle_setup()
1241 if (!ep->is_in) handle_setup()
1243 } else if (ep->is_in) handle_setup()
1246 at91_udp_write(udc, AT91_UDP_RST_EP, ep->int_mask); handle_setup()
1248 tmp = __raw_readl(ep->creg); handle_setup()
1251 __raw_writel(tmp, ep->creg); handle_setup()
1252 if (!list_empty(&ep->queue)) handle_setup()
1253 handle_ep(ep); handle_setup()
1290 struct at91_ep *ep0 = &udc->ep[0]; handle_ep0()
1505 struct at91_ep *ep = &udc->ep[1]; at91_udc_irq() local
1512 handle_ep(ep); at91_udc_irq()
1513 ep++; at91_udc_irq()
1614 struct at91_ep *ep; at91rm9200_udc_init() local
1619 ep = &udc->ep[i]; at91rm9200_udc_init()
1624 ep->maxpacket = 8; at91rm9200_udc_init()
1627 ep->maxpacket = 64; at91rm9200_udc_init()
1630 ep->maxpacket = 256; at91rm9200_udc_init()
1670 struct at91_ep *ep; at91sam9260_udc_init() local
1674 ep = &udc->ep[i]; at91sam9260_udc_init()
1678 ep->maxpacket = 64; at91sam9260_udc_init()
1681 ep->maxpacket = 512; at91sam9260_udc_init()
1708 struct at91_ep *ep; at91sam9261_udc_init() local
1712 ep = &udc->ep[i]; at91sam9261_udc_init()
1716 ep->maxpacket = 8; at91sam9261_udc_init()
1719 ep->maxpacket = 64; at91sam9261_udc_init()
1722 ep->maxpacket = 256; at91sam9261_udc_init()
1753 struct at91_ep *ep; at91sam9263_udc_init() local
1757 ep = &udc->ep[i]; at91sam9263_udc_init()
1764 ep->maxpacket = 64; at91sam9263_udc_init()
1768 ep->maxpacket = 256; at91sam9263_udc_init()
1832 struct at91_ep *ep; at91udc_probe() local
1847 udc->gadget.ep0 = &udc->ep[0].ep; at91udc_probe()
1852 ep = &udc->ep[i]; at91udc_probe()
1853 ep->ep.name = ep_info[i].name; at91udc_probe()
1854 ep->ep.caps = ep_info[i].caps; at91udc_probe()
1855 ep->ep.ops = &at91_ep_ops; at91udc_probe()
1856 ep->udc = udc; at91udc_probe()
1857 ep->int_mask = BIT(i); at91udc_probe()
1859 ep->is_pingpong = 1; at91udc_probe()
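Most of the at91_udc.c hits cluster around one completion idiom shared by done(), nuke() and the dequeue path: unlink the request, temporarily mark the endpoint stopped so the gadget's completion callback cannot restart FIFO processing underneath us, give the request back, then restore the flag. A condensed, lock-free sketch of that flow, using simplified types rather than the driver's API:

struct sk_req {
	struct sk_req *next;
	int status;
	void (*complete)(struct sk_req *req);
};

struct sk_ep {
	struct sk_req *queue;   /* head of the pending request list */
	int stopped;
};

/* Complete the head request: unlink it, shield the endpoint so the
 * callback cannot restart FIFO processing, call back, unshield. */
static void sk_done(struct sk_ep *ep, struct sk_req *req, int status)
{
	int stopped = ep->stopped;

	ep->queue = req->next;          /* sketch assumes req is the head */
	req->status = status;
	ep->stopped = 1;                /* the callback may queue more I/O */
	req->complete(req);
	ep->stopped = stopped;          /* restore the caller's view */
}

/* nuke(): fail every pending request, typically with -ESHUTDOWN. */
static void sk_nuke(struct sk_ep *ep, int status)
{
	while (ep->queue)
		sk_done(ep, ep->queue, status);
}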
H A Dmv_u3d_core.c40 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
55 struct mv_u3d_ep *ep; mv_u3d_ep0_reset() local
60 ep = &u3d->eps[i]; mv_u3d_ep0_reset()
61 ep->u3d = u3d; mv_u3d_ep0_reset()
63 /* ep0 ep context, ep0 in and out share the same ep context */ mv_u3d_ep0_reset()
64 ep->ep_context = &u3d->ep_context[1]; mv_u3d_ep0_reset()
67 /* reset ep state machine */ mv_u3d_ep0_reset()
166 "complete_tr error: ep=%d %s: error = 0x%x\n", mv_u3d_process_ep_req()
187 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
188 __releases(&ep->udc->lock)
189 __acquires(&ep->udc->lock)
191 struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
194 /* Remove the req from the ep queue */
208 dma_unmap_single(ep->u3d->gadget.dev.parent,
216 usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
220 ep->ep.name, &req->req, status,
224 spin_unlock(&ep->u3d->lock);
226 usb_gadget_giveback_request(&ep->ep, &req->req);
228 spin_lock(&ep->u3d->lock);
231 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req) mv_u3d_queue_trb() argument
238 u3d = ep->u3d; mv_u3d_queue_trb()
239 direction = mv_u3d_ep_dir(ep); mv_u3d_queue_trb()
241 /* ep0 in and out share the same ep context slot 1 */ mv_u3d_queue_trb()
242 if (ep->ep_num == 0) mv_u3d_queue_trb()
245 ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]); mv_u3d_queue_trb()
248 if (!list_empty(&ep->queue)) { mv_u3d_queue_trb()
268 /* ring the doorbell for the ep */ mv_u3d_queue_trb()
269 if (ep->ep_num == 0) mv_u3d_queue_trb()
272 tmp = ep->ep_num * 2 mv_u3d_queue_trb()
293 u3d = req->ep->u3d; mv_u3d_build_trb_one()
322 if (req->ep->ep_num == 0) mv_u3d_build_trb_one()
329 direction = mv_u3d_ep_dir(req->ep); mv_u3d_build_trb_one()
356 u3d = req->ep->u3d; mv_u3d_build_trb_chain()
368 if (req->ep->ep_num == 0) mv_u3d_build_trb_chain()
375 direction = mv_u3d_ep_dir(req->ep); mv_u3d_build_trb_chain()
383 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) mv_u3d_build_trb_chain()
423 u3d = req->ep->u3d; mv_u3d_req_to_trb()
482 mv_u3d_start_queue(struct mv_u3d_ep *ep) mv_u3d_start_queue() argument
484 struct mv_u3d *u3d = ep->u3d; mv_u3d_start_queue()
488 if (!list_empty(&ep->req_list) && !ep->processing) mv_u3d_start_queue()
489 req = list_entry(ep->req_list.next, struct mv_u3d_req, list); mv_u3d_start_queue()
493 ep->processing = 1; mv_u3d_start_queue()
497 mv_u3d_ep_dir(ep)); mv_u3d_start_queue()
507 ret = mv_u3d_queue_trb(ep, req); mv_u3d_start_queue()
509 ep->processing = 0; mv_u3d_start_queue()
513 ep->processing = 0; mv_u3d_start_queue()
520 list_add_tail(&req->queue, &ep->queue); mv_u3d_start_queue()
529 struct mv_u3d_ep *ep; mv_u3d_ep_enable() local
538 ep = container_of(_ep, struct mv_u3d_ep, ep); mv_u3d_ep_enable()
539 u3d = ep->u3d; mv_u3d_ep_enable()
544 direction = mv_u3d_ep_dir(ep); mv_u3d_ep_enable()
552 ep_context = (struct mv_u3d_ep_context *)ep->ep_context; mv_u3d_ep_enable()
560 "than 16 on bulk ep\n"); mv_u3d_ep_enable()
565 "maxburst: %d on bulk %s\n", maxburst, ep->name); mv_u3d_ep_enable()
575 "max burst should be 1 on int ep " mv_u3d_ep_enable()
584 "max burst should be 1 on isoc ep " mv_u3d_ep_enable()
594 ep->ep.maxpacket = max; mv_u3d_ep_enable()
595 ep->ep.desc = desc; mv_u3d_ep_enable()
596 ep->enabled = 1; mv_u3d_ep_enable()
600 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_enable()
602 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_enable()
605 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_enable()
611 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); mv_u3d_ep_enable()
613 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_enable()
615 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_enable()
618 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_enable()
624 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1); mv_u3d_ep_enable()
635 struct mv_u3d_ep *ep; mv_u3d_ep_disable() local
643 ep = container_of(_ep, struct mv_u3d_ep, ep); mv_u3d_ep_disable()
644 if (!ep->ep.desc) mv_u3d_ep_disable()
647 u3d = ep->u3d; mv_u3d_ep_disable()
650 ep_context = ep->ep_context; mv_u3d_ep_disable()
652 direction = mv_u3d_ep_dir(ep); mv_u3d_ep_disable()
656 mv_u3d_nuke(ep, -ESHUTDOWN); mv_u3d_ep_disable()
661 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); mv_u3d_ep_disable()
664 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); mv_u3d_ep_disable()
666 epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1); mv_u3d_ep_disable()
669 iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1); mv_u3d_ep_disable()
672 ep->enabled = 0; mv_u3d_ep_disable()
674 ep->ep.desc = NULL; mv_u3d_ep_disable()
703 struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep); mv_u3d_ep_fifo_flush() local
708 if (!ep->enabled) mv_u3d_ep_fifo_flush()
711 u3d = ep->u3d; mv_u3d_ep_fifo_flush()
712 direction = mv_u3d_ep_dir(ep); mv_u3d_ep_fifo_flush()
715 if (!ep->ep_num) { mv_u3d_ep_fifo_flush()
735 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_fifo_flush()
737 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_fifo_flush()
741 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) & mv_u3d_ep_fifo_flush()
749 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num, mv_u3d_ep_fifo_flush()
757 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_fifo_flush()
759 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_fifo_flush()
763 while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) & mv_u3d_ep_fifo_flush()
771 "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num, mv_u3d_ep_fifo_flush()
785 struct mv_u3d_ep *ep; mv_u3d_ep_queue() local
794 ep = container_of(_ep, struct mv_u3d_ep, ep); mv_u3d_ep_queue()
795 u3d = ep->u3d; mv_u3d_ep_queue()
799 if (!ep->ep_num mv_u3d_ep_queue()
822 if (unlikely(!ep->ep.desc)) { mv_u3d_ep_queue()
823 dev_err(u3d->dev, "%s, bad ep\n", __func__); mv_u3d_ep_queue()
826 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { mv_u3d_ep_queue()
827 if (req->req.length > ep->ep.maxpacket) mv_u3d_ep_queue()
837 req->ep = ep; mv_u3d_ep_queue()
840 spin_lock_irqsave(&ep->req_lock, flags); mv_u3d_ep_queue()
841 is_first_req = list_empty(&ep->req_list); mv_u3d_ep_queue()
842 list_add_tail(&req->list, &ep->req_list); mv_u3d_ep_queue()
843 spin_unlock_irqrestore(&ep->req_lock, flags); mv_u3d_ep_queue()
851 mv_u3d_start_queue(ep); mv_u3d_ep_queue()
859 struct mv_u3d_ep *ep; mv_u3d_ep_dequeue() local
871 ep = container_of(_ep, struct mv_u3d_ep, ep); mv_u3d_ep_dequeue()
872 u3d = ep->u3d; mv_u3d_ep_dequeue()
874 spin_lock_irqsave(&ep->u3d->lock, flags); mv_u3d_ep_dequeue()
877 list_for_each_entry(req, &ep->queue, queue) { mv_u3d_ep_dequeue()
887 if (ep->queue.next == &req->queue) { mv_u3d_ep_dequeue()
891 /* The request isn't the last request in this ep queue */ mv_u3d_ep_dequeue()
892 if (req->queue.next != &ep->queue) { mv_u3d_ep_dequeue()
894 "it is the last request in this ep queue\n"); mv_u3d_ep_dequeue()
895 ep_context = ep->ep_context; mv_u3d_ep_dequeue()
904 ep_context = ep->ep_context; mv_u3d_ep_dequeue()
912 mv_u3d_done(ep, req, -ECONNRESET); mv_u3d_ep_dequeue()
914 /* remove the req from the ep req list */ mv_u3d_ep_dequeue()
915 if (!list_empty(&ep->req_list)) { mv_u3d_ep_dequeue()
917 curr_req = list_entry(ep->req_list.next, mv_u3d_ep_dequeue()
921 ep->processing = 0; mv_u3d_ep_dequeue()
926 spin_unlock_irqrestore(&ep->u3d->lock, flags); mv_u3d_ep_dequeue()
934 struct mv_u3d_ep *ep = u3d->eps; mv_u3d_ep_set_stall() local
938 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_set_stall()
943 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); mv_u3d_ep_set_stall()
945 tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_set_stall()
950 iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); mv_u3d_ep_set_stall()
956 struct mv_u3d_ep *ep; mv_u3d_ep_set_halt_wedge() local
961 ep = container_of(_ep, struct mv_u3d_ep, ep); mv_u3d_ep_set_halt_wedge()
962 u3d = ep->u3d; mv_u3d_ep_set_halt_wedge()
963 if (!ep->ep.desc) { mv_u3d_ep_set_halt_wedge()
968 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { mv_u3d_ep_set_halt_wedge()
974 * An attempt to halt an IN ep will fail if any transfer requests mv_u3d_ep_set_halt_wedge()
977 if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN) mv_u3d_ep_set_halt_wedge()
978 && !list_empty(&ep->queue)) { mv_u3d_ep_set_halt_wedge()
983 spin_lock_irqsave(&ep->u3d->lock, flags); mv_u3d_ep_set_halt_wedge()
984 mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt); mv_u3d_ep_set_halt_wedge()
986 ep->wedge = 1; mv_u3d_ep_set_halt_wedge()
988 ep->wedge = 0; mv_u3d_ep_set_halt_wedge()
989 spin_unlock_irqrestore(&ep->u3d->lock, flags); mv_u3d_ep_set_halt_wedge()
991 if (ep->ep_num == 0) mv_u3d_ep_set_halt_wedge()
1062 /* Enable ctrl ep */ mv_u3d_controller_start()
1315 struct mv_u3d_ep *ep; mv_u3d_eps_init() local
1320 ep = &u3d->eps[1]; mv_u3d_eps_init()
1321 ep->u3d = u3d; mv_u3d_eps_init()
1322 strncpy(ep->name, "ep0", sizeof(ep->name)); mv_u3d_eps_init()
1323 ep->ep.name = ep->name; mv_u3d_eps_init()
1324 ep->ep.ops = &mv_u3d_ep_ops; mv_u3d_eps_init()
1325 ep->wedge = 0; mv_u3d_eps_init()
1326 usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE); mv_u3d_eps_init()
1327 ep->ep.caps.type_control = true; mv_u3d_eps_init()
1328 ep->ep.caps.dir_in = true; mv_u3d_eps_init()
1329 ep->ep.caps.dir_out = true; mv_u3d_eps_init()
1330 ep->ep_num = 0; mv_u3d_eps_init()
1331 ep->ep.desc = &mv_u3d_ep0_desc; mv_u3d_eps_init()
1332 INIT_LIST_HEAD(&ep->queue); mv_u3d_eps_init()
1333 INIT_LIST_HEAD(&ep->req_list); mv_u3d_eps_init()
1334 ep->ep_type = USB_ENDPOINT_XFER_CONTROL; mv_u3d_eps_init()
1337 ep->ep_context = &u3d->ep_context[1]; mv_u3d_eps_init()
1341 ep = &u3d->eps[i]; mv_u3d_eps_init()
1343 snprintf(name, sizeof(name), "ep%din", i >> 1); mv_u3d_eps_init()
1344 ep->direction = MV_U3D_EP_DIR_IN; mv_u3d_eps_init()
1345 ep->ep.caps.dir_in = true; mv_u3d_eps_init()
1347 snprintf(name, sizeof(name), "ep%dout", i >> 1); mv_u3d_eps_init()
1348 ep->direction = MV_U3D_EP_DIR_OUT; mv_u3d_eps_init()
1349 ep->ep.caps.dir_out = true; mv_u3d_eps_init()
1351 ep->u3d = u3d; mv_u3d_eps_init()
1352 strncpy(ep->name, name, sizeof(ep->name)); mv_u3d_eps_init()
1353 ep->ep.name = ep->name; mv_u3d_eps_init()
1355 ep->ep.caps.type_iso = true; mv_u3d_eps_init()
1356 ep->ep.caps.type_bulk = true; mv_u3d_eps_init()
1357 ep->ep.caps.type_int = true; mv_u3d_eps_init()
1359 ep->ep.ops = &mv_u3d_ep_ops; mv_u3d_eps_init()
1360 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); mv_u3d_eps_init()
1361 ep->ep_num = i / 2; mv_u3d_eps_init()
1363 INIT_LIST_HEAD(&ep->queue); mv_u3d_eps_init()
1364 list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list); mv_u3d_eps_init()
1366 INIT_LIST_HEAD(&ep->req_list); mv_u3d_eps_init()
1367 spin_lock_init(&ep->req_lock); mv_u3d_eps_init()
1368 ep->ep_context = &u3d->ep_context[i]; mv_u3d_eps_init()
1375 static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status) mv_u3d_nuke() argument
1378 mv_u3d_ep_fifo_flush(&ep->ep); mv_u3d_nuke()
1380 while (!list_empty(&ep->queue)) { mv_u3d_nuke()
1382 req = list_entry(ep->queue.next, struct mv_u3d_req, queue); mv_u3d_nuke()
1383 mv_u3d_done(ep, req, status); mv_u3d_nuke()
1391 struct mv_u3d_ep *ep; mv_u3d_stop_activity() local
1395 list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) { mv_u3d_stop_activity()
1396 mv_u3d_nuke(ep, -ESHUTDOWN); mv_u3d_stop_activity()
1633 dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp); mv_u3d_irq_process_tr_complete()
1652 /* remove the req from the ep request list after completion */ mv_u3d_irq_process_tr_complete()
1682 dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n"); mv_u3d_irq_process_tr_complete()
1731 dev_err(u3d->dev, "under run, ep%d\n", trbunderrun); mv_u3d_irq()
1882 dev_err(&dev->dev, "allocate ep context memory failed\n"); mv_u3d_probe()
1943 u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */ mv_u3d_probe()
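A detail worth pulling out of the mv_u3d_core.c hits: endpoint contexts live in a flat array indexed by ep_num * 2 + direction, except that ep0's IN and OUT halves share slot 1 (see the ep_context assignments in mv_u3d_ep0_reset(), mv_u3d_queue_trb() and mv_u3d_eps_init() above; slot 0 appears unused in these hits). A one-function sketch of that indexing rule:

/* Context-slot selection as the hits above suggest: ep0 IN and OUT
 * share slot 1, every other endpoint gets ep_num * 2 + direction
 * (0 = OUT, 1 = IN). */
static unsigned int sk_ep_context_slot(unsigned int ep_num, unsigned int dir_in)
{
	if (ep_num == 0)
		return 1;               /* ep0 halves share one context */
	return ep_num * 2 + dir_in;
}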
H A Dmv_udc_core.c44 #define ep_dir(ep) (((ep)->ep_num == 0) ? \
45 ((ep)->udc->ep0_dir) : ((ep)->direction))
63 static void nuke(struct mv_ep *ep, int status);
77 struct mv_ep *ep; ep0_reset() local
83 ep = &udc->eps[i]; ep0_reset()
84 ep->udc = udc; ep0_reset()
87 ep->dqh = &udc->ep_dqh[i]; ep0_reset()
90 ep->dqh->max_packet_length = ep0_reset()
94 ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; ep0_reset()
170 "complete_tr error: ep=%d %s: error = 0x%x\n", process_ep_req()
190 bit_pos = 1 << curr_req->ep->ep_num; process_ep_req()
192 bit_pos = 1 << (16 + curr_req->ep->ep_num); process_ep_req()
213 static void done(struct mv_ep *ep, struct mv_req *req, int status)
214 __releases(&ep->udc->lock)
215 __acquires(&ep->udc->lock)
218 unsigned char stopped = ep->stopped;
222 udc = (struct mv_udc *)ep->udc;
241 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
245 ep->ep.name, &req->req, status,
248 ep->stopped = 1;
250 spin_unlock(&ep->udc->lock);
252 usb_gadget_giveback_request(&ep->ep, &req->req);
254 spin_lock(&ep->udc->lock);
255 ep->stopped = stopped;
258 static int queue_dtd(struct mv_ep *ep, struct mv_req *req) queue_dtd() argument
267 udc = ep->udc; queue_dtd()
268 direction = ep_dir(ep); queue_dtd()
269 dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]); queue_dtd()
270 bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); queue_dtd()
273 if (!(list_empty(&ep->queue))) { queue_dtd()
275 lastreq = list_entry(ep->queue.prev, struct mv_req, queue); queue_dtd()
350 if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) { build_dtd()
351 dqh = req->ep->dqh; build_dtd()
355 (unsigned)(mult * req->ep->ep.maxpacket)); build_dtd()
360 udc = req->ep->udc; build_dtd()
384 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) build_dtd()
418 udc = req->ep->udc; req_to_dtd()
448 struct mv_ep *ep; mv_ep_enable() local
455 ep = container_of(_ep, struct mv_ep, ep); mv_ep_enable()
456 udc = ep->udc; mv_ep_enable()
465 direction = ep_dir(ep); mv_ep_enable()
474 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); mv_ep_enable()
480 "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x," mv_ep_enable()
482 (unsigned)ep->ep_num, direction ? "SEND" : "RECV", mv_ep_enable()
513 dqh = ep->dqh; mv_ep_enable()
521 ep->ep.maxpacket = max; mv_ep_enable()
522 ep->ep.desc = desc; mv_ep_enable()
523 ep->stopped = 0; mv_ep_enable()
526 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); mv_ep_enable()
538 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); mv_ep_enable()
544 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); mv_ep_enable()
548 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); mv_ep_enable()
551 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); mv_ep_enable()
555 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); mv_ep_enable()
568 struct mv_ep *ep; mv_ep_disable() local
573 ep = container_of(_ep, struct mv_ep, ep); mv_ep_disable()
574 if ((_ep == NULL) || !ep->ep.desc) mv_ep_disable()
577 udc = ep->udc; mv_ep_disable()
580 dqh = ep->dqh; mv_ep_disable()
584 direction = ep_dir(ep); mv_ep_disable()
585 bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); mv_ep_disable()
591 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); mv_ep_disable()
595 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); mv_ep_disable()
598 nuke(ep, -ESHUTDOWN); mv_ep_disable()
600 ep->ep.desc = NULL; mv_ep_disable()
601 ep->stopped = 1; mv_ep_disable()
637 struct mv_ep *ep; mv_ep_fifo_flush() local
643 ep = container_of(_ep, struct mv_ep, ep); mv_ep_fifo_flush()
644 if (!ep->ep.desc) mv_ep_fifo_flush()
647 udc = ep->udc; mv_ep_fifo_flush()
648 direction = ep_dir(ep); mv_ep_fifo_flush()
650 if (ep->ep_num == 0) mv_ep_fifo_flush()
653 bit_pos = 1 << ep->ep_num; mv_ep_fifo_flush()
655 bit_pos = 1 << (16 + ep->ep_num); mv_ep_fifo_flush()
697 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); mv_ep_queue() local
699 struct mv_udc *udc = ep->udc; mv_ep_queue()
709 if (unlikely(!_ep || !ep->ep.desc)) { mv_ep_queue()
710 dev_err(&udc->dev->dev, "%s, bad ep", __func__); mv_ep_queue()
714 udc = ep->udc; mv_ep_queue()
718 req->ep = ep; mv_ep_queue()
721 retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep)); mv_ep_queue()
733 retval = queue_dtd(ep, req); mv_ep_queue()
747 if (ep->ep_num == 0) mv_ep_queue()
751 list_add_tail(&req->queue, &ep->queue); mv_ep_queue()
757 usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep)); mv_ep_queue()
762 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req) mv_prime_ep() argument
764 struct mv_dqh *dqh = ep->dqh; mv_prime_ep()
777 bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); mv_prime_ep()
780 writel(bit_pos, &ep->udc->op_regs->epprime); mv_prime_ep()
786 struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); mv_ep_dequeue() local
788 struct mv_udc *udc = ep->udc; mv_ep_dequeue()
796 spin_lock_irqsave(&ep->udc->lock, flags); mv_ep_dequeue()
797 stopped = ep->stopped; mv_ep_dequeue()
799 /* Stop the ep before we deal with the queue */ mv_ep_dequeue()
800 ep->stopped = 1; mv_ep_dequeue()
801 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); mv_ep_dequeue()
802 if (ep_dir(ep) == EP_DIR_IN) mv_ep_dequeue()
806 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); mv_ep_dequeue()
809 list_for_each_entry(req, &ep->queue, queue) { mv_ep_dequeue()
819 if (ep->queue.next == &req->queue) { mv_ep_dequeue()
823 /* The request isn't the last request in this ep queue */ mv_ep_dequeue()
824 if (req->queue.next != &ep->queue) { mv_ep_dequeue()
831 mv_prime_ep(ep, next_req); mv_ep_dequeue()
835 qh = ep->dqh; mv_ep_dequeue()
850 done(ep, req, -ECONNRESET); mv_ep_dequeue()
854 epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); mv_ep_dequeue()
855 if (ep_dir(ep) == EP_DIR_IN) mv_ep_dequeue()
859 writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); mv_ep_dequeue()
860 ep->stopped = stopped; mv_ep_dequeue()
862 spin_unlock_irqrestore(&ep->udc->lock, flags); mv_ep_dequeue()
903 struct mv_ep *ep; mv_ep_set_halt_wedge() local
908 ep = container_of(_ep, struct mv_ep, ep); mv_ep_set_halt_wedge()
909 udc = ep->udc; mv_ep_set_halt_wedge()
910 if (!_ep || !ep->ep.desc) { mv_ep_set_halt_wedge()
915 if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { mv_ep_set_halt_wedge()
921 * An attempt to halt an IN ep will fail if any transfer requests mv_ep_set_halt_wedge()
924 if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) { mv_ep_set_halt_wedge()
929 spin_lock_irqsave(&ep->udc->lock, flags); mv_ep_set_halt_wedge()
930 ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt); mv_ep_set_halt_wedge()
932 ep->wedge = 1; mv_ep_set_halt_wedge()
934 ep->wedge = 0; mv_ep_set_halt_wedge()
935 spin_unlock_irqrestore(&ep->udc->lock, flags); mv_ep_set_halt_wedge()
937 if (ep->ep_num == 0) { mv_ep_set_halt_wedge()
1247 struct mv_ep *ep; eps_init() local
1252 ep = &udc->eps[0]; eps_init()
1253 ep->udc = udc; eps_init()
1254 strncpy(ep->name, "ep0", sizeof(ep->name)); eps_init()
1255 ep->ep.name = ep->name; eps_init()
1256 ep->ep.ops = &mv_ep_ops; eps_init()
1257 ep->wedge = 0; eps_init()
1258 ep->stopped = 0; eps_init()
1259 usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE); eps_init()
1260 ep->ep.caps.type_control = true; eps_init()
1261 ep->ep.caps.dir_in = true; eps_init()
1262 ep->ep.caps.dir_out = true; eps_init()
1263 ep->ep_num = 0; eps_init()
1264 ep->ep.desc = &mv_ep0_desc; eps_init()
1265 INIT_LIST_HEAD(&ep->queue); eps_init()
1267 ep->ep_type = USB_ENDPOINT_XFER_CONTROL; eps_init()
1271 ep = &udc->eps[i]; eps_init()
1273 snprintf(name, sizeof(name), "ep%din", i / 2); eps_init()
1274 ep->direction = EP_DIR_IN; eps_init()
1275 ep->ep.caps.dir_in = true; eps_init()
1277 snprintf(name, sizeof(name), "ep%dout", i / 2); eps_init()
1278 ep->direction = EP_DIR_OUT; eps_init()
1279 ep->ep.caps.dir_out = true; eps_init()
1281 ep->udc = udc; eps_init()
1282 strncpy(ep->name, name, sizeof(ep->name)); eps_init()
1283 ep->ep.name = ep->name; eps_init()
1285 ep->ep.caps.type_iso = true; eps_init()
1286 ep->ep.caps.type_bulk = true; eps_init()
1287 ep->ep.caps.type_int = true; eps_init()
1289 ep->ep.ops = &mv_ep_ops; eps_init()
1290 ep->stopped = 0; eps_init()
1291 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); eps_init()
1292 ep->ep_num = i / 2; eps_init()
1294 INIT_LIST_HEAD(&ep->queue); eps_init()
1295 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); eps_init()
1297 ep->dqh = &udc->ep_dqh[i]; eps_init()
1304 static void nuke(struct mv_ep *ep, int status) nuke() argument
1307 ep->stopped = 1; nuke()
1310 mv_ep_fifo_flush(&ep->ep); nuke()
1312 while (!list_empty(&ep->queue)) { nuke()
1314 req = list_entry(ep->queue.next, struct mv_req, queue); nuke()
1315 done(ep, req, status); nuke()
1321 struct mv_ep *ep; gadget_reset() local
1325 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { gadget_reset()
1326 nuke(ep, -ESHUTDOWN); gadget_reset()
1339 struct mv_ep *ep; stop_activity() local
1343 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { stop_activity()
1344 nuke(ep, -ESHUTDOWN); stop_activity()
1431 static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req) prime_status_complete() argument
1433 struct mv_ep *mvep = container_of(ep, struct mv_ep, ep); prime_status_complete()
1455 struct mv_ep *ep; udc_prime_status() local
1457 ep = &udc->eps[0]; udc_prime_status()
1470 req->ep = ep; udc_prime_status()
1482 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, udc_prime_status()
1484 ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); udc_prime_status()
1490 retval = queue_dtd(ep, req); udc_prime_status()
1503 list_add_tail(&req->queue, &ep->queue); udc_prime_status()
1507 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); udc_prime_status()
1573 struct mv_ep *ep; ch9clearfeature() local
1594 ep = &udc->eps[ep_num * 2 + direction]; ch9clearfeature()
1595 if (ep->wedge == 1) ch9clearfeature()
1667 __releases(&ep->udc->lock)
1668 __acquires(&ep->udc->lock)
2268 udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */ mv_udc_probe()
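The mv_udc_core.c hits repeat one bit-layout computation everywhere (queue_dtd(), mv_ep_enable(), mv_ep_fifo_flush(), mv_prime_ep()): the ENDPTPRIME/ENDPTFLUSH-style registers pack OUT endpoints into bits 0..15 and IN endpoints into bits 16..31. A tiny sketch of that expression factored into a helper:

#include <stdint.h>

enum { SK_EP_DIR_OUT = 0, SK_EP_DIR_IN = 1 };

/* ENDPTPRIME/ENDPTFLUSH-style layout: OUT endpoints occupy bits 0..15,
 * IN endpoints bits 16..31 -- exactly the expression the hits repeat. */
static uint32_t sk_ep_bit_pos(unsigned int ep_num, int direction)
{
	return (uint32_t)1 << ((direction == SK_EP_DIR_OUT ? 0 : 16) + ep_num);
}

/* e.g. ep3 OUT -> bit 3, ep3 IN -> bit 19 */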
H A Ds3c2410_udc.c252 static void s3c2410_udc_done(struct s3c2410_ep *ep, s3c2410_udc_done() argument
255 unsigned halted = ep->halted; s3c2410_udc_done()
264 ep->halted = 1; s3c2410_udc_done()
265 usb_gadget_giveback_request(&ep->ep, &req->req); s3c2410_udc_done()
266 ep->halted = halted; s3c2410_udc_done()
270 struct s3c2410_ep *ep, int status) s3c2410_udc_nuke()
273 if (!ep) s3c2410_udc_nuke()
276 while (!list_empty(&ep->queue)) { s3c2410_udc_nuke()
278 req = list_entry(ep->queue.next, struct s3c2410_request, s3c2410_udc_nuke()
280 s3c2410_udc_done(ep, req, status); s3c2410_udc_nuke()
320 static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep, s3c2410_udc_write_fifo() argument
329 idx = ep->bEndpointAddress & 0x7F; s3c2410_udc_write_fifo()
350 count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket); s3c2410_udc_write_fifo()
353 if (count != ep->ep.maxpacket) s3c2410_udc_write_fifo()
363 "Written ep%d %d.%d of %d b [last %d,z %d]\n", s3c2410_udc_write_fifo()
376 ep->dev->ep0state = EP0_IDLE; s3c2410_udc_write_fifo()
385 s3c2410_udc_done(ep, req, 0); s3c2410_udc_write_fifo()
420 static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep, s3c2410_udc_read_fifo() argument
432 idx = ep->bEndpointAddress & 0x7F; s3c2410_udc_read_fifo()
469 if (fifo_count > ep->ep.maxpacket) s3c2410_udc_read_fifo()
470 avail = ep->ep.maxpacket; s3c2410_udc_read_fifo()
479 if (idx != 0 && fifo_count < ep->ep.maxpacket) { s3c2410_udc_read_fifo()
499 ep->dev->ep0state = EP0_IDLE; s3c2410_udc_read_fifo()
508 s3c2410_udc_done(ep, req, 0); s3c2410_udc_read_fifo()
600 struct s3c2410_ep *ep, s3c2410_udc_handle_ep0_idle()
610 s3c2410_udc_nuke(dev, ep, -EPROTO); s3c2410_udc_handle_ep0_idle()
681 s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0); s3c2410_udc_handle_ep0_idle()
694 s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1); s3c2410_udc_handle_ep0_idle()
742 struct s3c2410_ep *ep = &dev->ep[0]; s3c2410_udc_handle_ep0() local
746 if (list_empty(&ep->queue)) s3c2410_udc_handle_ep0()
749 req = list_entry(ep->queue.next, struct s3c2410_request, queue); s3c2410_udc_handle_ep0()
762 s3c2410_udc_nuke(dev, ep, -EPIPE); s3c2410_udc_handle_ep0()
772 s3c2410_udc_nuke(dev, ep, 0); s3c2410_udc_handle_ep0()
779 s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr); s3c2410_udc_handle_ep0()
785 s3c2410_udc_write_fifo(ep, req); s3c2410_udc_handle_ep0()
791 s3c2410_udc_read_fifo(ep, req); s3c2410_udc_handle_ep0()
810 static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep) s3c2410_udc_handle_ep() argument
813 int is_in = ep->bEndpointAddress & USB_DIR_IN; s3c2410_udc_handle_ep()
817 if (likely(!list_empty(&ep->queue))) s3c2410_udc_handle_ep()
818 req = list_entry(ep->queue.next, s3c2410_udc_handle_ep()
823 idx = ep->bEndpointAddress & 0x7F; s3c2410_udc_handle_ep()
828 dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n", s3c2410_udc_handle_ep()
840 s3c2410_udc_write_fifo(ep, req); s3c2410_udc_handle_ep()
844 dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1); s3c2410_udc_handle_ep()
854 s3c2410_udc_read_fifo(ep, req); s3c2410_udc_handle_ep()
916 udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3, s3c2410_udc_irq()
978 dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i); s3c2410_udc_irq()
982 s3c2410_udc_handle_ep(&dev->ep[i]); s3c2410_udc_irq()
993 s3c2410_udc_handle_ep(&dev->ep[i]); s3c2410_udc_irq()
1011 static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep) to_s3c2410_ep() argument
1013 return container_of(ep, struct s3c2410_ep, ep); to_s3c2410_ep()
1033 struct s3c2410_ep *ep; s3c2410_udc_ep_enable() local
1039 ep = to_s3c2410_ep(_ep); s3c2410_udc_ep_enable()
1046 dev = ep->dev; s3c2410_udc_ep_enable()
1054 ep->ep.desc = desc; s3c2410_udc_ep_enable()
1055 ep->halted = 0; s3c2410_udc_ep_enable()
1056 ep->bEndpointAddress = desc->bEndpointAddress; s3c2410_udc_ep_enable()
1059 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1067 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1069 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1076 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1078 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1084 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1086 udc_write(ep->num, S3C2410_UDC_INDEX_REG); s3c2410_udc_ep_enable()
1092 udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG); s3c2410_udc_ep_enable()
1096 dprintk(DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n", s3c2410_udc_ep_enable()
1097 _ep->name, ep->num, tmp, s3c2410_udc_ep_enable()
1111 struct s3c2410_ep *ep = to_s3c2410_ep(_ep); s3c2410_udc_ep_disable() local
1115 if (!_ep || !ep->ep.desc) { s3c2410_udc_ep_disable()
1117 _ep ? ep->ep.name : NULL); s3c2410_udc_ep_disable()
1125 ep->ep.desc = NULL; s3c2410_udc_ep_disable()
1126 ep->halted = 1; s3c2410_udc_ep_disable()
1128 s3c2410_udc_nuke(ep->dev, ep, -ESHUTDOWN); s3c2410_udc_ep_disable()
1132 udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG); s3c2410_udc_ep_disable()
1168 struct s3c2410_ep *ep = to_s3c2410_ep(_ep); s3c2410_udc_free_request() local
1173 if (!ep || !_req || (!ep->ep.desc && _ep->name != ep0name)) s3c2410_udc_free_request()
1187 struct s3c2410_ep *ep = to_s3c2410_ep(_ep); s3c2410_udc_queue() local
1193 if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) { s3c2410_udc_queue()
1198 dev = ep->dev; s3c2410_udc_queue()
1223 dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n", s3c2410_udc_queue()
1224 __func__, ep->bEndpointAddress, _req->length); s3c2410_udc_queue()
1226 if (ep->bEndpointAddress) { s3c2410_udc_queue()
1227 udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG); s3c2410_udc_queue()
1229 ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN) s3c2410_udc_queue()
1240 if (list_empty(&ep->queue) && !ep->halted) { s3c2410_udc_queue()
1241 if (ep->bEndpointAddress == 0 /* ep0 */) { s3c2410_udc_queue()
1245 && s3c2410_udc_write_fifo(ep, s3c2410_udc_queue()
1255 && s3c2410_udc_read_fifo(ep, s3c2410_udc_queue()
1266 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0 s3c2410_udc_queue()
1268 && s3c2410_udc_write_fifo(ep, req)) { s3c2410_udc_queue()
1272 && s3c2410_udc_read_fifo(ep, req)) { s3c2410_udc_queue()
1279 list_add_tail(&req->queue, &ep->queue); s3c2410_udc_queue()
1292 struct s3c2410_ep *ep = to_s3c2410_ep(_ep); s3c2410_udc_dequeue() local
1306 udc = to_s3c2410_udc(ep->gadget); s3c2410_udc_dequeue()
1310 list_for_each_entry(req, &ep->queue, queue) { s3c2410_udc_dequeue()
1324 s3c2410_udc_done(ep, req, -ECONNRESET); s3c2410_udc_dequeue()
1336 struct s3c2410_ep *ep = to_s3c2410_ep(_ep); s3c2410_udc_set_halt() local
1341 if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) { s3c2410_udc_set_halt()
1348 idx = ep->bEndpointAddress & 0x7F; s3c2410_udc_set_halt()
1355 ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN) s3c2410_udc_set_halt()
1359 if ((ep->bEndpointAddress & USB_DIR_IN) != 0) { s3c2410_udc_set_halt()
1382 ep->halted = value ? 1 : 0; s3c2410_udc_set_halt()
1602 struct s3c2410_ep *ep = &dev->ep[i]; s3c2410_udc_reinit() local
1605 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); s3c2410_udc_reinit()
1607 ep->dev = dev; s3c2410_udc_reinit()
1608 ep->ep.desc = NULL; s3c2410_udc_reinit()
1609 ep->halted = 0; s3c2410_udc_reinit()
1610 INIT_LIST_HEAD(&ep->queue); s3c2410_udc_reinit()
1611 usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket); s3c2410_udc_reinit()
1630 udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3, s3c2410_udc_enable()
1680 .ep0 = &memory.ep[0].ep,
1688 .ep[0] = {
1690 .ep = {
1701 .ep[1] = {
1703 .ep = {
1715 .ep[2] = {
1717 .ep = {
1729 .ep[3] = {
1731 .ep = {
1743 .ep[4] = {
1745 .ep = {
1794 memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE; s3c2410_udc_probe()
1795 memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE; s3c2410_udc_probe()
1796 memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE; s3c2410_udc_probe()
1797 memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE; s3c2410_udc_probe()
269 s3c2410_udc_nuke(struct s3c2410_udc *udc, struct s3c2410_ep *ep, int status) s3c2410_udc_nuke() argument
599 s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev, struct s3c2410_ep *ep, struct usb_ctrlrequest *crq, u32 ep0csr) s3c2410_udc_handle_ep0_idle() argument
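The recurring two-step in the s3c2410_udc.c hits (udc_write(ep->num, S3C2410_UDC_INDEX_REG) before every CSR access in s3c2410_udc_ep_enable() and s3c2410_udc_queue()) is a banked-register idiom: the controller exposes one shared CSR window selected by an index register. A self-contained approximation, with a fake register file and illustrative offsets standing in for the real MMIO map:

#include <stdint.h>

/* Fake register file so the sketch stands alone; the real driver does
 * MMIO through udc_read()/udc_write(). Offsets are illustrative. */
static uint32_t sk_regs[256];
#define SK_INDEX_REG   0x78
#define SK_IN_CSR1_REG 0x80

static void sk_udc_write(uint32_t val, uint32_t reg) { sk_regs[reg] = val; }
static uint32_t sk_udc_read(uint32_t reg)            { return sk_regs[reg]; }

/* Banked CSR access: write the endpoint number to the index register,
 * after which the shared CSR window refers to that endpoint. */
static uint32_t sk_ep_read_csr(unsigned int ep_num)
{
	sk_udc_write(ep_num, SK_INDEX_REG);
	return sk_udc_read(SK_IN_CSR1_REG);
}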
H A Dlpc32xx_udc.c97 #define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
127 struct usb_ep ep; member in struct:lpc32xx_ep
190 struct lpc32xx_ep ep[NUM_ENDPOINTS]; member in struct:lpc32xx_udc
511 static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep) proc_ep_show() argument
517 ep->ep.name, ep->ep.maxpacket, proc_ep_show()
518 ep->is_in ? "in" : "out"); proc_ep_show()
519 seq_printf(s, " type %4s", epnames[ep->eptype]); proc_ep_show()
520 seq_printf(s, " ints: %12d", ep->totalints); proc_ep_show()
522 if (list_empty(&ep->queue)) proc_ep_show()
525 list_for_each_entry(req, &ep->queue, queue) { proc_ep_show()
538 struct lpc32xx_ep *ep; proc_udc_show() local
554 proc_ep_show(s, &udc->ep[0]); proc_udc_show()
555 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) proc_udc_show()
556 proc_ep_show(s, ep); proc_udc_show()
1036 static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) udc_ep_in_req_dma() argument
1039 u32 hwep = ep->hwep_num; udc_ep_in_req_dma()
1041 ep->req_pending = 1; udc_ep_in_req_dma()
1044 req = list_entry(ep->queue.next, struct lpc32xx_request, queue); udc_ep_in_req_dma()
1053 if (req->req.length % ep->ep.maxpacket) udc_ep_in_req_dma()
1062 static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) udc_ep_out_req_dma() argument
1065 u32 hwep = ep->hwep_num; udc_ep_out_req_dma()
1067 ep->req_pending = 1; udc_ep_out_req_dma()
1070 req = list_entry(ep->queue.next, struct lpc32xx_request, queue); udc_ep_out_req_dma()
1116 struct lpc32xx_ep *ep = &udc->ep[0]; udc_enable() local
1136 udc_realize_hwep(udc, i, ep->ep.maxpacket); udc_enable()
1401 struct lpc32xx_ep *ep = &udc->ep[i]; uda_usb_reset() local
1402 ep->req_pending = 0; uda_usb_reset()
1445 struct lpc32xx_ep *ep = &udc->ep[i]; udc_reinit() local
1448 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); udc_reinit()
1449 usb_ep_set_maxpacket_limit(&ep->ep, ep->maxpacket); udc_reinit()
1450 INIT_LIST_HEAD(&ep->queue); udc_reinit()
1451 ep->req_pending = 0; udc_reinit()
1458 static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status) done() argument
1460 struct lpc32xx_udc *udc = ep->udc; done()
1468 if (ep->lep) { done()
1469 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in); done()
1476 ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status); done()
1478 ep->req_pending = 0; done()
1480 usb_gadget_giveback_request(&ep->ep, &req->req); done()
1485 static void nuke(struct lpc32xx_ep *ep, int status) nuke() argument
1489 while (!list_empty(&ep->queue)) { nuke()
1490 req = list_entry(ep->queue.next, struct lpc32xx_request, queue); nuke()
1491 done(ep, req, status); nuke()
1495 uda_disable_hwepint(ep->udc, ep->hwep_num); nuke()
1496 udc_disable_hwep(ep->udc, ep->hwep_num); nuke()
1504 struct lpc32xx_ep *ep0 = &udc->ep[0]; udc_ep0_in_req()
1520 } else if (ts > ep0->ep.maxpacket) udc_ep0_in_req()
1521 ts = ep0->ep.maxpacket; /* Just send what we can */ udc_ep0_in_req()
1529 if (tsend >= ep0->ep.maxpacket) udc_ep0_in_req()
1542 struct lpc32xx_ep *ep0 = &udc->ep[0]; udc_ep0_out_req()
1561 if (bufferspace > ep0->ep.maxpacket) udc_ep0_out_req()
1562 bufferspace = ep0->ep.maxpacket; udc_ep0_out_req()
1570 if (tr < ep0->ep.maxpacket) { udc_ep0_out_req()
1594 struct lpc32xx_ep *ep = &udc->ep[i]; stop_activity() local
1595 nuke(ep, -ESHUTDOWN); stop_activity()
1627 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); lpc32xx_ep_disable() local
1628 struct lpc32xx_udc *udc = ep->udc; lpc32xx_ep_disable()
1631 if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0)) lpc32xx_ep_disable()
1635 nuke(ep, -ESHUTDOWN); lpc32xx_ep_disable()
1638 udc_ep_dma_disable(udc, ep->hwep_num); lpc32xx_ep_disable()
1639 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr)); lpc32xx_ep_disable()
1640 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr)); lpc32xx_ep_disable()
1641 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr)); lpc32xx_ep_disable()
1642 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr)); lpc32xx_ep_disable()
1645 udc->udca_v_base[ep->hwep_num] = 0; lpc32xx_ep_disable()
1648 uda_clear_hwepint(udc, ep->hwep_num); lpc32xx_ep_disable()
1649 udc_unrealize_hwep(udc, ep->hwep_num); lpc32xx_ep_disable()
1651 ep->hwep_num = 0; lpc32xx_ep_disable()
1665 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); lpc32xx_ep_enable() local
1666 struct lpc32xx_udc *udc = ep->udc; lpc32xx_ep_enable()
1672 if ((!_ep) || (!ep) || (!desc) || lpc32xx_ep_enable()
1674 dev_dbg(udc->dev, "bad ep or descriptor\n"); lpc32xx_ep_enable()
1678 if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { lpc32xx_ep_enable()
1679 dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); lpc32xx_ep_enable()
1684 if (ep->hwep_num_base == 0) { lpc32xx_ep_enable()
1701 if (maxpacket > ep->maxpacket) { lpc32xx_ep_enable()
1729 ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0; lpc32xx_ep_enable()
1730 ep->ep.maxpacket = maxpacket; lpc32xx_ep_enable()
1733 if (ep->is_in) lpc32xx_ep_enable()
1735 ep->hwep_num = ep->hwep_num_base + EP_IN; lpc32xx_ep_enable()
1737 ep->hwep_num = ep->hwep_num_base; lpc32xx_ep_enable()
1739 ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name, lpc32xx_ep_enable()
1740 ep->hwep_num, maxpacket, (ep->is_in == 1)); lpc32xx_ep_enable()
1744 udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket); lpc32xx_ep_enable()
1745 udc_clr_buffer_hwep(udc, ep->hwep_num); lpc32xx_ep_enable()
1746 uda_disable_hwepint(udc, ep->hwep_num); lpc32xx_ep_enable()
1747 udc_clrstall_hwep(udc, ep->hwep_num); lpc32xx_ep_enable()
1750 udc_ep_dma_disable(udc, ep->hwep_num); lpc32xx_ep_enable()
1751 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr)); lpc32xx_ep_enable()
1752 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr)); lpc32xx_ep_enable()
1753 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr)); lpc32xx_ep_enable()
1754 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr)); lpc32xx_ep_enable()
1798 struct lpc32xx_ep *ep; lpc32xx_ep_queue() local
1804 ep = container_of(_ep, struct lpc32xx_ep, ep); lpc32xx_ep_queue()
1810 udc = ep->udc; lpc32xx_ep_queue()
1815 if (ep->lep) { lpc32xx_ep_queue()
1818 status = usb_gadget_map_request(&udc->gadget, _req, ep->is_in); lpc32xx_ep_queue()
1836 if (ep->eptype == EP_ISO_TYPE) { lpc32xx_ep_queue()
1841 if (ep->is_in) lpc32xx_ep_queue()
1846 dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) | lpc32xx_ep_queue()
1850 ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name, lpc32xx_ep_queue()
1851 _req, _req->length, _req->buf, ep->is_in, _req->zero); lpc32xx_ep_queue()
1860 if (list_empty(&ep->queue)) { lpc32xx_ep_queue()
1861 list_add_tail(&req->queue, &ep->queue); lpc32xx_ep_queue()
1863 if (ep->hwep_num_base == 0) { lpc32xx_ep_queue()
1865 if (ep->is_in) { lpc32xx_ep_queue()
1874 } else if (ep->is_in) { lpc32xx_ep_queue()
1876 if (!ep->req_pending) lpc32xx_ep_queue()
1877 udc_ep_in_req_dma(udc, ep); lpc32xx_ep_queue()
1880 if (!ep->req_pending) lpc32xx_ep_queue()
1881 udc_ep_out_req_dma(udc, ep); lpc32xx_ep_queue()
1883 list_add_tail(&req->queue, &ep->queue); lpc32xx_ep_queue()
1893 struct lpc32xx_ep *ep; lpc32xx_ep_dequeue() local
1897 ep = container_of(_ep, struct lpc32xx_ep, ep); lpc32xx_ep_dequeue()
1898 if (!_ep || ep->hwep_num_base == 0) lpc32xx_ep_dequeue()
1901 spin_lock_irqsave(&ep->udc->lock, flags); lpc32xx_ep_dequeue()
1904 list_for_each_entry(req, &ep->queue, queue) { lpc32xx_ep_dequeue()
1909 spin_unlock_irqrestore(&ep->udc->lock, flags); lpc32xx_ep_dequeue()
1913 done(ep, req, -ECONNRESET); lpc32xx_ep_dequeue()
1915 spin_unlock_irqrestore(&ep->udc->lock, flags); lpc32xx_ep_dequeue()
1923 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); lpc32xx_ep_set_halt() local
1924 struct lpc32xx_udc *udc = ep->udc; lpc32xx_ep_set_halt()
1927 if ((!ep) || (ep->hwep_num <= 1)) lpc32xx_ep_set_halt()
1931 if (ep->is_in) lpc32xx_ep_set_halt()
1938 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num), lpc32xx_ep_set_halt()
1942 ep->wedge = 0; lpc32xx_ep_set_halt()
1943 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num), lpc32xx_ep_set_halt()
1955 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); lpc32xx_ep_set_wedge() local
1957 if (!_ep || !ep->udc) lpc32xx_ep_set_wedge()
1960 ep->wedge = 1; lpc32xx_ep_set_wedge()
1977 void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) udc_send_in_zlp() argument
1980 udc_clearep_getsts(udc, ep->hwep_num); udc_send_in_zlp()
1983 udc_write_hwep(udc, ep->hwep_num, NULL, 0); udc_send_in_zlp()
1991 void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) udc_handle_eps() argument
1996 if (ep->hwep_num <= 0) udc_handle_eps()
1999 uda_clear_hwepint(udc, ep->hwep_num); udc_handle_eps()
2002 if (!(udc->enabled_hwepints & (1 << ep->hwep_num))) udc_handle_eps()
2006 epstatus = udc_clearep_getsts(udc, ep->hwep_num); udc_handle_eps()
2015 if (ep->is_in) { udc_handle_eps()
2016 udc_send_in_zlp(udc, ep); udc_handle_eps()
2017 uda_disable_hwepint(udc, ep->hwep_num); udc_handle_eps()
2022 req = list_entry(ep->queue.next, struct lpc32xx_request, queue); udc_handle_eps()
2024 done(ep, req, 0); udc_handle_eps()
2027 if (!list_empty(&ep->queue)) { udc_handle_eps()
2028 if (ep->is_in) udc_handle_eps()
2029 udc_ep_in_req_dma(udc, ep); udc_handle_eps()
2031 udc_ep_out_req_dma(udc, ep); udc_handle_eps()
2033 ep->req_pending = 0; udc_handle_eps()
2039 static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep) udc_handle_dma_ep() argument
2046 ep->totalints++; udc_handle_dma_ep()
2049 req = list_entry(ep->queue.next, struct lpc32xx_request, queue); udc_handle_dma_ep()
2051 ep_err(ep, "DMA interrupt on no req!\n"); udc_handle_dma_ep()
2058 ep_warn(ep, "DMA descriptor did not retire\n"); udc_handle_dma_ep()
2061 udc_ep_dma_disable(udc, ep->hwep_num); udc_handle_dma_ep()
2062 writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr)); udc_handle_dma_ep()
2063 writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr)); udc_handle_dma_ep()
2067 (1 << ep->hwep_num)) { udc_handle_dma_ep()
2068 writel((1 << ep->hwep_num), udc_handle_dma_ep()
2070 ep_err(ep, "AHB critical error!\n"); udc_handle_dma_ep()
2071 ep->req_pending = 0; udc_handle_dma_ep()
2076 done(ep, req, -ECONNABORTED); udc_handle_dma_ep()
2085 ep->req_pending = 0; udc_handle_dma_ep()
2086 ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n", udc_handle_dma_ep()
2089 done(ep, req, -ECONNABORTED); udc_handle_dma_ep()
2094 ep->req_pending = 0; udc_handle_dma_ep()
2095 ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n", udc_handle_dma_ep()
2097 done(ep, req, -ECONNABORTED); udc_handle_dma_ep()
2108 ep->req_pending = 0; udc_handle_dma_ep()
2109 ep_err(ep, "DMA critical EP error: System error (0x%x)!\n", udc_handle_dma_ep()
2111 done(ep, req, -ECONNABORTED); udc_handle_dma_ep()
2116 if (ep->eptype == EP_ISO_TYPE) { udc_handle_dma_ep()
2117 if (ep->is_in) udc_handle_dma_ep()
2132 if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) { udc_handle_dma_ep()
2133 udc_clearep_getsts(udc, ep->hwep_num); udc_handle_dma_ep()
2134 uda_enable_hwepint(udc, ep->hwep_num); udc_handle_dma_ep()
2135 epstatus = udc_clearep_getsts(udc, ep->hwep_num); udc_handle_dma_ep()
2140 udc_send_in_zlp(udc, ep); udc_handle_dma_ep()
2144 done(ep, req, 0); udc_handle_dma_ep()
2147 udc_clearep_getsts(udc, ep->hwep_num); udc_handle_dma_ep()
2148 if (!list_empty((&ep->queue))) { udc_handle_dma_ep()
2149 if (ep->is_in) udc_handle_dma_ep()
2150 udc_ep_in_req_dma(udc, ep); udc_handle_dma_ep()
2152 udc_ep_out_req_dma(udc, ep); udc_handle_dma_ep()
2154 ep->req_pending = 0; udc_handle_dma_ep()
2197 struct lpc32xx_ep *ep; udc_get_status() local
2212 ep = &udc->ep[tmp]; udc_get_status()
2217 if (!ep->is_in) udc_get_status()
2219 } else if (ep->is_in) udc_get_status()
2223 udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num)); udc_get_status()
2224 tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num)); udc_get_status()
2244 struct lpc32xx_ep *ep, *ep0 = &udc->ep[0]; udc_handle_ep0_setup() local
2299 ep = &udc->ep[tmp]; udc_handle_ep0_setup()
2300 tmp = ep->hwep_num; udc_handle_ep0_setup()
2306 else if (!ep->wedge) udc_handle_ep0_setup()
2384 struct lpc32xx_ep *ep0 = &udc->ep[0]; udc_handle_ep0_in()
2418 struct lpc32xx_ep *ep0 = &udc->ep[0]; udc_handle_ep0_out()
2574 .ep[0] = {
2575 .ep = {
2587 .ep[1] = {
2588 .ep = {
2600 .ep[2] = {
2601 .ep = {
2613 .ep[3] = {
2614 .ep = {
2626 .ep[4] = {
2627 .ep = {
2639 .ep[5] = {
2640 .ep = {
2652 .ep[6] = {
2653 .ep = {
2665 .ep[7] = {
2666 .ep = {
2678 .ep[8] = {
2679 .ep = {
2691 .ep[9] = {
2692 .ep = {
2704 .ep[10] = {
2705 .ep = {
2717 .ep[11] = {
2718 .ep = {
2730 .ep[12] = {
2731 .ep = {
2743 .ep[13] = {
2744 .ep = {
2756 .ep[14] = {
2757 .ep = {
2769 .ep[15] = {
2770 .ep = {
2854 if (tmp & (1 << udc->ep[i].hwep_num)) lpc32xx_usb_hp_irq()
2855 udc_handle_eps(udc, &udc->ep[i]); lpc32xx_usb_hp_irq()
2879 if (tmp & (1 << udc->ep[i].hwep_num)) lpc32xx_usb_devdma_irq()
2880 udc_handle_dma_ep(udc, &udc->ep[i]); lpc32xx_usb_devdma_irq()
3067 udc->ep[i].udc = udc; lpc32xx_udc_probe()
3068 udc->gadget.ep0 = &udc->ep[0].ep; lpc32xx_udc_probe()
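lpc32xx_udc.c maps each logical endpoint onto a pair of hardware endpoints: EP_MASK_SEL() at the top of the listing and the "ep->hwep_num = ep->hwep_num_base + EP_IN" assignment in lpc32xx_ep_enable() both encode hw = 2 * logical + direction. A sketch of that mapping, assuming EP_OUT == 0 and EP_IN == 1 as the hits imply:

/* Logical-to-hardware endpoint mapping implied by EP_MASK_SEL() and
 * the hwep_num_base + EP_IN assignment above. */
enum { SK_EP_OUT = 0, SK_EP_IN = 1 };

static unsigned int sk_hwep_num(unsigned int logical_ep, int is_in)
{
	return logical_ep * 2 + (is_in ? SK_EP_IN : SK_EP_OUT);
}

static unsigned int sk_hwep_mask(unsigned int logical_ep, int is_in)
{
	return 1u << sk_hwep_num(logical_ep, is_in);
}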
H A Dnet2280.h94 struct usb_ep ep; member in struct:net2280_ep
117 static inline void allow_status(struct net2280_ep *ep) allow_status() argument
123 &ep->regs->ep_rsp); allow_status()
124 ep->stopped = 1; allow_status()
127 static inline void allow_status_338x(struct net2280_ep *ep) allow_status_338x() argument
134 writel(BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE), &ep->regs->ep_rsp); allow_status_338x()
136 ep->stopped = 1; allow_status_338x()
139 ep->responded = 0; allow_status_338x()
155 struct net2280_ep ep[9]; member in struct:net2280
193 static inline void set_halt(struct net2280_ep *ep) set_halt() argument
198 ((ep->dev->chiprev == CHIPREV_1) << SET_NAK_OUT_PACKETS) | set_halt()
200 &ep->regs->ep_rsp); set_halt()
203 static inline void clear_halt(struct net2280_ep *ep) clear_halt() argument
212 ((ep->dev->chiprev == CHIPREV_1) << CLEAR_NAK_OUT_PACKETS), clear_halt()
213 &ep->regs->ep_rsp); clear_halt()
337 static inline void set_fifo_bytecount(struct net2280_ep *ep, unsigned count) set_fifo_bytecount() argument
339 if (ep->dev->pdev->vendor == 0x17cc) set_fifo_bytecount()
340 writeb(count, 2 + (u8 __iomem *) &ep->regs->ep_cfg); set_fifo_bytecount()
342 u32 tmp = readl(&ep->cfg->ep_cfg) & set_fifo_bytecount()
344 writel(tmp | (count << EP_FIFO_BYTE_COUNT), &ep->cfg->ep_cfg); set_fifo_bytecount()
348 static inline void start_out_naking(struct net2280_ep *ep) start_out_naking() argument
351 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); start_out_naking()
353 readl(&ep->regs->ep_rsp); start_out_naking()
356 static inline void stop_out_naking(struct net2280_ep *ep) stop_out_naking() argument
360 tmp = readl(&ep->regs->ep_stat); stop_out_naking()
362 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); stop_out_naking()
366 static inline void set_max_speed(struct net2280_ep *ep, u32 max) set_max_speed() argument
372 if (ep->dev->enhanced_mode) set_max_speed()
373 reg = ep_enhanced[ep->num]; set_max_speed()
375 reg = (ep->num + 1) * 0x10; set_max_speed()
376 if (ep->dev->gadget.speed != USB_SPEED_HIGH) set_max_speed()
380 set_idx_reg(ep->dev->regs, reg, max); set_max_speed()
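net2280.h is a header full of static inline register helpers: each behavioral tweak (halt, NAK control, status-phase handshake) is a single BIT()-mask write to the endpoint's ep_rsp response register, sometimes paired with a software flag such as ep->stopped. A self-contained approximation of two of those helpers; the bit numbers here are invented for illustration, the real ones come from the chip's register map:

#include <stdint.h>

#define SK_BIT(n) (1u << (n))

struct sk_ep_regs {
	volatile uint32_t ep_rsp;   /* endpoint response-control register */
};

struct sk_net_ep {
	struct sk_ep_regs *regs;
	int stopped;
};

/* Halt the endpoint: one write of a set-bit to ep_rsp. */
static inline void sk_set_halt(struct sk_net_ep *ep)
{
	ep->regs->ep_rsp = SK_BIT(3);        /* "set endpoint halt" */
}

/* Let the chip finish the control status stage by itself, then stop
 * software processing on ep0 -- mirroring allow_status() above. */
static inline void sk_allow_status(struct sk_net_ep *ep)
{
	ep->regs->ep_rsp = SK_BIT(5);        /* handshake-clear bit */
	ep->stopped = 1;
}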
H A Ddummy_hcd.c88 struct usb_ep ep; member in struct:dummy_ep
97 struct list_head queue; /* ep's requests */
103 return container_of(_ep, struct dummy_ep, ep); usb_ep_to_dummy_ep()
119 * number can be changed. Names like "ep-a" are used for this type.
254 struct dummy_ep ep[DUMMY_ENDPOINTS]; member in struct:dummy
291 static inline struct dummy *ep_to_dummy(struct dummy_ep *ep) ep_to_dummy() argument
293 return container_of(ep->gadget, struct dummy, gadget); ep_to_dummy()
315 static void nuke(struct dummy *dum, struct dummy_ep *ep) nuke() argument
317 while (!list_empty(&ep->queue)) { nuke()
320 req = list_entry(ep->queue.next, struct dummy_request, queue); nuke()
325 usb_gadget_giveback_request(&ep->ep, &req->req); nuke()
333 struct dummy_ep *ep; stop_activity() local
341 list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list) stop_activity()
342 nuke(dum, ep); stop_activity()
485 struct dummy_ep *ep; dummy_enable() local
489 ep = usb_ep_to_dummy_ep(_ep); dummy_enable()
490 if (!_ep || !desc || ep->desc || _ep->name == ep0name dummy_enable()
493 dum = ep_to_dummy(ep); dummy_enable()
519 if (strstr(ep->ep.name, "-iso") dummy_enable()
520 || strstr(ep->ep.name, "-int")) { dummy_enable()
542 if (strstr(ep->ep.name, "-iso")) /* bulk is ok */ dummy_enable()
562 if (strstr(ep->ep.name, "-bulk") dummy_enable()
563 || strstr(ep->ep.name, "-int")) dummy_enable()
589 "non-bulk ep %s\n", _ep->name); dummy_enable()
592 ep->stream_en = 1; dummy_enable()
594 ep->desc = desc; dummy_enable()
596 dev_dbg(udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d stream %s\n", dummy_enable()
615 max, ep->stream_en ? "enabled" : "disabled"); dummy_enable()
620 ep->halted = ep->wedged = 0; dummy_enable()
628 struct dummy_ep *ep; dummy_disable() local
632 ep = usb_ep_to_dummy_ep(_ep); dummy_disable()
633 if (!_ep || !ep->desc || _ep->name == ep0name) dummy_disable()
635 dum = ep_to_dummy(ep); dummy_disable()
638 ep->desc = NULL; dummy_disable()
639 ep->stream_en = 0; dummy_disable()
640 nuke(dum, ep); dummy_disable()
650 struct dummy_ep *ep; dummy_alloc_request() local
655 ep = usb_ep_to_dummy_ep(_ep); dummy_alloc_request()
678 static void fifo_complete(struct usb_ep *ep, struct usb_request *req) fifo_complete() argument
685 struct dummy_ep *ep; dummy_queue() local
695 ep = usb_ep_to_dummy_ep(_ep); dummy_queue()
696 if (!_ep || (!ep->desc && _ep->name != ep0name)) dummy_queue()
699 dum = ep_to_dummy(ep); dummy_queue()
705 dev_dbg(udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n", dummy_queue()
706 ep, _req, _ep->name, _req->length, _req->buf); dummy_queue()
713 if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) && dummy_queue()
715 list_empty(&ep->queue) && dummy_queue()
724 list_add_tail(&req->queue, &ep->queue); dummy_queue()
731 list_add_tail(&req->queue, &ep->queue); dummy_queue()
742 struct dummy_ep *ep; dummy_dequeue() local
750 ep = usb_ep_to_dummy_ep(_ep); dummy_dequeue()
751 dum = ep_to_dummy(ep); dummy_dequeue()
758 list_for_each_entry(req, &ep->queue, queue) { dummy_dequeue()
781 struct dummy_ep *ep; dummy_set_halt_and_wedge() local
786 ep = usb_ep_to_dummy_ep(_ep); dummy_set_halt_and_wedge()
787 dum = ep_to_dummy(ep); dummy_set_halt_and_wedge()
791 ep->halted = ep->wedged = 0; dummy_set_halt_and_wedge()
792 else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) && dummy_set_halt_and_wedge()
793 !list_empty(&ep->queue)) dummy_set_halt_and_wedge()
796 ep->halted = 1; dummy_set_halt_and_wedge()
798 ep->wedged = 1; dummy_set_halt_and_wedge()
881 dum->ep[0].ep.maxpacket = 9; dummy_udc_update_ep0()
883 dum->ep[0].ep.maxpacket = 64; dummy_udc_update_ep0()
1003 struct dummy_ep *ep = &dum->ep[i]; init_dummy_udc_hw() local
1007 ep->ep.name = ep_info[i].name; init_dummy_udc_hw()
1008 ep->ep.caps = ep_info[i].caps; init_dummy_udc_hw()
1009 ep->ep.ops = &dummy_ep_ops; init_dummy_udc_hw()
1010 list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list); init_dummy_udc_hw()
1011 ep->halted = ep->wedged = ep->already_seen = init_dummy_udc_hw()
1012 ep->setup_stage = 0; init_dummy_udc_hw()
1013 usb_ep_set_maxpacket_limit(&ep->ep, ~0); init_dummy_udc_hw()
1014 ep->ep.max_streams = 16; init_dummy_udc_hw()
1015 ep->last_io = jiffies; init_dummy_udc_hw()
1016 ep->gadget = &dum->gadget; init_dummy_udc_hw()
1017 ep->desc = NULL; init_dummy_udc_hw()
1018 INIT_LIST_HEAD(&ep->queue); init_dummy_udc_hw()
1021 dum->gadget.ep0 = &dum->ep[0].ep; init_dummy_udc_hw()
1022 list_del_init(&dum->ep[0].ep.ep_list); init_dummy_udc_hw()
1135 const struct usb_endpoint_descriptor *desc = &urb->ep->desc; dummy_ep_stream_en()
1347 struct dummy_ep *ep, int limit, int *status) transfer()
1355 list_for_each_entry(req, &ep->queue, queue) { transfer()
1365 /* 1..N packets of ep->ep.maxpacket each ... the last one transfer()
1383 if (limit < ep->ep.maxpacket && limit < len) transfer()
1390 if (len >= ep->ep.maxpacket) { transfer()
1392 if (len % ep->ep.maxpacket) transfer()
1394 len -= len % ep->ep.maxpacket; transfer()
1401 ep->last_io = jiffies; transfer()
1462 usb_gadget_giveback_request(&ep->ep, &req->req); transfer()
1480 static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep) periodic_bytes() argument
1482 int limit = ep->ep.maxpacket; periodic_bytes()
1488 tmp = usb_endpoint_maxp(ep->desc); periodic_bytes()
1494 switch (usb_endpoint_type(ep->desc)) { periodic_bytes()
1524 return &dum->ep[0]; find_endpoint()
1526 struct dummy_ep *ep = &dum->ep[i]; find_endpoint() local
1528 if (!ep->desc) find_endpoint()
1530 if (ep->desc->bEndpointAddress == address) find_endpoint()
1531 return ep; find_endpoint()
1626 if (!ep2 || ep2->ep.name == ep0name) { handle_control_request()
1768 dum->ep[i].already_seen = 0; dummy_timer()
1776 struct dummy_ep *ep = NULL; dummy_timer() local
1794 /* find the gadget's ep for this request (if configured) */ dummy_timer()
1798 ep = find_endpoint(dum, address); dummy_timer()
1799 if (!ep) { dummy_timer()
1802 "no ep configured for urb %p\n", dummy_timer()
1808 if (ep->already_seen) dummy_timer()
1810 ep->already_seen = 1; dummy_timer()
1811 if (ep == &dum->ep[0] && urb->error_count) { dummy_timer()
1812 ep->setup_stage = 1; /* a new urb */ dummy_timer()
1815 if (ep->halted && !ep->setup_stage) { dummy_timer()
1817 dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n", dummy_timer()
1818 ep->ep.name, urb); dummy_timer()
1825 if (ep == &dum->ep[0] && ep->setup_stage) { dummy_timer()
1831 list_for_each_entry(req, &ep->queue, queue) { dummy_timer()
1838 usb_gadget_giveback_request(&ep->ep, &req->req); dummy_timer()
1840 ep->already_seen = 0; dummy_timer()
1848 ep->last_io = jiffies; dummy_timer()
1849 ep->setup_stage = 0; dummy_timer()
1850 ep->halted = 0; dummy_timer()
1890 * complete whether or not ep has requests queued. dummy_timer()
1893 limit = max(limit, periodic_bytes(dum, ep)); dummy_timer()
1901 limit = max(limit, periodic_bytes(dum, ep)); dummy_timer()
1906 ep->last_io = jiffies; dummy_timer()
1907 total -= transfer(dum_hcd, urb, ep, limit, &status); dummy_timer()
1918 if (ep) dummy_timer()
1919 ep->already_seen = ep->setup_stage = 0; dummy_timer()
2328 int ep = usb_pipeendpoint(urb->pipe); show_urb() local
2331 "urb/%p %s ep%d%s%s len %d/%d\n", show_urb()
2351 ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "", show_urb()
1346 transfer(struct dummy_hcd *dum_hcd, struct urb *urb, struct dummy_ep *ep, int limit, int *status) transfer() argument
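
A minimal userspace sketch of the packet-clipping rule visible in transfer() above (lines 1383-1394 of dummy_hcd.c): a chunk is trimmed to whole ep.maxpacket units, so only the tail of a request may move as a short packet. The function name and the harness are illustrative, not the driver's.

    /* Sketch only: clip one transfer chunk to whole max-packet units. */
    #include <stdio.h>

    static unsigned int clip_to_packets(unsigned int len, unsigned int maxpacket)
    {
            if (len >= maxpacket)
                    len -= len % maxpacket; /* keep only whole packets */
            return len;
    }

    int main(void)
    {
            /* 1000 bytes over a 64-byte pipe: 960 go now, 40 wait */
            printf("%u\n", clip_to_packets(1000, 64));
            return 0;
    }
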
H A Dfsl_udc_core.c161 static void done(struct fsl_ep *ep, struct fsl_req *req, int status) fsl_set_accessors()
162 __releases(ep->udc->lock) fsl_set_accessors()
163 __acquires(ep->udc->lock) fsl_set_accessors()
166 unsigned char stopped = ep->stopped; fsl_set_accessors()
170 udc = (struct fsl_udc *)ep->udc; fsl_set_accessors()
190 usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); fsl_set_accessors()
194 ep->ep.name, &req->req, status, fsl_set_accessors()
197 ep->stopped = 1; fsl_set_accessors()
199 spin_unlock(&ep->udc->lock); fsl_set_accessors()
201 usb_gadget_giveback_request(&ep->ep, &req->req); fsl_set_accessors()
203 spin_lock(&ep->udc->lock); fsl_set_accessors()
204 ep->stopped = stopped; fsl_set_accessors()
208 * nuke(): delete all requests related to this ep
211 static void nuke(struct fsl_ep *ep, int status) nuke() argument
213 ep->stopped = 1; nuke()
216 fsl_ep_fifo_flush(&ep->ep); nuke()
219 while (!list_empty(&ep->queue)) { nuke()
222 req = list_entry(ep->queue.next, struct fsl_req, queue); nuke()
223 done(ep, req, status); nuke()
463 /* Get stall status of a specific ep
496 /* Interrupt On Setup (IOS). for control ep */ struct_ep_qh_setup()
509 VDBG("error ep type is %d", ep_type); struct_ep_qh_setup()
520 /* Setup qh structure and ep register for ep0. */ ep0_setup()
523 /* the initialization of an ep includes: fields in QH, Regs, ep0_setup()
550 struct fsl_ep *ep = NULL; fsl_ep_enable() local
556 ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_enable()
563 udc = ep->udc; fsl_ep_enable()
599 ep->ep.maxpacket = max; fsl_ep_enable()
600 ep->ep.desc = desc; fsl_ep_enable()
601 ep->stopped = 0; fsl_ep_enable()
606 struct_ep_qh_setup(udc, (unsigned char) ep_index(ep), fsl_ep_enable()
614 dr_ep_setup((unsigned char) ep_index(ep), fsl_ep_enable()
623 VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name, fsl_ep_enable()
624 ep->ep.desc->bEndpointAddress & 0x0f, fsl_ep_enable()
632 * @ep : the ep being unconfigured. May not be ep0
638 struct fsl_ep *ep = NULL; fsl_ep_disable() local
643 ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_disable()
644 if (!_ep || !ep->ep.desc) { fsl_ep_disable()
645 VDBG("%s not enabled", _ep ? ep->ep.name : NULL); fsl_ep_disable()
649 /* disable ep on controller */ fsl_ep_disable()
650 ep_num = ep_index(ep); fsl_ep_disable()
652 if (ep_is_in(ep)) { fsl_ep_disable()
661 udc = (struct fsl_udc *)ep->udc; fsl_ep_disable()
665 nuke(ep, -ESHUTDOWN); fsl_ep_disable()
667 ep->ep.desc = NULL; fsl_ep_disable()
668 ep->stopped = 1; fsl_ep_disable()
706 static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td) fsl_prime_ep() argument
708 struct ep_queue_head *qh = get_qh_by_ep(ep); fsl_prime_ep()
722 fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16)) fsl_prime_ep()
723 : (1 << (ep_index(ep))), &dr_regs->endpointprime); fsl_prime_ep()
727 static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) fsl_queue_td() argument
732 VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */ fsl_queue_td()
734 bitmask = ep_is_in(ep) fsl_queue_td()
735 ? (1 << (ep_index(ep) + 16)) fsl_queue_td()
736 : (1 << (ep_index(ep))); fsl_queue_td()
739 if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) { fsl_queue_td()
742 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); fsl_queue_td()
769 fsl_prime_ep(ep, req->head); fsl_queue_td()
810 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) fsl_build_dtd()
874 struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_queue() local
886 if (unlikely(!_ep || !ep->ep.desc)) { fsl_ep_queue()
887 VDBG("%s, bad ep", __func__); fsl_ep_queue()
890 if (usb_endpoint_xfer_isoc(ep->ep.desc)) { fsl_ep_queue()
891 if (req->req.length > ep->ep.maxpacket) fsl_ep_queue()
895 udc = ep->udc; fsl_ep_queue()
899 req->ep = ep; fsl_ep_queue()
901 ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); fsl_ep_queue()
912 fsl_queue_td(ep, req); fsl_ep_queue()
919 list_add_tail(&req->queue, &ep->queue); fsl_ep_queue()
928 struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_dequeue() local
937 spin_lock_irqsave(&ep->udc->lock, flags); fsl_ep_dequeue()
938 stopped = ep->stopped; fsl_ep_dequeue()
940 /* Stop the ep before we deal with the queue */ fsl_ep_dequeue()
941 ep->stopped = 1; fsl_ep_dequeue()
942 ep_num = ep_index(ep); fsl_ep_dequeue()
944 if (ep_is_in(ep)) fsl_ep_dequeue()
951 list_for_each_entry(req, &ep->queue, queue) { fsl_ep_dequeue()
961 if (ep->queue.next == &req->queue) { fsl_ep_dequeue()
965 /* The request isn't the last request in this ep queue */ fsl_ep_dequeue()
966 if (req->queue.next != &ep->queue) { fsl_ep_dequeue()
973 fsl_prime_ep(ep, next_req->head); fsl_ep_dequeue()
983 done(ep, req, -ECONNRESET); fsl_ep_dequeue()
987 if (ep_is_in(ep)) fsl_ep_dequeue()
992 ep->stopped = stopped; fsl_ep_dequeue()
994 spin_unlock_irqrestore(&ep->udc->lock, flags); fsl_ep_dequeue()
1002 * @ep: the non-isochronous endpoint being stalled
1008 struct fsl_ep *ep = NULL; fsl_ep_set_halt() local
1014 ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_set_halt()
1015 udc = ep->udc; fsl_ep_set_halt()
1016 if (!_ep || !ep->ep.desc) { fsl_ep_set_halt()
1021 if (usb_endpoint_xfer_isoc(ep->ep.desc)) { fsl_ep_set_halt()
1026 /* Attempt to halt IN ep will fail if any transfer requests fsl_ep_set_halt()
1028 if (value && ep_is_in(ep) && !list_empty(&ep->queue)) { fsl_ep_set_halt()
1034 ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV; fsl_ep_set_halt()
1035 ep_num = (unsigned char)(ep_index(ep)); fsl_ep_set_halt()
1036 spin_lock_irqsave(&ep->udc->lock, flags); fsl_ep_set_halt()
1038 spin_unlock_irqrestore(&ep->udc->lock, flags); fsl_ep_set_halt()
1040 if (ep_index(ep) == 0) { fsl_ep_set_halt()
1045 VDBG(" %s %s halt stat %d", ep->ep.name, fsl_ep_set_halt()
1053 struct fsl_ep *ep; fsl_ep_fifo_status() local
1059 ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_fifo_status()
1060 if (!_ep || (!ep->ep.desc && ep_index(ep) != 0)) fsl_ep_fifo_status()
1063 udc = (struct fsl_udc *)ep->udc; fsl_ep_fifo_status()
1068 qh = get_qh_by_ep(ep); fsl_ep_fifo_status()
1070 bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) : fsl_ep_fifo_status()
1071 (1 << (ep_index(ep))); fsl_ep_fifo_status()
1083 struct fsl_ep *ep; fsl_ep_fifo_flush() local
1092 ep = container_of(_ep, struct fsl_ep, ep); fsl_ep_fifo_flush()
1093 if (!ep->ep.desc) fsl_ep_fifo_flush()
1096 ep_num = ep_index(ep); fsl_ep_fifo_flush()
1097 ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV; fsl_ep_fifo_flush()
1113 ERR("ep flush timeout\n"); fsl_ep_fifo_flush()
1270 struct fsl_ep *ep; ep0_prime_status() local
1278 ep = &udc->eps[0]; ep0_prime_status()
1282 req->ep = ep; ep0_prime_status()
1289 ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); ep0_prime_status()
1294 fsl_queue_td(ep, req); ep0_prime_status()
1298 list_add_tail(&req->queue, &ep->queue); ep0_prime_status()
1305 struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); udc_reset_ep_queue() local
1307 if (ep->name) udc_reset_ep_queue()
1308 nuke(ep, -ESHUTDOWN); udc_reset_ep_queue()
1333 struct fsl_ep *ep; ch9getstatus() local
1336 ep = &udc->eps[0]; ch9getstatus()
1353 if (!target_ep->ep.desc) ch9getstatus()
1365 req->ep = ep; ch9getstatus()
1372 ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); ch9getstatus()
1378 fsl_queue_td(ep, req); ch9getstatus()
1382 list_add_tail(&req->queue, &ep->queue); ch9getstatus()
1431 struct fsl_ep *ep; variable in typeref:struct:fsl_ep
1435 ep = get_ep_by_pipe(udc, pipe);
1438 rc = fsl_ep_set_halt(&ep->ep,
1694 /* If the ep is configured */ dtd_complete_irq()
1705 VDBG("status of process_ep_req= %d, ep = %d", dtd_complete_irq()
1773 /* Clear up all ep queues */ reset_queues()
1887 /* Setup package, we only support ep0 as control ep */ fsl_udc_irq()
2002 ep.ep_list) fsl_udc_stop()
2025 struct fsl_ep *ep = NULL; fsl_proc_read() local
2147 "Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n", fsl_proc_read()
2168 ep = &udc->eps[0]; fsl_proc_read()
2170 ep->ep.name, ep_maxpacket(ep), ep_index(ep)); fsl_proc_read()
2172 if (list_empty(&ep->queue)) { fsl_proc_read()
2175 list_for_each_entry(req, &ep->queue, queue) { fsl_proc_read()
2182 /* other gadget->eplist ep */ fsl_proc_read()
2183 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { fsl_proc_read()
2184 if (ep->ep.desc) { fsl_proc_read()
2188 ep->ep.name, ep_maxpacket(ep), fsl_proc_read()
2189 ep_index(ep)); fsl_proc_read()
2191 if (list_empty(&ep->queue)) { fsl_proc_read()
2194 list_for_each_entry(req, &ep->queue, queue) { fsl_proc_read()
2200 } /* end for each_entry of ep req */ fsl_proc_read()
2202 } /* end for if(ep->queue) */ fsl_proc_read()
2203 } /* end (ep->desc) */ fsl_proc_read()
2284 /* FIXME: fsl_alloc_request() ignores ep argument */ struct_udc_setup()
2300 * Link fsl_ep->ep to gadget->ep_list
2307 struct fsl_ep *ep = &udc->eps[index]; struct_ep_setup() local
2309 ep->udc = udc; struct_ep_setup()
2310 strcpy(ep->name, name); struct_ep_setup()
2311 ep->ep.name = ep->name; struct_ep_setup()
2313 ep->ep.ops = &fsl_ep_ops; struct_ep_setup()
2314 ep->stopped = 0; struct_ep_setup()
2317 ep->ep.caps.type_control = true; struct_ep_setup()
2319 ep->ep.caps.type_iso = true; struct_ep_setup()
2320 ep->ep.caps.type_bulk = true; struct_ep_setup()
2321 ep->ep.caps.type_int = true; struct_ep_setup()
2325 ep->ep.caps.dir_in = true; struct_ep_setup()
2327 ep->ep.caps.dir_out = true; struct_ep_setup()
2332 usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); struct_ep_setup()
2334 /* the queue lists any req for this ep */ struct_ep_setup()
2335 INIT_LIST_HEAD(&ep->queue); struct_ep_setup()
2339 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); struct_ep_setup()
2340 ep->gadget = &udc->gadget; struct_ep_setup()
2341 ep->qh = &udc->ep_qh[index]; struct_ep_setup()
2430 /* DEN is bidirectional ep number, max_ep doubles the number */ fsl_udc_probe()
2467 udc_controller->gadget.ep0 = &udc_controller->eps[0].ep; fsl_udc_probe()
2487 udc_controller->eps[0].ep.desc = &fsl_ep0_desc; fsl_udc_probe()
2488 usb_ep_set_maxpacket_limit(&udc_controller->eps[0].ep, fsl_udc_probe()
2496 sprintf(name, "ep%dout", i); fsl_udc_probe()
2498 sprintf(name, "ep%din", i); fsl_udc_probe()
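
The done()/nuke() pair excerpted above is the queue-teardown idiom most of these UDC drivers share: mark the endpoint stopped, then pop every queued request and complete it with one status. A hedged userspace rendering, with a plain singly linked list standing in for the kernel's list_head:

    #include <stdio.h>
    #include <errno.h>

    struct req {
            struct req *next;
            void (*complete)(struct req *r, int status);
    };

    struct ep {
            int stopped;
            struct req *queue;
    };

    static void nuke(struct ep *ep, int status)
    {
            ep->stopped = 1;
            while (ep->queue) {
                    struct req *r = ep->queue;

                    ep->queue = r->next;
                    r->complete(r, status); /* e.g. -ESHUTDOWN on disable */
            }
    }

    static void done_cb(struct req *r, int status)
    {
            (void)r;
            printf("completed with status %d\n", status);
    }

    int main(void)
    {
            struct req r2 = { NULL, done_cb }, r1 = { &r2, done_cb };
            struct ep ep = { 0, &r1 };

            nuke(&ep, -ESHUTDOWN);
            return 0;
    }
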
H A Dpxa27x_udc.h178 #define ofs_UDCCR(ep) (UDCCRn(ep->idx))
179 #define ofs_UDCCSR(ep) (UDCCSRn(ep->idx))
180 #define ofs_UDCBCR(ep) (UDCBCRn(ep->idx))
181 #define ofs_UDCDR(ep) (UDCDRn(ep->idx))
184 #define udc_ep_readl(ep, reg) \
185 __raw_readl((ep)->dev->regs + ofs_##reg(ep))
186 #define udc_ep_writel(ep, reg, value) \
187 __raw_writel((value), ep->dev->regs + ofs_##reg(ep))
188 #define udc_ep_readb(ep, reg) \
189 __raw_readb((ep)->dev->regs + ofs_##reg(ep))
190 #define udc_ep_writeb(ep, reg, value) \
191 __raw_writeb((value), ep->dev->regs + ofs_##reg(ep))
202 #define EPIDX(ep) (ep->idx)
203 #define EPADDR(ep) (ep->addr)
204 #define EPXFERTYPE(ep) (ep->type)
205 #define EPNAME(ep) (ep->name)
206 #define is_ep0(ep) (!ep->idx)
207 #define EPXFERTYPE_is_ISO(ep) (EPXFERTYPE(ep) == USB_ENDPOINT_XFER_ISOC)
224 * - "ep1-in" matches pxa endpoint EPA (which is an IN ep at addr 1, when
226 * - "ep1-in" matches pxa endpoint EPB (which is an IN ep at addr 1, when
228 * - "ep1-in" matches pxa endpoint EPC (which is an IN ep at addr 1, when
254 #define USB_EP_IN_BULK(n) USB_EP_BULK(n, "ep" #n "in-bulk", 1, \
256 #define USB_EP_OUT_BULK(n) USB_EP_BULK(n, "ep" #n "out-bulk", 0, \
258 #define USB_EP_IN_ISO(n) USB_EP_ISO(n, "ep" #n "in-iso", 1, \
260 #define USB_EP_OUT_ISO(n) USB_EP_ISO(n, "ep" #n "out-iso", 0, \
262 #define USB_EP_IN_INT(n) USB_EP_INT(n, "ep" #n "in-int", 1, \
270 .name = "ep" #_idx, \
498 #define ep_dbg(ep, fmt, arg...) \
499 dev_dbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
500 #define ep_vdbg(ep, fmt, arg...) \
501 dev_vdbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
502 #define ep_err(ep, fmt, arg...) \
503 dev_err(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
504 #define ep_info(ep, fmt, arg...) \
505 dev_info(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
506 #define ep_warn(ep, fmt, arg...) \
507 dev_warn(ep->dev->dev, "%s:%s:" fmt, EPNAME(ep), __func__, ## arg)
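
The ofs_##reg token pasting in the accessors above turns a register mnemonic into a per-endpoint offset at preprocessing time: udc_ep_readl(ep, UDCCSR) expands through ofs_UDCCSR(ep) to an index derived from ep->idx. A compilable sketch of the same trick, with an array standing in for the memory-mapped window and a made-up register layout:

    #include <stdio.h>
    #include <stdint.h>

    #define UDCCSRn(x)      (0x0100 + 4 * (x))      /* hypothetical layout */
    #define ofs_UDCCSR(ep)  (UDCCSRn((ep)->idx))
    #define udc_ep_readl(ep, reg) ((ep)->regs[ofs_##reg(ep) / 4])

    struct pxa_ep { uint32_t *regs; int idx; };

    int main(void)
    {
            uint32_t regs[0x200 / 4] = { 0 };
            struct pxa_ep ep = { regs, 3 };

            regs[ofs_UDCCSR(&ep) / 4] = 0xdeadbeef;
            printf("0x%x\n", (unsigned)udc_ep_readl(&ep, UDCCSR));
            return 0;
    }
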
H A Dmv_u3d.h75 /* ep control register */
117 /* ep direction */
120 #define mv_u3d_ep_dir(ep) (((ep)->ep_num == 0) ? \
121 ((ep)->u3d->ep0_dir) : ((ep)->direction))
146 u32 epxoutcr0; /* ep out control 0 register */
147 u32 epxoutcr1; /* ep out control 1 register */
148 u32 epxincr0; /* ep in control 0 register */
149 u32 epxincr1; /* ep in control 1 register */
190 struct epxcr epcr[16]; /* ep control register */
291 struct usb_ep ep; member in struct:mv_u3d_ep
293 struct list_head queue; /* ep request queued on hardware */
294 struct list_head req_list; /* list of ep request */
295 struct mv_u3d_ep_context *ep_context; /* ep context */
298 u32 processing; /* there is ep request
300 spinlock_t req_lock; /* ep lock */
310 struct mv_u3d_ep *ep; member in struct:mv_u3d_req
311 struct list_head queue; /* ep request queued on hardware */
312 struct list_head list; /* ep request list */
H A Datmel_usba_udc.h208 #define usba_ep_readl(ep, reg) \
209 usba_io_readl((ep)->ep_regs + USBA_EPT_##reg)
210 #define usba_ep_writel(ep, reg, value) \
211 usba_io_writel((value), (ep)->ep_regs + USBA_EPT_##reg)
212 #define usba_dma_readl(ep, reg) \
213 usba_io_readl((ep)->dma_regs + USBA_DMA_##reg)
214 #define usba_dma_writel(ep, reg, value) \
215 usba_io_writel((value), (ep)->dma_regs + USBA_DMA_##reg)
283 struct usb_ep ep; member in struct:usba_ep
359 static inline struct usba_ep *to_usba_ep(struct usb_ep *ep) to_usba_ep() argument
361 return container_of(ep, struct usba_ep, ep); to_usba_ep()
374 #define ep_is_control(ep) ((ep)->index == 0)
375 #define ep_is_idle(ep) ((ep)->state == EP_STATE_IDLE)
H A Dfotg210.h152 #define FOTG210_INEPMPSR(ep) (0x160 + 4 * ((ep) - 1))
158 #define FOTG210_OUTEPMPSR(ep) (0x180 + 4 * ((ep) - 1))
162 #define EPMAP_FIFONO(ep, dir) \
163 ((((ep) - 1) << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
164 #define EPMAP_FIFONOMSK(ep, dir) \
165 ((3 << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
173 #define FIFOMAP_EPNO(ep) ((ep) << ((ep) - 1) * 8)
174 #define FIFOMAP_EPNOMSK(ep) (0xF << ((ep) - 1) * 8)
220 struct usb_ep ep; member in struct:fotg210_ep
244 struct fotg210_ep *ep[FOTG210_MAX_NUM_EP]; member in struct:fotg210_udc
H A Dnet2272.h433 struct usb_ep ep; member in struct:net2272_ep
457 struct net2272_ep ep[4]; member in struct:net2272
537 net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value) net2272_ep_write() argument
539 struct net2272 *dev = ep->dev; net2272_ep_write()
541 if (dev->pagesel != ep->num) { net2272_ep_write()
542 net2272_write(dev, PAGESEL, ep->num); net2272_ep_write()
543 dev->pagesel = ep->num; net2272_ep_write()
549 net2272_ep_read(struct net2272_ep *ep, unsigned int reg) net2272_ep_read() argument
551 struct net2272 *dev = ep->dev; net2272_ep_read()
553 if (dev->pagesel != ep->num) { net2272_ep_read()
554 net2272_write(dev, PAGESEL, ep->num); net2272_ep_read()
555 dev->pagesel = ep->num; net2272_ep_read()
560 static void allow_status(struct net2272_ep *ep) allow_status() argument
563 net2272_ep_write(ep, EP_RSPCLR, allow_status()
567 ep->stopped = 1; allow_status()
570 static void set_halt(struct net2272_ep *ep) set_halt() argument
573 net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE); set_halt()
574 net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT); set_halt()
577 static void clear_halt(struct net2272_ep *ep) clear_halt() argument
580 net2272_ep_write(ep, EP_RSPCLR, clear_halt()
585 static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count) set_fifo_bytecount() argument
588 net2272_ep_write(ep, EP_TRANSFER2, count >> 16); set_fifo_bytecount()
589 net2272_ep_write(ep, EP_TRANSFER1, count >> 8); set_fifo_bytecount()
590 net2272_ep_write(ep, EP_TRANSFER0, count); set_fifo_bytecount()
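
net2272_ep_write()/read() above show banked register access: endpoint registers sit behind a PAGESEL window, and the driver caches the last page it wrote so back-to-back accesses to the same endpoint skip the extra bus write. A reduced sketch of just the caching, with a counter in place of the real register write:

    #include <stdio.h>

    struct chip {
            int pagesel;    /* last page written to hardware */
            int writes;     /* counts real PAGESEL writes */
    };

    static void select_page(struct chip *c, int num)
    {
            if (c->pagesel != num) {
                    c->pagesel = num;       /* would write PAGESEL here */
                    c->writes++;
            }
    }

    int main(void)
    {
            struct chip c = { -1, 0 };

            select_page(&c, 2);
            select_page(&c, 2);     /* cached: no second write */
            select_page(&c, 5);
            printf("%d PAGESEL writes\n", c.writes);        /* prints 2 */
            return 0;
    }
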
H A Ds3c-hsudc.c101 * @ep: USB gadget layer representation of device endpoint.
102 * @name: Endpoint name (as required by ep autoconfiguration).
111 struct usb_ep ep; member in struct:s3c_hsudc_ep
141 * ep: List of endpoints supported by the controller.
155 struct s3c_hsudc_ep ep[]; member in struct:s3c_hsudc
158 #define ep_maxpacket(_ep) ((_ep)->ep.maxpacket)
171 static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep) our_ep() argument
173 return container_of(ep, struct s3c_hsudc_ep, ep); our_ep()
261 usb_gadget_giveback_request(&hsep->ep, &hsreq->req); s3c_hsudc_complete_request()
297 hsep = &hsudc->ep[epnum]; s3c_hsudc_stop_activity()
400 is_short = (rlen < hsep->ep.maxpacket); s3c_hsudc_read_fifo()
432 struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx]; s3c_hsudc_epin_intr()
465 struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx]; s3c_hsudc_epout_intr()
569 hsep = &hsudc->ep[ep_num]; s3c_hsudc_handle_reqfeat()
573 s3c_hsudc_set_halt(&hsep->ep, set); s3c_hsudc_handle_reqfeat()
591 struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0]; s3c_hsudc_process_req_status()
608 hsep = &hsudc->ep[epnum]; s3c_hsudc_process_req_status()
630 struct s3c_hsudc_ep *hsep = &hsudc->ep[0]; s3c_hsudc_process_setup()
680 s3c_hsudc_set_halt(&hsep->ep, 1); s3c_hsudc_process_setup()
696 struct s3c_hsudc_ep *hsep = &hsudc->ep[0]; s3c_hsudc_handle_ep0_intr()
782 hsep->ep.desc = desc; s3c_hsudc_ep_enable()
783 hsep->ep.maxpacket = usb_endpoint_maxp(desc); s3c_hsudc_ep_enable()
805 if (!_ep || !hsep->ep.desc) s3c_hsudc_ep_disable()
815 hsep->ep.desc = NULL; s3c_hsudc_ep_disable()
844 * @ep: Endpoint for which request is deallocated (not used).
849 static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req) s3c_hsudc_free_request() argument
936 if (!_ep || hsep->ep.name == ep0name) s3c_hsudc_dequeue()
990 snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir); s3c_hsudc_initep()
995 INIT_LIST_HEAD(&hsep->ep.ep_list); s3c_hsudc_initep()
997 list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list); s3c_hsudc_initep()
1000 hsep->ep.name = hsep->name; s3c_hsudc_initep()
1001 usb_ep_set_maxpacket_limit(&hsep->ep, epnum ? 512 : 64); s3c_hsudc_initep()
1002 hsep->ep.ops = &s3c_hsudc_ep_ops; s3c_hsudc_initep()
1004 hsep->ep.desc = NULL; s3c_hsudc_initep()
1009 hsep->ep.caps.type_control = true; s3c_hsudc_initep()
1010 hsep->ep.caps.dir_in = true; s3c_hsudc_initep()
1011 hsep->ep.caps.dir_out = true; s3c_hsudc_initep()
1013 hsep->ep.caps.type_iso = true; s3c_hsudc_initep()
1014 hsep->ep.caps.type_bulk = true; s3c_hsudc_initep()
1015 hsep->ep.caps.type_int = true; s3c_hsudc_initep()
1019 hsep->ep.caps.dir_in = true; s3c_hsudc_initep()
1021 hsep->ep.caps.dir_out = true; s3c_hsudc_initep()
1024 writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR); s3c_hsudc_initep()
1040 s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum); s3c_hsudc_setup_ep()
1117 hsep = &hsudc->ep[ep_idx]; s3c_hsudc_irq()
1136 hsep = &hsudc->ep[ep_idx]; s3c_hsudc_irq()
1309 hsudc->gadget.ep0 = &hsudc->ep[0].ep; s3c_hsudc_probe()
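
our_ep() here, like usb_ep_to_dummy_ep() and to_usba_ep() earlier in these results, is the container_of() idiom: the generic struct usb_ep is embedded in the driver's endpoint, so the outer struct is recovered by subtracting the member offset from the pointer the gadget core hands back. A standalone illustration with a local container_of():

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct usb_ep { const char *name; };
    struct s3c_hsudc_ep { int stopped; struct usb_ep ep; };

    static struct s3c_hsudc_ep *our_ep(struct usb_ep *ep)
    {
            return container_of(ep, struct s3c_hsudc_ep, ep);
    }

    int main(void)
    {
            struct s3c_hsudc_ep hsep = { 0, { "ep1in" } };

            printf("%d\n", our_ep(&hsep.ep) == &hsep);      /* prints 1 */
            return 0;
    }
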
H A Ds3c2410_udc.h22 struct usb_ep ep; member in struct:s3c2410_ep
56 struct list_head queue; /* ep's requests */
79 struct s3c2410_ep ep[S3C2410_ENDPOINTS]; member in struct:s3c2410_udc
H A Dbcm63xx_udc.c82 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
237 * @ep: USB gadget layer representation of the EP.
245 struct usb_ep ep; member in struct:bcm63xx_ep
260 struct list_head queue; /* ep's requests */
353 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep) our_ep() argument
355 return container_of(ep, struct bcm63xx_ep, ep); our_ep()
577 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt); bcm63xx_ep_setup()
966 bep->ep.name = bcm63xx_ep_info[i].name; bcm63xx_init_udc_hw()
967 bep->ep.caps = bcm63xx_ep_info[i].caps; bcm63xx_init_udc_hw()
969 bep->ep.ops = &bcm63xx_udc_ep_ops; bcm63xx_init_udc_hw()
970 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list); bcm63xx_init_udc_hw()
972 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT); bcm63xx_init_udc_hw()
974 bep->ep.desc = NULL; bcm63xx_init_udc_hw()
978 udc->gadget.ep0 = &udc->bep[0].ep; bcm63xx_init_udc_hw()
979 list_del(&udc->bep[0].ep.ep_list); bcm63xx_init_udc_hw()
1035 * @ep: Endpoint to enable.
1041 static int bcm63xx_ep_enable(struct usb_ep *ep, bcm63xx_ep_enable() argument
1044 struct bcm63xx_ep *bep = our_ep(ep); bcm63xx_ep_enable()
1049 if (!ep || !desc || ep->name == bcm63xx_ep0name) bcm63xx_ep_enable()
1070 ep->desc = desc; bcm63xx_ep_enable()
1071 ep->maxpacket = usb_endpoint_maxp(desc); bcm63xx_ep_enable()
1079 * @ep: Endpoint to disable.
1081 static int bcm63xx_ep_disable(struct usb_ep *ep) bcm63xx_ep_disable() argument
1083 struct bcm63xx_ep *bep = our_ep(ep); bcm63xx_ep_disable()
1089 if (!ep || !ep->desc) bcm63xx_ep_disable()
1112 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req); bcm63xx_ep_disable()
1116 ep->desc = NULL; bcm63xx_ep_disable()
1124 * @ep: Endpoint associated with the request.
1127 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep, bcm63xx_udc_alloc_request() argument
1140 * @ep: Endpoint associated with the request.
1143 static void bcm63xx_udc_free_request(struct usb_ep *ep, bcm63xx_udc_free_request() argument
1152 * @ep: Endpoint associated with the request.
1164 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req, bcm63xx_udc_queue() argument
1167 struct bcm63xx_ep *bep = our_ep(ep); bcm63xx_udc_queue()
1173 if (unlikely(!req || !req->complete || !req->buf || !ep)) bcm63xx_udc_queue()
1210 * @ep: Endpoint associated with the request.
1217 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req) bcm63xx_udc_dequeue() argument
1219 struct bcm63xx_ep *bep = our_ep(ep); bcm63xx_udc_dequeue()
1253 req->complete(ep, req); bcm63xx_udc_dequeue()
1260 * @ep: Endpoint to halt.
1265 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value) bcm63xx_udc_set_halt() argument
1267 struct bcm63xx_ep *bep = our_ep(ep); bcm63xx_udc_set_halt()
1281 * @ep: Endpoint to wedge.
1285 static int bcm63xx_udc_set_wedge(struct usb_ep *ep) bcm63xx_udc_set_wedge() argument
1287 struct bcm63xx_ep *bep = our_ep(ep); bcm63xx_udc_set_wedge()
1424 req->complete(&udc->bep[0].ep, req); bcm63xx_ep0_complete()
2122 req->complete(&bep->ep, req); bcm63xx_udc_data_isr()
2203 seq_printf(s, " [ep%d]:\n", bcm63xx_iudma_dbg_show()
H A Dudc-core.c122 void usb_gadget_giveback_request(struct usb_ep *ep, usb_gadget_giveback_request() argument
128 req->complete(ep, req); usb_gadget_giveback_request()
135 * gadget_find_ep_by_name - returns ep whose name is the same as string passed
142 struct usb_ep *ep; gadget_find_ep_by_name() local
144 gadget_for_each_ep(ep, g) { gadget_for_each_ep()
145 if (!strcmp(ep->name, name)) gadget_for_each_ep()
146 return ep; gadget_for_each_ep()
156 struct usb_ep *ep, struct usb_endpoint_descriptor *desc, usb_gadget_ep_match_desc()
164 if (ep->claimed) usb_gadget_ep_match_desc()
170 if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in) usb_gadget_ep_match_desc()
172 if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out) usb_gadget_ep_match_desc()
175 if (max > ep->maxpacket_limit) usb_gadget_ep_match_desc()
187 if (!ep->caps.type_iso) usb_gadget_ep_match_desc()
194 if (!ep->caps.type_bulk) usb_gadget_ep_match_desc()
202 if (num_req_streams > ep->max_streams) usb_gadget_ep_match_desc()
210 if (!ep->caps.type_int && !ep->caps.type_bulk) usb_gadget_ep_match_desc()
155 usb_gadget_ep_match_desc(struct usb_gadget *gadget, struct usb_ep *ep, struct usb_endpoint_descriptor *desc, struct usb_ss_ep_comp_descriptor *ep_comp) usb_gadget_ep_match_desc() argument
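
A simplified sketch of the filtering usb_gadget_ep_match_desc() performs above: an endpoint is usable only if it is unclaimed, supports the descriptor's direction, has a large enough maxpacket limit, and offers the requested transfer type. The real function also handles streams, interrupt endpoints and the maxpacket encoding, which are trimmed here:

    #include <stdbool.h>

    struct ep_caps { bool dir_in, dir_out, type_bulk; };
    struct ep { bool claimed; unsigned maxpacket_limit; struct ep_caps caps; };

    static bool ep_matches(const struct ep *ep, bool dir_in, unsigned max,
                           bool want_bulk)
    {
            if (ep->claimed)
                    return false;
            if (dir_in && !ep->caps.dir_in)
                    return false;
            if (!dir_in && !ep->caps.dir_out)
                    return false;
            if (max > ep->maxpacket_limit)
                    return false;
            if (want_bulk && !ep->caps.type_bulk)
                    return false;
            return true;
    }

    int main(void)
    {
            struct ep e = { false, 512, { true, false, true } };

            return ep_matches(&e, true, 512, true) ? 0 : 1;
    }
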
H A Dr8a66597-udc.h59 struct usb_ep ep; member in struct:r8a66597_ep
98 struct r8a66597_ep ep[R8A66597_MAX_NUM_PIPE]; member in struct:r8a66597
198 struct r8a66597_ep *ep, r8a66597_write_fifo()
202 void __iomem *fifoaddr = r8a66597->reg + ep->fifoaddr; r8a66597_write_fifo()
231 r8a66597_bclr(r8a66597, MBW_16, ep->fifosel); r8a66597_write_fifo()
235 r8a66597_bclr(r8a66597, MBW_16, ep->fifosel); r8a66597_write_fifo()
197 r8a66597_write_fifo(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, unsigned char *buf, int len) r8a66597_write_fifo() argument
H A Dpxa25x_udc.h41 struct usb_ep ep; member in struct:pxa25x_ep
123 struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS]; member in struct:pxa25x_udc
229 if (dev->ep[i].ep.desc == NULL) dump_state()
231 DMSG ("udccs%d = %02x\n", i, *dev->ep->reg_udccs); dump_state()
H A Dfsl_qe_udc.h156 #define ep_index(EP) ((EP)->ep.desc->bEndpointAddress & 0xF)
157 #define ep_maxpacket(EP) ((EP)->ep.maxpacket)
159 USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
169 /* ep transfer mode */
261 struct qe_ep *ep; member in struct:qe_req
266 struct usb_ep ep; member in struct:qe_ep
H A Dfsl_usb2_udc.h452 struct fsl_ep *ep; member in struct:fsl_req
463 struct usb_ep ep; member in struct:fsl_ep
574 #define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
575 #define ep_maxpacket(EP) ((EP)->ep.maxpacket)
577 USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
585 static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep) get_qh_by_ep() argument
588 if (ep_index(ep) != 0) get_qh_by_ep()
589 return ep->qh; get_qh_by_ep()
591 return &ep->udc->ep_qh[(ep->udc->ep0_dir == get_qh_by_ep()
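
fsl_prime_ep() and fsl_queue_td() earlier compute an ENDPTPRIME mask in which OUT endpoint n is bit n and IN endpoint n is bit n + 16, so one 32-bit write can prime either direction; get_qh_by_ep() above pairs with the same indexing. The bit arithmetic in isolation:

    #include <stdio.h>

    static unsigned prime_bit(unsigned ep_index, int is_in)
    {
            return is_in ? 1u << (ep_index + 16) : 1u << ep_index;
    }

    int main(void)
    {
            printf("0x%08x\n", prime_bit(2, 1));    /* IN  ep2 -> 0x00040000 */
            printf("0x%08x\n", prime_bit(2, 0));    /* OUT ep2 -> 0x00000004 */
            return 0;
    }
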
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
H A Dcm.c147 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
152 static void deref_qp(struct c4iw_ep *ep) deref_qp() argument
154 c4iw_qp_rem_ref(&ep->com.qp->ibqp); deref_qp()
155 clear_bit(QP_REFERENCED, &ep->com.flags); deref_qp()
158 static void ref_qp(struct c4iw_ep *ep) ref_qp() argument
160 set_bit(QP_REFERENCED, &ep->com.flags); ref_qp()
161 c4iw_qp_add_ref(&ep->com.qp->ibqp); ref_qp()
164 static void start_ep_timer(struct c4iw_ep *ep) start_ep_timer() argument
166 PDBG("%s ep %p\n", __func__, ep); start_ep_timer()
167 if (timer_pending(&ep->timer)) { start_ep_timer()
168 pr_err("%s timer already started! ep %p\n", start_ep_timer()
169 __func__, ep); start_ep_timer()
172 clear_bit(TIMEOUT, &ep->com.flags); start_ep_timer()
173 c4iw_get_ep(&ep->com); start_ep_timer()
174 ep->timer.expires = jiffies + ep_timeout_secs * HZ; start_ep_timer()
175 ep->timer.data = (unsigned long)ep; start_ep_timer()
176 ep->timer.function = ep_timeout; start_ep_timer()
177 add_timer(&ep->timer); start_ep_timer()
180 static int stop_ep_timer(struct c4iw_ep *ep) stop_ep_timer() argument
182 PDBG("%s ep %p stopping\n", __func__, ep); stop_ep_timer()
183 del_timer_sync(&ep->timer); stop_ep_timer()
184 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { stop_ep_timer()
185 c4iw_put_ep(&ep->com); stop_ep_timer()
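
start_ep_timer()/stop_ep_timer() above tie the timer to the endpoint refcount: arming takes a reference, and the TIMEOUT bit guarantees the matching put happens exactly once whether the timer fires or is cancelled first. A sketch with non-atomic stand-ins for test_and_set_bit() and c4iw_get_ep()/c4iw_put_ep():

    #include <stdio.h>
    #include <stdbool.h>

    struct ep { int refcnt; bool timeout_set; };

    static void start_ep_timer(struct ep *ep)
    {
            ep->timeout_set = false;
            ep->refcnt++;           /* reference held by the pending timer */
    }

    static int stop_ep_timer(struct ep *ep)
    {
            if (!ep->timeout_set) { /* test_and_set_bit(TIMEOUT, ...) */
                    ep->timeout_set = true;
                    ep->refcnt--;   /* c4iw_put_ep() */
                    return 0;
            }
            return 1;               /* timer handler already ran */
    }

    int main(void)
    {
            struct ep ep = { 1, false };

            start_ep_timer(&ep);    /* refcnt -> 2 */
            stop_ep_timer(&ep);     /* refcnt -> 1, put exactly once */
            printf("refcnt %d, second stop returns %d\n",
                   ep.refcnt, stop_ep_timer(&ep));
            return 0;
    }
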
237 static void set_emss(struct c4iw_ep *ep, u16 opt) set_emss() argument
239 ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] - set_emss()
240 ((AF_INET == ep->com.remote_addr.ss_family) ? set_emss()
243 ep->mss = ep->emss; set_emss()
245 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); set_emss()
246 if (ep->emss < 128) set_emss()
247 ep->emss = 128; set_emss()
248 if (ep->emss & 7) set_emss()
250 TCPOPT_MSS_G(opt), ep->mss, ep->emss); set_emss()
252 ep->mss, ep->emss); set_emss()
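
set_emss() above derives the effective MSS from the negotiated MTU table entry. A worked sketch under IPv4 assumptions (20-byte IP plus 20-byte TCP header, 12 bytes for an aligned timestamp option, floor of 128); the constants are the usual header sizes, not values read from the driver:

    #include <stdio.h>

    static int emss_for(int mtu, int timestamps)
    {
            int emss = mtu - (20 + 20);     /* IPv4 + TCP headers */

            if (timestamps)
                    emss -= 12;             /* round_up(TCPOLEN_TIMESTAMP, 4) */
            return emss < 128 ? 128 : emss;
    }

    int main(void)
    {
            printf("%d\n", emss_for(1500, 1));      /* prints 1448 */
            return 0;
    }
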
289 PDBG("%s alloc ep %p\n", __func__, epc); alloc_ep()
295 struct c4iw_ep *ep; _c4iw_free_ep() local
297 ep = container_of(kref, struct c4iw_ep, com.kref); _c4iw_free_ep()
298 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); _c4iw_free_ep()
299 if (test_bit(QP_REFERENCED, &ep->com.flags)) _c4iw_free_ep()
300 deref_qp(ep); _c4iw_free_ep()
301 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { _c4iw_free_ep()
302 if (ep->com.remote_addr.ss_family == AF_INET6) { _c4iw_free_ep()
305 &ep->com.mapped_local_addr; _c4iw_free_ep()
308 ep->com.dev->rdev.lldi.ports[0], _c4iw_free_ep()
312 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); _c4iw_free_ep()
313 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); _c4iw_free_ep()
314 dst_release(ep->dst); _c4iw_free_ep()
315 cxgb4_l2t_release(ep->l2t); _c4iw_free_ep()
317 if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) { _c4iw_free_ep()
318 print_addr(&ep->com, __func__, "remove_mapinfo/mapping"); _c4iw_free_ep()
319 iwpm_remove_mapinfo(&ep->com.local_addr, _c4iw_free_ep()
320 &ep->com.mapped_local_addr); _c4iw_free_ep()
321 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); _c4iw_free_ep()
323 kfree(ep); _c4iw_free_ep()
326 static void release_ep_resources(struct c4iw_ep *ep) release_ep_resources() argument
328 set_bit(RELEASE_RESOURCES, &ep->com.flags); release_ep_resources()
329 c4iw_put_ep(&ep->com); release_ep_resources()
450 struct c4iw_ep *ep = handle; act_open_req_arp_failure() local
454 connect_reply_upcall(ep, -EHOSTUNREACH); act_open_req_arp_failure()
455 state_set(&ep->com, DEAD); act_open_req_arp_failure()
456 if (ep->com.remote_addr.ss_family == AF_INET6) { act_open_req_arp_failure()
458 (struct sockaddr_in6 *)&ep->com.mapped_local_addr; act_open_req_arp_failure()
459 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], act_open_req_arp_failure()
462 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); act_open_req_arp_failure()
463 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); act_open_req_arp_failure()
464 dst_release(ep->dst); act_open_req_arp_failure()
465 cxgb4_l2t_release(ep->l2t); act_open_req_arp_failure()
466 c4iw_put_ep(&ep->com); act_open_req_arp_failure()
483 static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) send_flowc() argument
495 16)) | FW_WR_FLOWID_V(ep->hwtid)); send_flowc()
499 (ep->com.dev->rdev.lldi.pf)); send_flowc()
501 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); send_flowc()
503 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); send_flowc()
505 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); send_flowc()
507 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); send_flowc()
509 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); send_flowc()
511 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); send_flowc()
513 flowc->mnemval[7].val = cpu_to_be32(ep->emss); send_flowc()
523 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_flowc()
524 c4iw_ofld_send(&ep->com.dev->rdev, skb); send_flowc()
527 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) send_halfclose() argument
533 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); send_halfclose()
539 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_halfclose()
543 INIT_TP_WR(req, ep->hwtid); send_halfclose()
545 ep->hwtid)); send_halfclose()
546 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_halfclose()
549 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) send_abort() argument
554 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); send_abort()
561 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_abort()
562 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); send_abort()
565 INIT_TP_WR(req, ep->hwtid); send_abort()
566 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); send_abort()
568 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_abort()
574 static void c4iw_form_pm_msg(struct c4iw_ep *ep, c4iw_form_pm_msg() argument
577 memcpy(&pm_msg->loc_addr, &ep->com.local_addr, c4iw_form_pm_msg()
578 sizeof(ep->com.local_addr)); c4iw_form_pm_msg()
579 memcpy(&pm_msg->rem_addr, &ep->com.remote_addr, c4iw_form_pm_msg()
580 sizeof(ep->com.remote_addr)); c4iw_form_pm_msg()
594 static void c4iw_record_pm_msg(struct c4iw_ep *ep, c4iw_record_pm_msg() argument
597 memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr, c4iw_record_pm_msg()
598 sizeof(ep->com.mapped_local_addr)); c4iw_record_pm_msg()
599 memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr, c4iw_record_pm_msg()
600 sizeof(ep->com.mapped_remote_addr)); c4iw_record_pm_msg()
633 static int send_connect(struct c4iw_ep *ep) send_connect() argument
648 &ep->com.mapped_local_addr; send_connect()
650 &ep->com.mapped_remote_addr; send_connect()
652 &ep->com.mapped_local_addr; send_connect()
654 &ep->com.mapped_remote_addr; send_connect()
656 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; send_connect()
678 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? send_connect()
682 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); send_connect()
690 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); send_connect()
692 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, send_connect()
694 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); send_connect()
701 win = ep->rcv_win >> 10; send_connect()
710 L2T_IDX_V(ep->l2t->idx) | send_connect()
711 TX_CHAN_V(ep->tx_chan) | send_connect()
712 SMAC_SEL_V(ep->smac_idx) | send_connect()
713 DSCP_V(ep->tos) | send_connect()
718 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); send_connect()
734 if (ep->com.remote_addr.ss_family == AF_INET6) send_connect()
735 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], send_connect()
738 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); send_connect()
740 if (ep->com.remote_addr.ss_family == AF_INET) { send_connect()
767 ((ep->rss_qid<<14) | ep->atid))); send_connect()
774 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { send_connect()
776 ep->com.dev->rdev.lldi.ports[0], send_connect()
777 ep->l2t)); send_connect()
782 ep->com.dev->rdev.lldi.ports[0], send_connect()
783 ep->l2t))); send_connect()
815 ((ep->rss_qid<<14)|ep->atid))); send_connect()
824 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { send_connect()
826 ep->com.dev->rdev.lldi.ports[0], send_connect()
827 ep->l2t)); send_connect()
832 ep->com.dev->rdev.lldi.ports[0], send_connect()
833 ep->l2t))); send_connect()
840 set_bit(ACT_OPEN_REQ, &ep->com.history); send_connect()
841 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_connect()
843 if (ret && ep->com.remote_addr.ss_family == AF_INET6) send_connect()
844 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], send_connect()
849 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, send_mpa_req() argument
857 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); send_mpa_req()
861 mpalen = sizeof(*mpa) + ep->plen; send_mpa_req()
867 connect_reply_upcall(ep, -ENOMEM); send_mpa_req()
870 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_mpa_req()
879 FW_WR_FLOWID_V(ep->hwtid) | send_mpa_req()
891 mpa->private_data_size = htons(ep->plen); send_mpa_req()
894 ep->tried_with_mpa_v1 = 1; send_mpa_req()
895 ep->retry_with_mpa_v1 = 0; send_mpa_req()
901 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, send_mpa_req()
902 ep->ord); send_mpa_req()
903 mpa_v2_params.ird = htons((u16)ep->ird); send_mpa_req()
904 mpa_v2_params.ord = htons((u16)ep->ord); send_mpa_req()
918 if (ep->plen) send_mpa_req()
921 ep->mpa_pkt + sizeof(*mpa), ep->plen); send_mpa_req()
923 if (ep->plen) send_mpa_req()
925 ep->mpa_pkt + sizeof(*mpa), ep->plen); send_mpa_req()
934 BUG_ON(ep->mpa_skb); send_mpa_req()
935 ep->mpa_skb = skb; send_mpa_req()
936 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_mpa_req()
937 start_ep_timer(ep); send_mpa_req()
938 __state_set(&ep->com, MPA_REQ_SENT); send_mpa_req()
939 ep->mpa_attr.initiator = 1; send_mpa_req()
940 ep->snd_seq += mpalen; send_mpa_req()
944 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) send_mpa_reject() argument
952 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); send_mpa_reject()
955 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) send_mpa_reject()
964 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_mpa_reject()
973 FW_WR_FLOWID_V(ep->hwtid) | send_mpa_reject()
984 mpa->revision = ep->mpa_attr.version; send_mpa_reject()
987 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { send_mpa_reject()
991 mpa_v2_params.ird = htons(((u16)ep->ird) | send_mpa_reject()
994 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? send_mpa_reject()
1003 if (ep->plen) send_mpa_reject()
1016 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_mpa_reject()
1018 BUG_ON(ep->mpa_skb); send_mpa_reject()
1019 ep->mpa_skb = skb; send_mpa_reject()
1020 ep->snd_seq += mpalen; send_mpa_reject()
1021 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_mpa_reject()
1024 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) send_mpa_reply() argument
1032 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); send_mpa_reply()
1035 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) send_mpa_reply()
1044 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); send_mpa_reply()
1053 FW_WR_FLOWID_V(ep->hwtid) | send_mpa_reply()
1063 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | send_mpa_reply()
1065 mpa->revision = ep->mpa_attr.version; send_mpa_reply()
1068 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { send_mpa_reply()
1072 mpa_v2_params.ird = htons((u16)ep->ird); send_mpa_reply()
1073 mpa_v2_params.ord = htons((u16)ep->ord); send_mpa_reply()
1074 if (peer2peer && (ep->mpa_attr.p2p_type != send_mpa_reply()
1089 if (ep->plen) send_mpa_reply()
1103 ep->mpa_skb = skb; send_mpa_reply()
1104 __state_set(&ep->com, MPA_REP_SENT); send_mpa_reply()
1105 ep->snd_seq += mpalen; send_mpa_reply()
1106 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_mpa_reply()
1111 struct c4iw_ep *ep; act_establish() local
1117 ep = lookup_atid(t, atid); act_establish()
1119 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, act_establish()
1122 mutex_lock(&ep->com.mutex); act_establish()
1123 dst_confirm(ep->dst); act_establish()
1126 ep->hwtid = tid; act_establish()
1127 cxgb4_insert_tid(t, ep, tid); act_establish()
1128 insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); act_establish()
1130 ep->snd_seq = be32_to_cpu(req->snd_isn); act_establish()
1131 ep->rcv_seq = be32_to_cpu(req->rcv_isn); act_establish()
1133 set_emss(ep, ntohs(req->tcp_opt)); act_establish()
1136 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); act_establish()
1138 set_bit(ACT_ESTAB, &ep->com.history); act_establish()
1141 send_flowc(ep, NULL); act_establish()
1142 if (ep->retry_with_mpa_v1) act_establish()
1143 send_mpa_req(ep, skb, 1); act_establish()
1145 send_mpa_req(ep, skb, mpa_rev); act_establish()
1146 mutex_unlock(&ep->com.mutex); act_establish()
1150 static void close_complete_upcall(struct c4iw_ep *ep, int status) close_complete_upcall() argument
1154 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); close_complete_upcall()
1158 if (ep->com.cm_id) { close_complete_upcall()
1159 PDBG("close complete delivered ep %p cm_id %p tid %u\n", close_complete_upcall()
1160 ep, ep->com.cm_id, ep->hwtid); close_complete_upcall()
1161 ep->com.cm_id->event_handler(ep->com.cm_id, &event); close_complete_upcall()
1162 ep->com.cm_id->rem_ref(ep->com.cm_id); close_complete_upcall()
1163 ep->com.cm_id = NULL; close_complete_upcall()
1164 set_bit(CLOSE_UPCALL, &ep->com.history); close_complete_upcall()
1168 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) abort_connection() argument
1170 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); abort_connection()
1171 __state_set(&ep->com, ABORTING); abort_connection()
1172 set_bit(ABORT_CONN, &ep->com.history); abort_connection()
1173 return send_abort(ep, skb, gfp); abort_connection()
1176 static void peer_close_upcall(struct c4iw_ep *ep) peer_close_upcall() argument
1180 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); peer_close_upcall()
1183 if (ep->com.cm_id) { peer_close_upcall()
1184 PDBG("peer close delivered ep %p cm_id %p tid %u\n", peer_close_upcall()
1185 ep, ep->com.cm_id, ep->hwtid); peer_close_upcall()
1186 ep->com.cm_id->event_handler(ep->com.cm_id, &event); peer_close_upcall()
1187 set_bit(DISCONN_UPCALL, &ep->com.history); peer_close_upcall()
1191 static void peer_abort_upcall(struct c4iw_ep *ep) peer_abort_upcall() argument
1195 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); peer_abort_upcall()
1199 if (ep->com.cm_id) { peer_abort_upcall()
1200 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, peer_abort_upcall()
1201 ep->com.cm_id, ep->hwtid); peer_abort_upcall()
1202 ep->com.cm_id->event_handler(ep->com.cm_id, &event); peer_abort_upcall()
1203 ep->com.cm_id->rem_ref(ep->com.cm_id); peer_abort_upcall()
1204 ep->com.cm_id = NULL; peer_abort_upcall()
1205 set_bit(ABORT_UPCALL, &ep->com.history); peer_abort_upcall()
1209 static void connect_reply_upcall(struct c4iw_ep *ep, int status) connect_reply_upcall() argument
1213 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); connect_reply_upcall()
1217 memcpy(&event.local_addr, &ep->com.local_addr, connect_reply_upcall()
1218 sizeof(ep->com.local_addr)); connect_reply_upcall()
1219 memcpy(&event.remote_addr, &ep->com.remote_addr, connect_reply_upcall()
1220 sizeof(ep->com.remote_addr)); connect_reply_upcall()
1223 if (!ep->tried_with_mpa_v1) { connect_reply_upcall()
1225 event.ord = ep->ird; connect_reply_upcall()
1226 event.ird = ep->ord; connect_reply_upcall()
1227 event.private_data_len = ep->plen - connect_reply_upcall()
1229 event.private_data = ep->mpa_pkt + connect_reply_upcall()
1234 event.ord = cur_max_read_depth(ep->com.dev); connect_reply_upcall()
1235 event.ird = cur_max_read_depth(ep->com.dev); connect_reply_upcall()
1236 event.private_data_len = ep->plen; connect_reply_upcall()
1237 event.private_data = ep->mpa_pkt + connect_reply_upcall()
1242 PDBG("%s ep %p tid %u status %d\n", __func__, ep, connect_reply_upcall()
1243 ep->hwtid, status); connect_reply_upcall()
1244 set_bit(CONN_RPL_UPCALL, &ep->com.history); connect_reply_upcall()
1245 ep->com.cm_id->event_handler(ep->com.cm_id, &event); connect_reply_upcall()
1248 ep->com.cm_id->rem_ref(ep->com.cm_id); connect_reply_upcall()
1249 ep->com.cm_id = NULL; connect_reply_upcall()
1253 static int connect_request_upcall(struct c4iw_ep *ep) connect_request_upcall() argument
1258 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); connect_request_upcall()
1261 memcpy(&event.local_addr, &ep->com.local_addr, connect_request_upcall()
1262 sizeof(ep->com.local_addr)); connect_request_upcall()
1263 memcpy(&event.remote_addr, &ep->com.remote_addr, connect_request_upcall()
1264 sizeof(ep->com.remote_addr)); connect_request_upcall()
1265 event.provider_data = ep; connect_request_upcall()
1266 if (!ep->tried_with_mpa_v1) { connect_request_upcall()
1268 event.ord = ep->ord; connect_request_upcall()
1269 event.ird = ep->ird; connect_request_upcall()
1270 event.private_data_len = ep->plen - connect_request_upcall()
1272 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + connect_request_upcall()
1276 event.ord = cur_max_read_depth(ep->com.dev); connect_request_upcall()
1277 event.ird = cur_max_read_depth(ep->com.dev); connect_request_upcall()
1278 event.private_data_len = ep->plen; connect_request_upcall()
1279 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); connect_request_upcall()
1281 c4iw_get_ep(&ep->com); connect_request_upcall()
1282 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, connect_request_upcall()
1285 c4iw_put_ep(&ep->com); connect_request_upcall()
1286 set_bit(CONNREQ_UPCALL, &ep->com.history); connect_request_upcall()
1287 c4iw_put_ep(&ep->parent_ep->com); connect_request_upcall()
1291 static void established_upcall(struct c4iw_ep *ep) established_upcall() argument
1295 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); established_upcall()
1298 event.ird = ep->ord; established_upcall()
1299 event.ord = ep->ird; established_upcall()
1300 if (ep->com.cm_id) { established_upcall()
1301 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); established_upcall()
1302 ep->com.cm_id->event_handler(ep->com.cm_id, &event); established_upcall()
1303 set_bit(ESTAB_UPCALL, &ep->com.history); established_upcall()
1307 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) update_rx_credits() argument
1313 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); update_rx_credits()
1325 if (ep->rcv_win > RCV_BUFSIZ_M * 1024) update_rx_credits()
1326 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; update_rx_credits()
1330 INIT_TP_WR(req, ep->hwtid); update_rx_credits()
1332 ep->hwtid)); update_rx_credits()
1336 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); update_rx_credits()
1337 c4iw_ofld_send(&ep->com.dev->rdev, skb); update_rx_credits()
1343 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) process_mpa_reply() argument
1355 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); process_mpa_reply()
1362 if (stop_ep_timer(ep)) process_mpa_reply()
1369 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { process_mpa_reply()
1377 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), process_mpa_reply()
1379 ep->mpa_pkt_len += skb->len; process_mpa_reply()
1384 if (ep->mpa_pkt_len < sizeof(*mpa)) process_mpa_reply()
1386 mpa = (struct mpa_message *) ep->mpa_pkt; process_mpa_reply()
1413 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { process_mpa_reply()
1418 ep->plen = (u8) plen; process_mpa_reply()
1424 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) process_mpa_reply()
1437 __state_set(&ep->com, FPDU_MODE); process_mpa_reply()
1438 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; process_mpa_reply()
1439 ep->mpa_attr.recv_marker_enabled = markers_enabled; process_mpa_reply()
1440 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; process_mpa_reply()
1441 ep->mpa_attr.version = mpa->revision; process_mpa_reply()
1442 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; process_mpa_reply()
1445 ep->mpa_attr.enhanced_rdma_conn = process_mpa_reply()
1447 if (ep->mpa_attr.enhanced_rdma_conn) { process_mpa_reply()
1449 (ep->mpa_pkt + sizeof(*mpa)); process_mpa_reply()
1454 PDBG("%s responder ird %u ord %u ep ird %u ord %u\n", process_mpa_reply()
1455 __func__, resp_ird, resp_ord, ep->ird, ep->ord); process_mpa_reply()
1462 if (ep->ird < resp_ord) { process_mpa_reply()
1464 ep->com.dev->rdev.lldi.max_ordird_qp) process_mpa_reply()
1465 ep->ird = resp_ord; process_mpa_reply()
1468 } else if (ep->ird > resp_ord) { process_mpa_reply()
1469 ep->ird = resp_ord; process_mpa_reply()
1471 if (ep->ord > resp_ird) { process_mpa_reply()
1473 ep->ord = resp_ird; process_mpa_reply()
1479 ep->ird = resp_ord; process_mpa_reply()
1480 ep->ord = resp_ird; process_mpa_reply()
1487 ep->mpa_attr.p2p_type = process_mpa_reply()
1491 ep->mpa_attr.p2p_type = process_mpa_reply()
1497 ep->mpa_attr.p2p_type = p2p_type; process_mpa_reply()
1501 "%d\n", __func__, ep->mpa_attr.crc_enabled, process_mpa_reply()
1502 ep->mpa_attr.recv_marker_enabled, process_mpa_reply()
1503 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, process_mpa_reply()
1504 ep->mpa_attr.p2p_type, p2p_type); process_mpa_reply()
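
The IRD/ORD block above reconciles the local endpoint's limits with the responder's MPA v2 reply: the initiator grows or shrinks its IRD toward the responder's ORD and shrinks its ORD to the responder's IRD, failing the connect when the peer demands more than the local maximum. A simplified sketch of that reconciliation; the driver additionally consults a relaxed-negotiation module knob, which is folded away here:

    #include <stdio.h>

    static int negotiate(unsigned *ird, unsigned *ord,
                         unsigned resp_ird, unsigned resp_ord,
                         unsigned max_ordird)
    {
            if (*ird < resp_ord) {
                    if (resp_ord <= max_ordird)
                            *ird = resp_ord;        /* accommodate the peer */
                    else
                            return -1;              /* -ENOMEM in the driver */
            } else if (*ird > resp_ord) {
                    *ird = resp_ord;
            }
            if (*ord > resp_ird)
                    *ord = resp_ird;
            return 0;
    }

    int main(void)
    {
            unsigned ird = 8, ord = 16;

            negotiate(&ird, &ord, 4, 12, 32);
            printf("ird %u ord %u\n", ird, ord);    /* ird 12 ord 4 */
            return 0;
    }
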
1512 if ((ep->mpa_attr.version == 2) && peer2peer && process_mpa_reply()
1513 (ep->mpa_attr.p2p_type != p2p_type)) { process_mpa_reply()
1514 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; process_mpa_reply()
1518 attrs.mpa_attr = ep->mpa_attr; process_mpa_reply()
1519 attrs.max_ird = ep->ird; process_mpa_reply()
1520 attrs.max_ord = ep->ord; process_mpa_reply()
1521 attrs.llp_stream_handle = ep; process_mpa_reply()
1529 err = c4iw_modify_qp(ep->com.qp->rhp, process_mpa_reply()
1530 ep->com.qp, mask, &attrs, 1); process_mpa_reply()
1544 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, process_mpa_reply()
1564 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, process_mpa_reply()
1572 __state_set(&ep->com, ABORTING); process_mpa_reply()
1573 send_abort(ep, skb, GFP_KERNEL); process_mpa_reply()
1575 connect_reply_upcall(ep, err); process_mpa_reply()
1579 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) process_mpa_request() argument
1585 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); process_mpa_request()
1591 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { process_mpa_request()
1592 (void)stop_ep_timer(ep); process_mpa_request()
1593 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1602 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), process_mpa_request()
1604 ep->mpa_pkt_len += skb->len; process_mpa_request()
1610 if (ep->mpa_pkt_len < sizeof(*mpa)) process_mpa_request()
1614 mpa = (struct mpa_message *) ep->mpa_pkt; process_mpa_request()
1622 (void)stop_ep_timer(ep); process_mpa_request()
1623 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1628 (void)stop_ep_timer(ep); process_mpa_request()
1629 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1639 (void)stop_ep_timer(ep); process_mpa_request()
1640 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1647 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { process_mpa_request()
1648 (void)stop_ep_timer(ep); process_mpa_request()
1649 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1652 ep->plen = (u8) plen; process_mpa_request()
1657 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) process_mpa_request()
1664 ep->mpa_attr.initiator = 0; process_mpa_request()
1665 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; process_mpa_request()
1666 ep->mpa_attr.recv_marker_enabled = markers_enabled; process_mpa_request()
1667 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; process_mpa_request()
1668 ep->mpa_attr.version = mpa->revision; process_mpa_request()
1670 ep->tried_with_mpa_v1 = 1; process_mpa_request()
1671 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; process_mpa_request()
1674 ep->mpa_attr.enhanced_rdma_conn = process_mpa_request()
1676 if (ep->mpa_attr.enhanced_rdma_conn) { process_mpa_request()
1678 (ep->mpa_pkt + sizeof(*mpa)); process_mpa_request()
1679 ep->ird = ntohs(mpa_v2_params->ird) & process_mpa_request()
1681 ep->ord = ntohs(mpa_v2_params->ord) & process_mpa_request()
1683 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, process_mpa_request()
1684 ep->ord); process_mpa_request()
1689 ep->mpa_attr.p2p_type = process_mpa_request()
1693 ep->mpa_attr.p2p_type = process_mpa_request()
1699 ep->mpa_attr.p2p_type = p2p_type; process_mpa_request()
1703 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, process_mpa_request()
1704 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, process_mpa_request()
1705 ep->mpa_attr.p2p_type); process_mpa_request()
1712 if (!stop_ep_timer(ep)) { process_mpa_request()
1713 __state_set(&ep->com, MPA_REQ_RCVD); process_mpa_request()
1716 mutex_lock_nested(&ep->parent_ep->com.mutex, process_mpa_request()
1718 if (ep->parent_ep->com.state != DEAD) { process_mpa_request()
1719 if (connect_request_upcall(ep)) process_mpa_request()
1720 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1722 abort_connection(ep, skb, GFP_KERNEL); process_mpa_request()
1724 mutex_unlock(&ep->parent_ep->com.mutex); process_mpa_request()
1731 struct c4iw_ep *ep; rx_data() local
1739 ep = lookup_tid(t, tid); rx_data()
1740 if (!ep) rx_data()
1742 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); rx_data()
1745 mutex_lock(&ep->com.mutex); rx_data()
1748 update_rx_credits(ep, dlen); rx_data()
1750 switch (ep->com.state) { rx_data()
1752 ep->rcv_seq += dlen; rx_data()
1753 disconnect = process_mpa_reply(ep, skb); rx_data()
1756 ep->rcv_seq += dlen; rx_data()
1757 process_mpa_request(ep, skb); rx_data()
1761 BUG_ON(!ep->com.qp); rx_data()
1764 " qpid %u ep %p state %d tid %u status %d\n", rx_data()
1765 __func__, ep->com.qp->wq.sq.qid, ep, rx_data()
1766 ep->com.state, ep->hwtid, status); rx_data()
1768 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, rx_data()
1776 mutex_unlock(&ep->com.mutex); rx_data()
1778 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); rx_data()
1784 struct c4iw_ep *ep; abort_rpl() local
1790 ep = lookup_tid(t, tid); abort_rpl()
1791 if (!ep) { abort_rpl()
1795 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); abort_rpl()
1796 mutex_lock(&ep->com.mutex); abort_rpl()
1797 switch (ep->com.state) { abort_rpl()
1799 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); abort_rpl()
1800 __state_set(&ep->com, DEAD); abort_rpl()
1804 printk(KERN_ERR "%s ep %p state %d\n", abort_rpl()
1805 __func__, ep, ep->com.state); abort_rpl()
1808 mutex_unlock(&ep->com.mutex); abort_rpl()
1811 release_ep_resources(ep); abort_rpl()
1815 static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) send_fw_act_open_req() argument
1830 ep->com.dev->rdev.lldi.ports[0], send_fw_act_open_req()
1831 ep->l2t)); send_fw_act_open_req()
1832 sin = (struct sockaddr_in *)&ep->com.mapped_local_addr; send_fw_act_open_req()
1835 sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr; send_fw_act_open_req()
1845 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, send_fw_act_open_req()
1847 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); send_fw_act_open_req()
1854 win = ep->rcv_win >> 10; send_fw_act_open_req()
1864 L2T_IDX_V(ep->l2t->idx) | send_fw_act_open_req()
1865 TX_CHAN_V(ep->tx_chan) | send_fw_act_open_req()
1866 SMAC_SEL_V(ep->smac_idx) | send_fw_act_open_req()
1867 DSCP_V(ep->tos) | send_fw_act_open_req()
1871 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | send_fw_act_open_req()
1874 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); send_fw_act_open_req()
1883 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); send_fw_act_open_req()
1884 set_bit(ACT_OFLD_CONN, &ep->com.history); send_fw_act_open_req()
1885 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); send_fw_act_open_req()
1920 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) set_tcp_window() argument
1922 ep->snd_win = snd_win; set_tcp_window()
1923 ep->rcv_win = rcv_win; set_tcp_window()
1924 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win); set_tcp_window()
1929 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, import_ep() argument
1960 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, import_ep()
1962 if (!ep->l2t) import_ep()
1964 ep->mtu = pdev->mtu; import_ep()
1965 ep->tx_chan = cxgb4_port_chan(pdev); import_ep()
1966 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, import_ep()
1970 ep->txq_idx = cxgb4_port_idx(pdev) * step; import_ep()
1973 ep->ctrlq_idx = cxgb4_port_idx(pdev); import_ep()
1974 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ import_ep()
1976 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); import_ep()
1980 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, import_ep()
1982 if (!ep->l2t) import_ep()
1984 ep->mtu = dst_mtu(dst); import_ep()
1985 ep->tx_chan = cxgb4_port_chan(pdev); import_ep()
1986 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, import_ep()
1990 ep->txq_idx = cxgb4_port_idx(pdev) * step; import_ep()
1991 ep->ctrlq_idx = cxgb4_port_idx(pdev); import_ep()
1994 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ import_ep()
1996 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); import_ep()
1999 ep->retry_with_mpa_v1 = 0; import_ep()
2000 ep->tried_with_mpa_v1 = 0; import_ep()
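import_ep() derives the per-connection transport parameters (MTU, TX channel, SMAC index, offload TX queue, RSS ingress queue) from the egress port of the route, with parallel branches for plain and VLAN netdevs. A reduced sketch of the idea; the struct, field, and helper names are hypothetical, not the driver's:

        struct hw_conn_params {
                unsigned int mtu;
                unsigned int tx_chan;   /* egress channel of the port */
                unsigned int txq_idx;   /* offload TX queue */
                unsigned int ctrlq_idx; /* control queue index */
                unsigned int rss_qid;   /* ingress queue for RSS steering */
        };

        static void derive_conn_params(struct hw_conn_params *p,
                                       unsigned int port_idx,
                                       unsigned int step,
                                       unsigned int route_mtu)
        {
                p->mtu = route_mtu;             /* from the route's dst entry */
                p->tx_chan = port_idx;          /* simplification */
                p->txq_idx = port_idx * step;   /* as in import_ep() above */
                p->ctrlq_idx = port_idx;
                p->rss_qid = port_idx;          /* simplification */
        }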
2012 static int c4iw_reconnect(struct c4iw_ep *ep) c4iw_reconnect() argument
2016 &ep->com.cm_id->local_addr; c4iw_reconnect()
2018 &ep->com.cm_id->remote_addr; c4iw_reconnect()
2020 &ep->com.cm_id->local_addr; c4iw_reconnect()
2022 &ep->com.cm_id->remote_addr; c4iw_reconnect()
2026 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); c4iw_reconnect()
2027 init_timer(&ep->timer); c4iw_reconnect()
2032 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); c4iw_reconnect()
2033 if (ep->atid == -1) { c4iw_reconnect()
2038 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); c4iw_reconnect()
2041 if (ep->com.cm_id->local_addr.ss_family == AF_INET) { c4iw_reconnect()
2042 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, c4iw_reconnect()
2048 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, c4iw_reconnect()
2055 if (!ep->dst) { c4iw_reconnect()
2060 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, c4iw_reconnect()
2061 ep->com.dev->rdev.lldi.adapter_type); c4iw_reconnect()
2068 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, c4iw_reconnect()
2069 ep->l2t->idx); c4iw_reconnect()
2071 state_set(&ep->com, CONNECTING); c4iw_reconnect()
2072 ep->tos = 0; c4iw_reconnect()
2075 err = send_connect(ep); c4iw_reconnect()
2079 cxgb4_l2t_release(ep->l2t); c4iw_reconnect()
2081 dst_release(ep->dst); c4iw_reconnect()
2083 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); c4iw_reconnect()
2084 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); c4iw_reconnect()
2092 connect_reply_upcall(ep, -ECONNRESET); c4iw_reconnect()
2093 c4iw_put_ep(&ep->com); c4iw_reconnect()
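The failure path of c4iw_reconnect() releases what it acquired in reverse order: the L2T entry, then the route, then the atid handle and the atid itself. A self-contained sketch of that goto-unwind shape, with stub helpers standing in for the cxgb4 calls:

        struct conn { int atid; void *route; void *l2t; };

        /* Stubs in place of cxgb4_alloc_atid(), find_route(), cxgb4_l2t_get(). */
        static int alloc_atid(struct conn *c) { c->atid = 1; return 0; }
        static void free_atid(struct conn *c) { c->atid = -1; }
        static int get_route(struct conn *c) { c->route = &c->atid; return 0; }
        static void put_route(struct conn *c) { c->route = 0; }
        static int get_l2t(struct conn *c) { c->l2t = &c->atid; return 0; }
        static void put_l2t(struct conn *c) { c->l2t = 0; }
        static int send_connect_req(struct conn *c) { (void)c; return 0; }

        static int connect_attempt(struct conn *c)
        {
                int err;

                err = alloc_atid(c);
                if (err)
                        goto out;
                err = get_route(c);
                if (err)
                        goto fail_atid;
                err = get_l2t(c);
                if (err)
                        goto fail_route;
                err = send_connect_req(c);
                if (err)
                        goto fail_l2t;
                return 0;

        fail_l2t:
                put_l2t(c);             /* reverse order of acquisition */
        fail_route:
                put_route(c);
        fail_atid:
                free_atid(c);
        out:
                return err;
        }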
2100 struct c4iw_ep *ep; act_open_rpl() local
2111 ep = lookup_atid(t, atid); act_open_rpl()
2112 la = (struct sockaddr_in *)&ep->com.mapped_local_addr; act_open_rpl()
2113 ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr; act_open_rpl()
2114 la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; act_open_rpl()
2115 ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr; act_open_rpl()
2117 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, act_open_rpl()
2123 ep->stats.connect_neg_adv++; act_open_rpl()
2130 set_bit(ACT_OPEN_RPL, &ep->com.history); act_open_rpl()
2143 if (ep->com.local_addr.ss_family == AF_INET && act_open_rpl()
2145 send_fw_act_open_req(ep, act_open_rpl()
2152 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { act_open_rpl()
2153 set_bit(ACT_RETRY_INUSE, &ep->com.history); act_open_rpl()
2154 if (ep->com.remote_addr.ss_family == AF_INET6) { act_open_rpl()
2157 &ep->com.mapped_local_addr; act_open_rpl()
2159 ep->com.dev->rdev.lldi.ports[0], act_open_rpl()
2163 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, act_open_rpl()
2166 dst_release(ep->dst); act_open_rpl()
2167 cxgb4_l2t_release(ep->l2t); act_open_rpl()
2168 c4iw_reconnect(ep); act_open_rpl()
2173 if (ep->com.local_addr.ss_family == AF_INET) { act_open_rpl()
2187 connect_reply_upcall(ep, status2errno(status)); act_open_rpl()
2188 state_set(&ep->com, DEAD); act_open_rpl()
2190 if (ep->com.remote_addr.ss_family == AF_INET6) { act_open_rpl()
2192 (struct sockaddr_in6 *)&ep->com.mapped_local_addr; act_open_rpl()
2193 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], act_open_rpl()
2197 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); act_open_rpl()
2199 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); act_open_rpl()
2201 dst_release(ep->dst); act_open_rpl()
2202 cxgb4_l2t_release(ep->l2t); act_open_rpl()
2203 c4iw_put_ep(&ep->com); act_open_rpl()
2213 struct c4iw_listen_ep *ep = lookup_stid(t, stid); pass_open_rpl() local
2215 if (!ep) { pass_open_rpl()
2219 PDBG("%s ep %p status %d error %d\n", __func__, ep, pass_open_rpl()
2221 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); pass_open_rpl()
2232 struct c4iw_listen_ep *ep = lookup_stid(t, stid); close_listsrv_rpl() local
2234 PDBG("%s ep %p\n", __func__, ep); close_listsrv_rpl()
2235 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); close_listsrv_rpl()
2239 static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, accept_cr() argument
2249 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; accept_cr()
2251 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); accept_cr()
2259 INIT_TP_WR(rpl5, ep->hwtid); accept_cr()
2262 INIT_TP_WR(rpl, ep->hwtid); accept_cr()
2265 ep->hwtid)); accept_cr()
2267 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, accept_cr()
2269 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); accept_cr()
2276 win = ep->rcv_win >> 10; accept_cr()
2284 L2T_IDX_V(ep->l2t->idx) | accept_cr()
2285 TX_CHAN_V(ep->tx_chan) | accept_cr()
2286 SMAC_SEL_V(ep->smac_idx) | accept_cr()
2287 DSCP_V(ep->tos >> 2) | accept_cr()
2291 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); accept_cr()
2327 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); accept_cr()
2329 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); accept_cr()
2402 printk(KERN_ERR "%s - listening ep not in LISTEN\n", pass_accept_req()
2412 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" pass_accept_req()
2420 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" pass_accept_req()
2437 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", pass_accept_req()
2538 struct c4iw_ep *ep; pass_establish() local
2543 ep = lookup_tid(t, tid); pass_establish()
2544 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); pass_establish()
2545 ep->snd_seq = be32_to_cpu(req->snd_isn); pass_establish()
2546 ep->rcv_seq = be32_to_cpu(req->rcv_isn); pass_establish()
2548 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, pass_establish()
2551 set_emss(ep, ntohs(req->tcp_opt)); pass_establish()
2553 dst_confirm(ep->dst); pass_establish()
2554 state_set(&ep->com, MPA_REQ_WAIT); pass_establish()
2555 start_ep_timer(ep); pass_establish()
2556 send_flowc(ep, skb); pass_establish()
2557 set_bit(PASS_ESTAB, &ep->com.history); pass_establish()
2565 struct c4iw_ep *ep; peer_close() local
2573 ep = lookup_tid(t, tid); peer_close()
2574 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); peer_close()
2575 dst_confirm(ep->dst); peer_close()
2577 set_bit(PEER_CLOSE, &ep->com.history); peer_close()
2578 mutex_lock(&ep->com.mutex); peer_close()
2579 switch (ep->com.state) { peer_close()
2581 __state_set(&ep->com, CLOSING); peer_close()
2584 __state_set(&ep->com, CLOSING); peer_close()
2585 connect_reply_upcall(ep, -ECONNRESET); peer_close()
2595 __state_set(&ep->com, CLOSING); peer_close()
2596 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); peer_close()
2597 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); peer_close()
2600 __state_set(&ep->com, CLOSING); peer_close()
2601 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); peer_close()
2602 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); peer_close()
2605 start_ep_timer(ep); peer_close()
2606 __state_set(&ep->com, CLOSING); peer_close()
2608 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
2611 peer_close_upcall(ep); peer_close()
2619 __state_set(&ep->com, MORIBUND); peer_close()
2623 (void)stop_ep_timer(ep); peer_close()
2624 if (ep->com.cm_id && ep->com.qp) { peer_close()
2626 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
2629 close_complete_upcall(ep, 0); peer_close()
2630 __state_set(&ep->com, DEAD); peer_close()
2640 mutex_unlock(&ep->com.mutex); peer_close()
2642 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); peer_close()
2644 release_ep_resources(ep); peer_close()
2651 struct c4iw_ep *ep; peer_abort() local
2660 ep = lookup_tid(t, tid); peer_abort()
2663 __func__, ep->hwtid, req->status, peer_abort()
2665 ep->stats.abort_neg_adv++; peer_abort()
2671 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, peer_abort()
2672 ep->com.state); peer_abort()
2673 set_bit(PEER_ABORT, &ep->com.history); peer_abort()
2680 if (ep->com.state != MPA_REQ_SENT) peer_abort()
2681 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); peer_abort()
2683 mutex_lock(&ep->com.mutex); peer_abort()
2684 switch (ep->com.state) { peer_abort()
2688 (void)stop_ep_timer(ep); peer_abort()
2691 (void)stop_ep_timer(ep); peer_abort()
2692 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) peer_abort()
2693 connect_reply_upcall(ep, -ECONNRESET); peer_abort()
2705 ep->retry_with_mpa_v1 = 1; peer_abort()
2714 stop_ep_timer(ep); peer_abort()
2717 if (ep->com.cm_id && ep->com.qp) { peer_abort()
2719 ret = c4iw_modify_qp(ep->com.qp->rhp, peer_abort()
2720 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, peer_abort()
2727 peer_abort_upcall(ep); peer_abort()
2733 mutex_unlock(&ep->com.mutex); peer_abort()
2739 dst_confirm(ep->dst); peer_abort()
2740 if (ep->com.state != ABORTING) { peer_abort()
2741 __state_set(&ep->com, DEAD); peer_abort()
2743 if (!ep->retry_with_mpa_v1) peer_abort()
2746 mutex_unlock(&ep->com.mutex); peer_abort()
2755 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); peer_abort()
2757 INIT_TP_WR(rpl, ep->hwtid); peer_abort()
2758 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); peer_abort()
2760 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); peer_abort()
2763 release_ep_resources(ep); peer_abort()
2764 else if (ep->retry_with_mpa_v1) { peer_abort()
2765 if (ep->com.remote_addr.ss_family == AF_INET6) { peer_abort()
2768 &ep->com.mapped_local_addr; peer_abort()
2770 ep->com.dev->rdev.lldi.ports[0], peer_abort()
2774 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); peer_abort()
2775 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); peer_abort()
2776 dst_release(ep->dst); peer_abort()
2777 cxgb4_l2t_release(ep->l2t); peer_abort()
2778 c4iw_reconnect(ep); peer_abort()
2786 struct c4iw_ep *ep; close_con_rpl() local
2793 ep = lookup_tid(t, tid); close_con_rpl()
2795 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); close_con_rpl()
2796 BUG_ON(!ep); close_con_rpl()
2799 mutex_lock(&ep->com.mutex); close_con_rpl()
2800 switch (ep->com.state) { close_con_rpl()
2802 __state_set(&ep->com, MORIBUND); close_con_rpl()
2805 (void)stop_ep_timer(ep); close_con_rpl()
2806 if ((ep->com.cm_id) && (ep->com.qp)) { close_con_rpl()
2808 c4iw_modify_qp(ep->com.qp->rhp, close_con_rpl()
2809 ep->com.qp, close_con_rpl()
2813 close_complete_upcall(ep, 0); close_con_rpl()
2814 __state_set(&ep->com, DEAD); close_con_rpl()
2824 mutex_unlock(&ep->com.mutex); close_con_rpl()
2826 release_ep_resources(ep); close_con_rpl()
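close_con_rpl(), peer_close() and abort_rpl() all share one shape: look up the endpoint by tid, take ep->com.mutex, switch on ep->com.state, and release resources only after the lock is dropped. A minimal userspace model of that pattern, assuming a reduced state set (the real driver has more states and makes upcalls along the way):

        #include <pthread.h>
        #include <stdio.h>

        enum ep_state { CONNECTED, CLOSING, MORIBUND, DEAD };

        struct ep {
                pthread_mutex_t mutex;
                enum ep_state state;
        };

        /* Returns nonzero when the caller should release the endpoint. */
        static int handle_close_rpl(struct ep *ep)
        {
                int release = 0;

                pthread_mutex_lock(&ep->mutex);
                switch (ep->state) {
                case CLOSING:
                        ep->state = MORIBUND;   /* wait for the peer's half */
                        break;
                case MORIBUND:
                        ep->state = DEAD;
                        release = 1;            /* both halves now closed */
                        break;
                default:
                        fprintf(stderr, "unexpected state %d\n", ep->state);
                        break;
                }
                pthread_mutex_unlock(&ep->mutex);
                return release;         /* free only after the lock drops */
        }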
2835 struct c4iw_ep *ep; terminate() local
2838 ep = lookup_tid(t, tid); terminate()
2839 BUG_ON(!ep); terminate()
2841 if (ep && ep->com.qp) { terminate()
2843 ep->com.qp->wq.sq.qid); terminate()
2845 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, terminate()
2848 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); terminate()
2860 struct c4iw_ep *ep; fw4_ack() local
2867 ep = lookup_tid(t, tid); fw4_ack()
2868 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); fw4_ack()
2870 PDBG("%s 0 credit ack ep %p tid %u state %u\n", fw4_ack()
2871 __func__, ep, ep->hwtid, state_read(&ep->com)); fw4_ack()
2875 dst_confirm(ep->dst); fw4_ack()
2876 if (ep->mpa_skb) { fw4_ack()
2877 PDBG("%s last streaming msg ack ep %p tid %u state %u " fw4_ack()
2878 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, fw4_ack()
2879 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); fw4_ack()
2880 kfree_skb(ep->mpa_skb); fw4_ack()
2881 ep->mpa_skb = NULL; fw4_ack()
2890 struct c4iw_ep *ep = to_ep(cm_id); c4iw_reject_cr() local
2891 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); c4iw_reject_cr()
2893 mutex_lock(&ep->com.mutex); c4iw_reject_cr()
2894 if (ep->com.state == DEAD) { c4iw_reject_cr()
2895 mutex_unlock(&ep->com.mutex); c4iw_reject_cr()
2896 c4iw_put_ep(&ep->com); c4iw_reject_cr()
2899 set_bit(ULP_REJECT, &ep->com.history); c4iw_reject_cr()
2900 BUG_ON(ep->com.state != MPA_REQ_RCVD); c4iw_reject_cr()
2902 abort_connection(ep, NULL, GFP_KERNEL); c4iw_reject_cr()
2904 err = send_mpa_reject(ep, pdata, pdata_len); c4iw_reject_cr()
2907 mutex_unlock(&ep->com.mutex); c4iw_reject_cr()
2909 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); c4iw_reject_cr()
2910 c4iw_put_ep(&ep->com); c4iw_reject_cr()
2919 struct c4iw_ep *ep = to_ep(cm_id); c4iw_accept_cr() local
2923 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); c4iw_accept_cr()
2925 mutex_lock(&ep->com.mutex); c4iw_accept_cr()
2926 if (ep->com.state == DEAD) { c4iw_accept_cr()
2931 BUG_ON(ep->com.state != MPA_REQ_RCVD); c4iw_accept_cr()
2934 set_bit(ULP_ACCEPT, &ep->com.history); c4iw_accept_cr()
2935 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || c4iw_accept_cr()
2936 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { c4iw_accept_cr()
2937 abort_connection(ep, NULL, GFP_KERNEL); c4iw_accept_cr()
2942 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { c4iw_accept_cr()
2943 if (conn_param->ord > ep->ird) { c4iw_accept_cr()
2945 ep->ord = ep->ird; c4iw_accept_cr()
2947 ep->ird = conn_param->ird; c4iw_accept_cr()
2948 ep->ord = conn_param->ord; c4iw_accept_cr()
2949 send_mpa_reject(ep, conn_param->private_data, c4iw_accept_cr()
2951 abort_connection(ep, NULL, GFP_KERNEL); c4iw_accept_cr()
2956 if (conn_param->ird < ep->ord) { c4iw_accept_cr()
2958 ep->ord <= h->rdev.lldi.max_ordird_qp) { c4iw_accept_cr()
2959 conn_param->ird = ep->ord; c4iw_accept_cr()
2961 abort_connection(ep, NULL, GFP_KERNEL); c4iw_accept_cr()
2967 ep->ird = conn_param->ird; c4iw_accept_cr()
2968 ep->ord = conn_param->ord; c4iw_accept_cr()
2970 if (ep->mpa_attr.version == 1) { c4iw_accept_cr()
2971 if (peer2peer && ep->ird == 0) c4iw_accept_cr()
2972 ep->ird = 1; c4iw_accept_cr()
2975 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && c4iw_accept_cr()
2976 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) c4iw_accept_cr()
2977 ep->ird = 1; c4iw_accept_cr()
2980 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); c4iw_accept_cr()
2983 ep->com.cm_id = cm_id; c4iw_accept_cr()
2984 ep->com.qp = qp; c4iw_accept_cr()
2985 ref_qp(ep); c4iw_accept_cr()
2988 attrs.mpa_attr = ep->mpa_attr; c4iw_accept_cr()
2989 attrs.max_ird = ep->ird; c4iw_accept_cr()
2990 attrs.max_ord = ep->ord; c4iw_accept_cr()
2991 attrs.llp_stream_handle = ep; c4iw_accept_cr()
3001 err = c4iw_modify_qp(ep->com.qp->rhp, c4iw_accept_cr()
3002 ep->com.qp, mask, &attrs, 1); c4iw_accept_cr()
3005 err = send_mpa_reply(ep, conn_param->private_data, c4iw_accept_cr()
3010 __state_set(&ep->com, FPDU_MODE); c4iw_accept_cr()
3011 established_upcall(ep); c4iw_accept_cr()
3012 mutex_unlock(&ep->com.mutex); c4iw_accept_cr()
3013 c4iw_put_ep(&ep->com); c4iw_accept_cr()
3016 ep->com.cm_id = NULL; c4iw_accept_cr()
3017 abort_connection(ep, NULL, GFP_KERNEL); c4iw_accept_cr()
3020 mutex_unlock(&ep->com.mutex); c4iw_accept_cr()
3021 c4iw_put_ep(&ep->com); c4iw_accept_cr()
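In the MPA v2 enhanced-RDMA path above, the accept side must not set its ORD higher than the IRD the peer advertised, and it bumps its own IRD up to the peer's ORD when the hardware limit allows; otherwise the connection is aborted. A toy model of that check, with illustrative names rather than the driver's:

        struct read_depths { int ird, ord; };

        /* Returns 0 when the depths can be honored, -1 to reject. */
        static int negotiate_depths(struct read_depths *acc,
                                    int peer_ird, int peer_ord, int hw_max)
        {
                if (acc->ord > peer_ird)
                        return -1;      /* more reads than the peer will take */
                if (acc->ird < peer_ord) {
                        if (peer_ord > hw_max)
                                return -1;      /* beyond the device limit */
                        acc->ird = peer_ord;    /* cover the peer's reads */
                }
                return 0;
        }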
3089 struct c4iw_ep *ep; c4iw_connect() local
3106 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); c4iw_connect()
3107 if (!ep) { c4iw_connect()
3108 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); c4iw_connect()
3112 init_timer(&ep->timer); c4iw_connect()
3113 ep->plen = conn_param->private_data_len; c4iw_connect()
3114 if (ep->plen) c4iw_connect()
3115 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), c4iw_connect()
3116 conn_param->private_data, ep->plen); c4iw_connect()
3117 ep->ird = conn_param->ird; c4iw_connect()
3118 ep->ord = conn_param->ord; c4iw_connect()
3120 if (peer2peer && ep->ord == 0) c4iw_connect()
3121 ep->ord = 1; c4iw_connect()
3124 ep->com.dev = dev; c4iw_connect()
3125 ep->com.cm_id = cm_id; c4iw_connect()
3126 ep->com.qp = get_qhp(dev, conn_param->qpn); c4iw_connect()
3127 if (!ep->com.qp) { c4iw_connect()
3132 ref_qp(ep); c4iw_connect()
3134 ep->com.qp, cm_id); c4iw_connect()
3139 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); c4iw_connect()
3140 if (ep->atid == -1) { c4iw_connect()
3145 insert_handle(dev, &dev->atid_idr, ep, ep->atid); c4iw_connect()
3147 memcpy(&ep->com.local_addr, &cm_id->local_addr, c4iw_connect()
3148 sizeof(ep->com.local_addr)); c4iw_connect()
3149 memcpy(&ep->com.remote_addr, &cm_id->remote_addr, c4iw_connect()
3150 sizeof(ep->com.remote_addr)); c4iw_connect()
3153 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, c4iw_connect()
3154 sizeof(ep->com.mapped_local_addr)); c4iw_connect()
3155 memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr, c4iw_connect()
3156 sizeof(ep->com.mapped_remote_addr)); c4iw_connect()
3165 c4iw_form_pm_msg(ep, &pm_msg); c4iw_connect()
3171 c4iw_record_pm_msg(ep, &pm_msg); c4iw_connect()
3173 if (iwpm_create_mapinfo(&ep->com.local_addr, c4iw_connect()
3174 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { c4iw_connect()
3175 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); c4iw_connect()
3179 print_addr(&ep->com, __func__, "add_query/create_mapinfo"); c4iw_connect()
3180 set_bit(RELEASE_MAPINFO, &ep->com.flags); c4iw_connect()
3182 laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr; c4iw_connect()
3183 raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr; c4iw_connect()
3184 laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; c4iw_connect()
3185 raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr; c4iw_connect()
3204 ep->dst = find_route(dev, laddr->sin_addr.s_addr, c4iw_connect()
3225 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, c4iw_connect()
3230 if (!ep->dst) { c4iw_connect()
3236 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, c4iw_connect()
3237 ep->com.dev->rdev.lldi.adapter_type); c4iw_connect()
3244 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, c4iw_connect()
3245 ep->l2t->idx); c4iw_connect()
3247 state_set(&ep->com, CONNECTING); c4iw_connect()
3248 ep->tos = 0; c4iw_connect()
3251 err = send_connect(ep); c4iw_connect()
3255 cxgb4_l2t_release(ep->l2t); c4iw_connect()
3257 dst_release(ep->dst); c4iw_connect()
3259 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); c4iw_connect()
3260 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); c4iw_connect()
3263 c4iw_put_ep(&ep->com); c4iw_connect()
3268 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) create_server6() argument
3272 &ep->com.mapped_local_addr; create_server6()
3274 c4iw_init_wr_wait(&ep->com.wr_wait); create_server6()
3275 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], create_server6()
3276 ep->stid, &sin6->sin6_addr, create_server6()
3278 ep->com.dev->rdev.lldi.rxq_ids[0]); create_server6()
3280 err = c4iw_wait_for_reply(&ep->com.dev->rdev, create_server6()
3281 &ep->com.wr_wait, create_server6()
3287 err, ep->stid, create_server6()
3290 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], create_server6()
3295 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) create_server4() argument
3299 &ep->com.mapped_local_addr; create_server4()
3304 ep->com.dev->rdev.lldi.ports[0], ep->stid, create_server4()
3306 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); create_server4()
3308 if (c4iw_fatal_error(&ep->com.dev->rdev)) { create_server4()
3317 c4iw_init_wr_wait(&ep->com.wr_wait); create_server4()
3318 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], create_server4()
3319 ep->stid, sin->sin_addr.s_addr, sin->sin_port, create_server4()
3320 0, ep->com.dev->rdev.lldi.rxq_ids[0]); create_server4()
3322 err = c4iw_wait_for_reply(&ep->com.dev->rdev, create_server4()
3323 &ep->com.wr_wait, create_server4()
3330 , err, ep->stid, create_server4()
3339 struct c4iw_listen_ep *ep; c4iw_create_listen() local
3346 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); c4iw_create_listen()
3347 if (!ep) { c4iw_create_listen()
3348 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); c4iw_create_listen()
3352 PDBG("%s ep %p\n", __func__, ep); c4iw_create_listen()
3354 ep->com.cm_id = cm_id; c4iw_create_listen()
3355 ep->com.dev = dev; c4iw_create_listen()
3356 ep->backlog = backlog; c4iw_create_listen()
3357 memcpy(&ep->com.local_addr, &cm_id->local_addr, c4iw_create_listen()
3358 sizeof(ep->com.local_addr)); c4iw_create_listen()
3364 ep->com.local_addr.ss_family == AF_INET) c4iw_create_listen()
3365 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, c4iw_create_listen()
3366 cm_id->local_addr.ss_family, ep); c4iw_create_listen()
3368 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, c4iw_create_listen()
3369 cm_id->local_addr.ss_family, ep); c4iw_create_listen()
3371 if (ep->stid == -1) { c4iw_create_listen()
3376 insert_handle(dev, &dev->stid_idr, ep, ep->stid); c4iw_create_listen()
3379 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, c4iw_create_listen()
3380 sizeof(ep->com.mapped_local_addr)); c4iw_create_listen()
3389 memcpy(&pm_msg.loc_addr, &ep->com.local_addr, c4iw_create_listen()
3390 sizeof(ep->com.local_addr)); c4iw_create_listen()
3396 memcpy(&ep->com.mapped_local_addr, c4iw_create_listen()
3398 sizeof(ep->com.mapped_local_addr)); c4iw_create_listen()
3400 if (iwpm_create_mapinfo(&ep->com.local_addr, c4iw_create_listen()
3401 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { c4iw_create_listen()
3405 print_addr(&ep->com, __func__, "add_mapping/create_mapinfo"); c4iw_create_listen()
3407 set_bit(RELEASE_MAPINFO, &ep->com.flags); c4iw_create_listen()
3408 state_set(&ep->com, LISTEN); c4iw_create_listen()
3409 if (ep->com.local_addr.ss_family == AF_INET) c4iw_create_listen()
3410 err = create_server4(dev, ep); c4iw_create_listen()
3412 err = create_server6(dev, ep); c4iw_create_listen()
3414 cm_id->provider_data = ep; c4iw_create_listen()
3419 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, c4iw_create_listen()
3420 ep->com.local_addr.ss_family); c4iw_create_listen()
3423 c4iw_put_ep(&ep->com); c4iw_create_listen()
3432 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); c4iw_destroy_listen() local
3434 PDBG("%s ep %p\n", __func__, ep); c4iw_destroy_listen()
3437 state_set(&ep->com, DEAD); c4iw_destroy_listen()
3438 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && c4iw_destroy_listen()
3439 ep->com.local_addr.ss_family == AF_INET) { c4iw_destroy_listen()
3441 ep->com.dev->rdev.lldi.ports[0], ep->stid, c4iw_destroy_listen()
3442 ep->com.dev->rdev.lldi.rxq_ids[0], 0); c4iw_destroy_listen()
3445 c4iw_init_wr_wait(&ep->com.wr_wait); c4iw_destroy_listen()
3447 ep->com.dev->rdev.lldi.ports[0], ep->stid, c4iw_destroy_listen()
3448 ep->com.dev->rdev.lldi.rxq_ids[0], 0); c4iw_destroy_listen()
3451 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, c4iw_destroy_listen()
3453 sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; c4iw_destroy_listen()
3454 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], c4iw_destroy_listen()
3457 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); c4iw_destroy_listen()
3458 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, c4iw_destroy_listen()
3459 ep->com.local_addr.ss_family); c4iw_destroy_listen()
3462 c4iw_put_ep(&ep->com); c4iw_destroy_listen()
3466 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) c4iw_ep_disconnect() argument
3473 mutex_lock(&ep->com.mutex); c4iw_ep_disconnect()
3475 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, c4iw_ep_disconnect()
3476 states[ep->com.state], abrupt); c4iw_ep_disconnect()
3478 rdev = &ep->com.dev->rdev; c4iw_ep_disconnect()
3481 close_complete_upcall(ep, -EIO); c4iw_ep_disconnect()
3482 ep->com.state = DEAD; c4iw_ep_disconnect()
3484 switch (ep->com.state) { c4iw_ep_disconnect()
3492 ep->com.state = ABORTING; c4iw_ep_disconnect()
3494 ep->com.state = CLOSING; c4iw_ep_disconnect()
3495 start_ep_timer(ep); c4iw_ep_disconnect()
3497 set_bit(CLOSE_SENT, &ep->com.flags); c4iw_ep_disconnect()
3500 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { c4iw_ep_disconnect()
3503 (void)stop_ep_timer(ep); c4iw_ep_disconnect()
3504 ep->com.state = ABORTING; c4iw_ep_disconnect()
3506 ep->com.state = MORIBUND; c4iw_ep_disconnect()
3512 PDBG("%s ignoring disconnect ep %p state %u\n", c4iw_ep_disconnect()
3513 __func__, ep, ep->com.state); c4iw_ep_disconnect()
3522 set_bit(EP_DISC_ABORT, &ep->com.history); c4iw_ep_disconnect()
3523 close_complete_upcall(ep, -ECONNRESET); c4iw_ep_disconnect()
3524 ret = send_abort(ep, NULL, gfp); c4iw_ep_disconnect()
3526 set_bit(EP_DISC_CLOSE, &ep->com.history); c4iw_ep_disconnect()
3527 ret = send_halfclose(ep, gfp); c4iw_ep_disconnect()
3532 mutex_unlock(&ep->com.mutex); c4iw_ep_disconnect()
3534 release_ep_resources(ep); c4iw_ep_disconnect()
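c4iw_ep_disconnect() picks between two teardowns: an abrupt path that sends an abort, and a graceful path that arms the endpoint timer and sends a half-close so a silent peer cannot pin the endpoint forever. A compact sketch of that decision, with stub senders in place of the CPL messages:

        struct epc { int timer_armed; };

        static int send_abort_msg(struct epc *e) { (void)e; return 0; }
        static int send_halfclose_msg(struct epc *e) { (void)e; return 0; }
        static void arm_timer(struct epc *e) { e->timer_armed = 1; }
        static void release_resources(struct epc *e) { (void)e; }

        static int disconnect(struct epc *e, int abrupt)
        {
                int ret;

                if (abrupt) {
                        ret = send_abort_msg(e);        /* RST-style teardown */
                } else {
                        arm_timer(e);           /* bound the peer's response time */
                        ret = send_halfclose_msg(e);    /* FIN-style teardown */
                }
                if (ret)
                        release_resources(e);   /* fabric error: free at once */
                return ret;
        }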
3541 struct c4iw_ep *ep; active_ofld_conn_reply() local
3544 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, active_ofld_conn_reply()
3546 if (!ep) active_ofld_conn_reply()
3551 set_bit(ACT_RETRY_NOMEM, &ep->com.history); active_ofld_conn_reply()
3552 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { active_ofld_conn_reply()
3553 send_fw_act_open_req(ep, atid); active_ofld_conn_reply()
3557 set_bit(ACT_RETRY_INUSE, &ep->com.history); active_ofld_conn_reply()
3558 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { active_ofld_conn_reply()
3559 send_fw_act_open_req(ep, atid); active_ofld_conn_reply()
3573 connect_reply_upcall(ep, status2errno(req->retval)); active_ofld_conn_reply()
3574 state_set(&ep->com, DEAD); active_ofld_conn_reply()
3575 if (ep->com.remote_addr.ss_family == AF_INET6) { active_ofld_conn_reply()
3577 (struct sockaddr_in6 *)&ep->com.mapped_local_addr; active_ofld_conn_reply()
3578 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], active_ofld_conn_reply()
3583 dst_release(ep->dst); active_ofld_conn_reply()
3584 cxgb4_l2t_release(ep->l2t); active_ofld_conn_reply()
3585 c4iw_put_ep(&ep->com); active_ofld_conn_reply()
3939 static void process_timeout(struct c4iw_ep *ep) process_timeout() argument
3944 mutex_lock(&ep->com.mutex); process_timeout()
3945 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, process_timeout()
3946 ep->com.state); process_timeout()
3947 set_bit(TIMEDOUT, &ep->com.history); process_timeout()
3948 switch (ep->com.state) { process_timeout()
3950 __state_set(&ep->com, ABORTING); process_timeout()
3951 connect_reply_upcall(ep, -ETIMEDOUT); process_timeout()
3954 __state_set(&ep->com, ABORTING); process_timeout()
3958 if (ep->com.cm_id && ep->com.qp) { process_timeout()
3960 c4iw_modify_qp(ep->com.qp->rhp, process_timeout()
3961 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, process_timeout()
3964 __state_set(&ep->com, ABORTING); process_timeout()
3965 close_complete_upcall(ep, -ETIMEDOUT); process_timeout()
3971 * These states are expected if the ep timed out at the same process_timeout()
3978 WARN(1, "%s unexpected state ep %p tid %u state %u\n", process_timeout()
3979 __func__, ep, ep->hwtid, ep->com.state); process_timeout()
3983 abort_connection(ep, NULL, GFP_KERNEL); process_timeout()
3984 mutex_unlock(&ep->com.mutex); process_timeout()
3985 c4iw_put_ep(&ep->com); process_timeout()
3990 struct c4iw_ep *ep; process_timedout_eps() local
4001 ep = list_entry(tmp, struct c4iw_ep, entry); process_timedout_eps()
4002 process_timeout(ep); process_timedout_eps()
4034 struct c4iw_ep *ep = (struct c4iw_ep *)arg; ep_timeout() local
4038 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { ep_timeout()
4042 if (!ep->entry.next) { ep_timeout()
4043 list_add_tail(&ep->entry, &timeout_list); ep_timeout()
4116 struct c4iw_ep *ep; peer_abort_intr() local
4120 ep = lookup_tid(t, tid); peer_abort_intr()
4121 if (!ep) { peer_abort_intr()
4129 __func__, ep->hwtid, req->status, peer_abort_intr()
4131 ep->stats.abort_neg_adv++; peer_abort_intr()
4136 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, peer_abort_intr()
4137 ep->com.state); peer_abort_intr()
4144 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) { peer_abort_intr()
4145 if (ep->com.state != MPA_REQ_SENT) peer_abort_intr()
4146 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); peer_abort_intr()
4148 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); peer_abort_intr()
H A Ddevice.c242 if (qp->ep) { dump_qp()
243 if (qp->ep->com.local_addr.ss_family == AF_INET) { dump_qp()
245 &qp->ep->com.local_addr; dump_qp()
247 &qp->ep->com.remote_addr; dump_qp()
249 &qp->ep->com.mapped_local_addr; dump_qp()
251 &qp->ep->com.mapped_remote_addr; dump_qp()
255 "onchip %u ep tid %u state %u " dump_qp()
260 qp->ep->hwtid, (int)qp->ep->com.state, dump_qp()
267 &qp->ep->com.local_addr; dump_qp()
269 &qp->ep->com.remote_addr; dump_qp()
272 &qp->ep->com.mapped_local_addr; dump_qp()
275 &qp->ep->com.mapped_remote_addr; dump_qp()
279 "onchip %u ep tid %u state %u " dump_qp()
284 qp->ep->hwtid, (int)qp->ep->com.state, dump_qp()
543 struct c4iw_ep *ep = p; dump_ep() local
552 if (ep->com.local_addr.ss_family == AF_INET) { dump_ep()
554 &ep->com.local_addr; dump_ep()
556 &ep->com.remote_addr; dump_ep()
558 &ep->com.mapped_local_addr; dump_ep()
560 &ep->com.mapped_remote_addr; dump_ep()
563 "ep %p cm_id %p qp %p state %d flags 0x%lx " dump_ep()
567 ep, ep->com.cm_id, ep->com.qp, dump_ep()
568 (int)ep->com.state, ep->com.flags, dump_ep()
569 ep->com.history, ep->hwtid, ep->atid, dump_ep()
570 ep->stats.connect_neg_adv, dump_ep()
571 ep->stats.abort_neg_adv, dump_ep()
578 &ep->com.local_addr; dump_ep()
580 &ep->com.remote_addr; dump_ep()
582 &ep->com.mapped_local_addr; dump_ep()
584 &ep->com.mapped_remote_addr; dump_ep()
587 "ep %p cm_id %p qp %p state %d flags 0x%lx " dump_ep()
591 ep, ep->com.cm_id, ep->com.qp, dump_ep()
592 (int)ep->com.state, ep->com.flags, dump_ep()
593 ep->com.history, ep->hwtid, ep->atid, dump_ep()
594 ep->stats.connect_neg_adv, dump_ep()
595 ep->stats.abort_neg_adv, dump_ep()
608 struct c4iw_listen_ep *ep = p; dump_listen_ep() local
617 if (ep->com.local_addr.ss_family == AF_INET) { dump_listen_ep()
619 &ep->com.local_addr; dump_listen_ep()
621 &ep->com.mapped_local_addr; dump_listen_ep()
624 "ep %p cm_id %p state %d flags 0x%lx stid %d " dump_listen_ep()
626 ep, ep->com.cm_id, (int)ep->com.state, dump_listen_ep()
627 ep->com.flags, ep->stid, ep->backlog, dump_listen_ep()
632 &ep->com.local_addr; dump_listen_ep()
634 &ep->com.mapped_local_addr; dump_listen_ep()
637 "ep %p cm_id %p state %d flags 0x%lx stid %d " dump_listen_ep()
639 ep, ep->com.cm_id, (int)ep->com.state, dump_listen_ep()
640 ep->com.flags, ep->stid, ep->backlog, dump_listen_ep()
H A Dqp.c1089 qhp->ep->hwtid); post_terminate()
1094 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); post_terminate()
1100 FW_WR_FLOWID_V(qhp->ep->hwtid) | post_terminate()
1203 struct c4iw_ep *ep) rdma_fini()
1210 ep->hwtid); rdma_fini()
1215 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); rdma_fini()
1223 FW_WR_FLOWID_V(ep->hwtid) | rdma_fini()
1225 wqe->cookie = (uintptr_t)&ep->com.wr_wait; rdma_fini()
1232 ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid, rdma_fini()
1271 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); rdma_init()
1284 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); rdma_init()
1292 FW_WR_FLOWID_V(qhp->ep->hwtid) | rdma_init()
1295 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; rdma_init()
1324 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); rdma_init()
1325 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); rdma_init()
1336 ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, rdma_init()
1337 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); rdma_init()
1358 struct c4iw_ep *ep = NULL; c4iw_modify_qp() local
1360 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__, c4iw_modify_qp()
1361 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, c4iw_modify_qp()
1423 qhp->ep = qhp->attr.llp_stream_handle; c4iw_modify_qp()
1432 c4iw_get_ep(&qhp->ep->com); c4iw_modify_qp()
1449 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); c4iw_modify_qp()
1452 ep = qhp->ep; c4iw_modify_qp()
1456 c4iw_get_ep(&qhp->ep->com); c4iw_modify_qp()
1458 ret = rdma_fini(rhp, qhp, ep); c4iw_modify_qp()
1467 ep = qhp->ep; c4iw_modify_qp()
1469 c4iw_get_ep(&qhp->ep->com); c4iw_modify_qp()
1474 ret = rdma_fini(rhp, qhp, ep); c4iw_modify_qp()
1485 ep = qhp->ep; c4iw_modify_qp()
1486 c4iw_get_ep(&qhp->ep->com); c4iw_modify_qp()
1505 c4iw_put_ep(&qhp->ep->com); c4iw_modify_qp()
1506 qhp->ep = NULL; c4iw_modify_qp()
1543 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, c4iw_modify_qp()
1548 if (!ep) c4iw_modify_qp()
1549 ep = qhp->ep; c4iw_modify_qp()
1550 qhp->ep = NULL; c4iw_modify_qp()
1554 BUG_ON(!ep); c4iw_modify_qp()
1569 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : c4iw_modify_qp()
1571 c4iw_put_ep(&ep->com); c4iw_modify_qp()
1579 c4iw_put_ep(&ep->com); c4iw_modify_qp()
1599 wait_event(qhp->wait, !qhp->ep); c4iw_destroy_qp()
1202 rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep) rdma_fini() argument
/linux-4.4.14/drivers/net/ethernet/cirrus/
H A Dep93xx_eth.c182 #define rdb(ep, off) __raw_readb((ep)->base_addr + (off))
183 #define rdw(ep, off) __raw_readw((ep)->base_addr + (off))
184 #define rdl(ep, off) __raw_readl((ep)->base_addr + (off))
185 #define wrb(ep, off, val) __raw_writeb((val), (ep)->base_addr + (off))
186 #define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
187 #define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
191 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_mdio_read() local
195 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); ep93xx_mdio_read()
198 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) ep93xx_mdio_read()
207 data = rdl(ep, REG_MIIDATA); ep93xx_mdio_read()
215 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_mdio_write() local
218 wrl(ep, REG_MIIDATA, data); ep93xx_mdio_write()
219 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); ep93xx_mdio_write()
222 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) ep93xx_mdio_write()
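The rdb/rdw/rdl and wrb/wrw/wrl macros above funnel every register access through one base pointer plus an offset, making the I/O width explicit at each call site; the MDIO helpers then poll the BUSY bit a bounded number of times instead of spinning forever. A userspace analogue of both pieces; the register offset and the usleep-based delay are illustrative:

        #include <stdint.h>
        #include <unistd.h>

        #define REG_MIISTS      0x78    /* illustrative offset */
        #define REG_MIISTS_BUSY 0x01

        struct nic { volatile uint8_t *base; };

        #define rdl(ep, off)      (*(volatile uint32_t *)((ep)->base + (off)))
        #define wrl(ep, off, val) (*(volatile uint32_t *)((ep)->base + (off)) = (val))

        /* Poll with a retry budget so dead hardware cannot hang the caller. */
        static int wait_mii_idle(struct nic *ep, int tries)
        {
                while (tries--) {
                        if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
                                return 0;
                        usleep(1);
                }
                return -1;      /* timed out */
        }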
233 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_rx() local
243 entry = ep->rx_pointer; ep93xx_rx()
244 rstat = ep->descs->rstat + entry; ep93xx_rx()
286 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry]; ep93xx_rx()
290 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); ep93xx_rx()
306 ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); ep93xx_rx()
313 static int ep93xx_have_more_rx(struct ep93xx_priv *ep) ep93xx_have_more_rx() argument
315 struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; ep93xx_have_more_rx()
321 struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); ep93xx_poll() local
322 struct net_device *dev = ep->dev; ep93xx_poll()
330 spin_lock_irq(&ep->rx_lock); ep93xx_poll()
332 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); ep93xx_poll()
333 if (ep93xx_have_more_rx(ep)) { ep93xx_poll()
334 wrl(ep, REG_INTEN, REG_INTEN_TX); ep93xx_poll()
335 wrl(ep, REG_INTSTSP, REG_INTSTS_RX); ep93xx_poll()
338 spin_unlock_irq(&ep->rx_lock); ep93xx_poll()
345 wrw(ep, REG_RXDENQ, rx); ep93xx_poll()
346 wrw(ep, REG_RXSTSENQ, rx); ep93xx_poll()
354 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_xmit() local
364 entry = ep->tx_pointer; ep93xx_xmit()
365 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); ep93xx_xmit()
367 txd = &ep->descs->tdesc[entry]; ep93xx_xmit()
372 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); ep93xx_xmit()
377 spin_lock_irq(&ep->tx_pending_lock); ep93xx_xmit()
378 ep->tx_pending++; ep93xx_xmit()
379 if (ep->tx_pending == TX_QUEUE_ENTRIES) ep93xx_xmit()
381 spin_unlock_irq(&ep->tx_pending_lock); ep93xx_xmit()
383 wrl(ep, REG_TXDENQ, 1); ep93xx_xmit()
390 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_tx_complete() local
395 spin_lock(&ep->tx_pending_lock); ep93xx_tx_complete()
401 entry = ep->tx_clean_pointer; ep93xx_tx_complete()
402 tstat = ep->descs->tstat + entry; ep93xx_tx_complete()
416 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; ep93xx_tx_complete()
430 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1); ep93xx_tx_complete()
431 if (ep->tx_pending == TX_QUEUE_ENTRIES) ep93xx_tx_complete()
433 ep->tx_pending--; ep93xx_tx_complete()
435 spin_unlock(&ep->tx_pending_lock); ep93xx_tx_complete()
444 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_irq() local
447 status = rdl(ep, REG_INTSTSC); ep93xx_irq()
452 spin_lock(&ep->rx_lock); ep93xx_irq()
453 if (likely(napi_schedule_prep(&ep->napi))) { ep93xx_irq()
454 wrl(ep, REG_INTEN, REG_INTEN_TX); ep93xx_irq()
455 __napi_schedule(&ep->napi); ep93xx_irq()
457 spin_unlock(&ep->rx_lock); ep93xx_irq()
466 static void ep93xx_free_buffers(struct ep93xx_priv *ep) ep93xx_free_buffers() argument
468 struct device *dev = ep->dev->dev.parent; ep93xx_free_buffers()
474 d = ep->descs->rdesc[i].buf_addr; ep93xx_free_buffers()
478 kfree(ep->rx_buf[i]); ep93xx_free_buffers()
484 d = ep->descs->tdesc[i].buf_addr; ep93xx_free_buffers()
488 kfree(ep->tx_buf[i]); ep93xx_free_buffers()
491 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, ep93xx_free_buffers()
492 ep->descs_dma_addr); ep93xx_free_buffers()
495 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) ep93xx_alloc_buffers() argument
497 struct device *dev = ep->dev->dev.parent; ep93xx_alloc_buffers()
500 ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs), ep93xx_alloc_buffers()
501 &ep->descs_dma_addr, GFP_KERNEL); ep93xx_alloc_buffers()
502 if (ep->descs == NULL) ep93xx_alloc_buffers()
519 ep->rx_buf[i] = buf; ep93xx_alloc_buffers()
520 ep->descs->rdesc[i].buf_addr = d; ep93xx_alloc_buffers()
521 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; ep93xx_alloc_buffers()
538 ep->tx_buf[i] = buf; ep93xx_alloc_buffers()
539 ep->descs->tdesc[i].buf_addr = d; ep93xx_alloc_buffers()
545 ep93xx_free_buffers(ep); ep93xx_alloc_buffers()
551 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_start_hw() local
555 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); ep93xx_start_hw()
557 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) ep93xx_start_hw()
567 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9)); ep93xx_start_hw()
570 if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0) ep93xx_start_hw()
571 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8)); ep93xx_start_hw()
574 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc); ep93xx_start_hw()
575 wrl(ep, REG_RXDQBADD, addr); ep93xx_start_hw()
576 wrl(ep, REG_RXDCURADD, addr); ep93xx_start_hw()
577 wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc)); ep93xx_start_hw()
580 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat); ep93xx_start_hw()
581 wrl(ep, REG_RXSTSQBADD, addr); ep93xx_start_hw()
582 wrl(ep, REG_RXSTSQCURADD, addr); ep93xx_start_hw()
583 wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat)); ep93xx_start_hw()
586 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc); ep93xx_start_hw()
587 wrl(ep, REG_TXDQBADD, addr); ep93xx_start_hw()
588 wrl(ep, REG_TXDQCURADD, addr); ep93xx_start_hw()
589 wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc)); ep93xx_start_hw()
592 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat); ep93xx_start_hw()
593 wrl(ep, REG_TXSTSQBADD, addr); ep93xx_start_hw()
594 wrl(ep, REG_TXSTSQCURADD, addr); ep93xx_start_hw()
595 wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat)); ep93xx_start_hw()
597 wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX); ep93xx_start_hw()
598 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); ep93xx_start_hw()
599 wrl(ep, REG_GIINTMSK, 0); ep93xx_start_hw()
602 if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0) ep93xx_start_hw()
612 wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES); ep93xx_start_hw()
613 wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES); ep93xx_start_hw()
615 wrb(ep, REG_INDAD0, dev->dev_addr[0]); ep93xx_start_hw()
616 wrb(ep, REG_INDAD1, dev->dev_addr[1]); ep93xx_start_hw()
617 wrb(ep, REG_INDAD2, dev->dev_addr[2]); ep93xx_start_hw()
618 wrb(ep, REG_INDAD3, dev->dev_addr[3]); ep93xx_start_hw()
619 wrb(ep, REG_INDAD4, dev->dev_addr[4]); ep93xx_start_hw()
620 wrb(ep, REG_INDAD5, dev->dev_addr[5]); ep93xx_start_hw()
621 wrl(ep, REG_AFP, 0); ep93xx_start_hw()
623 wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE); ep93xx_start_hw()
625 wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT); ep93xx_start_hw()
626 wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE); ep93xx_start_hw()
633 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_stop_hw() local
636 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); ep93xx_stop_hw()
638 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) ep93xx_stop_hw()
649 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_open() local
652 if (ep93xx_alloc_buffers(ep)) ep93xx_open()
655 napi_enable(&ep->napi); ep93xx_open()
658 napi_disable(&ep->napi); ep93xx_open()
659 ep93xx_free_buffers(ep); ep93xx_open()
663 spin_lock_init(&ep->rx_lock); ep93xx_open()
664 ep->rx_pointer = 0; ep93xx_open()
665 ep->tx_clean_pointer = 0; ep93xx_open()
666 ep->tx_pointer = 0; ep93xx_open()
667 spin_lock_init(&ep->tx_pending_lock); ep93xx_open()
668 ep->tx_pending = 0; ep93xx_open()
670 err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); ep93xx_open()
672 napi_disable(&ep->napi); ep93xx_open()
674 ep93xx_free_buffers(ep); ep93xx_open()
678 wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE); ep93xx_open()
687 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_close() local
689 napi_disable(&ep->napi); ep93xx_close()
692 wrl(ep, REG_GIINTMSK, 0); ep93xx_close()
693 free_irq(ep->irq, dev); ep93xx_close()
695 ep93xx_free_buffers(ep); ep93xx_close()
702 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_ioctl() local
705 return generic_mii_ioctl(&ep->mii, data, cmd, NULL); ep93xx_ioctl()
716 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_get_settings() local
717 return mii_ethtool_gset(&ep->mii, cmd); ep93xx_get_settings()
722 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_set_settings() local
723 return mii_ethtool_sset(&ep->mii, cmd); ep93xx_set_settings()
728 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_nway_reset() local
729 return mii_nway_restart(&ep->mii); ep93xx_nway_reset()
734 struct ep93xx_priv *ep = netdev_priv(dev); ep93xx_get_link() local
735 return mii_link_ok(&ep->mii); ep93xx_get_link()
778 struct ep93xx_priv *ep; ep93xx_eth_remove() local
784 ep = netdev_priv(dev); ep93xx_eth_remove()
788 ep93xx_free_buffers(ep); ep93xx_eth_remove()
790 if (ep->base_addr != NULL) ep93xx_eth_remove()
791 iounmap(ep->base_addr); ep93xx_eth_remove()
793 if (ep->res != NULL) { ep93xx_eth_remove()
794 release_resource(ep->res); ep93xx_eth_remove()
795 kfree(ep->res); ep93xx_eth_remove()
807 struct ep93xx_priv *ep; ep93xx_eth_probe() local
826 ep = netdev_priv(dev); ep93xx_eth_probe()
827 ep->dev = dev; ep93xx_eth_probe()
829 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); ep93xx_eth_probe()
833 ep->res = request_mem_region(mem->start, resource_size(mem), ep93xx_eth_probe()
835 if (ep->res == NULL) { ep93xx_eth_probe()
841 ep->base_addr = ioremap(mem->start, resource_size(mem)); ep93xx_eth_probe()
842 if (ep->base_addr == NULL) { ep93xx_eth_probe()
847 ep->irq = irq; ep93xx_eth_probe()
849 ep->mii.phy_id = data->phy_id; ep93xx_eth_probe()
850 ep->mii.phy_id_mask = 0x1f; ep93xx_eth_probe()
851 ep->mii.reg_num_mask = 0x1f; ep93xx_eth_probe()
852 ep->mii.dev = dev; ep93xx_eth_probe()
853 ep->mii.mdio_read = ep93xx_mdio_read; ep93xx_eth_probe()
854 ep->mii.mdio_write = ep93xx_mdio_write; ep93xx_eth_probe()
855 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */ ep93xx_eth_probe()
867 dev->name, ep->irq, dev->dev_addr); ep93xx_eth_probe()
/linux-4.4.14/drivers/misc/mic/scif/
H A Dscif_epd.c21 void scif_cleanup_ep_qp(struct scif_endpt *ep) scif_cleanup_ep_qp() argument
23 struct scif_qp *qp = ep->qp_info.qp; scif_cleanup_ep_qp()
27 qp->outbound_q.size, ep->remote_dev); scif_cleanup_ep_qp()
32 sizeof(struct scif_qp), ep->remote_dev); scif_cleanup_ep_qp()
36 scif_unmap_single(qp->local_qp, ep->remote_dev, scif_cleanup_ep_qp()
41 scif_unmap_single(qp->local_buf, ep->remote_dev, scif_cleanup_ep_qp()
49 struct scif_endpt *ep = endpt; scif_teardown_ep() local
50 struct scif_qp *qp = ep->qp_info.qp; scif_teardown_ep()
53 spin_lock(&ep->lock); scif_teardown_ep()
54 scif_cleanup_ep_qp(ep); scif_teardown_ep()
55 spin_unlock(&ep->lock); scif_teardown_ep()
65 void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held) scif_add_epd_to_zombie_list() argument
69 spin_lock(&ep->lock); scif_add_epd_to_zombie_list()
70 ep->state = SCIFEP_ZOMBIE; scif_add_epd_to_zombie_list()
71 spin_unlock(&ep->lock); scif_add_epd_to_zombie_list()
72 list_add_tail(&ep->list, &scif_info.zombie); scif_add_epd_to_zombie_list()
81 struct scif_endpt *ep = NULL; scif_find_listen_ep() local
86 ep = list_entry(pos, struct scif_endpt, list); scif_find_listen_ep()
87 if (ep->port.port == port) { scif_find_listen_ep()
89 return ep; scif_find_listen_ep()
99 struct scif_endpt *ep; scif_cleanup_zombie_epd() local
103 ep = list_entry(pos, struct scif_endpt, list); scif_cleanup_zombie_epd()
104 if (scif_rma_ep_can_uninit(ep)) { scif_cleanup_zombie_epd()
107 put_iova_domain(&ep->rma_info.iovad); scif_cleanup_zombie_epd()
108 kfree(ep); scif_cleanup_zombie_epd()
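scif_add_epd_to_zombie_list() and scif_cleanup_zombie_epd() implement deferred teardown: an endpoint whose RMA resources are still referenced is parked on a zombie list and freed on a later sweep, once it can be uninitialized. A sketch of that reaping idea with a plain singly linked list:

        #include <stdlib.h>

        struct zep {
                struct zep *next;
                int refs;       /* outstanding references */
        };

        static struct zep *zombies;

        static void park(struct zep *e)
        {
                e->next = zombies;
                zombies = e;
        }

        static void reap(void)
        {
                struct zep **pp = &zombies;

                while (*pp) {
                        struct zep *e = *pp;

                        if (e->refs == 0) {     /* scif_rma_ep_can_uninit() analogue */
                                *pp = e->next;
                                free(e);
                        } else {
                                pp = &e->next;
                        }
                }
        }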
130 struct scif_endpt *ep = NULL; scif_cnctreq() local
138 ep = scif_find_listen_ep(msg->dst.port); scif_cnctreq()
139 if (!ep) scif_cnctreq()
143 spin_lock(&ep->lock); scif_cnctreq()
145 if (ep->backlog <= ep->conreqcnt) { scif_cnctreq()
147 spin_unlock(&ep->lock); scif_cnctreq()
152 list_add_tail(&conreq->list, &ep->conlist); scif_cnctreq()
153 ep->conreqcnt++; scif_cnctreq()
154 wake_up_interruptible(&ep->conwq); scif_cnctreq()
155 spin_unlock(&ep->lock); scif_cnctreq()
176 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_cnctgnt() local
178 spin_lock(&ep->lock); scif_cnctgnt()
179 if (SCIFEP_CONNECTING == ep->state) { scif_cnctgnt()
180 ep->peer.node = msg->src.node; scif_cnctgnt()
181 ep->peer.port = msg->src.port; scif_cnctgnt()
182 ep->qp_info.gnt_pld = msg->payload[1]; scif_cnctgnt()
183 ep->remote_ep = msg->payload[2]; scif_cnctgnt()
184 ep->state = SCIFEP_MAPPING; scif_cnctgnt()
186 wake_up(&ep->conwq); scif_cnctgnt()
188 spin_unlock(&ep->lock); scif_cnctgnt()
201 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_cnctgnt_ack() local
204 spin_lock(&ep->lock); scif_cnctgnt_ack()
205 /* New ep is now connected with all resources set. */ scif_cnctgnt_ack()
206 ep->state = SCIFEP_CONNECTED; scif_cnctgnt_ack()
207 list_add_tail(&ep->list, &scif_info.connected); scif_cnctgnt_ack()
208 wake_up(&ep->conwq); scif_cnctgnt_ack()
209 spin_unlock(&ep->lock); scif_cnctgnt_ack()
223 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_cnctgnt_nack() local
225 spin_lock(&ep->lock); scif_cnctgnt_nack()
226 ep->state = SCIFEP_CLOSING; scif_cnctgnt_nack()
227 wake_up(&ep->conwq); scif_cnctgnt_nack()
228 spin_unlock(&ep->lock); scif_cnctgnt_nack()
240 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_cnctrej() local
242 spin_lock(&ep->lock); scif_cnctrej()
243 if (SCIFEP_CONNECTING == ep->state) { scif_cnctrej()
244 ep->state = SCIFEP_BOUND; scif_cnctrej()
245 wake_up(&ep->conwq); scif_cnctrej()
247 spin_unlock(&ep->lock); scif_cnctrej()
265 struct scif_endpt *ep = NULL; scif_discnct() local
273 * The local ep may have sent a disconnect and been closed scif_discnct()

276 * check if the remote ep matches scif_discnct()
281 ep = tmpep; scif_discnct()
282 spin_lock(&ep->lock); scif_discnct()
289 * before the other side sent the disconnect. If so the ep will no scif_discnct()
293 if (!ep) { scif_discnct()
298 ep->state = SCIFEP_DISCONNECTED; scif_discnct()
299 list_add_tail(&ep->list, &scif_info.disconnected); scif_discnct()
301 wake_up_interruptible(&ep->sendwq); scif_discnct()
302 wake_up_interruptible(&ep->recvwq); scif_discnct()
303 spin_unlock(&ep->lock); scif_discnct()
319 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_discnt_ack() local
321 spin_lock(&ep->lock); scif_discnt_ack()
322 ep->state = SCIFEP_DISCONNECTED; scif_discnt_ack()
323 spin_unlock(&ep->lock); scif_discnt_ack()
324 complete(&ep->discon); scif_discnt_ack()
335 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_clientsend() local
337 spin_lock(&ep->lock); scif_clientsend()
338 if (SCIFEP_CONNECTED == ep->state) scif_clientsend()
339 wake_up_interruptible(&ep->recvwq); scif_clientsend()
340 spin_unlock(&ep->lock); scif_clientsend()
351 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_clientrcvd() local
353 spin_lock(&ep->lock); scif_clientrcvd()
354 if (SCIFEP_CONNECTED == ep->state) scif_clientrcvd()
355 wake_up_interruptible(&ep->sendwq); scif_clientrcvd()
356 spin_unlock(&ep->lock); scif_clientrcvd()
H A Dscif_api.c35 ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
53 struct scif_endpt *ep; scif_open() local
57 ep = kzalloc(sizeof(*ep), GFP_KERNEL); scif_open()
58 if (!ep) scif_open()
61 ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL); scif_open()
62 if (!ep->qp_info.qp) scif_open()
65 err = scif_anon_inode_getfile(ep); scif_open()
69 spin_lock_init(&ep->lock); scif_open()
70 mutex_init(&ep->sendlock); scif_open()
71 mutex_init(&ep->recvlock); scif_open()
73 scif_rma_ep_init(ep); scif_open()
74 ep->state = SCIFEP_UNBOUND; scif_open()
76 "SCIFAPI open: ep %p success\n", ep); scif_open()
77 return ep; scif_open()
80 kfree(ep->qp_info.qp); scif_open()
82 kfree(ep); scif_open()
92 static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep) scif_disconnect_ep() argument
108 wake_up_interruptible(&ep->sendwq); scif_disconnect_ep()
109 wake_up_interruptible(&ep->recvwq); scif_disconnect_ep()
110 mutex_lock(&ep->sendlock); scif_disconnect_ep()
111 mutex_unlock(&ep->sendlock); scif_disconnect_ep()
112 mutex_lock(&ep->recvlock); scif_disconnect_ep()
113 mutex_unlock(&ep->recvlock); scif_disconnect_ep()
119 if (tmpep == ep) { scif_disconnect_ep()
122 spin_lock(&ep->lock); scif_disconnect_ep()
131 * the ep lock is not locked, traverse the disconnected scif_disconnect_ep()
136 if (tmpep == ep) { scif_disconnect_ep()
145 init_completion(&ep->discon); scif_disconnect_ep()
147 msg.src = ep->port; scif_disconnect_ep()
148 msg.dst = ep->peer; scif_disconnect_ep()
149 msg.payload[0] = (u64)ep; scif_disconnect_ep()
150 msg.payload[1] = ep->remote_ep; scif_disconnect_ep()
152 err = scif_nodeqp_send(ep->remote_dev, &msg); scif_disconnect_ep()
153 spin_unlock(&ep->lock); scif_disconnect_ep()
158 wait_for_completion_timeout(&ep->discon, scif_disconnect_ep()
160 return ep; scif_disconnect_ep()
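scif_disconnect_ep() initializes a completion, sends the disconnect message to the peer, and then blocks with a timeout until the peer's SCIF_DISCNT_ACK handler completes it. A userspace model of that handshake, using a POSIX semaphore in place of the kernel completion:

        #include <semaphore.h>
        #include <time.h>

        struct endpt { sem_t discon; };

        /* Closer side: returns 0 when the ack arrived, -1 on timeout. */
        static int wait_discon_ack(struct endpt *ep, int secs)
        {
                struct timespec ts;

                clock_gettime(CLOCK_REALTIME, &ts);
                ts.tv_sec += secs;
                return sem_timedwait(&ep->discon, &ts);
        }

        /* Message-handler side, the scif_discnt_ack() analogue. */
        static void post_discon_ack(struct endpt *ep)
        {
                sem_post(&ep->discon);
        }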
165 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_close() local
171 dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n", scif_close()
172 ep, scif_ep_states[ep->state]); scif_close()
174 spin_lock(&ep->lock); scif_close()
175 flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS); scif_close()
176 spin_unlock(&ep->lock); scif_close()
181 spin_lock(&ep->lock); scif_close()
182 oldstate = ep->state; scif_close()
184 ep->state = SCIFEP_CLOSING; scif_close()
191 spin_unlock(&ep->lock); scif_close()
197 if (tmpep == ep) { scif_close()
207 spin_unlock(&ep->lock); scif_close()
213 spin_unlock(&ep->lock); scif_close()
215 scif_disconnect_ep(ep); scif_close()
225 spin_unlock(&ep->lock); scif_close()
231 if (tmpep == ep) scif_close()
235 while (ep->acceptcnt) { scif_close()
236 aep = list_first_entry(&ep->li_accept, scif_close()
270 ep->acceptcnt--; scif_close()
273 spin_lock(&ep->lock); scif_close()
277 while (ep->conreqcnt) { scif_close()
278 conreq = list_first_entry(&ep->conlist, scif_close()
294 ep->conreqcnt--; scif_close()
298 spin_unlock(&ep->lock); scif_close()
300 wake_up_interruptible(&ep->conwq); scif_close()
304 scif_put_port(ep->port.port); scif_close()
305 scif_anon_inode_fput(ep); scif_close()
306 scif_teardown_ep(ep); scif_close()
307 scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD); scif_close()
319 struct scif_endpt *ep = (struct scif_endpt *)epd; __scif_flush() local
321 switch (ep->state) { __scif_flush()
324 ep->state = SCIFEP_CLLISTEN; __scif_flush()
327 wake_up_interruptible(&ep->conwq); __scif_flush()
338 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_bind() local
343 "SCIFAPI bind: ep %p %s requested port number %d\n", scif_bind()
344 ep, scif_ep_states[ep->state], pn); scif_bind()
357 spin_lock(&ep->lock); scif_bind()
358 if (ep->state == SCIFEP_BOUND) { scif_bind()
361 } else if (ep->state != SCIFEP_UNBOUND) { scif_bind()
380 ep->state = SCIFEP_BOUND; scif_bind()
381 ep->port.node = scif_info.nodeid; scif_bind()
382 ep->port.port = pn; scif_bind()
383 ep->conn_async_state = ASYNC_CONN_IDLE; scif_bind()
388 spin_unlock(&ep->lock); scif_bind()
396 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_listen() local
399 "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]); scif_listen()
400 spin_lock(&ep->lock); scif_listen()
401 switch (ep->state) { scif_listen()
407 spin_unlock(&ep->lock); scif_listen()
413 spin_unlock(&ep->lock); scif_listen()
419 ep->state = SCIFEP_LISTENING; scif_listen()
420 ep->backlog = backlog; scif_listen()
422 ep->conreqcnt = 0; scif_listen()
423 ep->acceptcnt = 0; scif_listen()
424 INIT_LIST_HEAD(&ep->conlist); scif_listen()
425 init_waitqueue_head(&ep->conwq); scif_listen()
426 INIT_LIST_HEAD(&ep->li_accept); scif_listen()
427 spin_unlock(&ep->lock); scif_listen()
431 * on a listen before placing on the list of listening ep's scif_listen()
433 scif_teardown_ep(ep); scif_listen()
434 ep->qp_info.qp = NULL; scif_listen()
437 list_add_tail(&ep->list, &scif_info.listen); scif_listen()
471 static int scif_conn_func(struct scif_endpt *ep) scif_conn_func() argument
477 err = scif_reserve_dma_chan(ep); scif_conn_func()
479 dev_err(&ep->remote_dev->sdev->dev, scif_conn_func()
481 ep->state = SCIFEP_BOUND; scif_conn_func()
485 err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset, scif_conn_func()
486 SCIF_ENDPT_QP_SIZE, ep->remote_dev); scif_conn_func()
488 dev_err(&ep->remote_dev->sdev->dev, scif_conn_func()
490 __func__, err, ep->qp_info.qp_offset); scif_conn_func()
491 ep->state = SCIFEP_BOUND; scif_conn_func()
495 spdev = scif_get_peer_dev(ep->remote_dev); scif_conn_func()
501 msg.src = ep->port; scif_conn_func()
502 msg.dst = ep->conn_port; scif_conn_func()
504 msg.payload[0] = (u64)ep; scif_conn_func()
505 msg.payload[1] = ep->qp_info.qp_offset; scif_conn_func()
506 err = _scif_nodeqp_send(ep->remote_dev, &msg); scif_conn_func()
514 err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING, scif_conn_func()
517 dev_err(&ep->remote_dev->sdev->dev, scif_conn_func()
519 ep->state = SCIFEP_BOUND; scif_conn_func()
521 spdev = scif_get_peer_dev(ep->remote_dev); scif_conn_func()
526 if (ep->state == SCIFEP_MAPPING) { scif_conn_func()
527 err = scif_setup_qp_connect_response(ep->remote_dev, scif_conn_func()
528 ep->qp_info.qp, scif_conn_func()
529 ep->qp_info.gnt_pld); scif_conn_func()
535 dev_err(&ep->remote_dev->sdev->dev, scif_conn_func()
538 msg.payload[0] = ep->remote_ep; scif_conn_func()
539 _scif_nodeqp_send(ep->remote_dev, &msg); scif_conn_func()
540 ep->state = SCIFEP_BOUND; scif_conn_func()
545 msg.payload[0] = ep->remote_ep; scif_conn_func()
546 err = _scif_nodeqp_send(ep->remote_dev, &msg); scif_conn_func()
548 ep->state = SCIFEP_BOUND; scif_conn_func()
551 ep->state = SCIFEP_CONNECTED; scif_conn_func()
553 list_add_tail(&ep->list, &scif_info.connected); scif_conn_func()
555 dev_dbg(&ep->remote_dev->sdev->dev, scif_conn_func()
556 "SCIFAPI connect: ep %p connected\n", ep); scif_conn_func()
557 } else if (ep->state == SCIFEP_BOUND) { scif_conn_func()
558 dev_dbg(&ep->remote_dev->sdev->dev, scif_conn_func()
559 "SCIFAPI connect: ep %p connection refused\n", ep); scif_conn_func()
568 scif_cleanup_ep_qp(ep); scif_conn_func()
581 struct scif_endpt *ep; scif_conn_handler() local
584 ep = NULL; scif_conn_handler()
587 ep = list_first_entry(&scif_info.nb_connect_list, scif_conn_handler()
589 list_del(&ep->conn_list); scif_conn_handler()
592 if (ep) { scif_conn_handler()
593 ep->conn_err = scif_conn_func(ep); scif_conn_handler()
594 wake_up_interruptible(&ep->conn_pend_wq); scif_conn_handler()
596 } while (ep); scif_conn_handler()
601 struct scif_endpt *ep = (struct scif_endpt *)epd; __scif_connect() local
606 dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep, __scif_connect()
607 scif_ep_states[ep->state]); __scif_connect()
621 spin_lock(&ep->lock); __scif_connect()
622 switch (ep->state) { __scif_connect()
628 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) __scif_connect()
629 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK; __scif_connect()
639 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) __scif_connect()
645 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) __scif_connect()
646 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK; __scif_connect()
651 ep->port.port = scif_get_new_port(); __scif_connect()
652 if (!ep->port.port) { __scif_connect()
655 ep->port.node = scif_info.nodeid; __scif_connect()
656 ep->conn_async_state = ASYNC_CONN_IDLE; __scif_connect()
672 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { __scif_connect()
673 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK; __scif_connect()
674 } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) { __scif_connect()
677 ep->conn_port = *dst; __scif_connect()
678 init_waitqueue_head(&ep->sendwq); __scif_connect()
679 init_waitqueue_head(&ep->recvwq); __scif_connect()
680 init_waitqueue_head(&ep->conwq); __scif_connect()
681 ep->conn_async_state = 0; __scif_connect()
684 ep->conn_async_state = ASYNC_CONN_INPROGRESS; __scif_connect()
689 if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) __scif_connect()
692 ep->state = SCIFEP_CONNECTING; __scif_connect()
693 ep->remote_dev = &scif_dev[dst->node]; __scif_connect()
694 ep->qp_info.qp->magic = SCIFEP_MAGIC; __scif_connect()
695 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { __scif_connect()
696 init_waitqueue_head(&ep->conn_pend_wq); __scif_connect()
698 list_add_tail(&ep->conn_list, &scif_info.nb_connect_list); __scif_connect()
704 spin_unlock(&ep->lock); __scif_connect()
708 } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) { __scif_connect()
710 err = ep->conn_err; __scif_connect()
711 spin_lock(&ep->lock); __scif_connect()
712 ep->conn_async_state = ASYNC_CONN_IDLE; __scif_connect()
713 spin_unlock(&ep->lock); __scif_connect()
715 err = scif_conn_func(ep); __scif_connect()
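
__scif_connect() either calls scif_conn_func() directly or, for non-blocking connects, queues the endpoint on scif_info.nb_connect_list and lets scif_conn_handler() finish the handshake. The active side as a kernel client looks roughly like this; node 1 and port 101 are placeholders matching the listener sketch above:

#include <linux/scif.h>

/* Active side: open and connect. scif_connect() drives the
 * SCIFEP_CONNECTING -> SCIFEP_CONNECTED transition seen above. */
static scif_epd_t scif_client_sketch(void)
{
	struct scif_port_id dst = { .node = 1, .port = 101 };
	scif_epd_t epd;

	epd = scif_open();
	if (!epd)
		return NULL;
	/* an unbound endpoint is bound to a fresh port implicitly */
	if (scif_connect(epd, &dst) < 0) {
		scif_close(epd);
		return NULL;
	}
	return epd;
}
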
758 "SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]); scif_accept()
849 "SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n", scif_accept()
948 struct scif_endpt *ep = (struct scif_endpt *)epd; _scif_send() local
952 struct scif_qp *qp = ep->qp_info.qp; _scif_send()
957 spin_lock(&ep->lock); _scif_send()
958 while (sent_len != len && SCIFEP_CONNECTED == ep->state) { _scif_send()
973 notif_msg.src = ep->port; _scif_send()
975 notif_msg.payload[0] = ep->remote_ep; _scif_send()
976 ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg); _scif_send()
988 spin_unlock(&ep->lock); _scif_send()
991 wait_event_interruptible(ep->sendwq, _scif_send()
992 (SCIFEP_CONNECTED != ep->state) || _scif_send()
995 spin_lock(&ep->lock); _scif_send()
1001 else if (!ret && SCIFEP_CONNECTED != ep->state) _scif_send()
1002 ret = SCIFEP_DISCONNECTED == ep->state ? _scif_send()
1004 spin_unlock(&ep->lock); _scif_send()
1011 struct scif_endpt *ep = (struct scif_endpt *)epd; _scif_recv() local
1015 struct scif_qp *qp = ep->qp_info.qp; _scif_recv()
1019 spin_lock(&ep->lock); _scif_recv()
1020 while (remaining_len && (SCIFEP_CONNECTED == ep->state || _scif_recv()
1021 SCIFEP_DISCONNECTED == ep->state)) { _scif_recv()
1032 if (ep->state == SCIFEP_CONNECTED) { _scif_recv()
1045 notif_msg.src = ep->port; _scif_recv()
1047 notif_msg.payload[0] = ep->remote_ep; _scif_recv()
1048 ret = _scif_nodeqp_send(ep->remote_dev, _scif_recv()
1061 if (ep->state == SCIFEP_DISCONNECTED) _scif_recv()
1070 spin_unlock(&ep->lock); _scif_recv()
1076 wait_event_interruptible(ep->recvwq, _scif_recv()
1077 SCIFEP_CONNECTED != ep->state || _scif_recv()
1081 spin_lock(&ep->lock); _scif_recv()
1087 else if (!ret && ep->state != SCIFEP_CONNECTED) _scif_recv()
1088 ret = ep->state == SCIFEP_DISCONNECTED ? _scif_recv()
1090 spin_unlock(&ep->lock); _scif_recv()
1106 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_user_send() local
1114 "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]); scif_user_send()
1132 mutex_lock(&ep->sendlock); scif_user_send()
1149 mutex_unlock(&ep->sendlock); scif_user_send()
1167 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_user_recv() local
1175 "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]); scif_user_recv()
1193 mutex_lock(&ep->recvlock); scif_user_recv()
1210 mutex_unlock(&ep->recvlock); scif_user_recv()
1228 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_send() local
1232 "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]); scif_send()
1239 if (!ep->remote_dev) scif_send()
1248 mutex_lock(&ep->sendlock); scif_send()
1253 mutex_unlock(&ep->sendlock); scif_send()
1270 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_recv() local
1274 "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]); scif_recv()
1288 mutex_lock(&ep->recvlock); scif_recv()
1293 mutex_unlock(&ep->recvlock); scif_recv()
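
Both the user ("U") and kernel ("K") entry points above funnel into _scif_send()/_scif_recv(), which move bytes through the ring buffer under ep->lock and sleep on ep->sendwq/ep->recvwq when blocking is requested. A hedged caller sketch:

#include <linux/scif.h>

/* Echo one message over a connected endpoint. Without the *_BLOCK
 * flags the calls return after moving whatever currently fits. */
static int scif_echo_once(scif_epd_t epd)
{
	char buf[64];
	int ret;

	ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
	if (ret < 0)
		return ret;
	return scif_send(epd, buf, ret, SCIF_SEND_BLOCK);
}
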
1300 poll_table *p, struct scif_endpt *ep) _scif_poll_wait()
1309 spin_unlock(&ep->lock); _scif_poll_wait()
1311 spin_lock(&ep->lock); _scif_poll_wait()
1315 __scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep) __scif_pollfd() argument
1320 "SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]); __scif_pollfd()
1322 spin_lock(&ep->lock); __scif_pollfd()
1325 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { __scif_pollfd()
1326 _scif_poll_wait(f, &ep->conn_pend_wq, wait, ep); __scif_pollfd()
1327 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { __scif_pollfd()
1328 if (ep->state == SCIFEP_CONNECTED || __scif_pollfd()
1329 ep->state == SCIFEP_DISCONNECTED || __scif_pollfd()
1330 ep->conn_err) __scif_pollfd()
1337 if (ep->state == SCIFEP_LISTENING) { __scif_pollfd()
1338 _scif_poll_wait(f, &ep->conwq, wait, ep); __scif_pollfd()
1339 if (ep->state == SCIFEP_LISTENING) { __scif_pollfd()
1340 if (ep->conreqcnt) __scif_pollfd()
1347 if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) { __scif_pollfd()
1349 _scif_poll_wait(f, &ep->recvwq, wait, ep); __scif_pollfd()
1351 _scif_poll_wait(f, &ep->sendwq, wait, ep); __scif_pollfd()
1352 if (ep->state == SCIFEP_CONNECTED || __scif_pollfd()
1353 ep->state == SCIFEP_DISCONNECTED) { __scif_pollfd()
1355 if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1)) __scif_pollfd()
1358 if (scif_rb_space(&ep->qp_info.qp->outbound_q)) __scif_pollfd()
1361 if (ep->state == SCIFEP_DISCONNECTED) __scif_pollfd()
1370 spin_unlock(&ep->lock); __scif_pollfd()
1299 _scif_poll_wait(struct file *f, wait_queue_head_t *wq, poll_table *p, struct scif_endpt *ep) _scif_poll_wait() argument
H A Dscif_fence.c29 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_mark() local
32 err = _scif_fence_mark(ep, &mark); scif_recv_mark()
37 msg->payload[0] = ep->remote_ep; scif_recv_mark()
39 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_mark()
50 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_mark_resp() local
54 mutex_lock(&ep->rma_info.rma_lock); scif_recv_mark_resp()
61 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_mark_resp()
73 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_wait() local
83 msg->payload[0] = ep->remote_ep; scif_recv_wait()
85 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_wait()
95 atomic_inc(&ep->rma_info.fence_refcount); scif_recv_wait()
110 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_wait_resp() local
114 mutex_lock(&ep->rma_info.rma_lock); scif_recv_wait_resp()
119 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_wait_resp()
131 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_sig_local() local
134 err = scif_prog_signal(ep, msg->payload[1], msg->payload[2], scif_recv_sig_local()
140 msg->payload[0] = ep->remote_ep; scif_recv_sig_local()
141 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_sig_local()
152 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_sig_remote() local
155 err = scif_prog_signal(ep, msg->payload[1], msg->payload[2], scif_recv_sig_remote()
161 msg->payload[0] = ep->remote_ep; scif_recv_sig_remote()
162 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_sig_remote()
173 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_sig_resp() local
177 mutex_lock(&ep->rma_info.rma_lock); scif_recv_sig_resp()
182 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_sig_resp()
199 dma_pool_free(status->ep->remote_dev->signal_pool, status, scif_prog_signal_cb()
205 struct scif_endpt *ep = (struct scif_endpt *)epd; _scif_prog_signal() local
206 struct dma_chan *chan = ep->rma_info.dma_chan; _scif_prog_signal()
218 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_prog_signal()
225 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_prog_signal()
237 status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL, _scif_prog_signal()
241 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_prog_signal()
247 status->ep = ep; _scif_prog_signal()
254 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_prog_signal()
265 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_prog_signal()
273 dma_pool_free(ep->remote_dev->signal_pool, status, _scif_prog_signal()
292 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_prog_signal() local
298 mutex_lock(&ep->rma_info.rma_lock); scif_prog_signal()
305 req.head = &ep->rma_info.reg_list; scif_prog_signal()
307 req.head = &ep->rma_info.remote_reg_list; scif_prog_signal()
316 if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) { scif_prog_signal()
331 mutex_unlock(&ep->rma_info.rma_lock); scif_prog_signal()
337 struct scif_endpt *ep = (struct scif_endpt *)epd; _scif_fence_wait() local
342 err = wait_event_interruptible_timeout(ep->rma_info.markwq, _scif_fence_wait()
344 ep->rma_info.dma_chan, _scif_fence_wait()
364 struct scif_endpt *ep; scif_rma_handle_remote_fences() local
376 ep = (struct scif_endpt *)fence->msg.payload[0]; scif_rma_handle_remote_fences()
378 err = _scif_fence_wait(ep, mark); scif_rma_handle_remote_fences()
383 fence->msg.payload[0] = ep->remote_ep; scif_rma_handle_remote_fences()
384 scif_nodeqp_send(ep->remote_dev, &fence->msg); scif_rma_handle_remote_fences()
386 if (!atomic_sub_return(1, &ep->rma_info.fence_refcount)) scif_rma_handle_remote_fences()
397 struct scif_endpt *ep = (struct scif_endpt *)epd; _scif_send_fence() local
408 msg.src = ep->port; _scif_send_fence()
410 msg.payload[0] = ep->remote_ep; _scif_send_fence()
414 spin_lock(&ep->lock); _scif_send_fence()
415 if (ep->state == SCIFEP_CONNECTED) _scif_send_fence()
416 err = scif_nodeqp_send(ep->remote_dev, &msg); _scif_send_fence()
419 spin_unlock(&ep->lock); _scif_send_fence()
426 if (!err && scifdev_alive(ep)) _scif_send_fence()
432 mutex_lock(&ep->rma_info.rma_lock); _scif_send_fence()
441 mutex_unlock(&ep->rma_info.rma_lock); _scif_send_fence()
472 static int _scif_send_fence_signal_wait(struct scif_endpt *ep, _scif_send_fence_signal_wait() argument
481 if (!err && scifdev_alive(ep)) _scif_send_fence_signal_wait()
488 mutex_lock(&ep->rma_info.rma_lock); _scif_send_fence_signal_wait()
491 mutex_unlock(&ep->rma_info.rma_lock); _scif_send_fence_signal_wait()
515 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_send_fence_signal() local
525 msg.src = ep->port; scif_send_fence_signal()
528 msg.payload[0] = ep->remote_ep; scif_send_fence_signal()
532 spin_lock(&ep->lock); scif_send_fence_signal()
533 if (ep->state == SCIFEP_CONNECTED) scif_send_fence_signal()
534 err = scif_nodeqp_send(ep->remote_dev, &msg); scif_send_fence_signal()
537 spin_unlock(&ep->lock); scif_send_fence_signal()
540 err = _scif_send_fence_signal_wait(ep, fence_req); scif_send_fence_signal()
548 msg.payload[0] = ep->remote_ep; scif_send_fence_signal()
552 spin_lock(&ep->lock); scif_send_fence_signal()
553 if (ep->state == SCIFEP_CONNECTED) scif_send_fence_signal()
554 err = scif_nodeqp_send(ep->remote_dev, &msg); scif_send_fence_signal()
557 spin_unlock(&ep->lock); scif_send_fence_signal()
560 err = _scif_send_fence_signal_wait(ep, fence_req); scif_send_fence_signal()
570 struct scif_endpt *ep = (struct scif_endpt *)arg; scif_fence_mark_cb() local
572 wake_up_interruptible(&ep->rma_info.markwq); scif_fence_mark_cb()
573 atomic_dec(&ep->rma_info.fence_refcount); scif_fence_mark_cb()
584 struct scif_endpt *ep = (struct scif_endpt *)epd; _scif_fence_mark() local
585 struct dma_chan *chan = ep->rma_info.dma_chan; _scif_fence_mark()
594 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_fence_mark()
601 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_fence_mark()
609 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_fence_mark()
614 tx->callback_param = ep; _scif_fence_mark()
618 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n", _scif_fence_mark()
622 atomic_inc(&ep->rma_info.fence_refcount); _scif_fence_mark()
631 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_fence_mark() local
635 "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n", scif_fence_mark()
636 ep, flags, *mark); scif_fence_mark()
637 err = scif_verify_epd(ep); scif_fence_mark()
657 if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) { scif_fence_mark()
665 err = scif_send_fence_mark(ep, mark); scif_fence_mark()
671 "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n", scif_fence_mark()
672 ep, flags, *mark, err); scif_fence_mark()
679 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_fence_wait() local
683 "SCIFAPI fence_wait: ep %p mark 0x%x\n", scif_fence_wait()
684 ep, mark); scif_fence_wait()
685 err = scif_verify_epd(ep); scif_fence_wait()
693 if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) { scif_fence_wait()
713 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_fence_signal() local
717 "SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n", scif_fence_signal()
718 ep, loff, lval, roff, rval, flags); scif_fence_signal()
719 err = scif_verify_epd(ep); scif_fence_signal()
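
The receive handlers above are the remote halves of the three fence primitives: scif_fence_mark(), scif_fence_wait() and scif_fence_signal(). The common local pattern is mark-then-wait, which drains the RMAs this endpoint has issued so far; a sketch, with flags as defined in include/linux/scif.h:

#include <linux/scif.h>

/* Fence this endpoint's own outstanding DMA transfers. Passing
 * SCIF_FENCE_INIT_PEER instead would fence the peer's transfers. */
static int scif_flush_own_rmas(scif_epd_t epd)
{
	int mark, err;

	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
	if (err)
		return err;
	return scif_fence_wait(epd, mark);
}
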
H A Dscif_mmap.c37 struct scif_endpt *ep; scif_recv_munmap() local
39 ep = (struct scif_endpt *)recv_window->ep; scif_recv_munmap()
45 req.head = &ep->rma_info.reg_list; scif_recv_munmap()
46 msg->payload[0] = ep->remote_ep; scif_recv_munmap()
48 mutex_lock(&ep->rma_info.rma_lock); scif_recv_munmap()
60 atomic_inc(&ep->rma_info.tw_refcount); scif_recv_munmap()
61 ep->rma_info.async_list_del = 1; scif_recv_munmap()
63 scif_free_window_offset(ep, window, window->offset); scif_recv_munmap()
66 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_munmap()
75 static void __scif_zap_mmaps(struct scif_endpt *ep) __scif_zap_mmaps() argument
82 spin_lock(&ep->lock); __scif_zap_mmaps()
83 list_for_each(item, &ep->rma_info.vma_list) { __scif_zap_mmaps()
89 "%s ep %p zap vma %p size 0x%lx\n", __scif_zap_mmaps()
90 __func__, ep, info->vma, size); __scif_zap_mmaps()
92 spin_unlock(&ep->lock); __scif_zap_mmaps()
101 struct scif_endpt *ep; _scif_zap_mmaps() local
106 ep = list_entry(item, struct scif_endpt, list); list_for_each()
107 if (ep->remote_dev->node == node) list_for_each()
108 __scif_zap_mmaps(ep); list_for_each()
131 static void __scif_cleanup_rma_for_zombies(struct scif_endpt *ep) __scif_cleanup_rma_for_zombies() argument
136 list_for_each_safe(pos, tmp, &ep->rma_info.remote_reg_list) { __scif_cleanup_rma_for_zombies()
145 atomic_inc(&ep->rma_info.tw_refcount); __scif_cleanup_rma_for_zombies()
155 struct scif_endpt *ep; scif_cleanup_rma_for_zombies() local
160 ep = list_entry(item, struct scif_endpt, list); scif_cleanup_rma_for_zombies()
161 if (ep->remote_dev && ep->remote_dev->node == node) scif_cleanup_rma_for_zombies()
162 __scif_cleanup_rma_for_zombies(ep); scif_cleanup_rma_for_zombies()
169 static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma) scif_insert_vma() argument
180 spin_lock(&ep->lock); scif_insert_vma()
181 list_add_tail(&info->list, &ep->rma_info.vma_list); scif_insert_vma()
182 spin_unlock(&ep->lock); scif_insert_vma()
188 static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma) scif_delete_vma() argument
193 spin_lock(&ep->lock); scif_delete_vma()
194 list_for_each(item, &ep->rma_info.vma_list) { scif_delete_vma()
202 spin_unlock(&ep->lock); scif_delete_vma()
205 static phys_addr_t scif_get_phys(phys_addr_t phys, struct scif_endpt *ep) scif_get_phys() argument
207 struct scif_dev *scifdev = (struct scif_dev *)ep->remote_dev; scif_get_phys()
224 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_get_pages() local
230 "SCIFAPI get_pinned_pages: ep %p offset 0x%lx len 0x%lx\n", scif_get_pages()
231 ep, offset, len); scif_get_pages()
232 err = scif_verify_epd(ep); scif_get_pages()
249 req.head = &ep->rma_info.remote_reg_list; scif_get_pages()
251 mutex_lock(&ep->rma_info.rma_lock); scif_get_pages()
255 dev_err(&ep->remote_dev->sdev->dev, scif_get_pages()
274 if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev)) { scif_get_pages()
292 ep); scif_get_pages()
293 if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev)) scif_get_pages()
295 ep->remote_dev->sdev->aper->va + scif_get_pages()
297 ep->remote_dev->sdev->aper->pa; scif_get_pages()
302 mutex_unlock(&ep->rma_info.rma_lock); scif_get_pages()
312 dev_err(&ep->remote_dev->sdev->dev, scif_get_pages()
321 struct scif_endpt *ep; scif_put_pages() local
333 ep = (struct scif_endpt *)window->ep; scif_put_pages()
340 if (ep->state != SCIFEP_CONNECTED && ep->state != SCIFEP_DISCONNECTED) scif_put_pages()
343 mutex_lock(&ep->rma_info.rma_lock); scif_put_pages()
350 mutex_unlock(&ep->rma_info.rma_lock); scif_put_pages()
351 scif_drain_dma_intr(ep->remote_dev->sdev, scif_put_pages()
352 ep->rma_info.dma_chan); scif_put_pages()
355 msg.src = ep->port; scif_put_pages()
358 scif_nodeqp_send(ep->remote_dev, &msg); scif_put_pages()
362 mutex_unlock(&ep->rma_info.rma_lock); scif_put_pages()
387 struct scif_endpt *ep = (struct scif_endpt *)start_window->ep; scif_rma_list_mmap() local
388 struct list_head *head = &ep->rma_info.remote_reg_list; scif_rma_list_mmap()
406 phys_addr = scif_get_phys(phys_addr, ep); list_for_each_entry_from()
430 head = &ep->rma_info.remote_reg_list; list_for_each_entry_from()
464 struct scif_endpt *ep = (struct scif_endpt *)start_window->ep; scif_rma_list_munmap() local
465 struct list_head *head = &ep->rma_info.remote_reg_list; scif_rma_list_munmap()
469 msg.src = ep->port; scif_rma_list_munmap()
480 struct scif_dev *rdev = ep->remote_dev; list_for_each_entry_safe_from()
483 ep->rma_info.dma_chan); list_for_each_entry_safe_from()
487 scif_nodeqp_send(ep->remote_dev, &msg); list_for_each_entry_safe_from()
504 struct scif_endpt *ep; /* End point for remote window */ member in struct:vma_pvt
540 scif_insert_vma(vmapvt->ep, vma); scif_vma_open()
553 struct scif_endpt *ep; scif_munmap() local
565 ep = vmapvt->ep; scif_munmap()
569 "SCIFAPI munmap: ep %p nr_pages 0x%x offset 0x%llx\n", scif_munmap()
570 ep, nr_pages, offset); scif_munmap()
576 req.head = &ep->rma_info.remote_reg_list; scif_munmap()
578 mutex_lock(&ep->rma_info.rma_lock); scif_munmap()
587 mutex_unlock(&ep->rma_info.rma_lock); scif_munmap()
595 scif_delete_vma(ep, vma); scif_munmap()
615 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_mmap() local
622 "SCIFAPI mmap: ep %p start_offset 0x%llx nr_pages 0x%x\n", scif_mmap()
623 ep, start_offset, nr_pages); scif_mmap()
624 err = scif_verify_epd(ep); scif_mmap()
630 err = scif_insert_vma(ep, vma); scif_mmap()
636 scif_delete_vma(ep, vma); scif_mmap()
640 vmapvt->ep = ep; scif_mmap()
648 req.head = &ep->rma_info.remote_reg_list; scif_mmap()
650 mutex_lock(&ep->rma_info.rma_lock); scif_mmap()
654 dev_err(&ep->remote_dev->sdev->dev, scif_mmap()
660 if (!scifdev_self(ep->remote_dev)) scif_mmap()
677 if (!scifdev_self(ep->remote_dev)) scif_mmap()
683 dev_err(&ep->remote_dev->sdev->dev, scif_mmap()
691 mutex_unlock(&ep->rma_info.rma_lock); scif_mmap()
694 dev_err(&ep->remote_dev->sdev->dev, scif_mmap()
696 scif_delete_vma(ep, vma); scif_mmap()
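
scif_get_pages()/scif_put_pages() above give kernel-mode callers direct access to a peer's registered window, pinning it against the teardown paths (scif_recv_munmap(), scif_put_pages()) also shown in this file. A sketch; roff and len are assumed page aligned and inside a window the peer actually registered:

#include <linux/scif.h>

/* Pin a slice of the peer's window, then release it again. */
static int scif_touch_remote(scif_epd_t epd, off_t roff, size_t len)
{
	struct scif_range *pages;
	int err;

	err = scif_get_pages(epd, roff, len, &pages);
	if (err)
		return err;
	/* pages describes nr_pages remote pages (kernel va + dma addr) */
	return scif_put_pages(pages);
}
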
H A Dscif_rma.c30 * @ep: end point
34 void scif_rma_ep_init(struct scif_endpt *ep) scif_rma_ep_init() argument
36 struct scif_endpt_rma_info *rma = &ep->rma_info; scif_rma_ep_init()
59 * @ep: end point
63 int scif_rma_ep_can_uninit(struct scif_endpt *ep) scif_rma_ep_can_uninit() argument
67 mutex_lock(&ep->rma_info.rma_lock); scif_rma_ep_can_uninit()
69 if (list_empty(&ep->rma_info.reg_list) && scif_rma_ep_can_uninit()
70 list_empty(&ep->rma_info.remote_reg_list) && scif_rma_ep_can_uninit()
71 list_empty(&ep->rma_info.mmn_list) && scif_rma_ep_can_uninit()
72 !atomic_read(&ep->rma_info.tw_refcount) && scif_rma_ep_can_uninit()
73 !atomic_read(&ep->rma_info.tcw_refcount) && scif_rma_ep_can_uninit()
74 !atomic_read(&ep->rma_info.fence_refcount)) scif_rma_ep_can_uninit()
76 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_ep_can_uninit()
139 * @ep: end point
146 struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages, scif_create_window() argument
165 window->ep = (u64)ep; scif_create_window()
186 * @ep: end point
191 static void scif_destroy_incomplete_window(struct scif_endpt *ep, scif_destroy_incomplete_window() argument
204 if (!err && scifdev_alive(ep)) scif_destroy_incomplete_window()
207 mutex_lock(&ep->rma_info.rma_lock); scif_destroy_incomplete_window()
210 msg.src = ep->port; scif_destroy_incomplete_window()
211 msg.payload[0] = ep->remote_ep; scif_destroy_incomplete_window()
215 _scif_nodeqp_send(ep->remote_dev, &msg); scif_destroy_incomplete_window()
217 mutex_unlock(&ep->rma_info.rma_lock); scif_destroy_incomplete_window()
219 scif_free_window_offset(ep, window, window->offset); scif_destroy_incomplete_window()
314 * @ep: end point
319 int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window) scif_destroy_window() argument
332 scif_free_window_offset(ep, window, window->offset); scif_destroy_window()
333 scif_unmap_window(ep->remote_dev, window); scif_destroy_window()
474 * @ep: end point
516 * @ep: end point
638 * @ep: end point
643 static int scif_send_scif_unregister(struct scif_endpt *ep, scif_send_scif_unregister() argument
649 msg.src = ep->port; scif_send_scif_unregister()
652 return scif_nodeqp_send(ep->remote_dev, &msg); scif_send_scif_unregister()
664 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_unregister_window() local
678 mutex_unlock(&ep->rma_info.rma_lock); scif_unregister_window()
680 err = scif_send_scif_unregister(ep, window); scif_unregister_window()
687 mutex_lock(&ep->rma_info.rma_lock); scif_unregister_window()
695 if (!err && scifdev_alive(ep)) scif_unregister_window()
706 mutex_lock(&ep->rma_info.rma_lock); scif_unregister_window()
712 if (!scifdev_alive(ep)) { scif_unregister_window()
728 atomic_inc(&ep->rma_info.tw_refcount); scif_unregister_window()
730 scif_free_window_offset(ep, window, window->offset); scif_unregister_window()
731 mutex_unlock(&ep->rma_info.rma_lock); scif_unregister_window()
733 scifdev_alive(ep)) { scif_unregister_window()
734 scif_drain_dma_intr(ep->remote_dev->sdev, scif_unregister_window()
735 ep->rma_info.dma_chan); scif_unregister_window()
744 mutex_lock(&ep->rma_info.rma_lock); scif_unregister_window()
751 * @ep: end point
756 static int scif_send_alloc_request(struct scif_endpt *ep, scif_send_alloc_request() argument
770 return _scif_nodeqp_send(ep->remote_dev, &msg); scif_send_alloc_request()
775 * @ep: end point
781 static int scif_prep_remote_window(struct scif_endpt *ep, scif_prep_remote_window() argument
793 map_err = scif_map_window(ep->remote_dev, window); scif_prep_remote_window()
795 dev_err(&ep->remote_dev->sdev->dev, scif_prep_remote_window()
804 mutex_lock(&ep->rma_info.rma_lock); scif_prep_remote_window()
806 mutex_unlock(&ep->rma_info.rma_lock); scif_prep_remote_window()
807 if (!err && scifdev_alive(ep)) scif_prep_remote_window()
823 dev_err(&ep->remote_dev->sdev->dev, scif_prep_remote_window()
826 msg.src = ep->port; scif_prep_remote_window()
827 msg.payload[0] = ep->remote_ep; scif_prep_remote_window()
831 spin_lock(&ep->lock); scif_prep_remote_window()
832 if (ep->state == SCIFEP_CONNECTED) scif_prep_remote_window()
833 err = _scif_nodeqp_send(ep->remote_dev, &msg); scif_prep_remote_window()
836 spin_unlock(&ep->lock); scif_prep_remote_window()
841 ep->remote_dev); scif_prep_remote_window()
851 ep->remote_dev); scif_prep_remote_window()
856 ep->remote_dev); scif_prep_remote_window()
872 ep->remote_dev); scif_prep_remote_window()
876 ep->remote_dev); scif_prep_remote_window()
885 if (scifdev_is_p2p(ep->remote_dev)) { scif_prep_remote_window()
895 ep->remote_dev->base_addr; scif_prep_remote_window()
925 sizeof(*window->dma_addr), ep->remote_dev); scif_prep_remote_window()
927 sizeof(*window->num_pages), ep->remote_dev); scif_prep_remote_window()
935 remote_window->ep = ep->remote_ep; scif_prep_remote_window()
939 ep->remote_dev); scif_prep_remote_window()
943 ep->remote_dev); scif_prep_remote_window()
944 scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev); scif_prep_remote_window()
951 * @ep: end point
958 static int scif_send_scif_register(struct scif_endpt *ep, scif_send_scif_register() argument
964 msg.src = ep->port; scif_send_scif_register()
965 msg.payload[0] = ep->remote_ep; scif_send_scif_register()
968 spin_lock(&ep->lock); scif_send_scif_register()
969 if (ep->state == SCIFEP_CONNECTED) { scif_send_scif_register()
972 err = _scif_nodeqp_send(ep->remote_dev, &msg); scif_send_scif_register()
973 spin_unlock(&ep->lock); scif_send_scif_register()
981 if (!err && scifdev_alive(ep)) scif_send_scif_register()
990 err = _scif_nodeqp_send(ep->remote_dev, &msg); scif_send_scif_register()
991 spin_unlock(&ep->lock); scif_send_scif_register()
1000 * @ep: end point descriptor
1008 int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset, scif_get_window_offset() argument
1017 iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index, scif_get_window_offset()
1022 iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages, scif_get_window_offset()
1034 * @ep: end point descriptor
1041 void scif_free_window_offset(struct scif_endpt *ep, scif_free_window_offset() argument
1045 free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT); scif_free_window_offset()
1097 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_alloc_gnt_rej() local
1099 mutex_lock(&ep->rma_info.rma_lock); scif_alloc_gnt_rej()
1107 mutex_unlock(&ep->rma_info.rma_lock); scif_alloc_gnt_rej()
1156 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; scif_recv_reg() local
1160 mutex_lock(&ep->rma_info.rma_lock); scif_recv_reg()
1161 spin_lock(&ep->lock); scif_recv_reg()
1162 if (ep->state == SCIFEP_CONNECTED) { scif_recv_reg()
1164 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_reg()
1165 scif_fixup_aper_base(ep->remote_dev, window); scif_recv_reg()
1167 scif_insert_window(window, &ep->rma_info.remote_reg_list); scif_recv_reg()
1170 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_reg()
1172 spin_unlock(&ep->lock); scif_recv_reg()
1173 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_reg()
1175 scif_destroy_remote_lookup(ep->remote_dev, window); scif_recv_reg()
1196 struct scif_endpt *ep; scif_recv_unreg() local
1199 ep = (struct scif_endpt *)recv_window->ep; scif_recv_unreg()
1205 req.head = &ep->rma_info.remote_reg_list; scif_recv_unreg()
1206 msg->payload[0] = ep->remote_ep; scif_recv_unreg()
1208 mutex_lock(&ep->rma_info.rma_lock); scif_recv_unreg()
1226 atomic_inc(&ep->rma_info.tw_refcount); scif_recv_unreg()
1227 ep->rma_info.async_list_del = 1; scif_recv_unreg()
1240 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_unreg()
1242 scif_drain_dma_intr(ep->remote_dev->sdev, scif_recv_unreg()
1243 ep->rma_info.dma_chan); scif_recv_unreg()
1244 scif_nodeqp_send(ep->remote_dev, msg); scif_recv_unreg()
1259 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_recv_reg_ack() local
1261 mutex_lock(&ep->rma_info.rma_lock); scif_recv_reg_ack()
1264 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_reg_ack()
1278 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_recv_reg_nack() local
1280 mutex_lock(&ep->rma_info.rma_lock); scif_recv_reg_nack()
1283 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_reg_nack()
1296 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_recv_unreg_ack() local
1298 mutex_lock(&ep->rma_info.rma_lock); scif_recv_unreg_ack()
1301 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_unreg_ack()
1315 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_recv_unreg_nack() local
1317 mutex_lock(&ep->rma_info.rma_lock); scif_recv_unreg_nack()
1320 mutex_unlock(&ep->rma_info.rma_lock); scif_recv_unreg_nack()
1483 scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep) scif_insert_local_window() argument
1485 mutex_lock(&ep->rma_info.rma_lock); scif_insert_local_window()
1486 scif_insert_window(window, &ep->rma_info.reg_list); scif_insert_local_window()
1487 mutex_unlock(&ep->rma_info.rma_lock); scif_insert_local_window()
1494 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_register_pinned_pages() local
1519 err = scif_verify_epd(ep); scif_register_pinned_pages()
1530 err = scif_get_window_offset(ep, map_flags, offset, scif_register_pinned_pages()
1538 window = scif_create_window(ep, pinned_pages->nr_pages, scif_register_pinned_pages()
1542 scif_free_window_offset(ep, NULL, computed_offset); scif_register_pinned_pages()
1550 spdev = scif_get_peer_dev(ep->remote_dev); scif_register_pinned_pages()
1553 scif_destroy_window(ep, window); scif_register_pinned_pages()
1556 err = scif_send_alloc_request(ep, window); scif_register_pinned_pages()
1558 dev_err(&ep->remote_dev->sdev->dev, scif_register_pinned_pages()
1564 err = scif_prep_remote_window(ep, window); scif_register_pinned_pages()
1566 dev_err(&ep->remote_dev->sdev->dev, scif_register_pinned_pages()
1572 err = scif_send_scif_register(ep, window); scif_register_pinned_pages()
1574 dev_err(&ep->remote_dev->sdev->dev, scif_register_pinned_pages()
1581 scif_insert_local_window(window, ep); scif_register_pinned_pages()
1584 scif_destroy_window(ep, window); scif_register_pinned_pages()
1586 dev_err(&ep->remote_dev->sdev->dev, scif_register_pinned_pages()
1597 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_register() local
1604 "SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n", scif_register()
1631 err = scif_verify_epd(ep); scif_register()
1636 err = scif_get_window_offset(ep, map_flags, offset, scif_register()
1641 spdev = scif_get_peer_dev(ep->remote_dev); scif_register()
1644 scif_free_window_offset(ep, NULL, computed_offset); scif_register()
1648 window = scif_create_window(ep, len >> PAGE_SHIFT, scif_register()
1651 scif_free_window_offset(ep, NULL, computed_offset); scif_register()
1658 err = scif_send_alloc_request(ep, window); scif_register()
1660 scif_destroy_incomplete_window(ep, window); scif_register()
1674 scif_destroy_incomplete_window(ep, window); scif_register()
1684 err = scif_prep_remote_window(ep, window); scif_register()
1686 dev_err(&ep->remote_dev->sdev->dev, scif_register()
1692 err = scif_send_scif_register(ep, window); scif_register()
1694 dev_err(&ep->remote_dev->sdev->dev, scif_register()
1701 scif_insert_local_window(window, ep); scif_register()
1702 dev_dbg(&ep->remote_dev->sdev->dev, scif_register()
1703 "SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n", scif_register()
1707 scif_destroy_window(ep, window); scif_register()
1710 dev_err(&ep->remote_dev->sdev->dev, scif_register()
1719 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_unregister() local
1726 "SCIFAPI unregister: ep %p offset 0x%lx len 0x%lx\n", scif_unregister()
1727 ep, offset, len); scif_unregister()
1739 err = scif_verify_epd(ep); scif_unregister()
1751 req.head = &ep->rma_info.reg_list; scif_unregister()
1753 spdev = scif_get_peer_dev(ep->remote_dev); scif_unregister()
1758 mutex_lock(&ep->rma_info.rma_lock); scif_unregister()
1762 dev_err(&ep->remote_dev->sdev->dev, scif_unregister()
1769 dev_err(&ep->remote_dev->sdev->dev, scif_unregister()
1772 mutex_unlock(&ep->rma_info.rma_lock); scif_unregister()
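
scif_register() above allocates a window offset, creates the local window, and runs the alloc/register handshake with the peer before inserting the window on the reg_list; scif_unregister() reverses it. A minimal caller sketch; buf and len are assumed page aligned, and the fixed offset 0x100000 is arbitrary:

#include <linux/scif.h>

/* Expose a buffer at a fixed RMA offset, then tear it down again. */
static int scif_expose_and_drop(scif_epd_t epd, void *buf, size_t len)
{
	off_t off;

	off = scif_register(epd, buf, len, 0x100000,
			    SCIF_PROT_READ | SCIF_PROT_WRITE,
			    SCIF_MAP_FIXED);
	if (off < 0)
		return (int)off;
	/* peer may now scif_mmap() or scif_readfrom() this window */
	return scif_unregister(epd, off, len);
}
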
H A Dscif_nm.c29 struct scif_endpt *ep; scif_invalidate_ep() local
35 ep = list_entry(pos, struct scif_endpt, list); scif_invalidate_ep()
36 if (ep->remote_dev->node == node) { scif_invalidate_ep()
37 scif_unmap_all_windows(ep); scif_invalidate_ep()
38 spin_lock(&ep->lock); scif_invalidate_ep()
39 scif_cleanup_ep_qp(ep); scif_invalidate_ep()
40 spin_unlock(&ep->lock); scif_invalidate_ep()
44 ep = list_entry(pos, struct scif_endpt, list); scif_invalidate_ep()
45 if (ep->remote_dev->node == node) { scif_invalidate_ep()
47 spin_lock(&ep->lock); scif_invalidate_ep()
48 ep->state = SCIFEP_DISCONNECTED; scif_invalidate_ep()
49 list_add_tail(&ep->list, &scif_info.disconnected); scif_invalidate_ep()
50 scif_cleanup_ep_qp(ep); scif_invalidate_ep()
51 wake_up_interruptible(&ep->sendwq); scif_invalidate_ep()
52 wake_up_interruptible(&ep->recvwq); scif_invalidate_ep()
53 spin_unlock(&ep->lock); scif_invalidate_ep()
54 scif_unmap_all_windows(ep); scif_invalidate_ep()
H A Dscif_dma.c84 * @ep: Endpoint Descriptor.
90 int scif_reserve_dma_chan(struct scif_endpt *ep) scif_reserve_dma_chan() argument
98 if (!scif_info.nodeid && scifdev_self(ep->remote_dev)) scif_reserve_dma_chan()
103 scifdev = ep->remote_dev; scif_reserve_dma_chan()
109 mutex_lock(&ep->rma_info.rma_lock); scif_reserve_dma_chan()
110 ep->rma_info.dma_chan = chan; scif_reserve_dma_chan()
111 mutex_unlock(&ep->rma_info.rma_lock); scif_reserve_dma_chan()
123 struct scif_endpt *ep, __scif_rma_destroy_tcw()
136 ep = (struct scif_endpt *)window->ep; __scif_rma_destroy_tcw()
151 struct scif_endpt *ep = mmn->ep; scif_rma_destroy_tcw() local
153 spin_lock(&ep->rma_info.tc_lock); scif_rma_destroy_tcw()
154 __scif_rma_destroy_tcw(mmn, ep, start, len); scif_rma_destroy_tcw()
155 spin_unlock(&ep->rma_info.tc_lock); scif_rma_destroy_tcw()
158 static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep) scif_rma_destroy_tcw_ep() argument
163 list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) { scif_rma_destroy_tcw_ep()
169 static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep) __scif_rma_destroy_tcw_ep() argument
174 spin_lock(&ep->rma_info.tc_lock); __scif_rma_destroy_tcw_ep()
175 list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) { __scif_rma_destroy_tcw_ep()
177 __scif_rma_destroy_tcw(mmn, ep, 0, ULONG_MAX); __scif_rma_destroy_tcw_ep()
179 spin_unlock(&ep->rma_info.tc_lock); __scif_rma_destroy_tcw_ep()
182 static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes) scif_rma_tc_can_cache() argument
186 if ((atomic_read(&ep->rma_info.tcw_total_pages) scif_rma_tc_can_cache()
192 atomic_read(&ep->rma_info.tcw_total_pages), scif_rma_tc_can_cache()
195 __scif_rma_destroy_tcw_ep(ep); scif_rma_tc_can_cache()
249 static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep) scif_ep_unregister_mmu_notifier() argument
251 struct scif_endpt_rma_info *rma = &ep->rma_info; scif_ep_unregister_mmu_notifier()
255 mutex_lock(&ep->rma_info.mmn_lock); scif_ep_unregister_mmu_notifier()
262 mutex_unlock(&ep->rma_info.mmn_lock); scif_ep_unregister_mmu_notifier()
266 struct mm_struct *mm, struct scif_endpt *ep) scif_init_mmu_notifier()
268 mmn->ep = ep; scif_init_mmu_notifier()
290 scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep) scif_add_mmu_notifier() argument
298 scif_init_mmu_notifier(mmn, current->mm, ep); scif_add_mmu_notifier()
304 list_add(&mmn->list, &ep->rma_info.mmn_list); scif_add_mmu_notifier()
315 struct scif_endpt *ep; scif_mmu_notif_handler() local
320 ep = list_entry(pos, struct scif_endpt, mmu_list); scif_mmu_notif_handler()
321 list_del(&ep->mmu_list); scif_mmu_notif_handler()
323 scif_rma_destroy_tcw_ep(ep); scif_mmu_notif_handler()
324 scif_ep_unregister_mmu_notifier(ep); scif_mmu_notif_handler()
343 scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep) scif_add_mmu_notifier() argument
357 static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes) scif_rma_tc_can_cache() argument
378 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_register_temp() local
393 err = scif_get_window_offset(ep, 0, 0, scif_register_temp()
400 *out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT, scif_register_temp()
403 scif_free_window_offset(ep, NULL, *out_offset); scif_register_temp()
413 err = scif_map_window(ep->remote_dev, *out_window); scif_register_temp()
416 scif_destroy_window(ep, *out_window); scif_register_temp()
424 dev_err(&ep->remote_dev->sdev->dev, scif_register_temp()
604 struct scif_endpt *ep; scif_rma_destroy_windows() local
613 ep = (struct scif_endpt *)window->ep; scif_rma_destroy_windows()
614 chan = ep->rma_info.dma_chan; scif_rma_destroy_windows()
618 if (!chan || !scifdev_alive(ep) || scif_rma_destroy_windows()
619 !scif_drain_dma_intr(ep->remote_dev->sdev, scif_rma_destroy_windows()
620 ep->rma_info.dma_chan)) scif_rma_destroy_windows()
624 dev_warn(&ep->remote_dev->sdev->dev, scif_rma_destroy_windows()
628 scif_destroy_window(ep, window); scif_rma_destroy_windows()
631 atomic_dec(&ep->rma_info.tw_refcount); scif_rma_destroy_windows()
648 struct scif_endpt *ep; scif_rma_destroy_tcw_invalid() local
656 ep = (struct scif_endpt *)window->ep; scif_rma_destroy_tcw_invalid()
657 chan = ep->rma_info.dma_chan; scif_rma_destroy_tcw_invalid()
660 mutex_lock(&ep->rma_info.rma_lock); scif_rma_destroy_tcw_invalid()
661 if (!chan || !scifdev_alive(ep) || scif_rma_destroy_tcw_invalid()
662 !scif_drain_dma_intr(ep->remote_dev->sdev, scif_rma_destroy_tcw_invalid()
663 ep->rma_info.dma_chan)) { scif_rma_destroy_tcw_invalid()
665 &ep->rma_info.tcw_total_pages); scif_rma_destroy_tcw_invalid()
666 scif_destroy_window(ep, window); scif_rma_destroy_tcw_invalid()
667 atomic_dec(&ep->rma_info.tcw_refcount); scif_rma_destroy_tcw_invalid()
669 dev_warn(&ep->remote_dev->sdev->dev, scif_rma_destroy_tcw_invalid()
672 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_destroy_tcw_invalid()
1678 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_rma_copy() local
1691 err = scif_verify_epd(ep); scif_rma_copy()
1699 loopback = scifdev_self(ep->remote_dev) ? true : false; scif_rma_copy()
1721 remote_req.head = &ep->rma_info.remote_reg_list; scif_rma_copy()
1723 spdev = scif_get_peer_dev(ep->remote_dev); scif_rma_copy()
1730 mutex_lock(&ep->rma_info.mmn_lock); scif_rma_copy()
1731 mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); scif_rma_copy()
1733 scif_add_mmu_notifier(current->mm, ep); scif_rma_copy()
1734 mutex_unlock(&ep->rma_info.mmn_lock); scif_rma_copy()
1739 cache = cache && !scif_rma_tc_can_cache(ep, len); scif_rma_copy()
1741 mutex_lock(&ep->rma_info.rma_lock); scif_rma_copy()
1751 spin_lock(&ep->rma_info.tc_lock); scif_rma_copy()
1753 err = scif_query_tcw(ep, &req); scif_rma_copy()
1754 spin_unlock(&ep->rma_info.tc_lock); scif_rma_copy()
1761 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_copy()
1766 atomic_inc(&ep->rma_info.tcw_refcount); scif_rma_copy()
1768 &ep->rma_info.tcw_total_pages); scif_rma_copy()
1770 spin_lock(&ep->rma_info.tc_lock); scif_rma_copy()
1773 spin_unlock(&ep->rma_info.tc_lock); scif_rma_copy()
1789 req.head = &ep->rma_info.reg_list; scif_rma_copy()
1793 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_copy()
1801 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_copy()
1811 copy_work.remote_dev = ep->remote_dev; scif_rma_copy()
1827 chan = ep->rma_info.dma_chan; scif_rma_copy()
1832 atomic_inc(&ep->rma_info.tw_refcount); scif_rma_copy()
1834 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_copy()
1837 struct scif_dev *rdev = ep->remote_dev; scif_rma_copy()
1841 ep->rma_info.dma_chan); scif_rma_copy()
1844 ep->rma_info.dma_chan); scif_rma_copy()
1854 scif_destroy_window(ep, local_window); scif_rma_copy()
1869 "SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n", scif_readfrom()
1897 "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n", scif_writeto()
1925 "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", scif_vreadfrom()
1956 "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", scif_vwriteto()
122 __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, struct scif_endpt *ep, u64 start, u64 len) __scif_rma_destroy_tcw() argument
265 scif_init_mmu_notifier(struct scif_mmu_notif *mmn, struct mm_struct *mm, struct scif_endpt *ep) scif_init_mmu_notifier() argument
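
scif_rma_copy() above is the engine behind all four transfer calls wrapped at the bottom of the file (scif_readfrom, scif_writeto, scif_vreadfrom, scif_vwriteto), choosing between CPU and DMA copies and between cached and temporary windows. A hedged caller sketch:

#include <linux/scif.h>

/* Pull len bytes from the peer's window at roff into the local
 * window at loff. SCIF_RMA_SYNC returns only after completion;
 * without it, completion is deferred to a later fence. The scif_v*
 * variants take a plain kernel address instead of a local offset. */
static int scif_pull_sync(scif_epd_t epd, off_t loff, off_t roff,
			  size_t len)
{
	return scif_readfrom(epd, loff, len, roff, SCIF_RMA_SYNC);
}
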
H A Dscif_rma_list.c80 * Query the temp cached registration list of ep for an overlapping window
85 int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req) scif_query_tcw() argument
206 struct scif_endpt *ep = (struct scif_endpt *)window->ep; scif_rma_list_unregister() local
207 struct list_head *head = &ep->rma_info.reg_list; scif_rma_list_unregister()
238 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_unmap_all_windows() local
239 struct list_head *head = &ep->rma_info.reg_list; scif_unmap_all_windows()
241 mutex_lock(&ep->rma_info.rma_lock); list_for_each_safe()
244 scif_unmap_window(ep->remote_dev, window); list_for_each_safe()
246 mutex_unlock(&ep->rma_info.rma_lock);
260 struct scif_endpt *ep = (struct scif_endpt *)epd; scif_unregister_all_windows() local
261 struct list_head *head = &ep->rma_info.reg_list; scif_unregister_all_windows()
264 mutex_lock(&ep->rma_info.rma_lock); scif_unregister_all_windows()
270 ep->rma_info.async_list_del = 0; list_for_each_safe()
280 if (ACCESS_ONCE(ep->rma_info.async_list_del)) list_for_each_safe()
283 mutex_unlock(&ep->rma_info.rma_lock);
284 if (!list_empty(&ep->rma_info.mmn_list)) {
286 list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
H A Dscif_debugfs.c109 struct scif_endpt *ep; scif_rma_test() local
114 ep = list_entry(pos, struct scif_endpt, list); scif_rma_test()
115 seq_printf(s, "ep %p self windows\n", ep); scif_rma_test()
116 mutex_lock(&ep->rma_info.rma_lock); scif_rma_test()
117 scif_display_all_windows(&ep->rma_info.reg_list, s); scif_rma_test()
118 seq_printf(s, "ep %p remote windows\n", ep); scif_rma_test()
119 scif_display_all_windows(&ep->rma_info.remote_reg_list, s); scif_rma_test()
120 mutex_unlock(&ep->rma_info.rma_lock); scif_rma_test()
H A Dscif_epd.h94 * @listenep: associated listen ep
139 static inline int scifdev_alive(struct scif_endpt *ep) scifdev_alive() argument
141 return _scifdev_alive(ep->remote_dev); scifdev_alive()
146 * @ep: SCIF endpoint
151 static inline int scif_verify_epd(struct scif_endpt *ep) scif_verify_epd() argument
153 if (ep->state == SCIFEP_DISCONNECTED) scif_verify_epd()
156 if (ep->state != SCIFEP_CONNECTED) scif_verify_epd()
159 if (!scifdev_alive(ep)) scif_verify_epd()
183 void scif_cleanup_ep_qp(struct scif_endpt *ep);
184 void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held);
207 struct scif_endpt *ep);
H A Dscif_fd.c53 struct scif_endpt *ep = f->private_data; scif_fdflush() local
55 spin_lock(&ep->lock); scif_fdflush()
65 if (ep->files == id) scif_fdflush()
66 __scif_flush(ep); scif_fdflush()
67 spin_unlock(&ep->lock); scif_fdflush()
113 struct scif_endpt *ep = (struct scif_endpt *)priv; scif_fdioctl() local
122 req.self.node = ep->port.node; scif_fdioctl()
123 req.self.port = ep->port.port; scif_fdioctl()
140 scif_epd_t *ep = (scif_epd_t *)&request.endpt; scif_fdioctl() local
145 err = scif_accept(priv, &request.peer, ep, request.flags); scif_fdioctl()
150 scif_close(*ep); scif_fdioctl()
158 list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept); scif_fdioctl()
159 list_add_tail(&((*ep)->liacceptlist), &priv->li_accept); scif_fdioctl()
160 (*ep)->listenep = priv; scif_fdioctl()
H A Dscif_rma.h200 * @ep: SCIF endpoint
205 struct scif_endpt *ep; member in struct:scif_status
220 * @ep: Pointer to EP. Useful for passing EP around with messages to
251 u64 ep; member in struct:scif_window
285 * @ep: SCIF endpoint
294 struct scif_endpt *ep; member in struct:scif_mmu_notif
305 void scif_rma_ep_init(struct scif_endpt *ep);
307 int scif_rma_ep_can_uninit(struct scif_endpt *ep);
309 int scif_get_window_offset(struct scif_endpt *ep, int flags,
312 void scif_free_window_offset(struct scif_endpt *ep,
315 struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
318 int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window);
335 int scif_reserve_dma_chan(struct scif_endpt *ep);
H A Dscif_rma_list.h50 int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *request);
/linux-4.4.14/drivers/video/fbdev/
H A Dsunxvr500.c54 static int e3d_get_props(struct e3d_info *ep) e3d_get_props() argument
56 ep->width = of_getintprop_default(ep->of_node, "width", 0); e3d_get_props()
57 ep->height = of_getintprop_default(ep->of_node, "height", 0); e3d_get_props()
58 ep->depth = of_getintprop_default(ep->of_node, "depth", 8); e3d_get_props()
60 if (!ep->width || !ep->height) { e3d_get_props()
62 pci_name(ep->pdev)); e3d_get_props()
97 static void e3d_clut_write(struct e3d_info *ep, int index, u32 val) e3d_clut_write() argument
99 void __iomem *ramdac = ep->ramdac; e3d_clut_write()
102 spin_lock_irqsave(&ep->lock, flags); e3d_clut_write()
107 spin_unlock_irqrestore(&ep->lock, flags); e3d_clut_write()
114 struct e3d_info *ep = info->par; e3d_setcolreg() local
137 e3d_clut_write(ep, regno, value); e3d_setcolreg()
151 struct e3d_info *ep = info->par; e3d_imageblit() local
154 spin_lock_irqsave(&ep->lock, flags); e3d_imageblit()
156 info->screen_base += ep->fb8_buf_diff; e3d_imageblit()
158 info->screen_base -= ep->fb8_buf_diff; e3d_imageblit()
159 spin_unlock_irqrestore(&ep->lock, flags); e3d_imageblit()
164 struct e3d_info *ep = info->par; e3d_fillrect() local
167 spin_lock_irqsave(&ep->lock, flags); e3d_fillrect()
169 info->screen_base += ep->fb8_buf_diff; e3d_fillrect()
171 info->screen_base -= ep->fb8_buf_diff; e3d_fillrect()
172 spin_unlock_irqrestore(&ep->lock, flags); e3d_fillrect()
177 struct e3d_info *ep = info->par; e3d_copyarea() local
180 spin_lock_irqsave(&ep->lock, flags); e3d_copyarea()
182 info->screen_base += ep->fb8_buf_diff; e3d_copyarea()
184 info->screen_base -= ep->fb8_buf_diff; e3d_copyarea()
185 spin_unlock_irqrestore(&ep->lock, flags); e3d_copyarea()
196 static int e3d_set_fbinfo(struct e3d_info *ep) e3d_set_fbinfo() argument
198 struct fb_info *info = ep->info; e3d_set_fbinfo()
203 info->screen_base = ep->fb_base; e3d_set_fbinfo()
204 info->screen_size = ep->fb_size; e3d_set_fbinfo()
206 info->pseudo_palette = ep->pseudo_palette; e3d_set_fbinfo()
210 info->fix.smem_start = ep->fb_base_phys; e3d_set_fbinfo()
211 info->fix.smem_len = ep->fb_size; e3d_set_fbinfo()
213 if (ep->depth == 32 || ep->depth == 24) e3d_set_fbinfo()
218 var->xres = ep->width; e3d_set_fbinfo()
219 var->yres = ep->height; e3d_set_fbinfo()
222 var->bits_per_pixel = ep->depth; e3d_set_fbinfo()
247 struct e3d_info *ep; e3d_pci_register() local
279 ep = info->par; e3d_pci_register()
280 ep->info = info; e3d_pci_register()
281 ep->pdev = pdev; e3d_pci_register()
282 spin_lock_init(&ep->lock); e3d_pci_register()
283 ep->of_node = of_node; e3d_pci_register()
290 &ep->fb_base_reg); e3d_pci_register()
291 ep->fb_base_reg &= PCI_BASE_ADDRESS_MEM_MASK; e3d_pci_register()
293 ep->regs_base_phys = pci_resource_start (pdev, 1); e3d_pci_register()
300 ep->ramdac = ioremap(ep->regs_base_phys + 0x8000, 0x1000); e3d_pci_register()
301 if (!ep->ramdac) { e3d_pci_register()
306 ep->fb8_0_off = readl(ep->ramdac + RAMDAC_VID_8FB_0); e3d_pci_register()
307 ep->fb8_0_off -= ep->fb_base_reg; e3d_pci_register()
309 ep->fb8_1_off = readl(ep->ramdac + RAMDAC_VID_8FB_1); e3d_pci_register()
310 ep->fb8_1_off -= ep->fb_base_reg; e3d_pci_register()
312 ep->fb8_buf_diff = ep->fb8_1_off - ep->fb8_0_off; e3d_pci_register()
314 ep->fb_base_phys = pci_resource_start (pdev, 0); e3d_pci_register()
315 ep->fb_base_phys += ep->fb8_0_off; e3d_pci_register()
324 err = e3d_get_props(ep); e3d_pci_register()
328 line_length = (readl(ep->ramdac + RAMDAC_VID_CFG) >> 16) & 0xff; e3d_pci_register()
331 switch (ep->depth) { e3d_pci_register()
345 ep->fb_size = info->fix.line_length * ep->height; e3d_pci_register()
347 ep->fb_base = ioremap(ep->fb_base_phys, ep->fb_size); e3d_pci_register()
348 if (!ep->fb_base) { e3d_pci_register()
353 err = e3d_set_fbinfo(ep); e3d_pci_register()
374 iounmap(ep->fb_base); e3d_pci_register()
380 iounmap(ep->ramdac); e3d_pci_register()
398 struct e3d_info *ep = info->par; e3d_pci_unregister() local
402 iounmap(ep->ramdac); e3d_pci_unregister()
403 iounmap(ep->fb_base); e3d_pci_unregister()
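
For the framebuffer hits above: e3d_setcolreg() feeds both the RAMDAC CLUT (via e3d_clut_write()) and the 16-entry pseudo-palette used by the console. The pseudo-palette half follows the standard fbdev pattern; a generic sketch, not the exact sunxvr500 code:

#include <linux/fb.h>

/* Truecolor pseudo-palette update: fbdev hands in 16-bit color
 * components, of which only the top 8 bits are kept. */
static int example_setcolreg(unsigned regno, unsigned red, unsigned green,
			     unsigned blue, unsigned transp,
			     struct fb_info *info)
{
	u32 *pal = info->pseudo_palette;

	if (regno >= 16)
		return 1;
	pal[regno] = ((red >> 8) << 16) | ((green >> 8) << 8) | (blue >> 8);
	return 0;
}
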
/linux-4.4.14/drivers/usb/mon/
H A Dmon_text.c103 struct mon_text_ptr *p, const struct mon_event_text *ep);
105 struct mon_text_ptr *p, const struct mon_event_text *ep);
107 struct mon_text_ptr *p, const struct mon_event_text *ep);
109 struct mon_text_ptr *p, const struct mon_event_text *ep);
111 struct mon_text_ptr *p, const struct mon_event_text *ep);
113 struct mon_text_ptr *p, const struct mon_event_text *ep);
115 struct mon_text_ptr *p, const struct mon_event_text *ep);
126 static inline char mon_text_get_setup(struct mon_event_text *ep, mon_text_get_setup() argument
130 if (ep->xfertype != USB_ENDPOINT_XFER_CONTROL || ev_type != 'S') mon_text_get_setup()
136 memcpy(ep->setup, urb->setup_packet, SETUP_MAX); mon_text_get_setup()
140 static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, mon_text_get_data() argument
150 if (ep->is_in) { mon_text_get_data()
173 memcpy(ep->data, src, len); mon_text_get_data()
191 struct mon_event_text *ep; mon_text_event() local
200 (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { mon_text_event()
205 ep->type = ev_type; mon_text_event()
206 ep->id = (unsigned long) urb; mon_text_event()
207 ep->busnum = urb->dev->bus->busnum; mon_text_event()
208 ep->devnum = urb->dev->devnum; mon_text_event()
209 ep->epnum = usb_endpoint_num(&urb->ep->desc); mon_text_event()
210 ep->xfertype = usb_endpoint_type(&urb->ep->desc); mon_text_event()
211 ep->is_in = usb_urb_dir_in(urb); mon_text_event()
212 ep->tstamp = stamp; mon_text_event()
213 ep->length = (ev_type == 'S') ? mon_text_event()
216 ep->status = status; mon_text_event()
218 if (ep->xfertype == USB_ENDPOINT_XFER_INT) { mon_text_event()
219 ep->interval = urb->interval; mon_text_event()
220 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { mon_text_event()
221 ep->interval = urb->interval; mon_text_event()
222 ep->start_frame = urb->start_frame; mon_text_event()
223 ep->error_count = urb->error_count; mon_text_event()
225 ep->numdesc = urb->number_of_packets; mon_text_event()
226 if (ep->xfertype == USB_ENDPOINT_XFER_ISOC && mon_text_event()
231 dp = ep->isodesc; mon_text_event()
242 ep->length = urb->transfer_buffer_length; mon_text_event()
245 ep->setup_flag = mon_text_get_setup(ep, urb, ev_type, rp->r.m_bus); mon_text_event()
246 ep->data_flag = mon_text_get_data(ep, urb, ep->length, ev_type, mon_text_event()
250 list_add_tail(&ep->e_link, &rp->e_list); mon_text_event()
269 struct mon_event_text *ep; mon_text_error() local
272 (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { mon_text_error()
277 ep->type = 'E'; mon_text_error()
278 ep->id = (unsigned long) urb; mon_text_error()
279 ep->busnum = urb->dev->bus->busnum; mon_text_error()
280 ep->devnum = urb->dev->devnum; mon_text_error()
281 ep->epnum = usb_endpoint_num(&urb->ep->desc); mon_text_error()
282 ep->xfertype = usb_endpoint_type(&urb->ep->desc); mon_text_error()
283 ep->is_in = usb_urb_dir_in(urb); mon_text_error()
284 ep->tstamp = mon_get_timestamp(); mon_text_error()
285 ep->length = 0; mon_text_error()
286 ep->status = error; mon_text_error()
288 ep->setup_flag = '-'; mon_text_error()
289 ep->data_flag = 'E'; mon_text_error()
292 list_add_tail(&ep->e_link, &rp->e_list); mon_text_error()
386 struct mon_event_text *ep; mon_text_read_t() local
389 if (IS_ERR(ep = mon_text_read_wait(rp, file))) mon_text_read_t()
390 return PTR_ERR(ep); mon_text_read_t()
396 mon_text_read_head_t(rp, &ptr, ep); mon_text_read_t()
397 mon_text_read_statset(rp, &ptr, ep); mon_text_read_t()
399 " %d", ep->length); mon_text_read_t()
400 mon_text_read_data(rp, &ptr, ep); mon_text_read_t()
405 kmem_cache_free(rp->e_slab, ep); mon_text_read_t()
413 struct mon_event_text *ep; mon_text_read_u() local
416 if (IS_ERR(ep = mon_text_read_wait(rp, file))) mon_text_read_u()
417 return PTR_ERR(ep); mon_text_read_u()
423 mon_text_read_head_u(rp, &ptr, ep); mon_text_read_u()
424 if (ep->type == 'E') { mon_text_read_u()
425 mon_text_read_statset(rp, &ptr, ep); mon_text_read_u()
426 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { mon_text_read_u()
427 mon_text_read_isostat(rp, &ptr, ep); mon_text_read_u()
428 mon_text_read_isodesc(rp, &ptr, ep); mon_text_read_u()
429 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { mon_text_read_u()
430 mon_text_read_intstat(rp, &ptr, ep); mon_text_read_u()
432 mon_text_read_statset(rp, &ptr, ep); mon_text_read_u()
435 " %d", ep->length); mon_text_read_u()
436 mon_text_read_data(rp, &ptr, ep); mon_text_read_u()
441 kmem_cache_free(rp->e_slab, ep); mon_text_read_u()
450 struct mon_event_text *ep; mon_text_read_wait() local
454 while ((ep = mon_text_fetch(rp, mbus)) == NULL) { mon_text_read_wait()
473 return ep; mon_text_read_wait()
477 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_head_t()
481 udir = (ep->is_in ? 'i' : 'o'); mon_text_read_head_t()
482 switch (ep->xfertype) { mon_text_read_head_t()
490 ep->id, ep->tstamp, ep->type, mon_text_read_head_t()
491 utype, udir, ep->devnum, ep->epnum); mon_text_read_head_t()
495 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_head_u()
499 udir = (ep->is_in ? 'i' : 'o'); mon_text_read_head_u()
500 switch (ep->xfertype) { mon_text_read_head_u()
508 ep->id, ep->tstamp, ep->type, mon_text_read_head_u()
509 utype, udir, ep->busnum, ep->devnum, ep->epnum); mon_text_read_head_u()
513 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_statset()
516 if (ep->setup_flag == 0) { /* Setup packet is present and captured */ mon_text_read_statset()
519 ep->setup[0], mon_text_read_statset()
520 ep->setup[1], mon_text_read_statset()
521 (ep->setup[3] << 8) | ep->setup[2], mon_text_read_statset()
522 (ep->setup[5] << 8) | ep->setup[4], mon_text_read_statset()
523 (ep->setup[7] << 8) | ep->setup[6]); mon_text_read_statset()
524 } else if (ep->setup_flag != '-') { /* Unable to capture setup packet */ mon_text_read_statset()
526 " %c __ __ ____ ____ ____", ep->setup_flag); mon_text_read_statset()
529 " %d", ep->status); mon_text_read_statset()
534 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_intstat()
537 " %d:%d", ep->status, ep->interval); mon_text_read_intstat()
541 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_isostat()
543 if (ep->type == 'S') { mon_text_read_isostat()
545 " %d:%d:%d", ep->status, ep->interval, ep->start_frame); mon_text_read_isostat()
549 ep->status, ep->interval, ep->start_frame, ep->error_count); mon_text_read_isostat()
554 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_isodesc()
561 " %d", ep->numdesc); mon_text_read_isodesc()
562 ndesc = ep->numdesc; mon_text_read_isodesc()
567 dp = ep->isodesc; mon_text_read_isodesc()
576 struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_data()
580 if ((data_len = ep->length) > 0) { mon_text_read_data()
581 if (ep->data_flag == 0) { mon_text_read_data()
594 "%02x", ep->data[i]); mon_text_read_data()
600 " %c\n", ep->data_flag); mon_text_read_data()
613 struct mon_event_text *ep; mon_text_release() local
635 ep = list_entry(p, struct mon_event_text, e_link); mon_text_release()
638 kmem_cache_free(rp->e_slab, ep); mon_text_release()
476 mon_text_read_head_t(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_head_t() argument
494 mon_text_read_head_u(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_head_u() argument
512 mon_text_read_statset(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_statset() argument
533 mon_text_read_intstat(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_intstat() argument
540 mon_text_read_isostat(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_isostat() argument
553 mon_text_read_isodesc(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_isodesc() argument
575 mon_text_read_data(struct mon_reader_text *rp, struct mon_text_ptr *p, const struct mon_event_text *ep) mon_text_read_data() argument
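
mon_text_read_head_u() above emits the extended ('u') text line: tag, timestamp, event type, then an address word of the form <Type><dir>:<bus>:<dev>:<ep>. A user-space parse sketch; the sample line in the comment is illustrative only:

#include <stdio.h>

/* Parse the fixed head of one usbmon 'u'-format line, e.g.
 * "ffff88003b9a1500 3575914555 S Bo:2:004:1 -115 31 = 5b000010 ..." */
static int parse_usbmon_head(const char *line)
{
	unsigned long long tag;
	unsigned int tstamp, bus, dev, ep;
	char ev, utype, udir;

	if (sscanf(line, "%llx %u %c %c%c:%u:%u:%u",
		   &tag, &tstamp, &ev, &utype, &udir,
		   &bus, &dev, &ep) != 8)
		return -1;
	return (ev == 'S' || ev == 'C' || ev == 'E') ? 0 : -1;
}
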
H A Dmon_bin.c382 struct mon_bin_hdr *ep; mon_buff_area_fill() local
384 ep = MON_OFF2HDR(rp, offset); mon_buff_area_fill()
385 memset(ep, 0, PKT_SIZE); mon_buff_area_fill()
386 ep->type = '@'; mon_buff_area_fill()
387 ep->len_cap = size - PKT_SIZE; mon_buff_area_fill()
485 const struct usb_endpoint_descriptor *epd = &urb->ep->desc; mon_bin_event()
494 struct mon_bin_hdr *ep; mon_bin_event() local
535 /* Cannot rely on endpoint number in case of control ep.0 */ mon_bin_event()
557 ep = MON_OFF2HDR(rp, offset); mon_bin_event()
563 memset(ep, 0, PKT_SIZE); mon_bin_event()
564 ep->type = ev_type; mon_bin_event()
565 ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)]; mon_bin_event()
566 ep->epnum = dir | usb_endpoint_num(epd); mon_bin_event()
567 ep->devnum = urb->dev->devnum; mon_bin_event()
568 ep->busnum = urb->dev->bus->busnum; mon_bin_event()
569 ep->id = (unsigned long) urb; mon_bin_event()
570 ep->ts_sec = ts.tv_sec; mon_bin_event()
571 ep->ts_usec = ts.tv_usec; mon_bin_event()
572 ep->status = status; mon_bin_event()
573 ep->len_urb = urb_length; mon_bin_event()
574 ep->len_cap = length + lendesc; mon_bin_event()
575 ep->xfer_flags = urb->transfer_flags; mon_bin_event()
578 ep->interval = urb->interval; mon_bin_event()
580 ep->interval = urb->interval; mon_bin_event()
581 ep->start_frame = urb->start_frame; mon_bin_event()
582 ep->s.iso.error_count = urb->error_count; mon_bin_event()
583 ep->s.iso.numdesc = urb->number_of_packets; mon_bin_event()
587 ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type); mon_bin_event()
589 ep->flag_setup = '-'; mon_bin_event()
593 ep->ndesc = ndesc; mon_bin_event()
601 &ep->flag_data); mon_bin_event()
603 delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); mon_bin_event()
604 ep->len_cap -= length; mon_bin_event()
605 delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); mon_bin_event()
609 ep->flag_data = data_tag; mon_bin_event()
635 struct mon_bin_hdr *ep; mon_bin_error() local
648 ep = MON_OFF2HDR(rp, offset); mon_bin_error()
650 memset(ep, 0, PKT_SIZE); mon_bin_error()
651 ep->type = 'E'; mon_bin_error()
652 ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)]; mon_bin_error()
653 ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0; mon_bin_error()
654 ep->epnum |= usb_endpoint_num(&urb->ep->desc); mon_bin_error()
655 ep->devnum = urb->dev->devnum; mon_bin_error()
656 ep->busnum = urb->dev->bus->busnum; mon_bin_error()
657 ep->id = (unsigned long) urb; mon_bin_error()
658 ep->ts_sec = ts.tv_sec; mon_bin_error()
659 ep->ts_usec = ts.tv_usec; mon_bin_error()
660 ep->status = error; mon_bin_error()
662 ep->flag_setup = '-'; mon_bin_error()
663 ep->flag_data = 'E'; mon_bin_error()
739 struct mon_bin_hdr *ep; mon_bin_get_event() local
751 ep = MON_OFF2HDR(rp, rp->b_out); mon_bin_get_event()
753 if (copy_to_user(hdr, ep, hdrbytes)) { mon_bin_get_event()
758 step_len = min(ep->len_cap, nbytes); mon_bin_get_event()
767 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); mon_bin_get_event()
803 struct mon_bin_hdr *ep; mon_bin_read() local
817 ep = MON_OFF2HDR(rp, rp->b_out); mon_bin_read()
821 ptr = ((char *)ep) + rp->b_read; mon_bin_read()
833 step_len = ep->len_cap; mon_bin_read()
854 if (rp->b_read >= hdrbytes + ep->len_cap) { mon_bin_read()
856 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); mon_bin_read()
872 struct mon_bin_hdr *ep; mon_bin_flush() local
881 ep = MON_OFF2HDR(rp, rp->b_out); mon_bin_flush()
882 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); mon_bin_flush()
902 struct mon_bin_hdr *ep; mon_bin_fetch() local
924 ep = MON_OFF2HDR(rp, cur_out); mon_bin_fetch()
931 size = ep->len_cap + PKT_SIZE; mon_bin_fetch()
952 struct mon_bin_hdr *ep; mon_bin_queued() local
965 ep = MON_OFF2HDR(rp, cur_out); mon_bin_queued()
968 size = ep->len_cap + PKT_SIZE; mon_bin_queued()
986 struct mon_bin_hdr *ep; mon_bin_ioctl() local
997 ep = MON_OFF2HDR(rp, rp->b_out); mon_bin_ioctl()
998 ret = ep->len_cap; mon_bin_ioctl()
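mon_bin_event() keeps every record PKT_ALIGN-aligned in the ring: when a capture is truncated it recomputes the aligned footprint before and after shrinking len_cap and returns the difference (delta) to the buffer. The rounding idiom in isolation, as a runnable sketch; the alignment value is an assumed stand-in for PKT_ALIGN and must be a power of two:

    #include <assert.h>
    #include <stdio.h>

    /* Round len up to a power-of-two boundary: the same
     * (len + ALIGN-1) & ~(ALIGN-1) idiom mon_bin_event() uses to keep
     * records aligned in the ring buffer. */
    #define ALIGN_UP(len, align)  (((len) + (align) - 1) & ~((align) - 1))

    int main(void)
    {
        unsigned align = 64;            /* assumed stand-in for PKT_ALIGN */

        assert(ALIGN_UP(0u, align) == 0);
        assert(ALIGN_UP(1u, align) == 64);
        assert(ALIGN_UP(64u, align) == 64);
        assert(ALIGN_UP(65u, align) == 128);
        printf("align-up checks passed\n");
        return 0;
    }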
/linux-4.4.14/drivers/net/ethernet/smsc/
H A D epic100.c
328 struct epic_private *ep; epic_init_one() local
359 dev = alloc_etherdev(sizeof (*ep)); epic_init_one()
372 ep = netdev_priv(dev); epic_init_one()
373 ep->ioaddr = ioaddr; epic_init_one()
374 ep->mii.dev = dev; epic_init_one()
375 ep->mii.mdio_read = mdio_read; epic_init_one()
376 ep->mii.mdio_write = mdio_write; epic_init_one()
377 ep->mii.phy_id_mask = 0x1f; epic_init_one()
378 ep->mii.reg_num_mask = 0x1f; epic_init_one()
383 ep->tx_ring = ring_space; epic_init_one()
384 ep->tx_ring_dma = ring_dma; epic_init_one()
389 ep->rx_ring = ring_space; epic_init_one()
390 ep->rx_ring_dma = ring_dma; epic_init_one()
402 spin_lock_init(&ep->lock); epic_init_one()
403 spin_lock_init(&ep->napi_lock); epic_init_one()
404 ep->reschedule_in_poll = 0; epic_init_one()
426 pr_cont(" %4.4x%s", read_eeprom(ep, i), epic_init_one()
430 ep->pci_dev = pdev; epic_init_one()
431 ep->chip_id = chip_idx; epic_init_one()
432 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; epic_init_one()
433 ep->irq_mask = epic_init_one()
434 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) epic_init_one()
442 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { epic_init_one()
445 ep->phys[phy_idx++] = phy; epic_init_one()
452 ep->mii_phy_cnt = phy_idx; epic_init_one()
454 phy = ep->phys[0]; epic_init_one()
455 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); epic_init_one()
459 ep->mii.advertising, mdio_read(dev, phy, 5)); epic_init_one()
460 } else if ( ! (ep->chip_flags & NO_MII)) { epic_init_one()
464 ep->phys[0] = 3; epic_init_one()
466 ep->mii.phy_id = ep->phys[0]; epic_init_one()
470 if (ep->chip_flags & MII_PWRDWN) epic_init_one()
476 ep->mii.force_media = ep->mii.full_duplex = 1; epic_init_one()
479 dev->if_port = ep->default_port = option; epic_init_one()
485 netif_napi_add(dev, &ep->napi, epic_poll, 64); epic_init_one()
500 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); epic_init_one()
502 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); epic_init_one()
537 static void epic_disable_int(struct net_device *dev, struct epic_private *ep) epic_disable_int() argument
539 void __iomem *ioaddr = ep->ioaddr; epic_disable_int()
552 struct epic_private *ep) epic_napi_irq_off()
554 void __iomem *ioaddr = ep->ioaddr; epic_napi_irq_off()
556 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent); epic_napi_irq_off()
561 struct epic_private *ep) epic_napi_irq_on()
563 void __iomem *ioaddr = ep->ioaddr; epic_napi_irq_on()
566 ew32(INTMASK, ep->irq_mask | EpicNapiEvent); epic_napi_irq_on()
569 static int read_eeprom(struct epic_private *ep, int location) read_eeprom() argument
571 void __iomem *ioaddr = ep->ioaddr; read_eeprom()
607 struct epic_private *ep = netdev_priv(dev); mdio_read() local
608 void __iomem *ioaddr = ep->ioaddr; mdio_read()
631 struct epic_private *ep = netdev_priv(dev); mdio_write() local
632 void __iomem *ioaddr = ep->ioaddr; mdio_write()
647 struct epic_private *ep = netdev_priv(dev); epic_open() local
648 void __iomem *ioaddr = ep->ioaddr; epic_open()
649 const int irq = ep->pci_dev->irq; epic_open()
655 napi_enable(&ep->napi); epic_open()
658 napi_disable(&ep->napi); epic_open()
677 if (ep->chip_flags & MII_PWRDWN) epic_open()
696 ep->tx_threshold = TX_FIFO_THRESH; epic_open()
697 ew32(TxThresh, ep->tx_threshold); epic_open()
700 if (ep->mii_phy_cnt) epic_open()
701 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); epic_open()
705 mdio_read(dev, ep->phys[0], MII_BMSR)); epic_open()
708 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); epic_open()
711 ep->mii.full_duplex = 1; epic_open()
713 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); epic_open()
716 ep->mii.full_duplex ? "full" epic_open()
718 ep->phys[0], mii_lpa); epic_open()
722 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); epic_open()
723 ew32(PRxCDAR, ep->rx_ring_dma); epic_open()
724 ew32(PTxCDAR, ep->tx_ring_dma); epic_open()
734 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | epic_open()
740 ep->mii.full_duplex ? "full" : "half"); epic_open()
745 init_timer(&ep->timer); epic_open()
746 ep->timer.expires = jiffies + 3*HZ; epic_open()
747 ep->timer.data = (unsigned long)dev; epic_open()
748 ep->timer.function = epic_timer; /* timer handler */ epic_open()
749 add_timer(&ep->timer); epic_open()
759 struct epic_private *ep = netdev_priv(dev); epic_pause() local
760 void __iomem *ioaddr = ep->ioaddr; epic_pause()
782 struct epic_private *ep = netdev_priv(dev); epic_restart() local
783 void __iomem *ioaddr = ep->ioaddr; epic_restart()
790 ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); epic_restart()
803 if (ep->chip_flags & MII_PWRDWN) epic_restart()
809 ep->tx_threshold = TX_FIFO_THRESH; epic_restart()
810 ew32(TxThresh, ep->tx_threshold); epic_restart()
811 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); epic_restart()
812 ew32(PRxCDAR, ep->rx_ring_dma + epic_restart()
813 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc)); epic_restart()
814 ew32(PTxCDAR, ep->tx_ring_dma + epic_restart()
815 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); epic_restart()
823 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | epic_restart()
832 struct epic_private *ep = netdev_priv(dev); check_media() local
833 void __iomem *ioaddr = ep->ioaddr; check_media()
834 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; check_media()
835 int negotiated = mii_lpa & ep->mii.advertising; check_media()
838 if (ep->mii.force_media) check_media()
842 if (ep->mii.full_duplex != duplex) { check_media()
843 ep->mii.full_duplex = duplex; check_media()
845 ep->mii.full_duplex ? "full" : "half", check_media()
846 ep->phys[0], mii_lpa); check_media()
847 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79); check_media()
854 struct epic_private *ep = netdev_priv(dev); epic_timer() local
855 void __iomem *ioaddr = ep->ioaddr; epic_timer()
867 ep->timer.expires = jiffies + next_tick; epic_timer()
868 add_timer(&ep->timer); epic_timer()
873 struct epic_private *ep = netdev_priv(dev); epic_tx_timeout() local
874 void __iomem *ioaddr = ep->ioaddr; epic_tx_timeout()
881 ep->dirty_tx, ep->cur_tx); epic_tx_timeout()
894 if (!ep->tx_full) epic_tx_timeout()
901 struct epic_private *ep = netdev_priv(dev); epic_init_ring() local
904 ep->tx_full = 0; epic_init_ring()
905 ep->dirty_tx = ep->cur_tx = 0; epic_init_ring()
906 ep->cur_rx = ep->dirty_rx = 0; epic_init_ring()
907 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); epic_init_ring()
911 ep->rx_ring[i].rxstatus = 0; epic_init_ring()
912 ep->rx_ring[i].buflength = ep->rx_buf_sz; epic_init_ring()
913 ep->rx_ring[i].next = ep->rx_ring_dma + epic_init_ring()
915 ep->rx_skbuff[i] = NULL; epic_init_ring()
918 ep->rx_ring[i-1].next = ep->rx_ring_dma; epic_init_ring()
922 struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); epic_init_ring()
923 ep->rx_skbuff[i] = skb; epic_init_ring()
927 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, epic_init_ring()
928 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); epic_init_ring()
929 ep->rx_ring[i].rxstatus = DescOwn; epic_init_ring()
931 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); epic_init_ring()
936 ep->tx_skbuff[i] = NULL; epic_init_ring()
937 ep->tx_ring[i].txstatus = 0x0000; epic_init_ring()
938 ep->tx_ring[i].next = ep->tx_ring_dma + epic_init_ring()
941 ep->tx_ring[i-1].next = ep->tx_ring_dma; epic_init_ring()
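epic_init_ring() chains each descriptor's next pointer to the bus address of its successor and points the last one back at the base, so the NIC can follow the ring indefinitely. A simplified sketch of that wiring; the struct layout, ring size, and dummy bus address are stand-ins, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8            /* assumed stand-in for RX_RING_SIZE */

    struct desc {                  /* simplified stand-in for epic_rx_desc */
        uint32_t status;
        uint32_t next;             /* bus address of the next descriptor */
    };

    /* Link every descriptor to its successor and close the loop, the
     * shape epic_init_ring() builds before handing ownership (DescOwn)
     * to the NIC. ring_dma stands in for the ring's DMA bus address. */
    static void init_ring(struct desc *ring, uint32_t ring_dma)
    {
        for (int i = 0; i < RING_SIZE; i++) {
            ring[i].status = 0;
            ring[i].next = ring_dma + (i + 1) * sizeof(struct desc);
        }
        ring[RING_SIZE - 1].next = ring_dma;   /* wrap to the start */
    }

    int main(void)
    {
        struct desc ring[RING_SIZE];

        init_ring(ring, 0x1000);
        printf("last.next = 0x%x (wraps to base)\n",
               (unsigned)ring[RING_SIZE - 1].next);
        return 0;
    }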
946 struct epic_private *ep = netdev_priv(dev); epic_start_xmit() local
947 void __iomem *ioaddr = ep->ioaddr; epic_start_xmit()
959 spin_lock_irqsave(&ep->lock, flags); epic_start_xmit()
960 free_count = ep->cur_tx - ep->dirty_tx; epic_start_xmit()
961 entry = ep->cur_tx % TX_RING_SIZE; epic_start_xmit()
963 ep->tx_skbuff[entry] = skb; epic_start_xmit()
964 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, epic_start_xmit()
975 ep->tx_full = 1; epic_start_xmit()
977 ep->tx_ring[entry].buflength = ctrl_word | skb->len; epic_start_xmit()
978 ep->tx_ring[entry].txstatus = epic_start_xmit()
982 ep->cur_tx++; epic_start_xmit()
983 if (ep->tx_full) epic_start_xmit()
986 spin_unlock_irqrestore(&ep->lock, flags); epic_start_xmit()
997 static void epic_tx_error(struct net_device *dev, struct epic_private *ep, epic_tx_error() argument
1019 static void epic_tx(struct net_device *dev, struct epic_private *ep) epic_tx() argument
1027 cur_tx = ep->cur_tx; epic_tx()
1028 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { epic_tx()
1031 int txstatus = ep->tx_ring[entry].txstatus; epic_tx()
1039 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; epic_tx()
1041 epic_tx_error(dev, ep, txstatus); epic_tx()
1044 skb = ep->tx_skbuff[entry]; epic_tx()
1045 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, epic_tx()
1048 ep->tx_skbuff[entry] = NULL; epic_tx()
1054 dirty_tx, cur_tx, ep->tx_full); epic_tx()
1058 ep->dirty_tx = dirty_tx; epic_tx()
1059 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { epic_tx()
1061 ep->tx_full = 0; epic_tx()
1071 struct epic_private *ep = netdev_priv(dev); epic_interrupt() local
1072 void __iomem *ioaddr = ep->ioaddr; epic_interrupt()
1090 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { epic_interrupt()
1091 spin_lock(&ep->napi_lock); epic_interrupt()
1092 if (napi_schedule_prep(&ep->napi)) { epic_interrupt()
1093 epic_napi_irq_off(dev, ep); epic_interrupt()
1094 __napi_schedule(&ep->napi); epic_interrupt()
1096 ep->reschedule_in_poll++; epic_interrupt()
1097 spin_unlock(&ep->napi_lock); epic_interrupt()
1115 ew32(TxThresh, ep->tx_threshold += 128); epic_interrupt()
1140 struct epic_private *ep = netdev_priv(dev); epic_rx() local
1141 int entry = ep->cur_rx % RX_RING_SIZE; epic_rx()
1142 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; epic_rx()
1147 ep->rx_ring[entry].rxstatus); epic_rx()
1153 while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { epic_rx()
1154 int status = ep->rx_ring[entry].rxstatus; epic_rx()
1188 pci_dma_sync_single_for_cpu(ep->pci_dev, epic_rx()
1189 ep->rx_ring[entry].bufaddr, epic_rx()
1190 ep->rx_buf_sz, epic_rx()
1192 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); epic_rx()
1194 pci_dma_sync_single_for_device(ep->pci_dev, epic_rx()
1195 ep->rx_ring[entry].bufaddr, epic_rx()
1196 ep->rx_buf_sz, epic_rx()
1199 pci_unmap_single(ep->pci_dev, epic_rx()
1200 ep->rx_ring[entry].bufaddr, epic_rx()
1201 ep->rx_buf_sz, PCI_DMA_FROMDEVICE); epic_rx()
1202 skb_put(skb = ep->rx_skbuff[entry], pkt_len); epic_rx()
1203 ep->rx_skbuff[entry] = NULL; epic_rx()
1211 entry = (++ep->cur_rx) % RX_RING_SIZE; epic_rx()
1215 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { epic_rx()
1216 entry = ep->dirty_rx % RX_RING_SIZE; epic_rx()
1217 if (ep->rx_skbuff[entry] == NULL) { epic_rx()
1219 skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); epic_rx()
1223 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, epic_rx()
1224 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); epic_rx()
1228 ep->rx_ring[entry].rxstatus = DescOwn; epic_rx()
1233 static void epic_rx_err(struct net_device *dev, struct epic_private *ep) epic_rx_err() argument
1235 void __iomem *ioaddr = ep->ioaddr; epic_rx_err()
1250 struct epic_private *ep = container_of(napi, struct epic_private, napi); epic_poll() local
1251 struct net_device *dev = ep->mii.dev; epic_poll()
1253 void __iomem *ioaddr = ep->ioaddr; epic_poll()
1257 epic_tx(dev, ep); epic_poll()
1261 epic_rx_err(dev, ep); epic_poll()
1269 spin_lock_irqsave(&ep->napi_lock, flags); epic_poll()
1271 more = ep->reschedule_in_poll; epic_poll()
1275 epic_napi_irq_on(dev, ep); epic_poll()
1277 ep->reschedule_in_poll--; epic_poll()
1279 spin_unlock_irqrestore(&ep->napi_lock, flags); epic_poll()
1290 struct epic_private *ep = netdev_priv(dev); epic_close() local
1291 struct pci_dev *pdev = ep->pci_dev; epic_close()
1292 void __iomem *ioaddr = ep->ioaddr; epic_close()
1297 napi_disable(&ep->napi); epic_close()
1303 del_timer_sync(&ep->timer); epic_close()
1305 epic_disable_int(dev, ep); epic_close()
1313 skb = ep->rx_skbuff[i]; epic_close()
1314 ep->rx_skbuff[i] = NULL; epic_close()
1315 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ epic_close()
1316 ep->rx_ring[i].buflength = 0; epic_close()
1318 pci_unmap_single(pdev, ep->rx_ring[i].bufaddr, epic_close()
1319 ep->rx_buf_sz, PCI_DMA_FROMDEVICE); epic_close()
1322 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ epic_close()
1325 skb = ep->tx_skbuff[i]; epic_close()
1326 ep->tx_skbuff[i] = NULL; epic_close()
1329 pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len, epic_close()
1342 struct epic_private *ep = netdev_priv(dev); epic_get_stats() local
1343 void __iomem *ioaddr = ep->ioaddr; epic_get_stats()
1358 new frame, not around filling ep->setup_frame. This is non-deterministic
1363 struct epic_private *ep = netdev_priv(dev); set_rx_mode() local
1364 void __iomem *ioaddr = ep->ioaddr; set_rx_mode()
1392 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1395 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1456 struct epic_private *ep = netdev_priv(dev); ethtool_begin() local
1457 void __iomem *ioaddr = ep->ioaddr; ethtool_begin()
1469 struct epic_private *ep = netdev_priv(dev); ethtool_complete() local
1470 void __iomem *ioaddr = ep->ioaddr; ethtool_complete()
1521 struct epic_private *ep = netdev_priv(dev); epic_remove_one() local
1523 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); epic_remove_one()
1524 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); epic_remove_one()
1526 pci_iounmap(pdev, ep->ioaddr); epic_remove_one()
1539 struct epic_private *ep = netdev_priv(dev); epic_suspend() local
1540 void __iomem *ioaddr = ep->ioaddr; epic_suspend()
551 epic_napi_irq_off(struct net_device *dev, struct epic_private *ep) epic_napi_irq_off() argument
560 epic_napi_irq_on(struct net_device *dev, struct epic_private *ep) epic_napi_irq_on() argument
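Throughout the TX path above, cur_tx and dirty_tx are free-running counters: the slot is taken modulo TX_RING_SIZE and occupancy is their difference, which unsigned arithmetic keeps correct even across wraparound. A small demonstration of why the cur - dirty test in epic_start_xmit()/epic_tx() works; the ring size is assumed:

    #include <stdio.h>

    #define TX_RING_SIZE 16   /* assumed; the driver masks with % TX_RING_SIZE */

    int main(void)
    {
        unsigned cur = 4294967290u;    /* near UINT_MAX to show wraparound */
        unsigned dirty = cur;

        for (int i = 0; i < 10; i++)   /* queue ten packets; cur wraps */
            cur++;
        printf("in flight: %u, next slot: %u\n",
               cur - dirty, cur % TX_RING_SIZE);

        dirty += 10;                   /* reclaim them */
        printf("in flight after reclaim: %u\n", cur - dirty);
        return 0;
    }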
/linux-4.4.14/drivers/usb/gadget/udc/bdc/
H A D bdc_ep.c
56 /* Free the bdl during ep disable */ ep_bd_list_free()
57 static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs) ep_bd_list_free() argument
59 struct bd_list *bd_list = &ep->bd_list; ep_bd_list_free()
60 struct bdc *bdc = ep->bdc; ep_bd_list_free()
64 dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n", ep_bd_list_free()
65 __func__, ep->name, num_tabs); ep_bd_list_free()
68 dev_dbg(bdc->dev, "%s already freed\n", ep->name); ep_bd_list_free()
100 kfree(ep->bd_list.bd_table_array); ep_bd_list_free()
125 /* Allocate the bdl for ep, during config ep */ ep_bd_list_alloc()
126 static int ep_bd_list_alloc(struct bdc_ep *ep) ep_bd_list_alloc() argument
130 struct bdc *bdc = ep->bdc; ep_bd_list_alloc()
134 if (usb_endpoint_xfer_isoc(ep->desc)) ep_bd_list_alloc()
142 "%s ep:%p num_tabs:%d\n", ep_bd_list_alloc()
143 __func__, ep, num_tabs); ep_bd_list_alloc()
146 ep->bd_list.bd_table_array = kzalloc( ep_bd_list_alloc()
149 if (!ep->bd_list.bd_table_array) ep_bd_list_alloc()
174 ep->bd_list.bd_table_array[index] = bd_table; ep_bd_list_alloc()
181 chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab); ep_bd_list_alloc()
183 ep->bd_list.num_tabs = num_tabs; ep_bd_list_alloc()
184 ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1; ep_bd_list_alloc()
185 ep->bd_list.num_tabs = num_tabs; ep_bd_list_alloc()
186 ep->bd_list.num_bds_table = bd_p_tab; ep_bd_list_alloc()
187 ep->bd_list.eqp_bdi = 0; ep_bd_list_alloc()
188 ep->bd_list.hwd_bdi = 0; ep_bd_list_alloc()
193 ep_bd_list_free(ep, num_tabs); ep_bd_list_alloc()
221 static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr) bd_add_to_bdi() argument
223 struct bd_list *bd_list = &ep->bd_list; bd_add_to_bdi()
225 struct bdc *bdc = ep->bdc; bd_add_to_bdi()
259 /* return the global bdi, to compare with ep eqp_bdi */ bd_add_to_bdi()
264 static int bdi_to_tbi(struct bdc_ep *ep, int bdi) bdi_to_tbi() argument
268 tbi = bdi / ep->bd_list.num_bds_table; bdi_to_tbi()
269 dev_vdbg(ep->bdc->dev, bdi_to_tbi()
271 bdi, ep->bd_list.num_bds_table, tbi); bdi_to_tbi()
277 static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi) find_end_bdi() argument
283 end_bdi = ep->bd_list.max_bdi - 1; find_end_bdi()
284 else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0) find_end_bdi()
291 * How many transfer bd's are available on this ep bdl, chain bds are not
294 static int bd_available_ep(struct bdc_ep *ep) bd_available_ep() argument
296 struct bd_list *bd_list = &ep->bd_list; bd_available_ep()
298 struct bdc *bdc = ep->bdc; bd_available_ep()
327 if (!(bdi_to_tbi(ep, bd_list->hwd_bdi) bd_available_ep()
328 == bdi_to_tbi(ep, bd_list->eqp_bdi))) { bd_available_ep()
349 struct bdc_ep *ep = bdc->bdc_ep_array[epnum]; bdc_notify_xfr() local
353 * We don't have any way to check if ep state is running, bdc_notify_xfr()
356 if (unlikely(ep->flags & BDC_EP_STOP)) bdc_notify_xfr()
357 ep->flags &= ~BDC_EP_STOP; bdc_notify_xfr()
363 static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi) bdi_to_bd() argument
365 int tbi = bdi_to_tbi(ep, bdi); bdi_to_bd()
368 local_bdi = bdi - (tbi * ep->bd_list.num_bds_table); bdi_to_bd()
369 dev_vdbg(ep->bdc->dev, bdi_to_bd()
373 return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi); bdi_to_bd()
377 static void ep_bdlist_eqp_adv(struct bdc_ep *ep) ep_bdlist_eqp_adv() argument
379 ep->bd_list.eqp_bdi++; ep_bdlist_eqp_adv()
381 if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0) ep_bdlist_eqp_adv()
382 ep->bd_list.eqp_bdi++; ep_bdlist_eqp_adv()
385 if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1)) ep_bdlist_eqp_adv()
386 ep->bd_list.eqp_bdi = 0; ep_bdlist_eqp_adv()
395 req->ep->dir = 0; setup_first_bd_ep0()
437 struct bdc_ep *ep; setup_bd_list_xfr() local
442 ep = req->ep; setup_bd_list_xfr()
443 bd_list = &ep->bd_list; setup_bd_list_xfr()
447 bd = bdi_to_bd(ep, bd_list->eqp_bdi); setup_bd_list_xfr()
449 maxp = usb_endpoint_maxp(ep->desc) & 0x7ff; setup_bd_list_xfr()
452 dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n", setup_bd_list_xfr()
453 __func__, ep->name, num_bds, tfs, req_len, bd); setup_bd_list_xfr()
462 if (ep->ep_num == 1) { setup_bd_list_xfr()
468 if (!req->ep->dir) setup_bd_list_xfr()
482 bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi); setup_bd_list_xfr()
493 ep_bdlist_eqp_adv(ep); setup_bd_list_xfr()
498 ep->bd_list.eqp_bdi); setup_bd_list_xfr()
499 bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi); setup_bd_list_xfr()
503 bd = bdi_to_bd(ep, bd_xfr->start_bdi); setup_bd_list_xfr()
507 bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi; setup_bd_list_xfr()
518 struct bdc_ep *ep; bdc_queue_xfr() local
521 ep = req->ep; bdc_queue_xfr()
524 ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi); bdc_queue_xfr()
527 bd_available = bd_available_ep(ep); bdc_queue_xfr()
529 /* how many bd's are available on ep */ bdc_queue_xfr()
536 list_add_tail(&req->queue, &ep->queue); bdc_queue_xfr()
537 bdc_dbg_bd_list(bdc, ep); bdc_queue_xfr()
538 bdc_notify_xfr(bdc, ep->ep_num); bdc_queue_xfr()
544 static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req, bdc_req_complete() argument
547 struct bdc *bdc = ep->bdc; bdc_req_complete()
552 dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status); bdc_req_complete()
555 usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir); bdc_req_complete()
558 usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req); bdc_req_complete()
564 int bdc_ep_disable(struct bdc_ep *ep) bdc_ep_disable() argument
571 bdc = ep->bdc; bdc_ep_disable()
572 dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num); bdc_ep_disable()
574 ret = bdc_stop_ep(bdc, ep->ep_num); bdc_ep_disable()
581 while (!list_empty(&ep->queue)) { bdc_ep_disable()
582 req = list_entry(ep->queue.next, struct bdc_req, bdc_ep_disable()
584 bdc_req_complete(ep, req, -ESHUTDOWN); bdc_ep_disable()
587 ret = bdc_dconfig_ep(bdc, ep); bdc_ep_disable()
592 ep->flags = 0; bdc_ep_disable()
594 if (ep->ep_num == 1) bdc_ep_disable()
598 ep_bd_list_free(ep, ep->bd_list.num_tabs); bdc_ep_disable()
599 ep->desc = NULL; bdc_ep_disable()
600 ep->comp_desc = NULL; bdc_ep_disable()
601 ep->usb_ep.desc = NULL; bdc_ep_disable()
602 ep->ep_type = 0; bdc_ep_disable()
607 /* Enable the ep */ bdc_ep_enable()
608 int bdc_ep_enable(struct bdc_ep *ep) bdc_ep_enable() argument
613 bdc = ep->bdc; bdc_ep_enable()
617 ret = ep_bd_list_alloc(ep); bdc_ep_enable()
619 dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret); bdc_ep_enable()
622 bdc_dbg_bd_list(bdc, ep); bdc_ep_enable()
623 /* only for ep0: config ep is called for ep0 from connect event */ bdc_ep_enable()
624 ep->flags |= BDC_EP_ENABLED; bdc_ep_enable()
625 if (ep->ep_num == 1) bdc_ep_enable()
629 ret = bdc_config_ep(bdc, ep); bdc_ep_enable()
633 ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc); bdc_ep_enable()
634 ep->usb_ep.desc = ep->desc; bdc_ep_enable()
635 ep->usb_ep.comp_desc = ep->comp_desc; bdc_ep_enable()
636 ep->ep_type = usb_endpoint_type(ep->desc); bdc_ep_enable()
637 ep->flags |= BDC_EP_ENABLED; bdc_ep_enable()
648 struct bdc_ep *ep; ep0_queue_status_stage() local
651 ep = bdc->bdc_ep_array[1]; ep0_queue_status_stage()
652 status_req->ep = ep; ep0_queue_status_stage()
663 static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req) ep0_queue() argument
668 bdc = ep->bdc; ep0_queue()
672 req->epnum = ep->ep_num; ep0_queue()
693 ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir); ep0_queue()
695 dev_err(bdc->dev, "dma mapping failed %s\n", ep->name); ep0_queue()
706 struct bdc_ep *ep; ep0_queue_data_stage() local
710 ep = bdc->bdc_ep_array[1]; ep0_queue_data_stage()
711 bdc->ep0_req.ep = ep; ep0_queue_data_stage()
714 return ep0_queue(ep, &bdc->ep0_req); ep0_queue_data_stage()
717 /* Queue req on ep */ ep_queue()
718 static int ep_queue(struct bdc_ep *ep, struct bdc_req *req) ep_queue() argument
723 if (!req || !ep->usb_ep.desc) ep_queue()
726 bdc = ep->bdc; ep_queue()
730 req->epnum = ep->ep_num; ep_queue()
732 ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir); ep_queue()
741 /* Dequeue a request from ep */ ep_dequeue()
742 static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) ep_dequeue() argument
756 bdc = ep->bdc; ep_dequeue()
758 eqp_bdi = ep->bd_list.eqp_bdi - 1; ep_dequeue()
761 eqp_bdi = ep->bd_list.max_bdi; ep_dequeue()
764 end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi); ep_dequeue()
766 dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n", ep_dequeue()
767 __func__, ep->name, start_bdi, end_bdi); ep_dequeue()
768 dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", ep_dequeue()
769 ep, (void *)ep->usb_ep.desc); ep_dequeue()
770 /* Stop the ep to see where the HW is? */ ep_dequeue()
771 ret = bdc_stop_ep(bdc, ep->ep_num); ep_dequeue()
772 /* if there is an issue with stopping ep, then no need to go further */ ep_dequeue()
788 curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64); ep_dequeue()
828 tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi); ep_dequeue()
829 table = ep->bd_list.bd_table_array[tbi]; ep_dequeue()
832 tbi * ep->bd_list.num_bds_table); ep_dequeue()
834 first_req = list_first_entry(&ep->queue, struct bdc_req, ep_dequeue()
850 bd_start = bdi_to_bd(ep, start_bdi); ep_dequeue()
855 bdc_dbg_bd_list(bdc, ep); ep_dequeue()
861 ret = bdc_ep_bla(bdc, ep, next_bd_dma); ep_dequeue()
871 /* Halt/Clear the ep based on value */ ep_set_halt()
872 static int ep_set_halt(struct bdc_ep *ep, u32 value) ep_set_halt() argument
877 bdc = ep->bdc; ep_set_halt()
878 dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value); ep_set_halt()
882 if (ep->ep_num == 1) ep_set_halt()
885 ret = bdc_ep_set_stall(bdc, ep->ep_num); ep_set_halt()
888 ep->name); ep_set_halt()
890 ep->flags |= BDC_EP_STALL; ep_set_halt()
894 ret = bdc_ep_clear_stall(bdc, ep->ep_num); ep_set_halt()
897 ep->name); ep_set_halt()
899 ep->flags &= ~BDC_EP_STALL; ep_set_halt()
906 /* Free all the ep */ bdc_free_ep()
909 struct bdc_ep *ep; bdc_free_ep() local
914 ep = bdc->bdc_ep_array[epnum]; bdc_free_ep()
915 if (!ep) bdc_free_ep()
918 if (ep->flags & BDC_EP_ENABLED) bdc_free_ep()
919 ep_bd_list_free(ep, ep->bd_list.num_tabs); bdc_free_ep()
923 list_del(&ep->usb_ep.ep_list); bdc_free_ep()
925 kfree(ep); bdc_free_ep()
958 static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep, handle_xsr_succ_status() argument
962 struct bd_list *bd_list = &ep->bd_list; handle_xsr_succ_status()
972 dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep); handle_xsr_succ_status()
975 if (ep->ignore_next_sr) { handle_xsr_succ_status()
976 ep->ignore_next_sr = false; handle_xsr_succ_status()
980 if (unlikely(list_empty(&ep->queue))) { handle_xsr_succ_status()
984 req = list_entry(ep->queue.next, struct bdc_req, handle_xsr_succ_status()
1004 short_bdi = bd_add_to_bdi(ep, deq_ptr_64); handle_xsr_succ_status()
1016 if (!(bdi_to_tbi(ep, start_bdi) == handle_xsr_succ_status()
1017 bdi_to_tbi(ep, short_bdi))) handle_xsr_succ_status()
1033 end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi); handle_xsr_succ_status()
1035 ep->ignore_next_sr = true; handle_xsr_succ_status()
1038 short_bd = bdi_to_bd(ep, short_bdi); handle_xsr_succ_status()
1055 ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi; handle_xsr_succ_status()
1057 dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num); handle_xsr_succ_status()
1061 bdc_req_complete(ep, bd_xfr->req, status); handle_xsr_succ_status()
1096 struct bdc_ep *ep = bdc->bdc_ep_array[1]; ep0_stall() local
1101 ep_set_halt(ep, 1); ep0_stall()
1104 while (!list_empty(&ep->queue)) { ep0_stall()
1105 req = list_entry(ep->queue.next, struct bdc_req, ep0_stall()
1107 bdc_req_complete(ep, req, -ESHUTDOWN); ep0_stall()
1239 struct bdc_ep *ep; ep0_handle_feature() local
1296 ep = bdc->bdc_ep_array[epnum]; ep0_handle_feature()
1297 if (!ep) ep0_handle_feature()
1300 return ep_set_halt(ep, set); ep0_handle_feature()
1314 struct bdc_ep *ep; ep0_handle_status() local
1362 ep = bdc->bdc_ep_array[epnum]; ep0_handle_status()
1363 if (!ep) { ep0_handle_status()
1367 if (ep->flags & BDC_EP_STALL) ep0_handle_status()
1394 struct bdc_ep *ep; ep0_set_sel() local
1405 ep = bdc->bdc_ep_array[1]; ep0_set_sel()
1406 bdc->ep0_req.ep = ep; ep0_set_sel()
1424 bdc->ep0_req.ep = bdc->bdc_ep_array[1]; ep0_queue_zlp()
1520 struct bdc_ep *ep; bdc_xsf_ep0_data_start() local
1524 ep = bdc->bdc_ep_array[1]; bdc_xsf_ep0_data_start()
1526 if (ep->flags & BDC_EP_STALL) { bdc_xsf_ep0_data_start()
1527 ret = ep_set_halt(ep, 0); bdc_xsf_ep0_data_start()
1559 struct bdc_ep *ep; bdc_xsf_ep0_status_start() local
1565 ep = bdc->bdc_ep_array[1]; bdc_xsf_ep0_status_start()
1571 if (ep->flags & BDC_EP_STALL) { bdc_xsf_ep0_status_start()
1572 ret = ep_set_halt(ep, 0); bdc_xsf_ep0_status_start()
1654 struct bdc_ep *ep; bdc_sr_xsf() local
1659 ep = bdc->bdc_ep_array[ep_num]; bdc_sr_xsf()
1660 if (!ep || !(ep->flags & BDC_EP_ENABLED)) { bdc_sr_xsf()
1661 dev_err(bdc->dev, "xsf for ep not enabled\n"); bdc_sr_xsf()
1674 dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n", bdc_sr_xsf()
1675 __func__, sr_status, ep->name); bdc_sr_xsf()
1680 handle_xsr_succ_status(bdc, ep, sreport); bdc_sr_xsf()
1710 dev_warn(bdc->dev, "Babble on ep not handled\n"); bdc_sr_xsf()
1723 struct bdc_ep *ep; bdc_gadget_ep_queue() local
1733 ep = to_bdc_ep(_ep); bdc_gadget_ep_queue()
1735 bdc = ep->bdc; bdc_gadget_ep_queue()
1736 dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req); bdc_gadget_ep_queue()
1738 _req, ep->name, _req->length, _req->zero); bdc_gadget_ep_queue()
1740 if (!ep->usb_ep.desc) { bdc_gadget_ep_queue()
1743 _req, ep->name); bdc_gadget_ep_queue()
1754 if (ep == bdc->bdc_ep_array[1]) bdc_gadget_ep_queue()
1755 ret = ep0_queue(ep, req); bdc_gadget_ep_queue()
1757 ret = ep_queue(ep, req); bdc_gadget_ep_queue()
1769 struct bdc_ep *ep; bdc_gadget_ep_dequeue() local
1776 ep = to_bdc_ep(_ep); bdc_gadget_ep_dequeue()
1778 bdc = ep->bdc; bdc_gadget_ep_dequeue()
1779 dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req); bdc_gadget_ep_dequeue()
1780 bdc_dbg_bd_list(bdc, ep); bdc_gadget_ep_dequeue()
1783 list_for_each_entry(req, &ep->queue, queue) { bdc_gadget_ep_dequeue()
1792 ret = ep_dequeue(ep, req); bdc_gadget_ep_dequeue()
1797 bdc_req_complete(ep, req, -ECONNRESET); bdc_gadget_ep_dequeue()
1800 bdc_dbg_bd_list(bdc, ep); bdc_gadget_ep_dequeue()
1809 struct bdc_ep *ep; bdc_gadget_ep_set_halt() local
1813 ep = to_bdc_ep(_ep); bdc_gadget_ep_set_halt()
1814 bdc = ep->bdc; bdc_gadget_ep_set_halt()
1815 dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value); bdc_gadget_ep_set_halt()
1817 if (usb_endpoint_xfer_isoc(ep->usb_ep.desc)) bdc_gadget_ep_set_halt()
1819 else if (!list_empty(&ep->queue)) bdc_gadget_ep_set_halt()
1822 ret = ep_set_halt(ep, value); bdc_gadget_ep_set_halt()
1833 struct bdc_ep *ep; bdc_gadget_alloc_request() local
1839 ep = to_bdc_ep(_ep); bdc_gadget_alloc_request()
1840 req->ep = ep; bdc_gadget_alloc_request()
1841 req->epnum = ep->ep_num; bdc_gadget_alloc_request()
1843 dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req); bdc_gadget_alloc_request()
1864 struct bdc_ep *ep; bdc_gadget_ep_enable() local
1878 ep = to_bdc_ep(_ep); bdc_gadget_ep_enable()
1879 bdc = ep->bdc; bdc_gadget_ep_enable()
1882 if (ep == bdc->bdc_ep_array[1]) bdc_gadget_ep_enable()
1890 dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name); bdc_gadget_ep_enable()
1892 ep->desc = desc; bdc_gadget_ep_enable()
1893 ep->comp_desc = _ep->comp_desc; bdc_gadget_ep_enable()
1894 ret = bdc_ep_enable(ep); bdc_gadget_ep_enable()
1903 struct bdc_ep *ep; bdc_gadget_ep_disable() local
1911 ep = to_bdc_ep(_ep); bdc_gadget_ep_disable()
1912 bdc = ep->bdc; bdc_gadget_ep_disable()
1915 if (ep == bdc->bdc_ep_array[1]) { bdc_gadget_ep_disable()
1920 "%s() ep:%s ep->flags:%08x\n", bdc_gadget_ep_disable()
1921 __func__, ep->name, ep->flags); bdc_gadget_ep_disable()
1923 if (!(ep->flags & BDC_EP_ENABLED)) { bdc_gadget_ep_disable()
1924 dev_warn(bdc->dev, "%s is already disabled\n", ep->name); bdc_gadget_ep_disable()
1928 ret = bdc_ep_disable(ep); bdc_gadget_ep_disable()
1947 struct bdc_ep *ep; init_ep() local
1950 ep = kzalloc(sizeof(*ep), GFP_KERNEL); init_ep()
1951 if (!ep) init_ep()
1954 ep->bdc = bdc; init_ep()
1955 ep->dir = dir; init_ep()
1958 ep->usb_ep.caps.dir_in = true; init_ep()
1960 ep->usb_ep.caps.dir_out = true; init_ep()
1962 /* ep->ep_num is the index inside bdc_ep */ init_ep()
1964 ep->ep_num = 1; init_ep()
1965 bdc->bdc_ep_array[ep->ep_num] = ep; init_ep()
1966 snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1); init_ep()
1967 usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE); init_ep()
1968 ep->usb_ep.caps.type_control = true; init_ep()
1969 ep->comp_desc = NULL; init_ep()
1970 bdc->gadget.ep0 = &ep->usb_ep; init_ep()
1973 ep->ep_num = epnum * 2 - 1; init_ep()
1975 ep->ep_num = epnum * 2 - 2; init_ep()
1977 bdc->bdc_ep_array[ep->ep_num] = ep; init_ep()
1978 snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1, init_ep()
1981 usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024); init_ep()
1982 ep->usb_ep.caps.type_iso = true; init_ep()
1983 ep->usb_ep.caps.type_bulk = true; init_ep()
1984 ep->usb_ep.caps.type_int = true; init_ep()
1985 ep->usb_ep.max_streams = 0; init_ep()
1986 list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list); init_ep()
1988 ep->usb_ep.ops = &bdc_gadget_ep_ops; init_ep()
1989 ep->usb_ep.name = ep->name; init_ep()
1990 ep->flags = 0; init_ep()
1991 ep->ignore_next_sr = false; init_ep()
1992 dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n", init_ep()
1993 ep, ep->usb_ep.name, epnum, ep->ep_num); init_ep()
1995 INIT_LIST_HEAD(&ep->queue); init_ep()
2000 /* Init all ep */ bdc_init_ep()
2011 dev_err(bdc->dev, "init ep ep0 fail %d\n", ret); bdc_init_ep()
2020 "init ep failed for:%d error: %d\n", bdc_init_ep()
2029 "init ep failed for:%d error: %d\n", bdc_init_ep()
H A D bdc_cmd.c
86 dev_err(bdc->dev, "Invalid device/ep state\n"); bdc_submit_cmd()
116 int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep) bdc_dconfig_ep() argument
120 cmd_sc = BDC_SUB_CMD_DRP_EP|BDC_CMD_EPN(ep->ep_num)|BDC_CMD_EPC; bdc_dconfig_ep()
121 dev_dbg(bdc->dev, "%s ep->ep_num =%d cmd_sc=%x\n", __func__, bdc_dconfig_ep()
122 ep->ep_num, cmd_sc); bdc_dconfig_ep()
127 /* Reinitialize the bdlist after config ep command */ ep_bd_list_reinit()
128 static void ep_bd_list_reinit(struct bdc_ep *ep) ep_bd_list_reinit() argument
130 struct bdc *bdc = ep->bdc; ep_bd_list_reinit()
133 ep->bd_list.eqp_bdi = 0; ep_bd_list_reinit()
134 ep->bd_list.hwd_bdi = 0; ep_bd_list_reinit()
135 bd = ep->bd_list.bd_table_array[0]->start_bd; ep_bd_list_reinit()
136 dev_dbg(bdc->dev, "%s ep:%p bd:%p\n", __func__, ep, bd); ep_bd_list_reinit()
142 int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep) bdc_config_ep() argument
150 desc = ep->desc; bdc_config_ep()
151 comp_desc = ep->comp_desc; bdc_config_ep()
153 param0 = lower_32_bits(ep->bd_list.bd_table_array[0]->dma); bdc_config_ep()
154 param1 = upper_32_bits(ep->bd_list.bd_table_array[0]->dma); bdc_config_ep()
214 cmd_sc |= BDC_CMD_EPC|BDC_CMD_EPN(ep->ep_num)|BDC_SUB_CMD_ADD_EP; bdc_config_ep()
222 ep_bd_list_reinit(ep); bdc_config_ep()
231 int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr) bdc_ep_bla() argument
243 cmd_sc |= BDC_CMD_EPN(ep->ep_num)|BDC_CMD_BLA; bdc_ep_bla()
308 struct bdc_ep *ep; bdc_ep_clear_stall() local
313 ep = bdc->bdc_ep_array[epnum]; bdc_ep_clear_stall()
320 if (!(ep->flags & BDC_EP_STALL)) { bdc_ep_clear_stall()
346 struct bdc_ep *ep; bdc_stop_ep() local
350 ep = bdc->bdc_ep_array[epnum]; bdc_stop_ep()
351 dev_dbg(bdc->dev, "%s: ep:%s ep->flags:%08x\n", __func__, bdc_stop_ep()
352 ep->name, ep->flags); bdc_stop_ep()
353 /* Endpoint has to be in running state to execute stop ep command */ bdc_stop_ep()
354 if (!(ep->flags & BDC_EP_ENABLED)) { bdc_stop_ep()
355 dev_err(bdc->dev, "stop endpoint called for disabled ep\n"); bdc_stop_ep()
358 if ((ep->flags & BDC_EP_STALL) || (ep->flags & BDC_EP_STOP)) bdc_stop_ep()
368 "stop endpoint command didn't complete:%d ep:%s\n", bdc_stop_ep()
369 ret, ep->name); bdc_stop_ep()
372 ep->flags |= BDC_EP_STOP; bdc_stop_ep()
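The command routines build cmd_sc by ORing sub-command, endpoint-number, and opcode fields into one word, as in BDC_SUB_CMD_DRP_EP|BDC_CMD_EPN(ep_num)|BDC_CMD_EPC above. A sketch of that composition with invented field positions; the real shifts and opcodes live in the BDC headers, not here:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical field layout, only to illustrate the OR-composition
     * pattern; these shifts do NOT match the hardware's. */
    #define CMD_EPN(n)      (((uint32_t)(n) & 0x1f) << 10)  /* endpoint number */
    #define CMD_EPC         0x4u                            /* opcode field */
    #define SUB_CMD_DRP_EP  (0x2u << 17)                    /* sub-command */

    int main(void)
    {
        uint32_t cmd_sc = SUB_CMD_DRP_EP | CMD_EPN(3) | CMD_EPC;

        printf("cmd_sc = 0x%08x\n", cmd_sc);
        return 0;
    }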
H A D bdc_dbg.c
90 void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep) bdc_dbg_bd_list() argument
92 struct bd_list *bd_list = &ep->bd_list; bdc_dbg_bd_list()
101 ep->name, ep->ep_num); bdc_dbg_bd_list()
H A D bdc_dbg.h
31 static inline void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep) bdc_dbg_bd_list() argument
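bdc_dbg.h pairs each dump helper with an empty static inline stub, so that with debugging configured out the calls compile away entirely. The pattern in miniature; BDC_DEBUG is an assumed stand-in for the driver's real Kconfig switch:

    #include <stdio.h>

    /* With the switch defined the helper dumps state; without it the
     * call sites still compile but reduce to a no-op the compiler can
     * remove. */
    #ifdef BDC_DEBUG
    static inline void dbg_dump(const char *name)
    {
        printf("dump %s\n", name);
    }
    #else
    static inline void dbg_dump(const char *name)
    {
        (void)name;   /* compiled out */
    }
    #endif

    int main(void)
    {
        dbg_dump("ep1in");   /* no-op unless built with -DBDC_DEBUG */
        return 0;
    }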
/linux-4.4.14/net/sctp/
H A Dendpointola.c57 static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, sctp_endpoint_init() argument
67 ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); sctp_endpoint_init()
68 if (!ep->digest) sctp_endpoint_init()
71 ep->auth_enable = net->sctp.auth_enable; sctp_endpoint_init()
72 if (ep->auth_enable) { sctp_endpoint_init()
114 ep->base.type = SCTP_EP_TYPE_SOCKET; sctp_endpoint_init()
117 atomic_set(&ep->base.refcnt, 1); sctp_endpoint_init()
118 ep->base.dead = false; sctp_endpoint_init()
121 sctp_inq_init(&ep->base.inqueue); sctp_endpoint_init()
124 sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); sctp_endpoint_init()
127 sctp_bind_addr_init(&ep->base.bind_addr, 0); sctp_endpoint_init()
130 ep->base.sk = sk; sctp_endpoint_init()
131 sock_hold(ep->base.sk); sctp_endpoint_init()
134 INIT_LIST_HEAD(&ep->asocs); sctp_endpoint_init()
137 ep->sndbuf_policy = net->sctp.sndbuf_policy; sctp_endpoint_init()
144 ep->rcvbuf_policy = net->sctp.rcvbuf_policy; sctp_endpoint_init()
147 get_random_bytes(ep->secret_key, sizeof(ep->secret_key)); sctp_endpoint_init()
150 INIT_LIST_HEAD(&ep->endpoint_shared_keys); sctp_endpoint_init()
155 list_add(&null_key->key_list, &ep->endpoint_shared_keys); sctp_endpoint_init()
158 err = sctp_auth_init_hmacs(ep, gfp); sctp_endpoint_init()
165 ep->auth_hmacs_list = auth_hmacs; sctp_endpoint_init()
166 ep->auth_chunk_list = auth_chunks; sctp_endpoint_init()
168 return ep; sctp_endpoint_init()
171 sctp_auth_destroy_keys(&ep->endpoint_shared_keys); sctp_endpoint_init()
176 kfree(ep->digest); sctp_endpoint_init()
186 struct sctp_endpoint *ep; sctp_endpoint_new() local
189 ep = kzalloc(sizeof(*ep), gfp); sctp_endpoint_new()
190 if (!ep) sctp_endpoint_new()
193 if (!sctp_endpoint_init(ep, sk, gfp)) sctp_endpoint_new()
196 SCTP_DBG_OBJCNT_INC(ep); sctp_endpoint_new()
197 return ep; sctp_endpoint_new()
200 kfree(ep); sctp_endpoint_new()
206 void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, sctp_endpoint_add_asoc() argument
209 struct sock *sk = ep->base.sk; sctp_endpoint_add_asoc()
219 list_add_tail(&asoc->asocs, &ep->asocs); sctp_endpoint_add_asoc()
229 void sctp_endpoint_free(struct sctp_endpoint *ep) sctp_endpoint_free() argument
231 ep->base.dead = true; sctp_endpoint_free()
233 ep->base.sk->sk_state = SCTP_SS_CLOSED; sctp_endpoint_free()
236 sctp_unhash_endpoint(ep); sctp_endpoint_free()
238 sctp_endpoint_put(ep); sctp_endpoint_free()
242 static void sctp_endpoint_destroy(struct sctp_endpoint *ep) sctp_endpoint_destroy() argument
246 if (unlikely(!ep->base.dead)) { sctp_endpoint_destroy()
247 WARN(1, "Attempt to destroy undead endpoint %p!\n", ep); sctp_endpoint_destroy()
252 kfree(ep->digest); sctp_endpoint_destroy()
257 sctp_auth_destroy_keys(&ep->endpoint_shared_keys); sctp_endpoint_destroy()
258 kfree(ep->auth_hmacs_list); sctp_endpoint_destroy()
259 kfree(ep->auth_chunk_list); sctp_endpoint_destroy()
262 sctp_auth_destroy_hmacs(ep->auth_hmacs); sctp_endpoint_destroy()
265 sctp_inq_free(&ep->base.inqueue); sctp_endpoint_destroy()
266 sctp_bind_addr_free(&ep->base.bind_addr); sctp_endpoint_destroy()
268 memset(ep->secret_key, 0, sizeof(ep->secret_key)); sctp_endpoint_destroy()
271 sk = ep->base.sk; sctp_endpoint_destroy()
280 kfree(ep); sctp_endpoint_destroy()
281 SCTP_DBG_OBJCNT_DEC(ep); sctp_endpoint_destroy()
285 void sctp_endpoint_hold(struct sctp_endpoint *ep) sctp_endpoint_hold() argument
287 atomic_inc(&ep->base.refcnt); sctp_endpoint_hold()
293 void sctp_endpoint_put(struct sctp_endpoint *ep) sctp_endpoint_put() argument
295 if (atomic_dec_and_test(&ep->base.refcnt)) sctp_endpoint_put()
296 sctp_endpoint_destroy(ep); sctp_endpoint_put()
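sctp_endpoint_hold() and sctp_endpoint_put() implement the usual refcount discipline: the creator starts at one, every user pairs a hold with a put, and only the put that drops the count to zero runs sctp_endpoint_destroy(). A user-space sketch of the same discipline using C11 atomics; all names here are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcnt;
    };

    static struct obj *obj_new(void)
    {
        struct obj *o = malloc(sizeof(*o));
        atomic_init(&o->refcnt, 1);        /* creator owns one reference */
        return o;
    }

    static void obj_hold(struct obj *o)
    {
        atomic_fetch_add(&o->refcnt, 1);
    }

    static void obj_put(struct obj *o)
    {
        /* fetch_sub returns the old value; it hit zero iff old == 1 */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
            printf("destroying\n");
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = obj_new();

        obj_hold(o);   /* a second user takes a reference */
        obj_put(o);    /* first drop: object survives */
        obj_put(o);    /* last drop: destructor runs */
        return 0;
    }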
300 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, sctp_endpoint_is_match() argument
306 if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) && sctp_endpoint_is_match()
307 net_eq(sock_net(ep->base.sk), net)) { sctp_endpoint_is_match()
308 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, sctp_endpoint_is_match()
309 sctp_sk(ep->base.sk))) sctp_endpoint_is_match()
310 retval = ep; sctp_endpoint_is_match()
321 const struct sctp_endpoint *ep, __sctp_endpoint_lookup_assoc()
338 if (!ep->base.bind_addr.port) __sctp_endpoint_lookup_assoc()
343 hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port, __sctp_endpoint_lookup_assoc()
349 if (tmp->ep != ep || rport != tmp->peer.port) __sctp_endpoint_lookup_assoc()
366 const struct sctp_endpoint *ep, sctp_endpoint_lookup_assoc()
373 asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport); sctp_endpoint_lookup_assoc()
382 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, sctp_endpoint_is_peeled_off() argument
387 struct net *net = sock_net(ep->base.sk); sctp_endpoint_is_peeled_off()
389 bp = &ep->base.bind_addr; sctp_endpoint_is_peeled_off()
406 struct sctp_endpoint *ep = sctp_endpoint_bh_rcv() local
420 if (ep->base.dead) sctp_endpoint_bh_rcv()
424 inqueue = &ep->base.inqueue; sctp_endpoint_bh_rcv()
425 sk = ep->base.sk; sctp_endpoint_bh_rcv()
461 asoc = sctp_endpoint_lookup_assoc(ep, sctp_endpoint_bh_rcv()
478 SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS); sctp_endpoint_bh_rcv()
487 ep, asoc, chunk, GFP_ATOMIC); sctp_endpoint_bh_rcv()
495 if (!sctp_sk(sk)->ep) sctp_endpoint_bh_rcv()
320 __sctp_endpoint_lookup_assoc( const struct sctp_endpoint *ep, const union sctp_addr *paddr, struct sctp_transport **transport) __sctp_endpoint_lookup_assoc() argument
365 sctp_endpoint_lookup_assoc( const struct sctp_endpoint *ep, const union sctp_addr *paddr, struct sctp_transport **transport) sctp_endpoint_lookup_assoc() argument
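__sctp_endpoint_lookup_assoc() hashes on the bound and peer ports, then walks the bucket's chain, skipping entries whose owning endpoint or peer port differs. A minimal chained-hash lookup in the same spirit; the hash function, bucket count, and field names are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define HASH_BUCKETS 8     /* assumed; the kernel sizes its table at boot */

    struct assoc {
        int owner_id;          /* stands in for tmp->ep */
        int peer_port;         /* stands in for tmp->peer.port */
        struct assoc *next;
    };

    static unsigned hashfn(int lport, int rport)
    {
        return (unsigned)(lport * 31 + rport) % HASH_BUCKETS;
    }

    /* Hash to a bucket, then reject chain entries that belong to a
     * different owner or peer: the same filter the kernel applies with
     * tmp->ep != ep || rport != tmp->peer.port. */
    static struct assoc *lookup(struct assoc *buckets[], int owner_id,
                                int lport, int rport)
    {
        for (struct assoc *a = buckets[hashfn(lport, rport)]; a; a = a->next)
            if (a->owner_id == owner_id && a->peer_port == rport)
                return a;
        return NULL;
    }

    int main(void)
    {
        struct assoc a = { .owner_id = 1, .peer_port = 9000, .next = NULL };
        struct assoc *buckets[HASH_BUCKETS] = { NULL };

        buckets[hashfn(5000, 9000)] = &a;
        printf("found: %s\n", lookup(buckets, 1, 5000, 9000) ? "yes" : "no");
        return 0;
    }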
H A D sm_statefuns.c
63 const struct sctp_endpoint *ep,
75 const struct sctp_endpoint *ep,
81 const struct sctp_endpoint *ep,
87 const struct sctp_endpoint *ep,
93 const struct sctp_endpoint *ep,
108 const struct sctp_endpoint *ep,
117 const struct sctp_endpoint *ep,
125 const struct sctp_endpoint *ep,
133 const struct sctp_endpoint *ep,
141 const struct sctp_endpoint *ep,
148 const struct sctp_endpoint *ep,
154 const struct sctp_endpoint *ep,
218 const struct sctp_endpoint *ep, sctp_sf_do_4_C()
228 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_4_C()
236 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); sctp_sf_do_4_C()
240 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_4_C()
304 const struct sctp_endpoint *ep, sctp_sf_do_5_1B_init()
328 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1B_init()
333 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { sctp_sf_do_5_1B_init()
335 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1B_init()
342 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1B_init()
350 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1B_init()
358 if (sctp_sstate(ep->base.sk, CLOSING)) sctp_sf_do_5_1B_init()
359 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1B_init()
363 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, sctp_sf_do_5_1B_init()
370 packet = sctp_abort_pkt_new(net, ep, asoc, arg, sctp_sf_do_5_1B_init()
387 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, sctp_sf_do_5_1B_init()
398 new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); sctp_sf_do_5_1B_init()
500 const struct sctp_endpoint *ep, sctp_sf_do_5_1C_ack()
512 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1C_ack()
519 return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1C_ack()
523 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_5_1C_ack()
530 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, sctp_sf_do_5_1C_ack()
542 packet = sctp_abort_pkt_new(net, ep, asoc, arg, sctp_sf_do_5_1C_ack()
570 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1C_ack()
650 const struct sctp_endpoint *ep, sctp_sf_do_5_1D_ce()
667 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { sctp_sf_do_5_1D_ce()
669 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
678 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
684 sk = ep->base.sk; sctp_sf_do_5_1D_ce()
687 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
702 new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, sctp_sf_do_5_1D_ce()
720 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, sctp_sf_do_5_1D_ce()
722 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
726 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
767 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
779 ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); sctp_sf_do_5_1D_ce()
782 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1D_ce()
876 const struct sctp_endpoint *ep, sctp_sf_do_5_1E_ca()
885 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_1E_ca()
891 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_5_1E_ca()
953 static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, sctp_sf_heartbeat() argument
979 const struct sctp_endpoint *ep, sctp_sf_sendbeat_8_3()
1007 sctp_sf_heartbeat(ep, asoc, type, arg, sctp_sf_sendbeat_8_3()
1050 const struct sctp_endpoint *ep, sctp_sf_beat_8_3()
1062 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_beat_8_3()
1066 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_beat_8_3()
1078 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, sctp_sf_beat_8_3()
1124 const struct sctp_endpoint *ep, sctp_sf_backbeat_8_3()
1137 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_backbeat_8_3()
1142 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_backbeat_8_3()
1208 struct sctp_endpoint *ep; sctp_sf_send_restart_abort() local
1226 ep = sctp_sk(net->sctp.ctl_sock)->ep; sctp_sf_send_restart_abort()
1231 pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len); sctp_sf_send_restart_abort()
1389 const struct sctp_endpoint *ep, sctp_sf_do_unexpected_init()
1413 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_unexpected_init()
1419 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_do_unexpected_init()
1426 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_unexpected_init()
1436 if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, sctp_sf_do_unexpected_init()
1443 packet = sctp_abort_pkt_new(net, ep, asoc, arg, sctp_sf_do_unexpected_init()
1459 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, sctp_sf_do_unexpected_init()
1471 new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); sctp_sf_do_unexpected_init()
1602 const struct sctp_endpoint *ep, sctp_sf_do_5_2_1_siminit()
1611 return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_1_siminit()
1656 const struct sctp_endpoint *ep, sctp_sf_do_5_2_2_dupinit()
1665 return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_2_dupinit()
1679 const struct sctp_endpoint *ep, sctp_sf_do_5_2_3_initack()
1687 if (ep == sctp_sk(net->sctp.ctl_sock)->ep) sctp_sf_do_5_2_3_initack()
1688 return sctp_sf_ootb(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_3_initack()
1690 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_3_initack()
1699 const struct sctp_endpoint *ep, sctp_sf_do_dupcook_a()
1735 disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc, sctp_sf_do_dupcook_a()
1789 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, sctp_sf_do_dupcook_a()
1814 const struct sctp_endpoint *ep, sctp_sf_do_dupcook_b()
1885 const struct sctp_endpoint *ep, sctp_sf_do_dupcook_c()
1907 const struct sctp_endpoint *ep, sctp_sf_do_dupcook_d()
2002 const struct sctp_endpoint *ep, sctp_sf_do_5_2_4_dupcook()
2021 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_5_2_4_dupcook()
2037 new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, sctp_sf_do_5_2_4_dupcook()
2055 sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands, sctp_sf_do_5_2_4_dupcook()
2057 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_4_dupcook()
2060 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_4_dupcook()
2071 retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands, sctp_sf_do_5_2_4_dupcook()
2076 retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands, sctp_sf_do_5_2_4_dupcook()
2081 retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands, sctp_sf_do_5_2_4_dupcook()
2086 retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands, sctp_sf_do_5_2_4_dupcook()
2091 retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_5_2_4_dupcook()
2118 const struct sctp_endpoint *ep, sctp_sf_shutdown_pending_abort()
2127 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_pending_abort()
2140 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_pending_abort()
2149 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_pending_abort()
2151 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_pending_abort()
2160 const struct sctp_endpoint *ep, sctp_sf_shutdown_sent_abort()
2169 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_sent_abort()
2182 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_sent_abort()
2191 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_sent_abort()
2201 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_sent_abort()
2211 const struct sctp_endpoint *ep, sctp_sf_shutdown_ack_sent_abort()
2220 return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_ack_sent_abort()
2238 const struct sctp_endpoint *ep, sctp_sf_cookie_echoed_err()
2248 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_cookie_echoed_err()
2254 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_cookie_echoed_err()
2264 return sctp_sf_do_5_2_6_stale(net, ep, asoc, type, sctp_sf_cookie_echoed_err()
2273 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_cookie_echoed_err()
2302 const struct sctp_endpoint *ep, sctp_sf_do_5_2_6_stale()
2425 const struct sctp_endpoint *ep, sctp_sf_do_9_1_abort()
2434 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_9_1_abort()
2447 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_9_1_abort()
2456 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); sctp_sf_do_9_1_abort()
2458 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); sctp_sf_do_9_1_abort()
2462 const struct sctp_endpoint *ep, __sctp_sf_do_9_1_abort()
2479 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); __sctp_sf_do_9_1_abort()
2499 const struct sctp_endpoint *ep, sctp_sf_cookie_wait_abort()
2510 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_cookie_wait_abort()
2523 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_cookie_wait_abort()
2538 const struct sctp_endpoint *ep, sctp_sf_cookie_wait_icmp_abort()
2553 const struct sctp_endpoint *ep, sctp_sf_cookie_echoed_abort()
2562 return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands); sctp_sf_cookie_echoed_abort()
2625 const struct sctp_endpoint *ep, sctp_sf_do_9_2_shutdown()
2638 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_9_2_shutdown()
2643 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_9_2_shutdown()
2664 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); sctp_sf_do_9_2_shutdown()
2688 disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type, sctp_sf_do_9_2_shutdown()
2715 const struct sctp_endpoint *ep, sctp_sf_do_9_2_shut_ctsn()
2726 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_9_2_shut_ctsn()
2731 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_9_2_shut_ctsn()
2749 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); sctp_sf_do_9_2_shut_ctsn()
2769 const struct sctp_endpoint *ep, sctp_sf_do_9_2_reshutack()
2780 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_9_2_reshutack()
2833 const struct sctp_endpoint *ep, sctp_sf_do_ecn_cwr()
2844 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_ecn_cwr()
2847 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_ecn_cwr()
2889 const struct sctp_endpoint *ep, sctp_sf_do_ecne()
2899 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_ecne()
2902 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_ecne()
2946 const struct sctp_endpoint *ep, sctp_sf_eat_data_6_2()
2959 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_data_6_2()
2963 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_data_6_2()
2981 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, sctp_sf_eat_data_6_2()
3068 const struct sctp_endpoint *ep, sctp_sf_eat_data_fast_4_4()
3080 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_data_fast_4_4()
3084 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_data_fast_4_4()
3098 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, sctp_sf_eat_data_fast_4_4()
3159 const struct sctp_endpoint *ep, sctp_sf_eat_sack_6_2()
3170 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_sack_6_2()
3174 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_sack_6_2()
3181 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_sack_6_2()
3203 return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); sctp_sf_eat_sack_6_2()
3233 const struct sctp_endpoint *ep, sctp_sf_tabort_8_4_8()
3260 abort->skb->sk = ep->base.sk; sctp_sf_tabort_8_4_8()
3269 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_tabort_8_4_8()
3285 const struct sctp_endpoint *ep, sctp_sf_operr_notify()
3295 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_operr_notify()
3299 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_operr_notify()
3303 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, sctp_sf_operr_notify()
3323 const struct sctp_endpoint *ep, sctp_sf_do_9_2_final()
3334 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_9_2_final()
3338 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_9_2_final()
3406 const struct sctp_endpoint *ep, sctp_sf_ootb()
3426 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_ootb()
3441 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_ootb()
3463 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_ootb()
3470 return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands); sctp_sf_ootb()
3472 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_ootb()
3474 return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); sctp_sf_ootb()
3499 const struct sctp_endpoint *ep, sctp_sf_shut_8_4_5()
3526 shut->skb->sk = ep->base.sk; sctp_sf_shut_8_4_5()
3539 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_shut_8_4_5()
3545 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_shut_8_4_5()
3563 const struct sctp_endpoint *ep, sctp_sf_do_8_5_1_E_sa()
3573 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_8_5_1_E_sa()
3583 return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands); sctp_sf_do_8_5_1_E_sa()
3588 const struct sctp_endpoint *ep, sctp_sf_do_asconf()
3602 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_asconf()
3612 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); sctp_sf_do_asconf()
3616 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_asconf()
3624 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, sctp_sf_do_asconf()
3691 sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands); sctp_sf_do_asconf()
3704 const struct sctp_endpoint *ep, sctp_sf_do_asconf_ack()
3719 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_asconf_ack()
3729 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); sctp_sf_do_asconf_ack()
3733 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_asconf_ack()
3741 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, sctp_sf_do_asconf_ack()
3833 const struct sctp_endpoint *ep, sctp_sf_eat_fwd_tsn()
3848 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_fwd_tsn()
3853 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_fwd_tsn()
3901 const struct sctp_endpoint *ep, sctp_sf_eat_fwd_tsn_fast()
3916 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_fwd_tsn_fast()
3921 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_fwd_tsn_fast()
3989 const struct sctp_endpoint *ep, sctp_sf_authenticate()
4063 const struct sctp_endpoint *ep, sctp_sf_eat_auth()
4076 return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands); sctp_sf_eat_auth()
4081 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_auth()
4086 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_auth()
4090 error = sctp_sf_authenticate(net, ep, asoc, type, chunk); sctp_sf_eat_auth()
4107 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_eat_auth()
4110 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_eat_auth()
4160 const struct sctp_endpoint *ep, sctp_sf_unk_chunk()
4173 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_unk_chunk()
4180 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_unk_chunk()
4186 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_unk_chunk()
4200 sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_unk_chunk()
4240 const struct sctp_endpoint *ep, sctp_sf_discard_chunk()
4253 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_discard_chunk()
4280 const struct sctp_endpoint *ep, sctp_sf_pdiscard()
4308 const struct sctp_endpoint *ep, sctp_sf_violation()
4318 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_violation()
4329 const struct sctp_endpoint *ep, sctp_sf_abort_violation()
4404 abort->skb->sk = ep->base.sk; sctp_sf_abort_violation()
4417 sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands); sctp_sf_abort_violation()
4447 const struct sctp_endpoint *ep, sctp_sf_violation_chunklen()
4455 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, sctp_sf_violation_chunklen()
4467 const struct sctp_endpoint *ep, sctp_sf_violation_paramlen()
4496 sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands); sctp_sf_violation_paramlen()
4510 const struct sctp_endpoint *ep, sctp_sf_violation_ctsn()
4518 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, sctp_sf_violation_ctsn()
4530 const struct sctp_endpoint *ep, sctp_sf_violation_chunk()
4539 return sctp_sf_violation(net, ep, asoc, type, arg, commands); sctp_sf_violation_chunk()
4541 return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, sctp_sf_violation_chunk()
4605 const struct sctp_endpoint *ep, sctp_sf_do_prm_asoc()
4717 const struct sctp_endpoint *ep, sctp_sf_do_prm_send()
4757 const struct sctp_endpoint *ep, sctp_sf_do_9_2_prm_shutdown()
4778 disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, sctp_sf_do_9_2_prm_shutdown()
4813 const struct sctp_endpoint *ep, sctp_sf_do_9_1_prm_abort()
4853 const struct sctp_endpoint *ep, sctp_sf_error_closed()
4867 const struct sctp_endpoint *ep, sctp_sf_error_shutdown()
4894 const struct sctp_endpoint *ep, sctp_sf_cookie_wait_prm_shutdown()
4929 const struct sctp_endpoint *ep, sctp_sf_cookie_echoed_prm_shutdown()
4937 return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands); sctp_sf_cookie_echoed_prm_shutdown()
4956 const struct sctp_endpoint *ep, sctp_sf_cookie_wait_prm_abort()
5007 const struct sctp_endpoint *ep, sctp_sf_cookie_echoed_prm_abort()
5016 return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands); sctp_sf_cookie_echoed_prm_abort()
5033 const struct sctp_endpoint *ep, sctp_sf_shutdown_pending_prm_abort()
5043 return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_pending_prm_abort()
5060 const struct sctp_endpoint *ep, sctp_sf_shutdown_sent_prm_abort()
5074 return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_sent_prm_abort()
5091 const struct sctp_endpoint *ep, sctp_sf_shutdown_ack_sent_prm_abort()
5100 return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands); sctp_sf_shutdown_ack_sent_prm_abort()
5127 const struct sctp_endpoint *ep, sctp_sf_do_prm_requestheartbeat()
5133 if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type, sctp_sf_do_prm_requestheartbeat()
5159 const struct sctp_endpoint *ep, sctp_sf_do_prm_asconf()
5181 const struct sctp_endpoint *ep, sctp_sf_ignore_primitive()
5205 const struct sctp_endpoint *ep, sctp_sf_do_no_pending_tsn()
5237 const struct sctp_endpoint *ep, sctp_sf_do_9_2_start_shutdown()
5307 const struct sctp_endpoint *ep, sctp_sf_do_9_2_shutdown_ack()
5325 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); sctp_sf_do_9_2_shutdown_ack()
5329 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, sctp_sf_do_9_2_shutdown_ack()
5378 const struct sctp_endpoint *ep, sctp_sf_ignore_other()
5406 const struct sctp_endpoint *ep, sctp_sf_do_6_3_3_rtx()
5494 const struct sctp_endpoint *ep, sctp_sf_do_6_2_sack()
5525 const struct sctp_endpoint *ep, sctp_sf_t1_init_timer_expire()
5589 const struct sctp_endpoint *ep, sctp_sf_t1_cookie_timer_expire()
5639 const struct sctp_endpoint *ep, sctp_sf_t2_timer_expire()
5710 const struct sctp_endpoint *ep, sctp_sf_t4_timer_expire()
5781 const struct sctp_endpoint *ep, sctp_sf_t5_timer_expire()
5818 const struct sctp_endpoint *ep, sctp_sf_autoclose_timer_expire()
5841 disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, sctp_sf_autoclose_timer_expire()
5860 const struct sctp_endpoint *ep, sctp_sf_not_impl()
5878 const struct sctp_endpoint *ep, sctp_sf_bug()
5899 const struct sctp_endpoint *ep, sctp_sf_timer_ignore()
5943 const struct sctp_endpoint *ep, sctp_abort_pkt_new()
5974 abort->skb->sk = ep->base.sk; sctp_abort_pkt_new()
6065 const struct sctp_endpoint *ep, sctp_send_stale_cookie_err()
6083 err_chunk->skb->sk = ep->base.sk; sctp_send_stale_cookie_err()
217 sctp_sf_do_4_C(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_4_C() argument
303 sctp_sf_do_5_1B_init(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_1B_init() argument
499 sctp_sf_do_5_1C_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_1C_ack() argument
649 sctp_sf_do_5_1D_ce(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_1D_ce() argument
875 sctp_sf_do_5_1E_ca(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_1E_ca() argument
978 sctp_sf_sendbeat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_sendbeat_8_3() argument
1049 sctp_sf_beat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_beat_8_3() argument
1123 sctp_sf_backbeat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_backbeat_8_3() argument
1387 sctp_sf_do_unexpected_init( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_unexpected_init() argument
1601 sctp_sf_do_5_2_1_siminit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_2_1_siminit() argument
1655 sctp_sf_do_5_2_2_dupinit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_2_2_dupinit() argument
1678 sctp_sf_do_5_2_3_initack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_2_3_initack() argument
1698 sctp_sf_do_dupcook_a(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) sctp_sf_do_dupcook_a() argument
1813 sctp_sf_do_dupcook_b(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) sctp_sf_do_dupcook_b() argument
1884 sctp_sf_do_dupcook_c(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) sctp_sf_do_dupcook_c() argument
1906 sctp_sf_do_dupcook_d(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_association *new_asoc) sctp_sf_do_dupcook_d() argument
2001 sctp_sf_do_5_2_4_dupcook(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_2_4_dupcook() argument
2116 sctp_sf_shutdown_pending_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shutdown_pending_abort() argument
2159 sctp_sf_shutdown_sent_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shutdown_sent_abort() argument
2209 sctp_sf_shutdown_ack_sent_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shutdown_ack_sent_abort() argument
2237 sctp_sf_cookie_echoed_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_echoed_err() argument
2301 sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_5_2_6_stale() argument
2424 sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_1_abort() argument
2461 __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) __sctp_sf_do_9_1_abort() argument
2498 sctp_sf_cookie_wait_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_wait_abort() argument
2537 sctp_sf_cookie_wait_icmp_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_wait_icmp_abort() argument
2552 sctp_sf_cookie_echoed_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_echoed_abort() argument
2624 sctp_sf_do_9_2_shutdown(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_shutdown() argument
2714 sctp_sf_do_9_2_shut_ctsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_shut_ctsn() argument
2768 sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_reshutack() argument
2832 sctp_sf_do_ecn_cwr(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_ecn_cwr() argument
2888 sctp_sf_do_ecne(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_ecne() argument
2945 sctp_sf_eat_data_6_2(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_eat_data_6_2() argument
3067 sctp_sf_eat_data_fast_4_4(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_eat_data_fast_4_4() argument
3158 sctp_sf_eat_sack_6_2(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_eat_sack_6_2() argument
3232 sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_tabort_8_4_8() argument
3284 sctp_sf_operr_notify(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_operr_notify() argument
3322 sctp_sf_do_9_2_final(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_final() argument
3405 sctp_sf_ootb(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_ootb() argument
3498 sctp_sf_shut_8_4_5(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shut_8_4_5() argument
3562 sctp_sf_do_8_5_1_E_sa(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_8_5_1_E_sa() argument
3587 sctp_sf_do_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_asconf() argument
3703 sctp_sf_do_asconf_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_asconf_ack() argument
3832 sctp_sf_eat_fwd_tsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_eat_fwd_tsn() argument
3899 sctp_sf_eat_fwd_tsn_fast( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_eat_fwd_tsn_fast() argument
3988 sctp_sf_authenticate(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, struct sctp_chunk *chunk) sctp_sf_authenticate() argument
4062 sctp_sf_eat_auth(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_eat_auth() argument
4159 sctp_sf_unk_chunk(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_unk_chunk() argument
4239 sctp_sf_discard_chunk(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_discard_chunk() argument
4279 sctp_sf_pdiscard(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_pdiscard() argument
4307 sctp_sf_violation(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_violation() argument
4327 sctp_sf_abort_violation( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, void *arg, sctp_cmd_seq_t *commands, const __u8 *payload, const size_t paylen) sctp_sf_abort_violation() argument
4445 sctp_sf_violation_chunklen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_violation_chunklen() argument
4465 sctp_sf_violation_paramlen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, void *ext, sctp_cmd_seq_t *commands) sctp_sf_violation_paramlen() argument
4508 sctp_sf_violation_ctsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_violation_ctsn() argument
4528 sctp_sf_violation_chunk( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_violation_chunk() argument
4604 sctp_sf_do_prm_asoc(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_prm_asoc() argument
4716 sctp_sf_do_prm_send(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_prm_send() argument
4755 sctp_sf_do_9_2_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_prm_shutdown() argument
4811 sctp_sf_do_9_1_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_1_prm_abort() argument
4852 sctp_sf_error_closed(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_error_closed() argument
4866 sctp_sf_error_shutdown(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_error_shutdown() argument
4892 sctp_sf_cookie_wait_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_wait_prm_shutdown() argument
4927 sctp_sf_cookie_echoed_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_echoed_prm_shutdown() argument
4954 sctp_sf_cookie_wait_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_wait_prm_abort() argument
5005 sctp_sf_cookie_echoed_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_cookie_echoed_prm_abort() argument
5031 sctp_sf_shutdown_pending_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shutdown_pending_prm_abort() argument
5058 sctp_sf_shutdown_sent_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shutdown_sent_prm_abort() argument
5089 sctp_sf_shutdown_ack_sent_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_shutdown_ack_sent_prm_abort() argument
5125 sctp_sf_do_prm_requestheartbeat( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_prm_requestheartbeat() argument
5158 sctp_sf_do_prm_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_prm_asconf() argument
5179 sctp_sf_ignore_primitive( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_ignore_primitive() argument
5203 sctp_sf_do_no_pending_tsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_no_pending_tsn() argument
5235 sctp_sf_do_9_2_start_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_start_shutdown() argument
5305 sctp_sf_do_9_2_shutdown_ack( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_9_2_shutdown_ack() argument
5377 sctp_sf_ignore_other(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_ignore_other() argument
5405 sctp_sf_do_6_3_3_rtx(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_6_3_3_rtx() argument
5493 sctp_sf_do_6_2_sack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_do_6_2_sack() argument
5524 sctp_sf_t1_init_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_t1_init_timer_expire() argument
5588 sctp_sf_t1_cookie_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_t1_cookie_timer_expire() argument
5638 sctp_sf_t2_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_t2_timer_expire() argument
5708 sctp_sf_t4_timer_expire( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_t4_timer_expire() argument
5780 sctp_sf_t5_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_t5_timer_expire() argument
5816 sctp_sf_autoclose_timer_expire( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_autoclose_timer_expire() argument
5859 sctp_sf_not_impl(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_not_impl() argument
5877 sctp_sf_bug(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_bug() argument
5898 sctp_sf_timer_ignore(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) sctp_sf_timer_ignore() argument
5942 sctp_abort_pkt_new(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, const void *payload, size_t paylen) sctp_abort_pkt_new() argument
6064 sctp_send_stale_cookie_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_chunk *err_chunk) sctp_send_stale_cookie_err() argument
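Every sm_statefuns.c hit above shares one state-function signature: (net, ep, asoc, type, arg, commands), returning a disposition that the side-effect interpreter acts on. A minimal sketch of a handler in that shape, assuming the kernel-internal sctp types and the helpers visible above; my_sf_example() is this sketch's name, not a function in the file:

    /* Sketch only: depends on net/sctp internals and is not a real
     * kernel entry point. */
    static sctp_disposition_t my_sf_example(struct net *net,
                                            const struct sctp_endpoint *ep,
                                            const struct sctp_association *asoc,
                                            const sctp_subtype_t type,
                                            void *arg,
                                            sctp_cmd_seq_t *commands)
    {
        struct sctp_chunk *chunk = arg;

        /* Bounce malformed chunks the way the handlers above do. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
            return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                              commands);

        /* Otherwise consume the event without queuing any commands. */
        return SCTP_DISPOSITION_DISCARD;
    }
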
H A Dauth.c357 int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, sctp_auth_asoc_copy_shkeys() argument
366 key_for_each(sh_key, &ep->endpoint_shared_keys) { sctp_auth_asoc_copy_shkeys()
396 if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) sctp_auth_asoc_init_active_key()
449 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) sctp_auth_init_hmacs() argument
455 if (!ep->auth_enable) { sctp_auth_init_hmacs()
456 ep->auth_hmacs = NULL; sctp_auth_init_hmacs()
461 if (ep->auth_hmacs) sctp_auth_init_hmacs()
465 ep->auth_hmacs = kzalloc( sctp_auth_init_hmacs()
468 if (!ep->auth_hmacs) sctp_auth_init_hmacs()
482 if (ep->auth_hmacs[id]) sctp_auth_init_hmacs()
491 ep->auth_hmacs[id] = tfm; sctp_auth_init_hmacs()
498 sctp_auth_destroy_hmacs(ep->auth_hmacs); sctp_auth_init_hmacs()
609 struct sctp_endpoint *ep; sctp_auth_asoc_set_default_hmac() local
620 ep = asoc->ep; sctp_auth_asoc_set_default_hmac()
629 if (ep->auth_hmacs[id]) { sctp_auth_asoc_set_default_hmac()
679 if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) sctp_auth_send_cid()
691 if (!asoc->ep->auth_enable) sctp_auth_recv_cid()
747 desc.tfm = asoc->ep->auth_hmacs[hmac_id]; sctp_auth_calculate_hmac()
764 int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id) sctp_auth_ep_add_chunkid() argument
766 struct sctp_chunks_param *p = ep->auth_chunk_list; sctp_auth_ep_add_chunkid()
786 int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, sctp_auth_ep_set_hmacs() argument
813 ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]); sctp_auth_ep_set_hmacs()
814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + sctp_auth_ep_set_hmacs()
823 int sctp_auth_set_key(struct sctp_endpoint *ep, sctp_auth_set_key() argument
838 sh_keys = &ep->endpoint_shared_keys; sctp_auth_set_key()
882 int sctp_auth_set_active_key(struct sctp_endpoint *ep, sctp_auth_set_active_key() argument
894 sh_keys = &ep->endpoint_shared_keys; sctp_auth_set_active_key()
910 ep->active_key_id = key_id;
915 int sctp_auth_del_key_id(struct sctp_endpoint *ep, sctp_auth_del_key_id() argument
932 if (ep->active_key_id == key_id) sctp_auth_del_key_id()
935 sh_keys = &ep->endpoint_shared_keys; sctp_auth_del_key_id()
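The auth.c hits above (sctp_auth_ep_set_hmacs(), sctp_auth_set_key(), sctp_auth_set_active_key(), ...) are reached from SCTP socket options, as the socket.c hits below show. A small userspace sketch that drives sctp_auth_ep_set_hmacs() through SCTP_HMAC_IDENT; it assumes lksctp-tools headers and net.sctp.auth_enable=1 (the !ep->auth_enable checks above make the kernel return EACCES otherwise):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    int main(void)
    {
        int sk = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
        struct sctp_hmacalgo *algo;
        socklen_t len = sizeof(*algo) + 2 * sizeof(uint16_t);

        if (sk < 0) { perror("socket"); return 1; }

        algo = calloc(1, len);
        if (!algo) return 1;
        algo->shmac_num_idents = 2;
        algo->shmac_idents[0] = SCTP_AUTH_HMAC_ID_SHA256; /* preferred */
        algo->shmac_idents[1] = SCTP_AUTH_HMAC_ID_SHA1;   /* mandatory fallback */

        /* Lands in sctp_setsockopt_hmac_ident() -> sctp_auth_ep_set_hmacs(). */
        if (setsockopt(sk, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, len) < 0)
            perror("SCTP_HMAC_IDENT"); /* EACCES if AUTH is disabled */

        free(algo);
        return 0;
    }
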
H A Dinput.c111 struct sctp_endpoint *ep = NULL; sctp_rcv() local
177 ep = __sctp_rcv_lookup_endpoint(net, &dest); sctp_rcv()
180 rcvr = asoc ? &asoc->base : &ep->base; sctp_rcv()
192 sctp_endpoint_put(ep); sctp_rcv()
193 ep = NULL; sctp_rcv()
196 ep = sctp_sk(sk)->ep; sctp_rcv()
197 sctp_endpoint_hold(ep); sctp_rcv()
198 rcvr = &ep->base; sctp_rcv()
275 /* Release the asoc/ep ref we took in the lookup calls. */ sctp_rcv()
279 sctp_endpoint_put(ep); sctp_rcv()
289 /* Release the asoc/ep ref we took in the lookup calls. */ sctp_rcv()
293 sctp_endpoint_put(ep); sctp_rcv()
373 /* Hold the assoc/ep while hanging on the backlog queue. sctp_add_backlog()
464 asoc->state, asoc->ep, asoc, t, sctp_icmp_proto_unreachable()
709 static void __sctp_hash_endpoint(struct sctp_endpoint *ep) __sctp_hash_endpoint() argument
711 struct net *net = sock_net(ep->base.sk); __sctp_hash_endpoint()
715 epb = &ep->base; __sctp_hash_endpoint()
726 void sctp_hash_endpoint(struct sctp_endpoint *ep) sctp_hash_endpoint() argument
729 __sctp_hash_endpoint(ep); sctp_hash_endpoint()
734 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) __sctp_unhash_endpoint() argument
736 struct net *net = sock_net(ep->base.sk); __sctp_unhash_endpoint()
740 epb = &ep->base; __sctp_unhash_endpoint()
752 void sctp_unhash_endpoint(struct sctp_endpoint *ep) sctp_unhash_endpoint() argument
755 __sctp_unhash_endpoint(ep); sctp_unhash_endpoint()
765 struct sctp_endpoint *ep; __sctp_rcv_lookup_endpoint() local
772 ep = sctp_ep(epb); __sctp_rcv_lookup_endpoint()
773 if (sctp_endpoint_is_match(ep, net, laddr)) __sctp_rcv_lookup_endpoint()
777 ep = sctp_sk(net->sctp.ctl_sock)->ep; __sctp_rcv_lookup_endpoint()
780 sctp_endpoint_hold(ep); __sctp_rcv_lookup_endpoint()
782 return ep; __sctp_rcv_lookup_endpoint()
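The input.c hits show the reference discipline around endpoint lookup: __sctp_rcv_lookup_endpoint() returns a held reference (falling back to the ctl_sock endpoint rather than NULL), and sctp_rcv() drops it with sctp_endpoint_put() on every exit path. A condensed kernel-context sketch of the pattern; handle_one_packet() is this example's name:

    static void handle_one_packet(struct net *net, const union sctp_addr *dest)
    {
        struct sctp_endpoint *ep;

        /* The lookup takes a hold (sctp_endpoint_hold) before returning;
         * a miss falls back to the control socket's endpoint. */
        ep = __sctp_rcv_lookup_endpoint(net, dest);

        /* ... deliver the chunk to ep->base ... */

        /* Every exit path must drop the reference the lookup took. */
        sctp_endpoint_put(ep);
    }
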
H A Dassociola.c67 const struct sctp_endpoint *ep, sctp_association_init()
82 asoc->ep = (struct sctp_endpoint *)ep; sctp_association_init()
85 sctp_endpoint_hold(asoc->ep); sctp_association_init()
95 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); sctp_association_init()
187 asoc->c.my_vtag = sctp_generate_tag(ep); sctp_association_init()
188 asoc->c.my_port = ep->base.bind_addr.port; sctp_association_init()
190 asoc->c.initial_tsn = sctp_generate_tsn(ep); sctp_association_init()
266 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); sctp_association_init()
270 asoc->active_key_id = ep->active_key_id; sctp_association_init()
273 if (ep->auth_hmacs_list) sctp_association_init()
274 memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, sctp_association_init()
275 ntohs(ep->auth_hmacs_list->param_hdr.length)); sctp_association_init()
276 if (ep->auth_chunk_list) sctp_association_init()
277 memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, sctp_association_init()
278 ntohs(ep->auth_chunk_list->param_hdr.length)); sctp_association_init()
290 sctp_endpoint_put(asoc->ep); sctp_association_init()
295 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, sctp_association_new() argument
306 if (!sctp_association_init(asoc, ep, sk, scope, gfp)) sctp_association_new()
413 sctp_endpoint_put(asoc->ep); sctp_association_destroy()
1000 struct sctp_endpoint *ep; sctp_assoc_bh_rcv() local
1008 ep = asoc->ep; sctp_assoc_bh_rcv()
1043 state, ep, asoc, chunk, GFP_ATOMIC); sctp_assoc_bh_rcv()
1074 sctp_endpoint_put(assoc->ep); sctp_assoc_migrate()
1078 assoc->ep = newsp->ep; sctp_assoc_migrate()
1079 sctp_endpoint_hold(assoc->ep); sctp_assoc_migrate()
1086 sctp_endpoint_add_asoc(newsp->ep, assoc); sctp_assoc_migrate()
1511 if (asoc->ep->rcvbuf_policy) sctp_assoc_rwnd_decrease()
1559 &asoc->ep->base.bind_addr, sctp_assoc_set_bind_addr_from_ep()
1573 asoc->ep->base.bind_addr.port, gfp); sctp_assoc_set_bind_addr_from_cookie()
66 sctp_association_init(struct sctp_association *asoc, const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, gfp_t gfp) sctp_association_init() argument
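Gathered from the sctp_association_init() hits above: a new association snapshots the endpoint's AUTH state (shared keys, active key id, HMAC and chunk lists) at creation time. Kernel-context sketch; inherit_auth_state() is this example's name for the excerpted steps:

    static int inherit_auth_state(struct sctp_association *asoc,
                                  const struct sctp_endpoint *ep, gfp_t gfp)
    {
        int err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);

        if (err)
            return err;

        asoc->active_key_id = ep->active_key_id;

        /* Cache the endpoint's negotiable parameter lists in the
         * association's cookie-building state. */
        if (ep->auth_hmacs_list)
            memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
                   ntohs(ep->auth_hmacs_list->param_hdr.length));
        if (ep->auth_chunk_list)
            memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
                   ntohs(ep->auth_chunk_list->param_hdr.length));
        return 0;
    }
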
H A Dsocket.c120 if (asoc->ep->sndbuf_policy) sctp_wspace()
209 if (!list_empty(&sctp_sk(sk)->ep->asocs)) sctp_id2assoc()
210 asoc = list_entry(sctp_sk(sk)->ep->asocs.next, sctp_id2assoc()
241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, sctp_addr_id2transport()
278 if (!sctp_sk(sk)->ep->base.bind_addr.port) sctp_bind()
326 struct sctp_endpoint *ep = sp->ep; sctp_do_bind() local
327 struct sctp_bind_addr *bp = &ep->base.bind_addr; sctp_do_bind()
504 struct sctp_endpoint *ep; sctp_send_asconf_add_ip() local
521 ep = sp->ep; sctp_send_asconf_add_ip()
526 list_for_each_entry(asoc, &ep->asocs, asocs) { sctp_send_asconf_add_ip()
625 struct sctp_endpoint *ep = sp->ep; sctp_bindx_rem() local
627 struct sctp_bind_addr *bp = &ep->base.bind_addr; sctp_bindx_rem()
707 struct sctp_endpoint *ep; sctp_send_asconf_del_ip() local
725 ep = sp->ep; sctp_send_asconf_del_ip()
730 list_for_each_entry(asoc, &ep->asocs, asocs) { sctp_send_asconf_del_ip()
854 /* Set addr events for the assocs in the endpoint; ep and addr_wq must be locked. */ sctp_asconf_mgmt()
863 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); sctp_asconf_mgmt()
1046 struct sctp_endpoint *ep; __sctp_connect() local
1062 ep = sp->ep; __sctp_connect()
1116 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); __sctp_connect()
1129 if (sctp_endpoint_is_peeled_off(ep, &to)) { __sctp_connect()
1140 if (!ep->base.bind_addr.port) { __sctp_connect()
1153 if (ep->base.bind_addr.port < PROT_SOCK && __sctp_connect()
1161 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); __sctp_connect()
1479 struct sctp_endpoint *ep; sctp_close() local
1490 ep = sctp_sk(sk)->ep; sctp_close()
1497 list_for_each_safe(pos, temp, &ep->asocs) { sctp_close()
1592 struct sctp_endpoint *ep; sctp_sendmsg() local
1613 ep = sp->ep; sctp_sendmsg()
1615 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, sctp_sendmsg()
1616 msg, msg_len, ep); sctp_sendmsg()
1707 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); sctp_sendmsg()
1716 sctp_endpoint_is_peeled_off(ep, &to)) { sctp_sendmsg()
1804 if (!ep->base.bind_addr.port) { sctp_sendmsg()
1817 if (ep->base.bind_addr.port < PROT_SOCK && sctp_sendmsg()
1825 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); sctp_sendmsg()
3381 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_auth_chunk() local
3384 if (!ep->auth_enable) sctp_setsockopt_auth_chunk()
3401 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); sctp_setsockopt_auth_chunk()
3414 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_hmac_ident() local
3419 if (!ep->auth_enable) sctp_setsockopt_hmac_ident()
3436 err = sctp_auth_ep_set_hmacs(ep, hmacs); sctp_setsockopt_hmac_ident()
3452 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_auth_key() local
3457 if (!ep->auth_enable) sctp_setsockopt_auth_key()
3478 ret = sctp_auth_set_key(ep, asoc, authkey); sctp_setsockopt_auth_key()
3494 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_active_key() local
3498 if (!ep->auth_enable) sctp_setsockopt_active_key()
3510 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); sctp_setsockopt_active_key()
3522 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_setsockopt_del_key() local
3526 if (!ep->auth_enable) sctp_setsockopt_del_key()
3538 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); sctp_setsockopt_del_key()
3895 struct sctp_endpoint *ep; sctp_accept() local
3904 ep = sp->ep; sctp_accept()
3925 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); sctp_accept()
4101 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); sctp_init_sock()
4102 if (!sp->ep) sctp_init_sock()
4147 if (sp->ep == NULL) sctp_destroy_sock()
4154 sctp_endpoint_free(sp->ep); sctp_destroy_sock()
4191 struct sctp_endpoint *ep; sctp_shutdown() local
4198 ep = sctp_sk(sk)->ep; sctp_shutdown()
4199 if (!list_empty(&ep->asocs)) { sctp_shutdown()
4200 asoc = list_entry(ep->asocs.next, sctp_shutdown()
4918 bp = &sctp_sk(sk)->ep->base.bind_addr; sctp_getsockopt_local_addrs()
5540 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_hmac_ident() local
5547 if (!ep->auth_enable) sctp_getsockopt_hmac_ident()
5550 hmacs = ep->auth_hmacs_list; sctp_getsockopt_hmac_ident()
5575 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_active_key() local
5579 if (!ep->auth_enable) sctp_getsockopt_active_key()
5594 val.scact_keynumber = ep->active_key_id; sctp_getsockopt_active_key()
5608 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_peer_auth_chunks() local
5616 if (!ep->auth_enable) sctp_getsockopt_peer_auth_chunks()
5653 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_getsockopt_local_auth_chunks() local
5661 if (!ep->auth_enable) sctp_getsockopt_local_auth_chunks()
5678 ch = ep->auth_chunk_list; sctp_getsockopt_local_auth_chunks()
5719 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { sctp_getsockopt_assoc_number()
5774 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { sctp_getsockopt_assoc_ids()
5789 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { sctp_getsockopt_assoc_ids()
6233 ep2 = sctp_sk(sk2)->ep; sctp_get_port_local()
6310 struct sctp_endpoint *ep = sp->ep; sctp_listen_start() local
6338 if (!ep->base.bind_addr.port) { sctp_listen_start()
6349 sctp_hash_endpoint(ep); sctp_listen_start()
6370 struct sctp_endpoint *ep = sctp_sk(sk)->ep; sctp_inet_listen() local
6391 sctp_unhash_endpoint(ep); sctp_inet_listen()
6438 return (!list_empty(&sp->ep->asocs)) ? sctp_poll()
6714 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) sctp_wait_for_packet()
6844 if (asoc->ep->sndbuf_policy) sctp_wake_up_waiters()
6866 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) sctp_wake_up_waiters()
7003 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { sctp_write_space()
7096 struct sctp_endpoint *ep; sctp_wait_for_accept() local
7100 ep = sctp_sk(sk)->ep; sctp_wait_for_accept()
7107 if (list_empty(&ep->asocs)) { sctp_wait_for_accept()
7118 if (!list_empty(&ep->asocs)) sctp_wait_for_accept()
7141 if (list_empty(&sctp_sk(sk)->ep->asocs)) sctp_wait_for_close()
7238 struct sctp_endpoint *newep = newsp->ep; sctp_sock_migrate()
7251 /* Restore the ep value that was overwritten with the above structure sctp_sock_migrate()
7254 newsp->ep = newep; sctp_sock_migrate()
7272 sctp_bind_addr_dup(&newsp->ep->base.bind_addr, sctp_sock_migrate()
7273 &oldsp->ep->base.bind_addr, GFP_KERNEL); sctp_sock_migrate()
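The socket.c hits trace the classic server path: bind() reaches sctp_do_bind() and claims ep->base.bind_addr.port, listen() reaches sctp_listen_start() and hashes the endpoint, and accept() peels the first association off ep->asocs. A plain userspace program exercising that path (one-to-one SOCK_STREAM style; port 5000 is arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
        struct sockaddr_in a;
        int c, sk = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

        if (sk < 0) { perror("socket"); return 1; }

        memset(&a, 0, sizeof(a));
        a.sin_family = AF_INET;
        a.sin_addr.s_addr = htonl(INADDR_ANY);
        a.sin_port = htons(5000);

        /* sctp_bind() -> sctp_do_bind(): port lands in ep->base.bind_addr */
        if (bind(sk, (struct sockaddr *)&a, sizeof(a)) < 0) { perror("bind"); return 1; }

        /* sctp_inet_listen() -> sctp_listen_start(): sctp_hash_endpoint(ep) */
        if (listen(sk, 5) < 0) { perror("listen"); return 1; }

        /* sctp_accept(): takes the first asoc off ep->asocs */
        c = accept(sk, NULL, NULL);
        if (c >= 0)
            close(c);
        close(sk);
        return 0;
    }
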
H A Dprimitive.c58 struct sctp_endpoint *ep; \
63 ep = asoc ? asoc->ep : NULL; \
65 error = sctp_do_sm(net, event_type, subtype, state, ep, asoc, \
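The primitive.c fragments above come from the macro that turns each ULP primitive into an sctp_do_sm() call. A sketch of that macro's shape; only the three matched lines are visible above, so the remaining lines are reconstructed here and should be treated as approximate:

    #define DECLARE_PRIMITIVE(name) \
    int sctp_primitive_ ## name(struct net *net, \
                                struct sctp_association *asoc, void *arg) \
    { \
        sctp_state_t state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
        struct sctp_endpoint *ep; \
        \
        /* A primitive may arrive before any association exists. */ \
        ep = asoc ? asoc->ep : NULL; \
        \
        return sctp_do_sm(net, SCTP_EVENT_T_PRIMITIVE, \
                          SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ ## name), \
                          state, ep, asoc, arg, GFP_KERNEL); \
    }
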
/linux-4.4.14/drivers/usb/isp1760/
H A Disp1760-udc.c32 struct isp1760_ep *ep; member in struct:isp1760_request
41 static inline struct isp1760_ep *ep_to_udc_ep(struct usb_ep *ep) ep_to_udc_ep() argument
43 return container_of(ep, struct isp1760_ep, ep); ep_to_udc_ep()
71 return &udc->ep[0]; isp1760_udc_find_ep()
73 for (i = 1; i < ARRAY_SIZE(udc->ep); ++i) { isp1760_udc_find_ep()
74 if (udc->ep[i].addr == index) isp1760_udc_find_ep()
75 return udc->ep[i].desc ? &udc->ep[i] : NULL; isp1760_udc_find_ep()
81 static void __isp1760_udc_select_ep(struct isp1760_ep *ep, int dir) __isp1760_udc_select_ep() argument
83 isp1760_udc_write(ep->udc, DC_EPINDEX, __isp1760_udc_select_ep()
84 DC_ENDPIDX(ep->addr & USB_ENDPOINT_NUMBER_MASK) | __isp1760_udc_select_ep()
90 * @ep: The endpoint
99 static void isp1760_udc_select_ep(struct isp1760_ep *ep) isp1760_udc_select_ep() argument
101 __isp1760_udc_select_ep(ep, ep->addr & USB_ENDPOINT_DIR_MASK); isp1760_udc_select_ep()
105 static void isp1760_udc_ctrl_send_status(struct isp1760_ep *ep, int dir) isp1760_udc_ctrl_send_status() argument
107 struct isp1760_udc *udc = ep->udc; isp1760_udc_ctrl_send_status()
126 static void isp1760_udc_request_complete(struct isp1760_ep *ep, isp1760_udc_request_complete() argument
130 struct isp1760_udc *udc = ep->udc; isp1760_udc_request_complete()
133 dev_dbg(ep->udc->isp->dev, "completing request %p with status %d\n", isp1760_udc_request_complete()
136 req->ep = NULL; isp1760_udc_request_complete()
138 req->req.complete(&ep->ep, &req->req); isp1760_udc_request_complete()
147 if (status == 0 && ep->addr == 0 && udc->ep0_dir == USB_DIR_OUT) isp1760_udc_request_complete()
148 isp1760_udc_ctrl_send_status(ep, USB_DIR_OUT); isp1760_udc_request_complete()
153 static void isp1760_udc_ctrl_send_stall(struct isp1760_ep *ep) isp1760_udc_ctrl_send_stall() argument
155 struct isp1760_udc *udc = ep->udc; isp1760_udc_ctrl_send_stall()
158 dev_dbg(ep->udc->isp->dev, "%s(ep%02x)\n", __func__, ep->addr); isp1760_udc_ctrl_send_stall()
163 __isp1760_udc_select_ep(ep, USB_DIR_OUT); isp1760_udc_ctrl_send_stall()
165 __isp1760_udc_select_ep(ep, USB_DIR_IN); isp1760_udc_ctrl_send_stall()
179 static bool isp1760_udc_receive(struct isp1760_ep *ep, isp1760_udc_receive() argument
182 struct isp1760_udc *udc = ep->udc; isp1760_udc_receive()
187 isp1760_udc_select_ep(ep); isp1760_udc_receive()
228 __func__, req, req->req.actual, req->req.length, ep->maxpacket, isp1760_udc_receive()
231 ep->rx_pending = false; isp1760_udc_receive()
237 if (req->req.actual == req->req.length || len < ep->maxpacket) { isp1760_udc_receive()
245 static void isp1760_udc_transmit(struct isp1760_ep *ep, isp1760_udc_transmit() argument
248 struct isp1760_udc *udc = ep->udc; isp1760_udc_transmit()
253 ep->maxpacket); isp1760_udc_transmit()
259 __isp1760_udc_select_ep(ep, USB_DIR_IN); isp1760_udc_transmit()
275 if (ep->addr == 0) isp1760_udc_transmit()
281 static void isp1760_ep_rx_ready(struct isp1760_ep *ep) isp1760_ep_rx_ready() argument
283 struct isp1760_udc *udc = ep->udc; isp1760_ep_rx_ready()
289 if (ep->addr == 0 && udc->ep0_state != ISP1760_CTRL_DATA_OUT) { isp1760_ep_rx_ready()
296 if (ep->addr != 0 && !ep->desc) { isp1760_ep_rx_ready()
298 dev_dbg(udc->isp->dev, "%s: ep%02x is disabled\n", __func__, isp1760_ep_rx_ready()
299 ep->addr); isp1760_ep_rx_ready()
303 if (list_empty(&ep->queue)) { isp1760_ep_rx_ready()
304 ep->rx_pending = true; isp1760_ep_rx_ready()
306 dev_dbg(udc->isp->dev, "%s: ep%02x (%p) has no request queued\n", isp1760_ep_rx_ready()
307 __func__, ep->addr, ep); isp1760_ep_rx_ready()
311 req = list_first_entry(&ep->queue, struct isp1760_request, isp1760_ep_rx_ready()
313 complete = isp1760_udc_receive(ep, req); isp1760_ep_rx_ready()
318 isp1760_udc_request_complete(ep, req, 0); isp1760_ep_rx_ready()
321 static void isp1760_ep_tx_complete(struct isp1760_ep *ep) isp1760_ep_tx_complete() argument
323 struct isp1760_udc *udc = ep->udc; isp1760_ep_tx_complete()
330 if (ep->addr == 0 && udc->ep0_state != ISP1760_CTRL_DATA_IN) { isp1760_ep_tx_complete()
337 if (list_empty(&ep->queue)) { isp1760_ep_tx_complete()
343 if (ep->addr == 0) { isp1760_ep_tx_complete()
344 isp1760_udc_ctrl_send_status(ep, USB_DIR_IN); isp1760_ep_tx_complete()
350 dev_dbg(udc->isp->dev, "%s: ep%02x has no request queued\n", isp1760_ep_tx_complete()
351 __func__, ep->addr); isp1760_ep_tx_complete()
355 req = list_first_entry(&ep->queue, struct isp1760_request, isp1760_ep_tx_complete()
360 !(req->req.length % ep->maxpacket) && isp1760_ep_tx_complete()
365 req, req->req.actual, req->req.length, ep->maxpacket, isp1760_ep_tx_complete()
376 if (ep->addr == 0) isp1760_ep_tx_complete()
377 isp1760_udc_ctrl_send_status(ep, USB_DIR_IN); isp1760_ep_tx_complete()
379 if (!list_empty(&ep->queue)) isp1760_ep_tx_complete()
380 req = list_first_entry(&ep->queue, isp1760_ep_tx_complete()
393 isp1760_udc_transmit(ep, req); isp1760_ep_tx_complete()
398 isp1760_udc_request_complete(ep, complete, 0); isp1760_ep_tx_complete()
401 static int __isp1760_udc_set_halt(struct isp1760_ep *ep, bool halt) __isp1760_udc_set_halt() argument
403 struct isp1760_udc *udc = ep->udc; __isp1760_udc_set_halt()
405 dev_dbg(udc->isp->dev, "%s: %s halt on ep%02x\n", __func__, __isp1760_udc_set_halt()
406 halt ? "set" : "clear", ep->addr); __isp1760_udc_set_halt()
408 if (ep->desc && usb_endpoint_xfer_isoc(ep->desc)) { __isp1760_udc_set_halt()
409 dev_dbg(udc->isp->dev, "%s: ep%02x is isochronous\n", __func__, __isp1760_udc_set_halt()
410 ep->addr); __isp1760_udc_set_halt()
414 isp1760_udc_select_ep(ep); __isp1760_udc_set_halt()
417 if (ep->addr == 0) { __isp1760_udc_set_halt()
419 __isp1760_udc_select_ep(ep, USB_DIR_IN); __isp1760_udc_set_halt()
435 if ((ep->addr & USB_DIR_IN) && !list_empty(&ep->queue)) { __isp1760_udc_set_halt()
438 req = list_first_entry(&ep->queue, __isp1760_udc_set_halt()
440 isp1760_udc_transmit(ep, req); __isp1760_udc_set_halt()
444 ep->halted = halt; __isp1760_udc_set_halt()
456 struct isp1760_ep *ep; isp1760_udc_get_status() local
472 ep = isp1760_udc_find_ep(udc, le16_to_cpu(req->wIndex)); isp1760_udc_get_status()
473 if (!ep) isp1760_udc_get_status()
477 if (ep->halted) isp1760_udc_get_status()
517 isp1760_udc_ctrl_send_status(&udc->ep[0], USB_DIR_OUT); isp1760_udc_set_address()
541 struct isp1760_ep *ep; isp1760_ep0_setup_standard() local
547 ep = isp1760_udc_find_ep(udc, index); isp1760_ep0_setup_standard()
548 if (!ep) isp1760_ep0_setup_standard()
558 if (!ep->wedged) isp1760_ep0_setup_standard()
559 stall = __isp1760_udc_set_halt(ep, false); isp1760_ep0_setup_standard()
564 isp1760_udc_ctrl_send_status(&udc->ep[0], isp1760_ep0_setup_standard()
585 struct isp1760_ep *ep; isp1760_ep0_setup_standard() local
591 ep = isp1760_udc_find_ep(udc, index); isp1760_ep0_setup_standard()
592 if (!ep) isp1760_ep0_setup_standard()
597 stall = __isp1760_udc_set_halt(ep, true); isp1760_ep0_setup_standard()
599 isp1760_udc_ctrl_send_status(&udc->ep[0], isp1760_ep0_setup_standard()
665 isp1760_udc_ctrl_send_stall(&udc->ep[0]); isp1760_ep0_setup()
703 isp1760_udc_ctrl_send_stall(&udc->ep[0]); isp1760_ep0_setup()
710 static int isp1760_ep_enable(struct usb_ep *ep, isp1760_ep_enable() argument
713 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_enable()
727 le16_to_cpu(desc->wMaxPacketSize) > ep->maxpacket) { isp1760_ep_enable()
729 "%s: invalid descriptor type %u addr %02x ep addr %02x max packet size %u/%u\n", isp1760_ep_enable()
732 le16_to_cpu(desc->wMaxPacketSize), ep->maxpacket); isp1760_ep_enable()
771 static int isp1760_ep_disable(struct usb_ep *ep) isp1760_ep_disable() argument
773 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_disable()
809 static struct usb_request *isp1760_ep_alloc_request(struct usb_ep *ep, isp1760_ep_alloc_request() argument
821 static void isp1760_ep_free_request(struct usb_ep *ep, struct usb_request *_req) isp1760_ep_free_request() argument
828 static int isp1760_ep_queue(struct usb_ep *ep, struct usb_request *_req, isp1760_ep_queue() argument
832 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_queue()
844 "%s: req %p (%u bytes%s) ep %p(0x%02x)\n", __func__, _req, isp1760_ep_queue()
847 req->ep = uep; isp1760_ep_queue()
894 "%s: can't queue request to disabled ep%02x\n", isp1760_ep_queue()
901 req->ep = NULL; isp1760_ep_queue()
911 static int isp1760_ep_dequeue(struct usb_ep *ep, struct usb_request *_req) isp1760_ep_dequeue() argument
914 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_dequeue()
918 dev_dbg(uep->udc->isp->dev, "%s(ep%02x)\n", __func__, uep->addr); isp1760_ep_dequeue()
922 if (req->ep != uep) isp1760_ep_dequeue()
955 dev_dbg(udc->isp->dev, "%s: ep%02x is disabled\n", __func__, __isp1760_ep_set_halt()
964 "%s: ep%02x has request pending\n", __func__, __isp1760_ep_set_halt()
991 static int isp1760_ep_set_halt(struct usb_ep *ep, int value) isp1760_ep_set_halt() argument
993 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_set_halt()
997 dev_dbg(uep->udc->isp->dev, "%s: %s halt on ep%02x\n", __func__, isp1760_ep_set_halt()
1007 static int isp1760_ep_set_wedge(struct usb_ep *ep) isp1760_ep_set_wedge() argument
1009 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_set_wedge()
1013 dev_dbg(uep->udc->isp->dev, "%s: set wedge on ep%02x)\n", __func__, isp1760_ep_set_wedge()
1023 static void isp1760_ep_fifo_flush(struct usb_ep *ep) isp1760_ep_fifo_flush() argument
1025 struct isp1760_ep *uep = ep_to_udc_ep(ep); isp1760_ep_fifo_flush()
1291 struct isp1760_ep *ep = &udc->ep[i*2]; isp1760_udc_irq() local
1295 isp1760_ep_tx_complete(ep); isp1760_udc_irq()
1300 isp1760_ep_rx_ready(i ? ep - 1 : ep); isp1760_udc_irq()
1360 for (i = 0; i < ARRAY_SIZE(udc->ep); ++i) { isp1760_udc_init_eps()
1361 struct isp1760_ep *ep = &udc->ep[i]; isp1760_udc_init_eps() local
1365 ep->udc = udc; isp1760_udc_init_eps()
1367 INIT_LIST_HEAD(&ep->queue); isp1760_udc_init_eps()
1369 ep->addr = (ep_num && is_in ? USB_DIR_IN : USB_DIR_OUT) isp1760_udc_init_eps()
1371 ep->desc = NULL; isp1760_udc_init_eps()
1373 sprintf(ep->name, "ep%u%s", ep_num, isp1760_udc_init_eps()
1376 ep->ep.ops = &isp1760_ep_ops; isp1760_udc_init_eps()
1377 ep->ep.name = ep->name; isp1760_udc_init_eps()
1385 usb_ep_set_maxpacket_limit(&ep->ep, 64); isp1760_udc_init_eps()
1386 ep->ep.caps.type_control = true; isp1760_udc_init_eps()
1387 ep->ep.caps.dir_in = true; isp1760_udc_init_eps()
1388 ep->ep.caps.dir_out = true; isp1760_udc_init_eps()
1389 ep->maxpacket = 64; isp1760_udc_init_eps()
1390 udc->gadget.ep0 = &ep->ep; isp1760_udc_init_eps()
1392 usb_ep_set_maxpacket_limit(&ep->ep, 512); isp1760_udc_init_eps()
1393 ep->ep.caps.type_iso = true; isp1760_udc_init_eps()
1394 ep->ep.caps.type_bulk = true; isp1760_udc_init_eps()
1395 ep->ep.caps.type_int = true; isp1760_udc_init_eps()
1396 ep->maxpacket = 0; isp1760_udc_init_eps()
1397 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); isp1760_udc_init_eps()
1401 ep->ep.caps.dir_in = true; isp1760_udc_init_eps()
1403 ep->ep.caps.dir_out = true; isp1760_udc_init_eps()
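The isp1760_ep_* functions above back the generic usb_ep ops, so a gadget function driver reaches them only through the gadget API. A hedged kernel-context fragment showing that path; done() and submit_one() are this example's names, and 'ep' is assumed to be an endpoint already enabled via usb_ep_enable():

    #include <linux/usb/gadget.h>

    /* Runs when isp1760_udc_request_complete() fires req->complete(). */
    static void done(struct usb_ep *ep, struct usb_request *req)
    {
        pr_debug("xfer done: status %d, %u bytes\n", req->status, req->actual);
        usb_ep_free_request(ep, req);      /* isp1760_ep_free_request() */
    }

    static int submit_one(struct usb_ep *ep, void *buf, unsigned int len)
    {
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, GFP_ATOMIC); /* isp1760_ep_alloc_request() */
        if (!req)
            return -ENOMEM;

        req->buf = buf;
        req->length = len;
        req->complete = done;

        return usb_ep_queue(ep, req, GFP_ATOMIC);   /* isp1760_ep_queue() */
    }
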
H A Disp1760-udc.h35 struct usb_ep ep; member in struct:isp1760_ep
57 * lock: Protects driver, vbus_timer, ep, ep0_*, DC_EPINDEX register
58 * ep: Array of endpoints
78 struct isp1760_ep ep[15]; member in struct:isp1760_udc
/linux-4.4.14/drivers/scsi/libfc/
H A Dfc_exch.c255 * @ep: Exchange to be held
257 static inline void fc_exch_hold(struct fc_exch *ep) fc_exch_hold() argument
259 atomic_inc(&ep->ex_refcnt); fc_exch_hold()
265 * @ep: The exchange that will use the header
272 static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, fc_exch_setup_hdr() argument
278 fr_sof(fp) = ep->class; fc_exch_setup_hdr()
279 if (ep->seq.cnt) fc_exch_setup_hdr()
280 fr_sof(fp) = fc_sof_normal(ep->class); fc_exch_setup_hdr()
284 if (fc_sof_needs_ack(ep->class)) fc_exch_setup_hdr()
308 fh->fh_ox_id = htons(ep->oxid); fc_exch_setup_hdr()
309 fh->fh_rx_id = htons(ep->rxid); fc_exch_setup_hdr()
310 fh->fh_seq_id = ep->seq.id; fc_exch_setup_hdr()
311 fh->fh_seq_cnt = htons(ep->seq.cnt); fc_exch_setup_hdr()
316 * @ep: Exchange to be released
321 static void fc_exch_release(struct fc_exch *ep) fc_exch_release() argument
325 if (atomic_dec_and_test(&ep->ex_refcnt)) { fc_exch_release()
326 mp = ep->em; fc_exch_release()
327 if (ep->destructor) fc_exch_release()
328 ep->destructor(&ep->seq, ep->arg); fc_exch_release()
329 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE)); fc_exch_release()
330 mempool_free(ep, mp->ep_pool); fc_exch_release()
336 * @ep: The exchange whose timer is to be canceled
338 static inline void fc_exch_timer_cancel(struct fc_exch *ep) fc_exch_timer_cancel() argument
340 if (cancel_delayed_work(&ep->timeout_work)) { fc_exch_timer_cancel()
341 FC_EXCH_DBG(ep, "Exchange timer canceled\n"); fc_exch_timer_cancel()
342 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ fc_exch_timer_cancel()
349 * @ep: The exchange whose timer will start
355 static inline void fc_exch_timer_set_locked(struct fc_exch *ep, fc_exch_timer_set_locked() argument
358 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) fc_exch_timer_set_locked()
361 FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); fc_exch_timer_set_locked()
363 fc_exch_hold(ep); /* hold for timer */ fc_exch_timer_set_locked()
364 if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, fc_exch_timer_set_locked()
366 fc_exch_release(ep); fc_exch_timer_set_locked()
371 * @ep: The exchange whose timer will start
374 static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) fc_exch_timer_set() argument
376 spin_lock_bh(&ep->ex_lock); fc_exch_timer_set()
377 fc_exch_timer_set_locked(ep, timer_msec); fc_exch_timer_set()
378 spin_unlock_bh(&ep->ex_lock); fc_exch_timer_set()
383 * @ep: The exchange that is complete
387 static int fc_exch_done_locked(struct fc_exch *ep) fc_exch_done_locked() argument
394 * ep, and in that case we only clear the resp and set it as fc_exch_done_locked()
397 if (ep->state & FC_EX_DONE) fc_exch_done_locked()
399 ep->esb_stat |= ESB_ST_COMPLETE; fc_exch_done_locked()
401 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { fc_exch_done_locked()
402 ep->state |= FC_EX_DONE; fc_exch_done_locked()
403 fc_exch_timer_cancel(ep); fc_exch_done_locked()
429 * @ep: The exchange to assign to the pool
432 struct fc_exch *ep) fc_exch_ptr_set()
434 ((struct fc_exch **)(pool + 1))[index] = ep; fc_exch_ptr_set()
439 * @ep: The exchange to be deleted
441 static void fc_exch_delete(struct fc_exch *ep) fc_exch_delete() argument
446 pool = ep->pool; fc_exch_delete()
452 index = (ep->xid - ep->em->min_xid) >> fc_cpu_order; fc_exch_delete()
461 list_del(&ep->ex_list); fc_exch_delete()
463 fc_exch_release(ep); /* drop hold for exch in mp */ fc_exch_delete()
469 struct fc_exch *ep; fc_seq_send_locked() local
475 ep = fc_seq_exch(sp); fc_seq_send_locked()
477 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) { fc_seq_send_locked()
482 WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); fc_seq_send_locked()
485 fc_exch_setup_hdr(ep, fp, f_ctl); fc_seq_send_locked()
486 fr_encaps(fp) = ep->encaps; fc_seq_send_locked()
512 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ fc_seq_send_locked()
514 ep->esb_stat &= ~ESB_ST_SEQ_INIT; fc_seq_send_locked()
531 struct fc_exch *ep; fc_seq_send() local
533 ep = fc_seq_exch(sp); fc_seq_send()
534 spin_lock_bh(&ep->ex_lock); fc_seq_send()
536 spin_unlock_bh(&ep->ex_lock); fc_seq_send()
542 * @ep: The exchange to allocate a new sequence for
549 static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) fc_seq_alloc() argument
553 sp = &ep->seq; fc_seq_alloc()
567 struct fc_exch *ep = fc_seq_exch(sp); fc_seq_start_next_locked() local
569 sp = fc_seq_alloc(ep, ep->seq_id++); fc_seq_start_next_locked()
570 FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n", fc_seq_start_next_locked()
571 ep->f_ctl, sp->id); fc_seq_start_next_locked()
582 struct fc_exch *ep = fc_seq_exch(sp); fc_seq_start_next() local
584 spin_lock_bh(&ep->ex_lock); fc_seq_start_next()
586 spin_unlock_bh(&ep->ex_lock); fc_seq_start_next()
601 struct fc_exch *ep = fc_seq_exch(sp); fc_seq_set_resp() local
604 spin_lock_bh(&ep->ex_lock); fc_seq_set_resp()
605 while (ep->resp_active && ep->resp_task != current) { fc_seq_set_resp()
606 prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); fc_seq_set_resp()
607 spin_unlock_bh(&ep->ex_lock); fc_seq_set_resp()
611 spin_lock_bh(&ep->ex_lock); fc_seq_set_resp()
613 finish_wait(&ep->resp_wq, &wait); fc_seq_set_resp()
614 ep->resp = resp; fc_seq_set_resp()
615 ep->arg = arg; fc_seq_set_resp()
616 spin_unlock_bh(&ep->ex_lock); fc_seq_set_resp()
621 * @ep: The exchange to be aborted
628 static int fc_exch_abort_locked(struct fc_exch *ep, fc_exch_abort_locked() argument
635 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || fc_exch_abort_locked()
636 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) fc_exch_abort_locked()
642 sp = fc_seq_start_next_locked(&ep->seq); fc_exch_abort_locked()
647 fc_exch_timer_set_locked(ep, timer_msec); fc_exch_abort_locked()
649 if (ep->sid) { fc_exch_abort_locked()
653 fp = fc_frame_alloc(ep->lp, 0); fc_exch_abort_locked()
655 ep->esb_stat |= ESB_ST_SEQ_INIT; fc_exch_abort_locked()
656 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, fc_exch_abort_locked()
659 error = fc_seq_send_locked(ep->lp, sp, fp); fc_exch_abort_locked()
670 ep->esb_stat |= ESB_ST_ABNORMAL; fc_exch_abort_locked()
686 struct fc_exch *ep; fc_seq_exch_abort() local
689 ep = fc_seq_exch(req_sp); fc_seq_exch_abort()
690 spin_lock_bh(&ep->ex_lock); fc_seq_exch_abort()
691 error = fc_exch_abort_locked(ep, timer_msec); fc_seq_exch_abort()
692 spin_unlock_bh(&ep->ex_lock); fc_seq_exch_abort()
697 * fc_invoke_resp() - invoke ep->resp()
701 * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
703 * two variables changes if ep->resp_active > 0.
705 * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
710 * ep->resp() won't be invoked after fc_exch_done() has returned.
713 * ep->resp pointer.
716 * Returns true if and only if ep->resp has been invoked.
718 static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, fc_invoke_resp() argument
725 spin_lock_bh(&ep->ex_lock); fc_invoke_resp()
726 ep->resp_active++; fc_invoke_resp()
727 if (ep->resp_task != current) fc_invoke_resp()
728 ep->resp_task = !ep->resp_task ? current : NULL; fc_invoke_resp()
729 resp = ep->resp; fc_invoke_resp()
730 arg = ep->arg; fc_invoke_resp()
731 spin_unlock_bh(&ep->ex_lock); fc_invoke_resp()
738 spin_lock_bh(&ep->ex_lock); fc_invoke_resp()
739 if (--ep->resp_active == 0) fc_invoke_resp()
740 ep->resp_task = NULL; fc_invoke_resp()
741 spin_unlock_bh(&ep->ex_lock); fc_invoke_resp()
743 if (ep->resp_active == 0) fc_invoke_resp()
744 wake_up(&ep->resp_wq); fc_invoke_resp()
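
The comment excerpts above describe a quiescence handshake: fc_seq_set_resp() must not swap the handler while another task is inside fc_invoke_resp(), and re-entry from the callback itself is permitted via resp_task. A minimal standalone sketch of that handshake; the names cb_slot, cb_set and cb_invoke are hypothetical stand-ins for ep->ex_lock, ep->resp_wq, ep->resp_active and ep->resp_task:

/*
 * Minimal sketch of the quiescence handshake described above.  All
 * names here are hypothetical; the pairing of the counter, the owner
 * task and the wait queue mirrors the fc_exch fields exactly.
 */
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

struct cb_slot {
	spinlock_t lock;
	wait_queue_head_t wq;
	int active;			/* invocations in flight */
	struct task_struct *owner;	/* task allowed to re-enter cb_set() */
	void (*fn)(void *arg);
	void *arg;
};

/* Swap the callback; sleeps until no other task is inside cb_invoke(). */
static void cb_set(struct cb_slot *s, void (*fn)(void *), void *arg)
{
	DEFINE_WAIT(wait);

	spin_lock_bh(&s->lock);
	while (s->active && s->owner != current) {
		prepare_to_wait(&s->wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&s->lock);
		schedule();
		spin_lock_bh(&s->lock);
	}
	finish_wait(&s->wq, &wait);
	s->fn = fn;
	s->arg = arg;
	spin_unlock_bh(&s->lock);
}

/* Call the current callback without holding the lock across the call. */
static bool cb_invoke(struct cb_slot *s)
{
	void (*fn)(void *);
	void *arg;

	spin_lock_bh(&s->lock);
	s->active++;
	if (s->owner != current)
		s->owner = !s->owner ? current : NULL;
	fn = s->fn;
	arg = s->arg;
	spin_unlock_bh(&s->lock);

	if (fn)
		fn(arg);

	spin_lock_bh(&s->lock);
	if (--s->active == 0)
		s->owner = NULL;
	spin_unlock_bh(&s->lock);
	if (s->active == 0)
		wake_up(&s->wq);
	return fn != NULL;
}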
755 struct fc_exch *ep = container_of(work, struct fc_exch, fc_exch_timeout() local
757 struct fc_seq *sp = &ep->seq; fc_exch_timeout()
761 FC_EXCH_DBG(ep, "Exchange timed out\n"); fc_exch_timeout()
763 spin_lock_bh(&ep->ex_lock); fc_exch_timeout()
764 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) fc_exch_timeout()
767 e_stat = ep->esb_stat; fc_exch_timeout()
769 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; fc_exch_timeout()
770 spin_unlock_bh(&ep->ex_lock); fc_exch_timeout()
772 fc_exch_rrq(ep); fc_exch_timeout()
776 rc = fc_exch_done_locked(ep); fc_exch_timeout()
777 spin_unlock_bh(&ep->ex_lock); fc_exch_timeout()
779 fc_exch_delete(ep); fc_exch_timeout()
780 fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); fc_exch_timeout()
781 fc_seq_set_resp(sp, NULL, ep->arg); fc_exch_timeout()
782 fc_seq_exch_abort(sp, 2 * ep->r_a_tov); fc_exch_timeout()
786 spin_unlock_bh(&ep->ex_lock); fc_exch_timeout()
791 fc_exch_release(ep); fc_exch_timeout()
804 struct fc_exch *ep; fc_exch_em_alloc() local
810 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); fc_exch_em_alloc()
811 if (!ep) { fc_exch_em_alloc()
815 memset(ep, 0, sizeof(*ep)); fc_exch_em_alloc()
843 fc_exch_hold(ep); /* hold for exch in mp */ fc_exch_em_alloc()
844 spin_lock_init(&ep->ex_lock); fc_exch_em_alloc()
850 spin_lock_bh(&ep->ex_lock); fc_exch_em_alloc()
852 fc_exch_ptr_set(pool, index, ep); fc_exch_em_alloc()
853 list_add_tail(&ep->ex_list, &pool->ex_list); fc_exch_em_alloc()
854 fc_seq_alloc(ep, ep->seq_id++); fc_exch_em_alloc()
861 ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid; fc_exch_em_alloc()
862 ep->em = mp; fc_exch_em_alloc()
863 ep->pool = pool; fc_exch_em_alloc()
864 ep->lp = lport; fc_exch_em_alloc()
865 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ fc_exch_em_alloc()
866 ep->rxid = FC_XID_UNKNOWN; fc_exch_em_alloc()
867 ep->class = mp->class; fc_exch_em_alloc()
868 ep->resp_active = 0; fc_exch_em_alloc()
869 init_waitqueue_head(&ep->resp_wq); fc_exch_em_alloc()
870 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); fc_exch_em_alloc()
872 return ep; fc_exch_em_alloc()
876 mempool_free(ep, mp->ep_pool); fc_exch_em_alloc()
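
The XID assignment above packs the per-CPU pool coordinates into one number: the low fc_cpu_order bits select the CPU, the remaining bits the slot index, all offset by the manager's min_xid; fc_exch_delete() and fc_exch_find() invert it with (xid - min_xid) >> fc_cpu_order. A runnable round-trip check with hypothetical constants:

/* Round trip of the xid <-> (cpu, index) packing used above.  The
 * constant values are hypothetical (fc_cpu_order = 2, min_xid =
 * 0x0100); the shifts match fc_exch_em_alloc()/fc_exch_delete().
 */
#include <assert.h>

#define FC_CPU_ORDER	2			/* log2 of cpu slots */
#define FC_CPU_MASK	((1 << FC_CPU_ORDER) - 1)
#define MIN_XID		0x0100

static unsigned int xid_make(unsigned int index, unsigned int cpu)
{
	return (index << FC_CPU_ORDER | cpu) + MIN_XID;
}

int main(void)
{
	unsigned int xid = xid_make(5, 3);	/* slot 5 of cpu 3's pool */

	assert((xid - MIN_XID) >> FC_CPU_ORDER == 5);	/* pool index */
	assert(((xid - MIN_XID) & FC_CPU_MASK) == 3);	/* owning cpu */
	return 0;
}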
910 struct fc_exch *ep = NULL; fc_exch_find() local
915 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); fc_exch_find()
916 if (ep) { fc_exch_find()
917 WARN_ON(ep->xid != xid); fc_exch_find()
918 fc_exch_hold(ep); fc_exch_find()
922 return ep; fc_exch_find()
935 struct fc_exch *ep = fc_seq_exch(sp); fc_exch_done() local
938 spin_lock_bh(&ep->ex_lock); fc_exch_done()
939 rc = fc_exch_done_locked(ep); fc_exch_done()
940 spin_unlock_bh(&ep->ex_lock); fc_exch_done()
942 fc_seq_set_resp(sp, NULL, ep->arg); fc_exch_done()
944 fc_exch_delete(ep); fc_exch_done()
959 struct fc_exch *ep; fc_exch_resp() local
962 ep = fc_exch_alloc(lport, fp); fc_exch_resp()
963 if (ep) { fc_exch_resp()
964 ep->class = fc_frame_class(fp); fc_exch_resp()
969 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */ fc_exch_resp()
970 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */ fc_exch_resp()
972 ep->sid = ntoh24(fh->fh_d_id); fc_exch_resp()
973 ep->did = ntoh24(fh->fh_s_id); fc_exch_resp()
974 ep->oid = ep->did; fc_exch_resp()
981 ep->rxid = ep->xid; fc_exch_resp()
982 ep->oxid = ntohs(fh->fh_ox_id); fc_exch_resp()
983 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT; fc_exch_resp()
985 ep->esb_stat &= ~ESB_ST_SEQ_INIT; fc_exch_resp()
987 fc_exch_hold(ep); /* hold for caller */ fc_exch_resp()
988 spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */ fc_exch_resp()
990 return ep; fc_exch_resp()
1001 * on the ep that should be released by the caller.
1008 struct fc_exch *ep = NULL; fc_seq_lookup_recip() local
1022 ep = fc_exch_find(mp, xid); fc_seq_lookup_recip()
1023 if (!ep) { fc_seq_lookup_recip()
1028 if (ep->rxid == FC_XID_UNKNOWN) fc_seq_lookup_recip()
1029 ep->rxid = ntohs(fh->fh_rx_id); fc_seq_lookup_recip()
1030 else if (ep->rxid != ntohs(fh->fh_rx_id)) { fc_seq_lookup_recip()
1051 ep = fc_exch_find(mp, xid); fc_seq_lookup_recip()
1053 if (ep) { fc_seq_lookup_recip()
1058 ep = fc_exch_resp(lport, mp, fp); fc_seq_lookup_recip()
1059 if (!ep) { fc_seq_lookup_recip()
1063 xid = ep->xid; /* get our XID */ fc_seq_lookup_recip()
1064 } else if (!ep) { fc_seq_lookup_recip()
1071 spin_lock_bh(&ep->ex_lock); fc_seq_lookup_recip()
1077 sp = &ep->seq; fc_seq_lookup_recip()
1081 sp = &ep->seq; fc_seq_lookup_recip()
1102 spin_unlock_bh(&ep->ex_lock); fc_seq_lookup_recip()
1110 WARN_ON(ep != fc_seq_exch(sp)); fc_seq_lookup_recip()
1113 ep->esb_stat |= ESB_ST_SEQ_INIT; fc_seq_lookup_recip()
1114 spin_unlock_bh(&ep->ex_lock); fc_seq_lookup_recip()
1120 fc_exch_done(&ep->seq); fc_seq_lookup_recip()
1121 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */ fc_seq_lookup_recip()
1137 struct fc_exch *ep; fc_seq_lookup_orig() local
1145 ep = fc_exch_find(mp, xid); fc_seq_lookup_orig()
1146 if (!ep) fc_seq_lookup_orig()
1148 if (ep->seq.id == fh->fh_seq_id) { fc_seq_lookup_orig()
1152 sp = &ep->seq; fc_seq_lookup_orig()
1154 ep->rxid == FC_XID_UNKNOWN) { fc_seq_lookup_orig()
1155 ep->rxid = ntohs(fh->fh_rx_id); fc_seq_lookup_orig()
1158 fc_exch_release(ep); fc_seq_lookup_orig()
1164 * @ep: The exchange to set the addresses for
1170 static void fc_exch_set_addr(struct fc_exch *ep, fc_exch_set_addr() argument
1173 ep->oid = orig_id; fc_exch_set_addr()
1174 if (ep->esb_stat & ESB_ST_RESP) { fc_exch_set_addr()
1175 ep->sid = resp_id; fc_exch_set_addr()
1176 ep->did = orig_id; fc_exch_set_addr()
1178 ep->sid = orig_id; fc_exch_set_addr()
1179 ep->did = resp_id; fc_exch_set_addr()
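
The swap above is easy to misread, so here is a standalone restatement with a worked check; the struct name and the IDs are hypothetical:

/* Restatement of fc_exch_set_addr()'s branch.  oid always records
 * the originator; sid/did mirror depending on which side we are.
 */
#include <assert.h>
#include <stdbool.h>

struct ex_sketch { unsigned int oid, sid, did; bool responder; };

static void set_addr_sketch(struct ex_sketch *ex,
			    unsigned int orig_id, unsigned int resp_id)
{
	ex->oid = orig_id;
	if (ex->responder) {
		ex->sid = resp_id;	/* we answer, so we send as resp_id */
		ex->did = orig_id;
	} else {
		ex->sid = orig_id;	/* we originated the exchange */
		ex->did = resp_id;
	}
}

int main(void)
{
	struct ex_sketch o = { .responder = false }, r = { .responder = true };

	set_addr_sketch(&o, 0x010200, 0x010300);
	set_addr_sketch(&r, 0x010200, 0x010300);
	assert(o.sid == 0x010200 && o.did == 0x010300);
	assert(r.sid == 0x010300 && r.did == 0x010200);
	return 0;
}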
1224 struct fc_exch *ep = fc_seq_exch(sp); fc_seq_send_last() local
1227 f_ctl |= ep->f_ctl; fc_seq_send_last()
1228 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); fc_seq_send_last()
1229 fc_seq_send_locked(ep->lp, sp, fp); fc_seq_send_last()
1244 struct fc_exch *ep = fc_seq_exch(sp); fc_seq_send_ack() local
1245 struct fc_lport *lport = ep->lp; fc_seq_send_ack()
1276 fc_exch_setup_hdr(ep, fp, f_ctl); fc_seq_send_ack()
1361 * @ep: The exchange the abort was on
1368 static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) fc_exch_recv_abts() argument
1375 if (!ep) fc_exch_recv_abts()
1378 fp = fc_frame_alloc(ep->lp, sizeof(*ap)); fc_exch_recv_abts()
1382 spin_lock_bh(&ep->ex_lock); fc_exch_recv_abts()
1383 if (ep->esb_stat & ESB_ST_COMPLETE) { fc_exch_recv_abts()
1384 spin_unlock_bh(&ep->ex_lock); fc_exch_recv_abts()
1389 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { fc_exch_recv_abts()
1390 ep->esb_stat |= ESB_ST_REC_QUAL; fc_exch_recv_abts()
1391 fc_exch_hold(ep); /* hold for REC_QUAL */ fc_exch_recv_abts()
1393 fc_exch_timer_set_locked(ep, ep->r_a_tov); fc_exch_recv_abts()
1397 sp = &ep->seq; fc_exch_recv_abts()
1407 ep->esb_stat |= ESB_ST_ABNORMAL; fc_exch_recv_abts()
1408 spin_unlock_bh(&ep->ex_lock); fc_exch_recv_abts()
1466 struct fc_exch *ep = NULL; fc_exch_recv_req() local
1491 ep = fc_seq_exch(sp); fc_exch_recv_req()
1493 ep->encaps = fr_encaps(fp); fc_exch_recv_req()
1506 if (!fc_invoke_resp(ep, sp, fp)) fc_exch_recv_req()
1508 fc_exch_release(ep); /* release from lookup */ fc_exch_recv_req()
1527 struct fc_exch *ep; fc_exch_recv_seq_resp() local
1532 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); fc_exch_recv_seq_resp()
1533 if (!ep) { fc_exch_recv_seq_resp()
1537 if (ep->esb_stat & ESB_ST_COMPLETE) { fc_exch_recv_seq_resp()
1541 if (ep->rxid == FC_XID_UNKNOWN) fc_exch_recv_seq_resp()
1542 ep->rxid = ntohs(fh->fh_rx_id); fc_exch_recv_seq_resp()
1543 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { fc_exch_recv_seq_resp()
1547 if (ep->did != ntoh24(fh->fh_s_id) && fc_exch_recv_seq_resp()
1548 ep->did != FC_FID_FLOGI) { fc_exch_recv_seq_resp()
1553 sp = &ep->seq; fc_exch_recv_seq_resp()
1565 spin_lock_bh(&ep->ex_lock); fc_exch_recv_seq_resp()
1567 ep->esb_stat |= ESB_ST_SEQ_INIT; fc_exch_recv_seq_resp()
1568 spin_unlock_bh(&ep->ex_lock); fc_exch_recv_seq_resp()
1576 spin_lock_bh(&ep->ex_lock); fc_exch_recv_seq_resp()
1577 rc = fc_exch_done_locked(ep); fc_exch_recv_seq_resp()
1578 WARN_ON(fc_seq_exch(sp) != ep); fc_exch_recv_seq_resp()
1579 spin_unlock_bh(&ep->ex_lock); fc_exch_recv_seq_resp()
1581 fc_exch_delete(ep); fc_exch_recv_seq_resp()
1597 if (!fc_invoke_resp(ep, sp, fp)) fc_exch_recv_seq_resp()
1600 fc_exch_release(ep); fc_exch_recv_seq_resp()
1603 fc_exch_release(ep); fc_exch_recv_seq_resp()
1630 * @ep: The exchange that the frame is on
1636 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) fc_exch_abts_resp() argument
1646 FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl, fc_exch_abts_resp()
1649 if (cancel_delayed_work_sync(&ep->timeout_work)) { fc_exch_abts_resp()
1650 FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n"); fc_exch_abts_resp()
1651 fc_exch_release(ep); /* release from pending timer hold */ fc_exch_abts_resp()
1654 spin_lock_bh(&ep->ex_lock); fc_exch_abts_resp()
1668 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 && fc_exch_abts_resp()
1670 ap->ba_seq_id == ep->seq_id) && low != high) { fc_exch_abts_resp()
1671 ep->esb_stat |= ESB_ST_REC_QUAL; fc_exch_abts_resp()
1672 fc_exch_hold(ep); /* hold for recovery qualifier */ fc_exch_abts_resp()
1685 sp = &ep->seq; fc_exch_abts_resp()
1689 if (ep->fh_type != FC_TYPE_FCP && fc_exch_abts_resp()
1691 rc = fc_exch_done_locked(ep); fc_exch_abts_resp()
1692 spin_unlock_bh(&ep->ex_lock); fc_exch_abts_resp()
1694 fc_exch_hold(ep); fc_exch_abts_resp()
1696 fc_exch_delete(ep); fc_exch_abts_resp()
1697 if (!fc_invoke_resp(ep, sp, fp)) fc_exch_abts_resp()
1700 fc_exch_timer_set(ep, ep->r_a_tov); fc_exch_abts_resp()
1701 fc_exch_release(ep); fc_exch_abts_resp()
1715 struct fc_exch *ep; fc_exch_recv_bls() local
1722 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ? fc_exch_recv_bls()
1724 if (ep && (f_ctl & FC_FC_SEQ_INIT)) { fc_exch_recv_bls()
1725 spin_lock_bh(&ep->ex_lock); fc_exch_recv_bls()
1726 ep->esb_stat |= ESB_ST_SEQ_INIT; fc_exch_recv_bls()
1727 spin_unlock_bh(&ep->ex_lock); fc_exch_recv_bls()
1739 if (ep) fc_exch_recv_bls()
1740 FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n", fc_exch_recv_bls()
1750 if (ep) fc_exch_recv_bls()
1751 fc_exch_abts_resp(ep, fp); fc_exch_recv_bls()
1756 fc_exch_recv_abts(ep, fp); fc_exch_recv_bls()
1763 if (ep) fc_exch_recv_bls()
1764 fc_exch_release(ep); /* release hold taken by fc_exch_find */ fc_exch_recv_bls()
1822 * @ep: The exchange to be reset
1826 static void fc_exch_reset(struct fc_exch *ep) fc_exch_reset() argument
1831 spin_lock_bh(&ep->ex_lock); fc_exch_reset()
1832 fc_exch_abort_locked(ep, 0); fc_exch_reset()
1833 ep->state |= FC_EX_RST_CLEANUP; fc_exch_reset()
1834 fc_exch_timer_cancel(ep); fc_exch_reset()
1835 if (ep->esb_stat & ESB_ST_REC_QUAL) fc_exch_reset()
1836 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ fc_exch_reset()
1837 ep->esb_stat &= ~ESB_ST_REC_QUAL; fc_exch_reset()
1838 sp = &ep->seq; fc_exch_reset()
1839 rc = fc_exch_done_locked(ep); fc_exch_reset()
1840 spin_unlock_bh(&ep->ex_lock); fc_exch_reset()
1842 fc_exch_hold(ep); fc_exch_reset()
1845 fc_exch_delete(ep); fc_exch_reset()
1847 fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); fc_exch_reset()
1848 fc_seq_set_resp(sp, NULL, ep->arg); fc_exch_reset()
1849 fc_exch_release(ep); fc_exch_reset()
1868 struct fc_exch *ep; fc_exch_pool_reset() local
1873 list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) { fc_exch_pool_reset()
1874 if ((lport == ep->lp) && fc_exch_pool_reset()
1875 (sid == 0 || sid == ep->sid) && fc_exch_pool_reset()
1876 (did == 0 || did == ep->did)) { fc_exch_pool_reset()
1877 fc_exch_hold(ep); fc_exch_pool_reset()
1880 fc_exch_reset(ep); fc_exch_pool_reset()
1882 fc_exch_release(ep); fc_exch_pool_reset()
1950 struct fc_exch *ep; fc_exch_els_rec() local
1968 ep = fc_exch_lookup(lport, fc_exch_els_rec()
1971 if (!ep) fc_exch_els_rec()
1973 if (ep->oid != sid || oxid != ep->oxid) fc_exch_els_rec()
1975 if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid) fc_exch_els_rec()
1986 acc->reca_rx_id = htons(ep->rxid); fc_exch_els_rec()
1987 if (ep->sid == ep->oid) fc_exch_els_rec()
1988 hton24(acc->reca_rfid, ep->did); fc_exch_els_rec()
1990 hton24(acc->reca_rfid, ep->sid); fc_exch_els_rec()
1991 acc->reca_fc4value = htonl(ep->seq.rec_data); fc_exch_els_rec()
1992 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP | fc_exch_els_rec()
1998 fc_exch_release(ep); fc_exch_els_rec()
2002 fc_exch_release(ep); fc_exch_els_rec()
2080 struct fc_exch *ep; fc_exch_seq_send() local
2086 ep = fc_exch_alloc(lport, fp); fc_exch_seq_send()
2087 if (!ep) { fc_exch_seq_send()
2091 ep->esb_stat |= ESB_ST_SEQ_INIT; fc_exch_seq_send()
2093 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id)); fc_exch_seq_send()
2094 ep->resp = resp; fc_exch_seq_send()
2095 ep->destructor = destructor; fc_exch_seq_send()
2096 ep->arg = arg; fc_exch_seq_send()
2097 ep->r_a_tov = FC_DEF_R_A_TOV; fc_exch_seq_send()
2098 ep->lp = lport; fc_exch_seq_send()
2099 sp = &ep->seq; fc_exch_seq_send()
2101 ep->fh_type = fh->fh_type; /* save for possible timeout handling */ fc_exch_seq_send()
2102 ep->f_ctl = ntoh24(fh->fh_f_ctl); fc_exch_seq_send()
2103 fc_exch_setup_hdr(ep, fp, ep->f_ctl); fc_exch_seq_send()
2106 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { fc_exch_seq_send()
2108 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); fc_exch_seq_send()
2115 fc_exch_timer_set_locked(ep, timer_msec); fc_exch_seq_send()
2116 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */ fc_exch_seq_send()
2118 if (ep->f_ctl & FC_FC_SEQ_INIT) fc_exch_seq_send()
2119 ep->esb_stat &= ~ESB_ST_SEQ_INIT; fc_exch_seq_send()
2120 spin_unlock_bh(&ep->ex_lock); fc_exch_seq_send()
2125 rc = fc_exch_done_locked(ep); fc_exch_seq_send()
2126 spin_unlock_bh(&ep->ex_lock); fc_exch_seq_send()
2128 fc_exch_delete(ep); fc_exch_seq_send()
2134 * @ep: The exchange to send the RRQ on
2139 static void fc_exch_rrq(struct fc_exch *ep) fc_exch_rrq() argument
2146 lport = ep->lp; fc_exch_rrq()
2155 hton24(rrq->rrq_s_id, ep->sid); fc_exch_rrq()
2156 rrq->rrq_ox_id = htons(ep->oxid); fc_exch_rrq()
2157 rrq->rrq_rx_id = htons(ep->rxid); fc_exch_rrq()
2159 did = ep->did; fc_exch_rrq()
2160 if (ep->esb_stat & ESB_ST_RESP) fc_exch_rrq()
2161 did = ep->sid; fc_exch_rrq()
2167 if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep, fc_exch_rrq()
2172 spin_lock_bh(&ep->ex_lock); fc_exch_rrq()
2173 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) { fc_exch_rrq()
2174 spin_unlock_bh(&ep->ex_lock); fc_exch_rrq()
2176 fc_exch_release(ep); fc_exch_rrq()
2179 ep->esb_stat |= ESB_ST_REC_QUAL; fc_exch_rrq()
2180 fc_exch_timer_set_locked(ep, ep->r_a_tov); fc_exch_rrq()
2181 spin_unlock_bh(&ep->ex_lock); fc_exch_rrq()
2191 struct fc_exch *ep = NULL; /* request or subject exchange */ fc_exch_els_rrq() local
2209 ep = fc_exch_lookup(lport, xid); fc_exch_els_rrq()
2211 if (!ep) fc_exch_els_rrq()
2213 spin_lock_bh(&ep->ex_lock); fc_exch_els_rrq()
2214 if (ep->oxid != ntohs(rp->rrq_ox_id)) fc_exch_els_rrq()
2216 if (ep->rxid != ntohs(rp->rrq_rx_id) && fc_exch_els_rrq()
2217 ep->rxid != FC_XID_UNKNOWN) fc_exch_els_rrq()
2220 if (ep->sid != sid) fc_exch_els_rrq()
2226 if (ep->esb_stat & ESB_ST_REC_QUAL) { fc_exch_els_rrq()
2227 ep->esb_stat &= ~ESB_ST_REC_QUAL; fc_exch_els_rrq()
2228 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */ fc_exch_els_rrq()
2230 if (ep->esb_stat & ESB_ST_COMPLETE) fc_exch_els_rrq()
2231 fc_exch_timer_cancel(ep); fc_exch_els_rrq()
2233 spin_unlock_bh(&ep->ex_lock); fc_exch_els_rrq()
2242 spin_unlock_bh(&ep->ex_lock); fc_exch_els_rrq()
2246 if (ep) fc_exch_els_rrq()
2247 fc_exch_release(ep); /* drop hold from fc_exch_find */ fc_exch_els_rrq()
431 fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, struct fc_exch *ep) fc_exch_ptr_set() argument
/linux-4.4.14/drivers/staging/emxx_udc/
H A Demxx_udc.c222 static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) _nbu2ss_ep_init() argument
228 if (ep->epnum == 0) _nbu2ss_ep_init()
231 num = ep->epnum - 1; _nbu2ss_ep_init()
236 data = (begin_adrs << 16) | ep->ep.maxpacket; _nbu2ss_ep_init()
241 data = 1 << (ep->epnum + 8); _nbu2ss_ep_init()
247 switch (ep->ep_type) { _nbu2ss_ep_init()
266 _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct)); _nbu2ss_ep_init()
268 if (ep->direct == USB_DIR_OUT) { _nbu2ss_ep_init()
297 static int _nbu2ss_epn_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) _nbu2ss_epn_exit() argument
302 if ((ep->epnum == 0) || (udc->vbus_active == 0)) _nbu2ss_epn_exit()
305 num = ep->epnum - 1; _nbu2ss_epn_exit()
313 data = 1 << (ep->epnum + 8); _nbu2ss_epn_exit()
316 if (ep->direct == USB_DIR_OUT) { _nbu2ss_epn_exit()
345 static void _nbu2ss_ep_dma_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) _nbu2ss_ep_dma_init() argument
351 if (((ep->epnum == 0) || (data & (1 << ep->epnum)) == 0)) _nbu2ss_ep_dma_init()
354 num = ep->epnum - 1; _nbu2ss_ep_dma_init()
356 if (ep->direct == USB_DIR_OUT) { _nbu2ss_ep_dma_init()
359 data = ep->ep.maxpacket; _nbu2ss_ep_dma_init()
385 static void _nbu2ss_ep_dma_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) _nbu2ss_ep_dma_exit() argument
395 if ((ep->epnum == 0) || ((data & (1 << ep->epnum)) == 0)) _nbu2ss_ep_dma_exit()
398 num = ep->epnum - 1; _nbu2ss_ep_dma_exit()
400 _nbu2ss_ep_dma_abort(udc, ep); _nbu2ss_ep_dma_exit()
402 if (ep->direct == USB_DIR_OUT) { _nbu2ss_ep_dma_exit()
418 static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) _nbu2ss_ep_dma_abort() argument
422 _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum-1].EP_DCR1, DCR1_EPn_REQEN); _nbu2ss_ep_dma_abort()
424 _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum-1].EP_DMA_CTRL, EPn_DMA_EN); _nbu2ss_ep_dma_abort()
474 struct nbu2ss_ep *ep, _nbu2ss_dma_map_single()
481 req->req.dma = ep->phys_buf; _nbu2ss_dma_map_single()
507 struct nbu2ss_ep *ep, _nbu2ss_dma_unmap_single()
528 memcpy(req->req.buf, ep->virt_buf, _nbu2ss_dma_unmap_single()
884 struct nbu2ss_ep *ep, _nbu2ss_epn_out_pio()
913 _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ); _nbu2ss_epn_out_pio()
923 Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ); _nbu2ss_epn_out_pio()
932 || ((req->req.actual % ep->ep.maxpacket) != 0)) { _nbu2ss_epn_out_pio()
943 struct nbu2ss_ep *ep, _nbu2ss_epn_out_data()
952 if (ep->epnum == 0) _nbu2ss_epn_out_data()
955 num = ep->epnum - 1; _nbu2ss_epn_out_data()
959 if ((ep->ep_type != USB_ENDPOINT_XFER_INT) _nbu2ss_epn_out_data()
964 iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket); _nbu2ss_epn_out_data()
965 nret = _nbu2ss_epn_out_pio(udc, ep, req, iBufSize); _nbu2ss_epn_out_data()
974 struct nbu2ss_ep *ep, _nbu2ss_epn_out_transfer()
983 if (ep->epnum == 0) _nbu2ss_epn_out_transfer()
986 num = ep->epnum - 1; _nbu2ss_epn_out_transfer()
994 result = _nbu2ss_epn_out_data(udc, ep, req, iRecvLength); _nbu2ss_epn_out_transfer()
995 if (iRecvLength < ep->ep.maxpacket) { _nbu2ss_epn_out_transfer()
1003 || ((req->req.actual % ep->ep.maxpacket) != 0)) { _nbu2ss_epn_out_transfer()
1010 if ((req->req.actual % ep->ep.maxpacket) == 0) { _nbu2ss_epn_out_transfer()
1031 struct nbu2ss_ep *ep, _nbu2ss_in_dma()
1051 _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN); _nbu2ss_in_dma()
1107 struct nbu2ss_ep *ep, _nbu2ss_epn_in_pio()
1132 &preg->EP_REGS[ep->epnum-1].EP_WRITE _nbu2ss_epn_in_pio()
1142 if (result != ep->ep.maxpacket) { _nbu2ss_epn_in_pio()
1148 _nbu2ss_ep_in_end(udc, ep->epnum, Temp32.dw, data); _nbu2ss_epn_in_pio()
1160 struct nbu2ss_ep *ep, _nbu2ss_epn_in_data()
1168 if (ep->epnum == 0) _nbu2ss_epn_in_data()
1171 num = ep->epnum - 1; _nbu2ss_epn_in_data()
1173 if ((ep->ep_type != USB_ENDPOINT_XFER_INT) _nbu2ss_epn_in_data()
1176 nret = _nbu2ss_in_dma(udc, ep, req, num, data_size); _nbu2ss_epn_in_data()
1178 data_size = min_t(u32, data_size, ep->ep.maxpacket); _nbu2ss_epn_in_data()
1179 nret = _nbu2ss_epn_in_pio(udc, ep, req, data_size); _nbu2ss_epn_in_data()
1188 struct nbu2ss_ep *ep, _nbu2ss_epn_in_transfer()
1197 if (ep->epnum == 0) _nbu2ss_epn_in_transfer()
1200 num = ep->epnum - 1; _nbu2ss_epn_in_transfer()
1219 result = _nbu2ss_epn_in_data(udc, ep, req, iBufSize); _nbu2ss_epn_in_transfer()
1221 _nbu2ss_zero_len_pkt(udc, ep->epnum); _nbu2ss_epn_in_transfer()
1229 struct nbu2ss_ep *ep, _nbu2ss_start_transfer()
1241 if ((req->req.length % ep->ep.maxpacket) == 0) _nbu2ss_start_transfer()
1247 if (ep->epnum == 0) { _nbu2ss_start_transfer()
1268 if (ep->direct == USB_DIR_OUT) { _nbu2ss_start_transfer()
1271 nret = _nbu2ss_epn_out_transfer(udc, ep, req); _nbu2ss_start_transfer()
1274 nret = _nbu2ss_epn_in_transfer(udc, ep, req); _nbu2ss_start_transfer()
1282 static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep) _nbu2ss_restert_transfer() argument
1288 if (list_empty(&ep->queue)) _nbu2ss_restert_transfer()
1291 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_restert_transfer()
1296 if (ep->epnum > 0) { _nbu2ss_restert_transfer()
1298 &ep->udc->p_regs->EP_REGS[ep->epnum-1].EP_LEN_DCNT); _nbu2ss_restert_transfer()
1301 if (length < ep->ep.maxpacket) _nbu2ss_restert_transfer()
1305 _nbu2ss_start_transfer(ep->udc, ep, req, bflag); _nbu2ss_restert_transfer()
1339 struct nbu2ss_ep *ep; _nbu2ss_set_endpoint_stall() local
1353 ep = &udc->ep[epnum]; _nbu2ss_set_endpoint_stall()
1357 ep->halted = TRUE; _nbu2ss_set_endpoint_stall()
1367 ep->stalled = FALSE; _nbu2ss_set_endpoint_stall()
1382 ep->stalled = FALSE; _nbu2ss_set_endpoint_stall()
1383 if (ep->halted) { _nbu2ss_set_endpoint_stall()
1384 ep->halted = FALSE; _nbu2ss_set_endpoint_stall()
1385 _nbu2ss_restert_transfer(ep); _nbu2ss_set_endpoint_stall()
1551 struct nbu2ss_ep *ep _nbu2ss_epn_set_stall()
1560 if (ep->direct == USB_DIR_IN) { _nbu2ss_epn_set_stall()
1566 &preg->EP_REGS[ep->epnum-1].EP_STATUS); _nbu2ss_epn_set_stall()
1575 ep_adrs = ep->epnum | ep->direct; _nbu2ss_epn_set_stall()
1785 struct nbu2ss_ep *ep = &udc->ep[0]; _nbu2ss_ep0_in_data_stage() local
1787 if (list_empty(&ep->queue)) _nbu2ss_ep0_in_data_stage()
1790 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_ep0_in_data_stage()
1812 struct nbu2ss_ep *ep = &udc->ep[0]; _nbu2ss_ep0_out_data_stage() local
1814 if (list_empty(&ep->queue)) _nbu2ss_ep0_out_data_stage()
1817 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_ep0_out_data_stage()
1839 struct nbu2ss_ep *ep = &udc->ep[0]; _nbu2ss_ep0_status_stage() local
1841 if (list_empty(&ep->queue)) _nbu2ss_ep0_status_stage()
1844 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_ep0_status_stage()
1849 req->req.complete(&ep->ep, &req->req); _nbu2ss_ep0_status_stage()
1853 _nbu2ss_ep_done(ep, req, 0); _nbu2ss_ep0_status_stage()
1945 struct nbu2ss_ep *ep, _nbu2ss_ep_done()
1949 struct nbu2ss_udc *udc = ep->udc; _nbu2ss_ep_done()
1954 _nbu2ss_fifo_flush(udc, ep); _nbu2ss_ep_done()
1959 if (ep->stalled) _nbu2ss_ep_done()
1960 _nbu2ss_epn_set_stall(udc, ep); _nbu2ss_ep_done()
1962 if (!list_empty(&ep->queue)) _nbu2ss_ep_done()
1963 _nbu2ss_restert_transfer(ep); _nbu2ss_ep_done()
1967 if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) && _nbu2ss_ep_done()
1969 _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT); _nbu2ss_ep_done()
1973 req->req.complete(&ep->ep, &req->req); _nbu2ss_ep_done()
1980 struct nbu2ss_ep *ep, _nbu2ss_epn_in_int()
1997 result = _nbu2ss_epn_in_transfer(udc, ep, req); _nbu2ss_epn_in_int()
2000 if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) { _nbu2ss_epn_in_int()
2003 _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_STATUS); _nbu2ss_epn_in_int()
2009 _nbu2ss_zero_len_pkt(udc, ep->epnum); _nbu2ss_epn_in_int()
2018 _nbu2ss_ep_done(ep, req, result); _nbu2ss_epn_in_int()
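
The req->zero test above is the usual USB zero-length-packet rule: an IN transfer that ends exactly on a maxpacket boundary is indistinguishable from one with more data coming, so a trailing ZLP closes it. A one-function restatement with a hypothetical name:

/* Restatement of the ZLP decision in _nbu2ss_epn_in_int() above;
 * needs_zlp() is hypothetical.  maxpacket must be non-zero, as it
 * always is for an enabled endpoint.
 */
#include <stdbool.h>

static bool needs_zlp(unsigned int actual, unsigned int maxpacket, bool zero)
{
	return zero && (actual % maxpacket) == 0;
}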
2025 struct nbu2ss_ep *ep, _nbu2ss_epn_out_int()
2030 result = _nbu2ss_epn_out_transfer(udc, ep, req); _nbu2ss_epn_out_int()
2032 _nbu2ss_ep_done(ep, req, result); _nbu2ss_epn_out_int()
2038 struct nbu2ss_ep *ep, _nbu2ss_epn_in_dma_int()
2055 _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN); _nbu2ss_epn_in_dma_int()
2059 _nbu2ss_epn_in_transfer(udc, ep, req); _nbu2ss_epn_in_dma_int()
2061 mpkt = ep->ep.maxpacket; _nbu2ss_epn_in_dma_int()
2065 _nbu2ss_ep_in_end(udc, ep->epnum, 0, 0); _nbu2ss_epn_in_dma_int()
2067 _nbu2ss_epn_in_int(udc, ep, req); _nbu2ss_epn_in_dma_int()
2075 struct nbu2ss_ep *ep, _nbu2ss_epn_out_dma_int()
2084 num = ep->epnum - 1; _nbu2ss_epn_out_dma_int()
2087 if ((req->req.length % ep->ep.maxpacket) && !req->zero) { _nbu2ss_epn_out_dma_int()
2090 _nbu2ss_ep_done(ep, req, 0); _nbu2ss_epn_out_dma_int()
2110 mpkt = ep->ep.maxpacket; _nbu2ss_epn_out_dma_int()
2115 if ((req->req.actual % ep->ep.maxpacket) > 0) { _nbu2ss_epn_out_dma_int()
2119 _nbu2ss_ep_done(ep, req, 0); _nbu2ss_epn_out_dma_int()
2128 _nbu2ss_epn_out_int(udc, ep, req); _nbu2ss_epn_out_dma_int()
2138 struct nbu2ss_ep *ep = &udc->ep[epnum]; _nbu2ss_epn_int() local
2148 if (list_empty(&ep->queue)) _nbu2ss_epn_int()
2151 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_epn_int()
2160 _nbu2ss_epn_out_dma_int(udc, ep, req); _nbu2ss_epn_int()
2164 _nbu2ss_epn_out_int(udc, ep, req); _nbu2ss_epn_int()
2168 _nbu2ss_epn_in_dma_int(udc, ep, req); _nbu2ss_epn_int()
2172 _nbu2ss_epn_in_int(udc, ep, req); _nbu2ss_epn_int()
2193 struct nbu2ss_ep *ep, _nbu2ss_nuke()
2199 _nbu2ss_epn_exit(udc, ep); _nbu2ss_nuke()
2202 _nbu2ss_ep_dma_exit(udc, ep); _nbu2ss_nuke()
2204 if (list_empty(&ep->queue)) _nbu2ss_nuke()
2208 list_for_each_entry(req, &ep->queue, queue) { _nbu2ss_nuke()
2209 _nbu2ss_ep_done(ep, req, status); _nbu2ss_nuke()
2218 struct nbu2ss_ep *ep; _nbu2ss_quiesce() local
2222 _nbu2ss_nuke(udc, &udc->ep[0], -ESHUTDOWN); _nbu2ss_quiesce()
2225 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { _nbu2ss_quiesce()
2226 _nbu2ss_nuke(udc, ep, -ESHUTDOWN); _nbu2ss_quiesce()
2260 static void _nbu2ss_fifo_flush(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) _nbu2ss_fifo_flush() argument
2267 if (ep->epnum == 0) { _nbu2ss_fifo_flush()
2273 _nbu2ss_ep_dma_abort(udc, ep); _nbu2ss_fifo_flush()
2274 _nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPn_BCLR); _nbu2ss_fifo_flush()
2555 struct nbu2ss_ep *ep; nbu2ss_ep_enable() local
2563 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_enable()
2564 if ((!ep) || (!ep->udc)) { nbu2ss_ep_enable()
2565 pr_err(" *** %s, ep == NULL !!\n", __func__); nbu2ss_ep_enable()
2577 udc = ep->udc; nbu2ss_ep_enable()
2584 dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__); nbu2ss_ep_enable()
2590 ep->desc = desc; nbu2ss_ep_enable()
2591 ep->epnum = usb_endpoint_num(desc); nbu2ss_ep_enable()
2592 ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK; nbu2ss_ep_enable()
2593 ep->ep_type = ep_type; nbu2ss_ep_enable()
2594 ep->wedged = 0; nbu2ss_ep_enable()
2595 ep->halted = FALSE; nbu2ss_ep_enable()
2596 ep->stalled = FALSE; nbu2ss_ep_enable()
2598 ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize); nbu2ss_ep_enable()
2601 _nbu2ss_ep_dma_init(udc, ep); nbu2ss_ep_enable()
2604 _nbu2ss_ep_init(udc, ep); nbu2ss_ep_enable()
2614 struct nbu2ss_ep *ep; nbu2ss_ep_disable() local
2623 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_disable()
2624 if ((!ep) || (!ep->udc)) { nbu2ss_ep_disable()
2625 pr_err("udc: *** %s, ep == NULL !!\n", __func__); nbu2ss_ep_disable()
2629 udc = ep->udc; nbu2ss_ep_disable()
2634 _nbu2ss_nuke(udc, ep, -EINPROGRESS); /* dequeue request */ nbu2ss_ep_disable()
2642 struct usb_ep *ep, nbu2ss_ep_alloc_request()
2680 struct nbu2ss_ep *ep; nbu2ss_ep_queue() local
2714 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_queue()
2715 udc = ep->udc; nbu2ss_ep_queue()
2737 if (!ep->virt_buf) nbu2ss_ep_queue()
2738 ep->virt_buf = (u8 *)dma_alloc_coherent( nbu2ss_ep_queue()
2740 &ep->phys_buf, GFP_ATOMIC | GFP_DMA); nbu2ss_ep_queue()
2741 if (ep->epnum > 0) { nbu2ss_ep_queue()
2742 if (ep->direct == USB_DIR_IN) nbu2ss_ep_queue()
2743 memcpy(ep->virt_buf, req->req.buf, nbu2ss_ep_queue()
2748 if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) && nbu2ss_ep_queue()
2750 _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT); nbu2ss_ep_queue()
2756 bflag = list_empty(&ep->queue); nbu2ss_ep_queue()
2757 list_add_tail(&req->queue, &ep->queue); nbu2ss_ep_queue()
2759 if (bflag && !ep->stalled) { nbu2ss_ep_queue()
2761 result = _nbu2ss_start_transfer(udc, ep, req, FALSE); nbu2ss_ep_queue()
2766 } else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) { nbu2ss_ep_queue()
2773 _nbu2ss_ep_done(ep, req, result); nbu2ss_ep_queue()
2788 struct nbu2ss_ep *ep; nbu2ss_ep_dequeue() local
2798 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_dequeue()
2799 if (!ep) { nbu2ss_ep_dequeue()
2800 pr_err("%s, ep == NULL !!\n", __func__); nbu2ss_ep_dequeue()
2804 udc = ep->udc; nbu2ss_ep_dequeue()
2811 list_for_each_entry(req, &ep->queue, queue) { nbu2ss_ep_dequeue()
2821 _nbu2ss_ep_done(ep, req, -ECONNRESET); nbu2ss_ep_dequeue()
2834 struct nbu2ss_ep *ep; nbu2ss_ep_set_halt() local
2842 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_set_halt()
2843 if (!ep) { nbu2ss_ep_set_halt()
2844 pr_err("%s, bad ep\n", __func__); nbu2ss_ep_set_halt()
2848 udc = ep->udc; nbu2ss_ep_set_halt()
2850 dev_err(ep->udc->dev, " *** %s, bad udc\n", __func__); nbu2ss_ep_set_halt()
2856 ep_adrs = ep->epnum | ep->direct; nbu2ss_ep_set_halt()
2859 ep->stalled = FALSE; nbu2ss_ep_set_halt()
2861 if (list_empty(&ep->queue)) nbu2ss_ep_set_halt()
2862 _nbu2ss_epn_set_stall(udc, ep); nbu2ss_ep_set_halt()
2864 ep->stalled = TRUE; nbu2ss_ep_set_halt()
2868 ep->wedged = 0; nbu2ss_ep_set_halt()
2884 struct nbu2ss_ep *ep; nbu2ss_ep_fifo_status() local
2894 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_fifo_status()
2895 if (!ep) { nbu2ss_ep_fifo_status()
2896 pr_err("%s, bad ep\n", __func__); nbu2ss_ep_fifo_status()
2900 udc = ep->udc; nbu2ss_ep_fifo_status()
2902 dev_err(ep->udc->dev, "%s, bad udc\n", __func__); nbu2ss_ep_fifo_status()
2914 if (ep->epnum == 0) { nbu2ss_ep_fifo_status()
2918 data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_LEN_DCNT) nbu2ss_ep_fifo_status()
2931 struct nbu2ss_ep *ep; nbu2ss_ep_fifo_flush() local
2940 ep = container_of(_ep, struct nbu2ss_ep, ep); nbu2ss_ep_fifo_flush()
2941 if (!ep) { nbu2ss_ep_fifo_flush()
2942 pr_err("udc: %s, bad ep\n", __func__); nbu2ss_ep_fifo_flush()
2946 udc = ep->udc; nbu2ss_ep_fifo_flush()
2948 dev_err(ep->udc->dev, "%s, bad udc\n", __func__); nbu2ss_ep_fifo_flush()
2957 _nbu2ss_fifo_flush(udc, ep); nbu2ss_ep_fifo_flush()
3189 udc->gadget.ep0 = &udc->ep[0].ep; nbu2ss_drv_ep_init()
3192 struct nbu2ss_ep *ep = &udc->ep[i]; nbu2ss_drv_ep_init() local
3194 ep->udc = udc; nbu2ss_drv_ep_init()
3195 ep->desc = NULL; nbu2ss_drv_ep_init()
3197 ep->ep.driver_data = NULL; nbu2ss_drv_ep_init()
3198 ep->ep.name = ep_info[i].name; nbu2ss_drv_ep_init()
3199 ep->ep.caps = ep_info[i].caps; nbu2ss_drv_ep_init()
3200 ep->ep.ops = &nbu2ss_ep_ops; nbu2ss_drv_ep_init()
3202 usb_ep_set_maxpacket_limit(&ep->ep, nbu2ss_drv_ep_init()
3205 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); nbu2ss_drv_ep_init()
3206 INIT_LIST_HEAD(&ep->queue); nbu2ss_drv_ep_init()
3209 list_del_init(&udc->ep[0].ep.ep_list); nbu2ss_drv_ep_init()
3233 udc->gadget.ep0 = &udc->ep[0].ep; nbu2ss_drv_contest_init()
472 _nbu2ss_dma_map_single( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u8 direct ) _nbu2ss_dma_map_single() argument
505 _nbu2ss_dma_unmap_single( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u8 direct ) _nbu2ss_dma_unmap_single() argument
882 _nbu2ss_epn_out_pio( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 length ) _nbu2ss_epn_out_pio() argument
941 _nbu2ss_epn_out_data( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 data_size ) _nbu2ss_epn_out_data() argument
972 _nbu2ss_epn_out_transfer( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req ) _nbu2ss_epn_out_transfer() argument
1029 _nbu2ss_in_dma( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 num, u32 length ) _nbu2ss_in_dma() argument
1105 _nbu2ss_epn_in_pio( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 length ) _nbu2ss_epn_in_pio() argument
1158 _nbu2ss_epn_in_data( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 data_size ) _nbu2ss_epn_in_data() argument
1186 _nbu2ss_epn_in_transfer( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req ) _nbu2ss_epn_in_transfer() argument
1227 _nbu2ss_start_transfer( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, bool bflag) _nbu2ss_start_transfer() argument
1944 _nbu2ss_ep_done( struct nbu2ss_ep *ep, struct nbu2ss_req *req, int status) _nbu2ss_ep_done() argument
1978 _nbu2ss_epn_in_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_in_int() argument
2023 _nbu2ss_epn_out_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_out_int() argument
2036 _nbu2ss_epn_in_dma_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_in_dma_int() argument
2073 _nbu2ss_epn_out_dma_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_out_dma_int() argument
2192 _nbu2ss_nuke(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, int status) _nbu2ss_nuke() argument
2641 nbu2ss_ep_alloc_request( struct usb_ep *ep, gfp_t gfp_flags) nbu2ss_ep_alloc_request() argument
/linux-4.4.14/drivers/usb/host/
H A Dfhci-tds.c86 void fhci_push_dummy_bd(struct endpoint *ep) fhci_push_dummy_bd() argument
88 if (ep->already_pushed_dummy_bd == false) { fhci_push_dummy_bd()
89 u16 td_status = in_be16(&ep->empty_td->status); fhci_push_dummy_bd()
91 out_be32(&ep->empty_td->buf_ptr, DUMMY_BD_BUFFER); fhci_push_dummy_bd()
93 ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status); fhci_push_dummy_bd()
94 ep->already_pushed_dummy_bd = true; fhci_push_dummy_bd()
101 struct endpoint *ep; fhci_ep0_free() local
104 ep = usb->ep0; fhci_ep0_free()
105 if (ep) { fhci_ep0_free()
106 if (ep->td_base) fhci_ep0_free()
107 cpm_muram_free(cpm_muram_offset(ep->td_base)); fhci_ep0_free()
109 if (kfifo_initialized(&ep->conf_frame_Q)) { fhci_ep0_free()
110 size = cq_howmany(&ep->conf_frame_Q); fhci_ep0_free()
112 struct packet *pkt = cq_get(&ep->conf_frame_Q); fhci_ep0_free()
116 cq_delete(&ep->conf_frame_Q); fhci_ep0_free()
119 if (kfifo_initialized(&ep->empty_frame_Q)) { fhci_ep0_free()
120 size = cq_howmany(&ep->empty_frame_Q); fhci_ep0_free()
122 struct packet *pkt = cq_get(&ep->empty_frame_Q); fhci_ep0_free()
126 cq_delete(&ep->empty_frame_Q); fhci_ep0_free()
129 if (kfifo_initialized(&ep->dummy_packets_Q)) { fhci_ep0_free()
130 size = cq_howmany(&ep->dummy_packets_Q); fhci_ep0_free()
132 u8 *buff = cq_get(&ep->dummy_packets_Q); fhci_ep0_free()
136 cq_delete(&ep->dummy_packets_Q); fhci_ep0_free()
139 kfree(ep); fhci_ep0_free()
155 struct endpoint *ep; fhci_create_ep() local
168 ep = kzalloc(sizeof(*ep), GFP_KERNEL); fhci_create_ep()
169 if (!ep) fhci_create_ep()
176 ep->td_base = cpm_muram_addr(ep_offset); fhci_create_ep()
179 if (cq_new(&ep->conf_frame_Q, ring_len + 2) || fhci_create_ep()
180 cq_new(&ep->empty_frame_Q, ring_len + 2) || fhci_create_ep()
181 cq_new(&ep->dummy_packets_Q, ring_len + 2)) { fhci_create_ep()
202 cq_put(&ep->empty_frame_Q, pkt); fhci_create_ep()
203 cq_put(&ep->dummy_packets_Q, buff); fhci_create_ep()
207 ep->ep_pram_ptr = (void __iomem *)ep->td_base + sizeof(*td) * ring_len; fhci_create_ep()
209 ep->conf_td = ep->td_base; fhci_create_ep()
210 ep->empty_td = ep->td_base; fhci_create_ep()
212 ep->already_pushed_dummy_bd = false; fhci_create_ep()
215 td = ep->td_base; fhci_create_ep()
228 usb->ep0 = ep; fhci_create_ep()
233 kfree(ep); fhci_create_ep()
243 * ep A pointer to the endpoint structure
246 void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep, fhci_init_ep_registers() argument
255 cpm_muram_offset(ep->ep_pram_ptr)); fhci_init_ep_registers()
262 out_8(&ep->ep_pram_ptr->rx_func_code, rt); fhci_init_ep_registers()
263 out_8(&ep->ep_pram_ptr->tx_func_code, rt); fhci_init_ep_registers()
264 out_be16(&ep->ep_pram_ptr->rx_buff_len, 1028); fhci_init_ep_registers()
265 out_be16(&ep->ep_pram_ptr->rx_base, 0); fhci_init_ep_registers()
266 out_be16(&ep->ep_pram_ptr->tx_base, cpm_muram_offset(ep->td_base)); fhci_init_ep_registers()
267 out_be16(&ep->ep_pram_ptr->rx_bd_ptr, 0); fhci_init_ep_registers()
268 out_be16(&ep->ep_pram_ptr->tx_bd_ptr, cpm_muram_offset(ep->td_base)); fhci_init_ep_registers()
269 out_be32(&ep->ep_pram_ptr->tx_state, 0); fhci_init_ep_registers()
283 struct endpoint *ep = usb->ep0; fhci_td_transaction_confirm() local
297 td = ep->conf_td; fhci_td_transaction_confirm()
316 ep->conf_td = next_bd(ep->td_base, ep->conf_td, td_status); fhci_td_transaction_confirm()
322 pkt = cq_get(&ep->conf_frame_Q); fhci_td_transaction_confirm()
389 struct endpoint *ep = usb->ep0; fhci_host_transaction() local
396 td = ep->empty_td; fhci_host_transaction()
406 ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status); fhci_host_transaction()
463 cq_put(&ep->conf_frame_Q, pkt); fhci_host_transaction()
465 if (cq_howmany(&ep->conf_frame_Q) == 1) fhci_host_transaction()
478 struct endpoint *ep = usb->ep0; fhci_flush_bds() local
480 td = ep->td_base; fhci_flush_bds()
501 td = ep->td_base; fhci_flush_bds()
514 out_be16(&ep->ep_pram_ptr->tx_bd_ptr, fhci_flush_bds()
515 in_be16(&ep->ep_pram_ptr->tx_base)); fhci_flush_bds()
516 out_be32(&ep->ep_pram_ptr->tx_state, 0); fhci_flush_bds()
517 out_be16(&ep->ep_pram_ptr->tx_cnt, 0); fhci_flush_bds()
518 ep->empty_td = ep->td_base; fhci_flush_bds()
519 ep->conf_td = ep->td_base; fhci_flush_bds()
535 struct endpoint *ep = usb->ep0; fhci_flush_actual_frame() local
541 tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr); fhci_flush_actual_frame()
551 ep->already_pushed_dummy_bd = false; fhci_flush_actual_frame()
556 td = next_bd(ep->td_base, td, td_status); fhci_flush_actual_frame()
564 out_be16(&ep->ep_pram_ptr->tx_bd_ptr, fhci_flush_actual_frame()
565 in_be16(&ep->ep_pram_ptr->tx_base)); fhci_flush_actual_frame()
566 out_be32(&ep->ep_pram_ptr->tx_state, 0); fhci_flush_actual_frame()
567 out_be16(&ep->ep_pram_ptr->tx_cnt, 0); fhci_flush_actual_frame()
568 ep->empty_td = ep->td_base; fhci_flush_actual_frame()
569 ep->conf_td = ep->td_base; fhci_flush_actual_frame()
599 struct endpoint *ep = usb->ep0; fhci_host_transmit_actual_frame() local
601 tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr); fhci_host_transmit_actual_frame()
607 ep->already_pushed_dummy_bd = false; fhci_host_transmit_actual_frame()
610 td = next_bd(ep->td_base, td, td_status); fhci_host_transmit_actual_frame()
612 out_be16(&ep->ep_pram_ptr->tx_bd_ptr, tb_ptr); fhci_host_transmit_actual_frame()
618 if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) { fhci_host_transmit_actual_frame()
620 ep->conf_td = next_bd(ep->td_base, ep->conf_td, fhci_host_transmit_actual_frame()
H A Dsl811-hcd.c135 struct sl811h_ep *ep, setup_packet()
153 writeb(SL_SETUP /* | ep->epnum */, data_reg); setup_packet()
159 ep->length = 0; setup_packet()
160 PACKET("SETUP qh%p\n", ep); setup_packet()
166 struct sl811h_ep *ep, status_packet()
181 writeb((do_out ? SL_OUT : SL_IN) /* | ep->epnum */, data_reg); status_packet()
189 ep->length = 0; status_packet()
190 PACKET("STATUS%s/%s qh%p\n", ep->nak_count ? "/retry" : "", status_packet()
191 do_out ? "out" : "in", ep); status_packet()
200 struct sl811h_ep *ep, in_packet()
211 len = ep->maxpacket; in_packet()
214 && usb_gettoggle(urb->dev, ep->epnum, 0)) in_packet()
221 writeb(SL_IN | ep->epnum, data_reg); in_packet()
225 ep->length = min_t(u32, len, in_packet()
227 PACKET("IN%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "", in_packet()
228 !!usb_gettoggle(urb->dev, ep->epnum, 0), ep, len); in_packet()
236 struct sl811h_ep *ep, out_packet()
250 len = min_t(u32, ep->maxpacket, out_packet()
254 && usb_gettoggle(urb->dev, ep->epnum, 1)) out_packet()
264 writeb(SL_OUT | ep->epnum, data_reg); out_packet()
269 ep->length = len; out_packet()
270 PACKET("OUT%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "", out_packet()
271 !!usb_gettoggle(urb->dev, ep->epnum, 1), ep, len); out_packet()
303 struct sl811h_ep *ep; start() local
310 ep = sl811->next_periodic; start()
311 sl811->next_periodic = ep->next; start()
314 ep = sl811->next_async; start()
316 ep = container_of(sl811->async.next, start()
326 if ((bank && sl811->active_b == ep) || sl811->active_a == ep) start()
330 if (ep->schedule.next == &sl811->async) start()
333 sl811->next_async = container_of(ep->schedule.next, start()
337 if (unlikely(list_empty(&ep->hep->urb_list))) { start()
339 "empty %p queue?\n", ep); start()
343 urb = container_of(ep->hep->urb_list.next, struct urb, urb_list); start()
344 control = ep->defctrl; start()
356 fclock -= ep->maxpacket << 8; start()
360 if (ep->period) start()
368 if (ep->period) start()
373 } else if (ep->nak_count) start()
378 switch (ep->nextpid) { start()
380 in_packet(sl811, ep, urb, bank, control); start()
383 out_packet(sl811, ep, urb, bank, control); start()
386 setup_packet(sl811, ep, urb, bank, control); start()
389 status_packet(sl811, ep, urb, bank, control); start()
393 "bad ep%p pid %02x\n", ep, ep->nextpid); start()
394 ep = NULL; start()
396 return ep; start()
421 struct sl811h_ep *ep,
429 ep->nextpid = USB_PID_SETUP;
437 if (!list_empty(&ep->hep->urb_list))
441 if (!list_empty(&ep->schedule)) {
442 list_del_init(&ep->schedule);
443 if (ep == sl811->next_async)
450 "deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
451 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
455 while (*prev && ((temp = *prev) != ep))
458 *prev = ep->next;
459 sl811->load[i] -= ep->load;
461 ep->branch = PERIODIC_SIZE;
464 -= ep->load / ep->period;
465 if (ep == sl811->next_periodic)
466 sl811->next_periodic = ep->next;
474 done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank) done() argument
480 if (unlikely(!ep)) done()
485 urb = container_of(ep->hep->urb_list.next, struct urb, urb_list); done()
489 // PACKET("...NAK_%02x qh%p\n", bank, ep); done()
490 if (!ep->period) done()
491 ep->nak_count++; done()
492 ep->error_count = 0; done()
502 ep->nak_count = ep->error_count = 0; done()
503 switch (ep->nextpid) { done()
505 // PACKET("...ACK/out_%02x qh%p\n", bank, ep); done()
506 urb->actual_length += ep->length; done()
507 usb_dotoggle(udev, ep->epnum, 1); done()
511 ep->nextpid = USB_PID_ACK; done()
516 else if (ep->length < ep->maxpacket done()
523 // PACKET("...ACK/in_%02x qh%p\n", bank, ep); done()
526 len = ep->maxpacket - sl811_read(sl811, done()
528 if (len > ep->length) { done()
529 len = ep->length; done()
535 usb_dotoggle(udev, ep->epnum, 0); done()
537 (len < ep->maxpacket || done()
541 ep->nextpid = USB_PID_ACK; done()
547 // PACKET("...ACK/setup_%02x qh%p\n", bank, ep); done()
549 ep->nextpid = USB_PID_ACK; done()
552 ep->nextpid = USB_PID_OUT; done()
555 ep->nextpid = USB_PID_IN; done()
559 // PACKET("...ACK/status_%02x qh%p\n", bank, ep); done()
566 PACKET("...STALL_%02x qh%p\n", bank, ep); done()
567 ep->nak_count = ep->error_count = 0; done()
571 } else if (++ep->error_count >= 3) { done()
578 ep->error_count = 0; done()
580 bank, status, ep, urbstat); done()
584 finish_request(sl811, ep, urb, urbstat); done()
800 struct sl811h_ep *ep = NULL; sl811h_urb_enqueue() local
804 struct usb_host_endpoint *hep = urb->ep; sl811h_urb_enqueue()
813 ep = kzalloc(sizeof *ep, mem_flags); sl811h_urb_enqueue()
814 if (ep == NULL) sl811h_urb_enqueue()
824 kfree(ep); sl811h_urb_enqueue()
829 kfree(ep); sl811h_urb_enqueue()
834 kfree(ep); sl811h_urb_enqueue()
835 ep = hep->hcpriv; sl811h_urb_enqueue()
836 } else if (!ep) { sl811h_urb_enqueue()
841 INIT_LIST_HEAD(&ep->schedule); sl811h_urb_enqueue()
842 ep->udev = udev; sl811h_urb_enqueue()
843 ep->epnum = epnum; sl811h_urb_enqueue()
844 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); sl811h_urb_enqueue()
845 ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE; sl811h_urb_enqueue()
849 ep->nextpid = USB_PID_SETUP; sl811h_urb_enqueue()
851 ep->nextpid = USB_PID_OUT; sl811h_urb_enqueue()
853 ep->nextpid = USB_PID_IN; sl811h_urb_enqueue()
855 if (ep->maxpacket > H_MAXPACKET) { sl811h_urb_enqueue()
858 "dev %d ep%d maxpacket %d\n", udev->devnum, sl811h_urb_enqueue()
859 epnum, ep->maxpacket); sl811h_urb_enqueue()
861 kfree(ep); sl811h_urb_enqueue()
868 ep->defctrl |= SL11H_HCTLMASK_PREAMBLE; sl811h_urb_enqueue()
875 ep->period = urb->interval; sl811h_urb_enqueue()
876 ep->branch = PERIODIC_SIZE; sl811h_urb_enqueue()
878 ep->defctrl |= SL11H_HCTLMASK_ISOCH; sl811h_urb_enqueue()
879 ep->load = usb_calc_bus_time(udev->speed, !is_out, sl811h_urb_enqueue()
886 ep->hep = hep; sl811h_urb_enqueue()
887 hep->hcpriv = ep; sl811h_urb_enqueue()
894 if (list_empty(&ep->schedule)) sl811h_urb_enqueue()
895 list_add_tail(&ep->schedule, &sl811->async); sl811h_urb_enqueue()
899 urb->interval = ep->period; sl811h_urb_enqueue()
900 if (ep->branch < PERIODIC_SIZE) { sl811h_urb_enqueue()
907 + ep->branch; sl811h_urb_enqueue()
911 retval = balance(sl811, ep->period, ep->load); sl811h_urb_enqueue()
914 ep->branch = retval; sl811h_urb_enqueue()
917 + ep->branch; sl811h_urb_enqueue()
924 ep->period, ep, ep->branch); sl811h_urb_enqueue()
925 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { sl811h_urb_enqueue()
929 while (here && ep != here) { sl811h_urb_enqueue()
930 if (ep->period > here->period) sl811h_urb_enqueue()
935 if (ep != here) { sl811h_urb_enqueue()
936 ep->next = here; sl811h_urb_enqueue()
937 *prev = ep; sl811h_urb_enqueue()
939 sl811->load[i] += ep->load; sl811h_urb_enqueue()
942 hcd->self.bandwidth_allocated += ep->load / ep->period; sl811h_urb_enqueue()
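
balance() itself is not among these results; what sl811h_urb_enqueue() expects from it is a branch in [0, period) whose slot chain can absorb ep->load in every visited frame of the periodic table. A hedged sketch under that assumption, with hypothetical constants:

/* Hypothetical sketch of what balance() must return for the caller
 * above: the least-loaded branch whose chain i, i+period, ... stays
 * under the per-frame budget across the whole table.
 */
#include <errno.h>

#define PERIODIC_SIZE	32
#define MAX_LOAD_US	500	/* hypothetical per-frame budget */

static int balance_sketch(const unsigned short load[PERIODIC_SIZE],
			  unsigned short period, unsigned short cost)
{
	int i, j, best = -ENOSPC;

	for (i = 0; i < period; i++) {
		if (best >= 0 && load[best] <= load[i])
			continue;		/* current best already lighter */
		for (j = i; j < PERIODIC_SIZE; j += period)
			if (load[j] + cost > MAX_LOAD_US)
				break;
		if (j >= PERIODIC_SIZE)		/* whole chain has room */
			best = i;
	}
	return best;
}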
962 struct sl811h_ep *ep; sl811h_urb_dequeue() local
971 ep = hep->hcpriv; sl811h_urb_dequeue()
972 if (ep) { sl811h_urb_dequeue()
976 if (ep->hep->urb_list.next != &urb->urb_list) { sl811h_urb_dequeue()
980 } else if (sl811->active_a == ep) { sl811h_urb_dequeue()
995 } else if (sl811->active_b == ep) { sl811h_urb_dequeue()
1015 finish_request(sl811, ep, urb, 0); sl811h_urb_dequeue()
1019 (sl811->active_a == ep) ? "A" : "B"); sl811h_urb_dequeue()
1030 struct sl811h_ep *ep = hep->hcpriv; sl811h_endpoint_disable() local
1032 if (!ep) sl811h_endpoint_disable()
1039 dev_warn(hcd->self.controller, "ep %p not empty?\n", ep); sl811h_endpoint_disable()
1041 kfree(ep); sl811h_endpoint_disable()
1386 struct sl811h_ep *ep; sl811h_show() local
1434 list_for_each_entry (ep, &sl811->async, schedule) { sl811h_show()
1437 seq_printf(s, "%s%sqh%p, ep%d%s, maxpacket %d" sl811h_show()
1439 (ep == sl811->active_a) ? "(A) " : "", sl811h_show()
1440 (ep == sl811->active_b) ? "(B) " : "", sl811h_show()
1441 ep, ep->epnum, sl811h_show()
1442 ({ char *s; switch (ep->nextpid) { sl811h_show()
1449 ep->maxpacket, sl811h_show()
1450 ep->nak_count, ep->error_count); sl811h_show()
1451 list_for_each_entry (urb, &ep->hep->urb_list, urb_list) { sl811h_show()
1463 ep = sl811->periodic[i]; sl811h_show()
1464 if (!ep) sl811h_show()
1471 " %s%sqh%d/%p (%sdev%d ep%d%s max %d) " sl811h_show()
1473 (ep == sl811->active_a) ? "(A) " : "", sl811h_show()
1474 (ep == sl811->active_b) ? "(B) " : "", sl811h_show()
1475 ep->period, ep, sl811h_show()
1476 (ep->udev->speed == USB_SPEED_FULL) sl811h_show()
1478 ep->udev->devnum, ep->epnum, sl811h_show()
1479 (ep->epnum == 0) ? "" sl811h_show()
1480 : ((ep->nextpid == USB_PID_IN) sl811h_show()
1483 ep->maxpacket, ep->error_count); sl811h_show()
1484 ep = ep->next; sl811h_show()
1485 } while (ep); sl811h_show()
133 setup_packet( struct sl811 *sl811, struct sl811h_ep *ep, struct urb *urb, u8 bank, u8 control ) setup_packet() argument
164 status_packet( struct sl811 *sl811, struct sl811h_ep *ep, struct urb *urb, u8 bank, u8 control ) status_packet() argument
198 in_packet( struct sl811 *sl811, struct sl811h_ep *ep, struct urb *urb, u8 bank, u8 control ) in_packet() argument
234 out_packet( struct sl811 *sl811, struct sl811h_ep *ep, struct urb *urb, u8 bank, u8 control ) out_packet() argument
H A Disp1362-hcd.c185 struct isp1362_ep *ep, u16 len) claim_ptd_buffers()
196 if (ep->num_ptds) claim_ptd_buffers()
199 BUG_ON(ep->num_ptds != 0); claim_ptd_buffers()
210 ep->ptd_offset = ptd_offset; claim_ptd_buffers()
211 ep->num_ptds += num_ptds; claim_ptd_buffers()
214 ep->ptd_index = found; claim_ptd_buffers()
217 __func__, epq->name, ep->ptd_index, ep->ptd_offset, claim_ptd_buffers()
223 static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) release_ptd_buffers() argument
225 int last = ep->ptd_index + ep->num_ptds; release_ptd_buffers()
228 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n", release_ptd_buffers()
229 __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index, release_ptd_buffers()
230 ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail, release_ptd_buffers()
234 bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds); release_ptd_buffers()
235 bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds); release_ptd_buffers()
236 epq->buf_avail += ep->num_ptds; release_ptd_buffers()
244 ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count); release_ptd_buffers()
248 ep->num_ptds = 0; release_ptd_buffers()
249 ep->ptd_offset = -EINVAL; release_ptd_buffers()
250 ep->ptd_index = -EINVAL; release_ptd_buffers()
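
release_ptd_buffers() above pairs two bitmaps: buf_map records which PTD slots an endpoint owns, and skip_map tells the ISP1362 which PTDs to ignore until they are reused. A compact sketch of the claim/release pair under that model; the struct and the 32-slot size are hypothetical:

/* Sketch of the two-bitmap PTD bookkeeping above.  The set/clear
 * pairing matches claim_ptd_buffers()/release_ptd_buffers(); the
 * queue struct and NUM_PTDS are hypothetical.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>

#define NUM_PTDS	32

struct ptd_queue_sketch {
	unsigned long buf_map;		/* 1 = slot owned by an endpoint */
	unsigned long skip_map;		/* 1 = controller skips this PTD */
	int buf_avail;
};

static int ptd_claim(struct ptd_queue_sketch *q, int num_ptds)
{
	unsigned long index;

	index = bitmap_find_next_zero_area(&q->buf_map, NUM_PTDS, 0,
					   num_ptds, 0);
	if (index >= NUM_PTDS)
		return -ENOMEM;
	bitmap_set(&q->buf_map, index, num_ptds);
	q->buf_avail -= num_ptds;
	return index;			/* becomes ep->ptd_index */
}

static void ptd_release(struct ptd_queue_sketch *q, int index, int num_ptds)
{
	bitmap_clear(&q->buf_map, index, num_ptds);
	bitmap_set(&q->skip_map, index, num_ptds);
	q->buf_avail += num_ptds;
}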
259 struct isp1362_ep *ep, struct isp1362_ep_queue *epq, prepare_ptd()
268 DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep); prepare_ptd()
270 ptd = &ep->ptd; prepare_ptd()
272 ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length; prepare_ptd()
274 switch (ep->nextpid) { prepare_ptd()
276 toggle = usb_gettoggle(urb->dev, ep->epnum, 0); prepare_ptd()
279 len = min_t(size_t, ep->maxpacket, buf_len); prepare_ptd()
282 ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset; prepare_ptd()
284 len = max_transfer_size(epq, buf_len, ep->maxpacket); prepare_ptd()
285 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, prepare_ptd()
289 toggle = usb_gettoggle(urb->dev, ep->epnum, 1); prepare_ptd()
292 len = min_t(size_t, ep->maxpacket, buf_len); prepare_ptd()
296 len = max_transfer_size(epq, buf_len, ep->maxpacket); prepare_ptd()
300 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, prepare_ptd()
308 ep->data = urb->setup_packet; prepare_ptd()
319 pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid); prepare_ptd()
323 ep->length = len; prepare_ptd()
325 ep->data = NULL; prepare_ptd()
328 ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) | prepare_ptd()
329 PTD_EP(ep->epnum); prepare_ptd()
334 ptd->faddr |= PTD_SF_INT(ep->branch); prepare_ptd()
335 ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0); prepare_ptd()
343 static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, isp1362_write_ptd() argument
346 struct ptd *ptd = &ep->ptd; isp1362_write_ptd()
347 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length; isp1362_write_ptd()
350 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); isp1362_write_ptd()
352 isp1362_write_buffer(isp1362_hcd, ep->data, isp1362_write_ptd()
353 ep->ptd_offset + PTD_HEADER_SIZE, len); isp1362_write_ptd()
356 dump_ptd_out_data(ptd, ep->data); isp1362_write_ptd()
359 static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, isp1362_read_ptd() argument
362 struct ptd *ptd = &ep->ptd; isp1362_read_ptd()
365 WARN_ON(list_empty(&ep->active)); isp1362_read_ptd()
366 BUG_ON(ep->ptd_offset < 0); isp1362_read_ptd()
368 list_del_init(&ep->active); isp1362_read_ptd()
369 DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active); isp1362_read_ptd()
372 isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); isp1362_read_ptd()
377 if (act_len > ep->length) isp1362_read_ptd()
378 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep, isp1362_read_ptd()
379 ep->ptd_offset, act_len, ep->length); isp1362_read_ptd()
380 BUG_ON(act_len > ep->length); isp1362_read_ptd()
385 prefetchw(ep->data); isp1362_read_ptd()
386 isp1362_read_buffer(isp1362_hcd, ep->data, isp1362_read_ptd()
387 ep->ptd_offset + PTD_HEADER_SIZE, act_len); isp1362_read_ptd()
388 dump_ptd_in_data(ptd, ep->data); isp1362_read_ptd()
396 static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) remove_ptd() argument
402 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset); remove_ptd()
403 BUG_ON(ep->ptd_offset < 0); remove_ptd()
405 epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); remove_ptd()
408 /* put ep in remove_list for cleanup */ remove_ptd()
409 WARN_ON(!list_empty(&ep->remove_list)); remove_ptd()
410 list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list); remove_ptd()
414 index = ep->ptd_index; remove_ptd()
420 index, ep->ptd_offset, epq->skip_map, 1 << index); remove_ptd()
443 static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
449 ep->error_count = 0;
452 ep->nextpid = USB_PID_SETUP;
454 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
455 ep->num_req, usb_pipedevice(urb->pipe),
473 if (!list_empty(&ep->hep->urb_list))
477 if (!list_empty(&ep->schedule)) {
478 list_del_init(&ep->schedule);
483 if (ep->interval) {
485 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
486 ep, ep->branch, ep->load,
487 isp1362_hcd->load[ep->branch],
488 isp1362_hcd->load[ep->branch] - ep->load);
489 isp1362_hcd->load[ep->branch] -= ep->load;
490 ep->branch = PERIODIC_SIZE;
497 static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) postproc_ep() argument
499 struct urb *urb = get_urb(ep); postproc_ep()
507 DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req); postproc_ep()
510 ptd = &ep->ptd; postproc_ep()
514 ep->num_req, ptd); postproc_ep()
530 __func__, ep->num_req, short_ok ? "" : "not_", postproc_ep()
531 PTD_GET_COUNT(ptd), ep->maxpacket, len); postproc_ep()
536 __func__, ep->num_req, postproc_ep()
537 usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, postproc_ep()
539 PTD_GET_COUNT(ptd), ep->maxpacket, len); postproc_ep()
545 ep->nextpid = USB_PID_ACK; postproc_ep()
551 usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, postproc_ep()
560 if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) { postproc_ep()
563 __func__, ep->num_req, ep->nextpid, urbstat, cc, postproc_ep()
564 ep->error_count); postproc_ep()
569 switch (ep->nextpid) { postproc_ep()
571 if (PTD_GET_COUNT(ptd) != ep->length) postproc_ep()
573 PTD_GET_COUNT(ptd), ep->length); postproc_ep()
574 BUG_ON(PTD_GET_COUNT(ptd) != ep->length); postproc_ep()
575 urb->actual_length += ep->length; postproc_ep()
577 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)); postproc_ep()
580 ep->num_req, len, ep->maxpacket, urbstat); postproc_ep()
583 ep->num_req, postproc_ep()
585 ep->nextpid = USB_PID_ACK; postproc_ep()
587 if (len % ep->maxpacket || postproc_ep()
591 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", postproc_ep()
592 urbstat, len, ep->maxpacket, urb->actual_length); postproc_ep()
599 BUG_ON(len > ep->length); postproc_ep()
602 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)); postproc_ep()
605 len % ep->maxpacket) { postproc_ep()
607 ep->num_req, len, ep->maxpacket, urbstat); postproc_ep()
610 ep->num_req, postproc_ep()
612 ep->nextpid = USB_PID_ACK; postproc_ep()
616 __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", postproc_ep()
617 urbstat, len, ep->maxpacket, urb->actual_length); postproc_ep()
623 ep->nextpid = USB_PID_ACK; postproc_ep()
626 ep->nextpid = USB_PID_OUT; postproc_ep()
629 ep->nextpid = USB_PID_IN; postproc_ep()
633 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req, postproc_ep()
637 ep->nextpid = 0; postproc_ep()
645 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__, postproc_ep()
646 ep, ep->num_req, urb, urbstat); postproc_ep()
647 finish_request(isp1362_hcd, ep, urb, urbstat); postproc_ep()
653 struct isp1362_ep *ep; finish_unlinks() local
656 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) { finish_unlinks()
658 get_ptd_queue(isp1362_hcd, ep->ptd_offset); finish_unlinks()
659 int index = ep->ptd_index; finish_unlinks()
663 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset); finish_unlinks()
664 BUG_ON(ep->num_ptds == 0); finish_unlinks()
665 release_ptd_buffers(epq, ep); finish_unlinks()
667 if (!list_empty(&ep->hep->urb_list)) { finish_unlinks()
668 struct urb *urb = get_urb(ep); finish_unlinks()
670 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__, finish_unlinks()
671 ep->num_req, ep); finish_unlinks()
672 finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN); finish_unlinks()
674 WARN_ON(list_empty(&ep->active)); finish_unlinks()
675 if (!list_empty(&ep->active)) { finish_unlinks()
676 list_del_init(&ep->active); finish_unlinks()
677 DBG(1, "%s: ep %p removed from active list\n", __func__, ep); finish_unlinks()
679 list_del_init(&ep->remove_list); finish_unlinks()
680 DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep); finish_unlinks()
712 struct isp1362_ep *ep, struct isp1362_ep_queue *epq) submit_req()
716 prepare_ptd(isp1362_hcd, urb, ep, epq, 0); submit_req()
717 index = claim_ptd_buffers(epq, ep, ep->length); submit_req()
720 ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); submit_req()
724 __func__, ep->num_req, ep->length, epq->name, ep->num_ptds, submit_req()
729 list_add_tail(&ep->active, &epq->active); submit_req()
730 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__, submit_req()
731 ep, ep->num_req, ep->length, &epq->active); submit_req()
732 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name, submit_req()
733 ep->ptd_offset, ep, ep->num_req); submit_req()
734 isp1362_write_ptd(isp1362_hcd, ep, epq); submit_req()
735 __clear_bit(ep->ptd_index, &epq->skip_map); submit_req()
744 struct isp1362_ep *ep; start_atl_transfers() local
752 list_for_each_entry(ep, &isp1362_hcd->async, schedule) { start_atl_transfers()
753 struct urb *urb = get_urb(ep); start_atl_transfers()
756 if (!list_empty(&ep->active)) { start_atl_transfers()
757 DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep); start_atl_transfers()
761 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, start_atl_transfers()
762 ep, ep->num_req); start_atl_transfers()
764 ret = submit_req(isp1362_hcd, urb, ep, epq); start_atl_transfers()
773 defer = ep->nextpid == USB_PID_SETUP; start_atl_transfers()
797 struct isp1362_ep *ep; start_intl_transfers() local
804 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { start_intl_transfers()
805 struct urb *urb = get_urb(ep); start_intl_transfers()
808 if (!list_empty(&ep->active)) { start_intl_transfers()
809 DBG(1, "%s: Skipping active %s ep %p\n", __func__, start_intl_transfers()
810 epq->name, ep); start_intl_transfers()
814 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, start_intl_transfers()
815 epq->name, ep, ep->num_req); start_intl_transfers()
816 ret = submit_req(isp1362_hcd, urb, ep, epq); start_intl_transfers()
839 static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) next_ptd() argument
841 u16 ptd_offset = ep->ptd_offset; next_ptd()
842 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size; next_ptd()
845 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size); next_ptd()
860 struct isp1362_ep *ep; start_iso_transfers() local
875 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) { start_iso_transfers()
876 struct urb *urb = get_urb(ep); start_iso_transfers()
879 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep); start_iso_transfers()
883 finish_request(isp1362_hcd, ep, urb, -EOVERFLOW); start_iso_transfers()
893 prepare_ptd(isp1362_hcd, urb, ep, epq, fno); start_iso_transfers()
894 if (ptd_offset + PTD_HEADER_SIZE + ep->length > start_iso_transfers()
897 __func__, ep->length); start_iso_transfers()
900 ep->ptd_offset = ptd_offset; start_iso_transfers()
901 list_add_tail(&ep->active, &epq->active); start_iso_transfers()
903 ptd_offset = next_ptd(epq, ep); start_iso_transfers()
906 ep->num_req, epq->name); start_iso_transfers()
911 list_for_each_entry(ep, &epq->active, active) { start_iso_transfers()
912 if (epq->active.next == &ep->active) start_iso_transfers()
913 ep->ptd.mps |= PTD_LAST_MSK; start_iso_transfers()
914 isp1362_write_ptd(isp1362_hcd, ep, epq); start_iso_transfers()
938 struct isp1362_ep *ep; finish_transfers() local
949 list_for_each_entry_safe(ep, tmp, &epq->active, active) { finish_transfers()
950 int index = ep->ptd_index; finish_transfers()
953 index, ep->ptd_offset); finish_transfers()
957 isp1362_read_ptd(isp1362_hcd, ep, epq); finish_transfers()
959 BUG_ON(ep->num_ptds == 0); finish_transfers()
960 release_ptd_buffers(epq, ep); finish_transfers()
962 DBG(1, "%s: ep %p req %d removed from active list\n", __func__, finish_transfers()
963 ep, ep->num_req); finish_transfers()
964 if (!list_empty(&ep->remove_list)) { finish_transfers()
965 list_del_init(&ep->remove_list); finish_transfers()
966 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep); finish_transfers()
968 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name, finish_transfers()
969 ep, ep->num_req); finish_transfers()
970 postproc_ep(isp1362_hcd, ep); finish_transfers()
983 struct isp1362_ep *ep; finish_iso_transfers() local
994 list_for_each_entry_safe(ep, tmp, &epq->active, active) { finish_iso_transfers()
995 DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset); finish_iso_transfers()
997 isp1362_read_ptd(isp1362_hcd, ep, epq); finish_iso_transfers()
998 DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep); finish_iso_transfers()
999 postproc_ep(isp1362_hcd, ep); finish_iso_transfers()
1224 struct usb_host_endpoint *hep = urb->ep; isp1362_urb_enqueue()
1225 struct isp1362_ep *ep = NULL; isp1362_urb_enqueue() local
1236 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__, isp1362_urb_enqueue()
1250 ep = kzalloc(sizeof *ep, mem_flags); isp1362_urb_enqueue()
1251 if (!ep) isp1362_urb_enqueue()
1260 kfree(ep); isp1362_urb_enqueue()
1267 kfree(ep); isp1362_urb_enqueue()
1272 ep = hep->hcpriv; isp1362_urb_enqueue()
1274 INIT_LIST_HEAD(&ep->schedule); isp1362_urb_enqueue()
1275 INIT_LIST_HEAD(&ep->active); isp1362_urb_enqueue()
1276 INIT_LIST_HEAD(&ep->remove_list); isp1362_urb_enqueue()
1277 ep->udev = usb_get_dev(udev); isp1362_urb_enqueue()
1278 ep->hep = hep; isp1362_urb_enqueue()
1279 ep->epnum = epnum; isp1362_urb_enqueue()
1280 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); isp1362_urb_enqueue()
1281 ep->ptd_offset = -EINVAL; isp1362_urb_enqueue()
1282 ep->ptd_index = -EINVAL; isp1362_urb_enqueue()
1286 ep->nextpid = USB_PID_SETUP; isp1362_urb_enqueue()
1288 ep->nextpid = USB_PID_OUT; isp1362_urb_enqueue()
1290 ep->nextpid = USB_PID_IN; isp1362_urb_enqueue()
1297 ep->interval = urb->interval; isp1362_urb_enqueue()
1298 ep->branch = PERIODIC_SIZE; isp1362_urb_enqueue()
1299 ep->load = usb_calc_bus_time(udev->speed, !is_out, isp1362_urb_enqueue()
1304 hep->hcpriv = ep; isp1362_urb_enqueue()
1306 ep->num_req = isp1362_hcd->req_serial++; isp1362_urb_enqueue()
1312 if (list_empty(&ep->schedule)) { isp1362_urb_enqueue()
1313 DBG(1, "%s: Adding ep %p req %d to async schedule\n", isp1362_urb_enqueue()
1314 __func__, ep, ep->num_req); isp1362_urb_enqueue()
1315 list_add_tail(&ep->schedule, &isp1362_hcd->async); isp1362_urb_enqueue()
1320 urb->interval = ep->interval; isp1362_urb_enqueue()
1323 if (ep->branch < PERIODIC_SIZE) isp1362_urb_enqueue()
1326 retval = balance(isp1362_hcd, ep->interval, ep->load); isp1362_urb_enqueue()
1331 ep->branch = retval; isp1362_urb_enqueue()
1335 __func__, isp1362_hcd->fmindex, ep->branch, isp1362_urb_enqueue()
1337 ~(PERIODIC_SIZE - 1)) + ep->branch, isp1362_urb_enqueue()
1338 (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch); isp1362_urb_enqueue()
1340 if (list_empty(&ep->schedule)) { isp1362_urb_enqueue()
1344 frame += max_t(u16, 8, ep->interval); isp1362_urb_enqueue()
1345 frame &= ~(ep->interval - 1); isp1362_urb_enqueue()
1346 frame |= ep->branch; isp1362_urb_enqueue()
1348 frame += ep->interval; isp1362_urb_enqueue()
1351 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep); isp1362_urb_enqueue()
1352 list_add_tail(&ep->schedule, &isp1362_hcd->isoc); isp1362_urb_enqueue()
1354 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep); isp1362_urb_enqueue()
1355 list_add_tail(&ep->schedule, &isp1362_hcd->periodic); isp1362_urb_enqueue()
1358 DBG(1, "%s: ep %p already scheduled\n", __func__, ep); isp1362_urb_enqueue()
1361 ep->load / ep->interval, isp1362_hcd->load[ep->branch], isp1362_urb_enqueue()
1362 isp1362_hcd->load[ep->branch] + ep->load); isp1362_urb_enqueue()
1363 isp1362_hcd->load[ep->branch] += ep->load; isp1362_urb_enqueue()
1400 struct isp1362_ep *ep; isp1362_urb_dequeue() local
1417 ep = hep->hcpriv; isp1362_urb_dequeue()
1418 if (ep) { isp1362_urb_dequeue()
1420 if (ep->hep->urb_list.next == &urb->urb_list) { isp1362_urb_dequeue()
1421 if (!list_empty(&ep->active)) { isp1362_urb_dequeue()
1422 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__, isp1362_urb_dequeue()
1423 urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); isp1362_urb_dequeue()
1425 remove_ptd(isp1362_hcd, ep); isp1362_urb_dequeue()
1430 DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep, isp1362_urb_dequeue()
1431 ep->num_req); isp1362_urb_dequeue()
1432 finish_request(isp1362_hcd, ep, urb, status); isp1362_urb_dequeue()
1449 struct isp1362_ep *ep = hep->hcpriv; isp1362_endpoint_disable() local
1453 DBG(1, "%s: ep %p\n", __func__, ep); isp1362_endpoint_disable()
1454 if (!ep) isp1362_endpoint_disable()
1458 if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) { isp1362_endpoint_disable()
1459 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__, isp1362_endpoint_disable()
1460 ep, ep->num_req, ep->ptd_index, ep->ptd_offset); isp1362_endpoint_disable()
1461 remove_ptd(isp1362_hcd, ep); isp1362_endpoint_disable()
1467 while (!list_empty(&ep->active)) isp1362_endpoint_disable()
1470 DBG(1, "%s: Freeing EP %p\n", __func__, ep); isp1362_endpoint_disable()
1472 usb_put_dev(ep->udev); isp1362_endpoint_disable()
1473 kfree(ep); isp1362_endpoint_disable()
2059 struct isp1362_ep *ep; isp1362_show() local
2092 list_for_each_entry(ep, &isp1362_hcd->async, schedule) { isp1362_show()
2095 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum, isp1362_show()
2098 switch (ep->nextpid) { isp1362_show()
2115 s;}), ep->maxpacket) ; isp1362_show()
2116 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) { isp1362_show()
2128 list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { isp1362_show()
2129 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch, isp1362_show()
2130 isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset); isp1362_show()
2132 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", isp1362_show()
2133 ep->interval, ep, isp1362_show()
2134 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", isp1362_show()
2135 ep->udev->devnum, ep->epnum, isp1362_show()
2136 (ep->epnum == 0) ? "" : isp1362_show()
2137 ((ep->nextpid == USB_PID_IN) ? isp1362_show()
2138 "in" : "out"), ep->maxpacket); isp1362_show()
2144 list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) { isp1362_show()
2145 seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", isp1362_show()
2146 ep->interval, ep, isp1362_show()
2147 (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", isp1362_show()
2148 ep->udev->devnum, ep->epnum, isp1362_show()
2149 (ep->epnum == 0) ? "" : isp1362_show()
2150 ((ep->nextpid == USB_PID_IN) ? isp1362_show()
2151 "in" : "out"), ep->maxpacket); isp1362_show()
184 claim_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep, u16 len) claim_ptd_buffers() argument
258 prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb, struct isp1362_ep *ep, struct isp1362_ep_queue *epq, u16 fno) prepare_ptd() argument
711 submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) submit_req() argument
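The isp1362 hits above all orbit one piece of bookkeeping: submit_req() links the endpoint onto its queue's active list, remove_ptd() parks it on the HCD-wide remove_list, and finish_unlinks() later unlinks it from both. Below is a minimal user-space model of that list dance; the intrusive list helpers are re-coded from scratch and struct model_ep is invented for illustration, so nothing here is the driver's actual code.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct model_ep { struct list_head active; struct list_head remove_list; };

int main(void)
{
	struct list_head active_q, remove_q;
	struct model_ep ep;

	list_init(&active_q); list_init(&remove_q);
	list_init(&ep.active); list_init(&ep.remove_list);

	list_add_tail(&ep.active, &active_q);      /* submit_req(): ep goes active */
	list_add_tail(&ep.remove_list, &remove_q); /* remove_ptd(): queued for cleanup */

	/* finish_unlinks(): unlink from both queues */
	if (!list_empty(&ep.active))
		list_del_init(&ep.active);
	list_del_init(&ep.remove_list);

	printf("active_q empty=%d remove_q empty=%d\n",
	       list_empty(&active_q), list_empty(&remove_q));
	return 0;
}

Re-initializing the node on deletion (list_del_init rather than a plain unlink) is what lets the driver safely re-test list_empty(&ep->active) and list_empty(&ep->remove_list) afterwards, as the hits above do.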
H A Disp116x-hcd.c167 struct isp116x_ep *ep; pack_fifo() local
175 for (ep = isp116x->atl_active; ep; ep = ep->active) { pack_fifo()
176 ptd = &ep->ptd; pack_fifo()
178 dump_ptd_out_data(ptd, ep->data); pack_fifo()
185 if (ep->active || (isp116x->atl_last_dir != PTD_DIR_IN)) { pack_fifo()
186 write_ptddata_to_fifo(isp116x, ep->data, ep->length); pack_fifo()
187 buflen -= ALIGN(ep->length, 4); pack_fifo()
199 struct isp116x_ep *ep; unpack_fifo() local
207 for (ep = isp116x->atl_active; ep; ep = ep->active) { unpack_fifo()
208 ptd = &ep->ptd; unpack_fifo()
215 if (ep->active || (isp116x->atl_last_dir == PTD_DIR_IN)) { unpack_fifo()
216 read_ptddata_from_fifo(isp116x, ep->data, ep->length); unpack_fifo()
217 buflen -= ALIGN(ep->length, 4); unpack_fifo()
220 dump_ptd_in_data(ptd, ep->data); unpack_fifo()
232 struct isp116x_ep *ep; preproc_atl_queue() local
237 for (ep = isp116x->atl_active; ep; ep = ep->active) { preproc_atl_queue()
240 BUG_ON(list_empty(&ep->hep->urb_list)); preproc_atl_queue()
241 urb = container_of(ep->hep->urb_list.next, preproc_atl_queue()
243 ptd = &ep->ptd; preproc_atl_queue()
244 len = ep->length; preproc_atl_queue()
245 ep->data = (unsigned char *)urb->transfer_buffer preproc_atl_queue()
248 switch (ep->nextpid) { preproc_atl_queue()
250 toggle = usb_gettoggle(urb->dev, ep->epnum, 0); preproc_atl_queue()
254 toggle = usb_gettoggle(urb->dev, ep->epnum, 1); preproc_atl_queue()
259 ep->data = urb->setup_packet; preproc_atl_queue()
269 ERR("%s %d: ep->nextpid %d\n", __func__, __LINE__, preproc_atl_queue()
270 ep->nextpid); preproc_atl_queue()
275 ptd->mps = PTD_MPS(ep->maxpacket) preproc_atl_queue()
277 | PTD_EP(ep->epnum); preproc_atl_queue()
280 if (!ep->active) { preproc_atl_queue()
293 static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep,
299 ep->error_count = 0;
302 ep->nextpid = USB_PID_SETUP;
312 if (!list_empty(&ep->hep->urb_list))
316 if (!list_empty(&ep->schedule)) {
317 list_del_init(&ep->schedule);
322 DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
323 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
327 while (*prev && ((temp = *prev) != ep))
330 *prev = ep->next;
331 isp116x->load[i] -= ep->load;
333 ep->branch = PERIODIC_SIZE;
335 ep->load / ep->period;
349 struct isp116x_ep *ep; postproc_atl_queue() local
357 for (ep = isp116x->atl_active; ep; ep = ep->active) { postproc_atl_queue()
358 BUG_ON(list_empty(&ep->hep->urb_list)); postproc_atl_queue()
360 container_of(ep->hep->urb_list.next, struct urb, urb_list); postproc_atl_queue()
362 ptd = &ep->ptd; postproc_atl_queue()
379 ep->error_count = 1; postproc_atl_queue()
380 usb_settoggle(udev, ep->epnum, postproc_atl_queue()
381 ep->nextpid == USB_PID_OUT, postproc_atl_queue()
390 && (++ep->error_count >= 3 || cc == TD_CC_STALL postproc_atl_queue()
393 if (ep->nextpid == USB_PID_ACK) postproc_atl_queue()
394 ep->nextpid = 0; postproc_atl_queue()
407 if (ep->error_count postproc_atl_queue()
409 ep->error_count = 0; postproc_atl_queue()
413 if (ep->nextpid == USB_PID_OUT) postproc_atl_queue()
414 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd) postproc_atl_queue()
415 ^ (ep->error_count > 0)); postproc_atl_queue()
416 else if (ep->nextpid == USB_PID_IN) postproc_atl_queue()
417 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd) postproc_atl_queue()
418 ^ (ep->error_count > 0)); postproc_atl_queue()
420 switch (ep->nextpid) { postproc_atl_queue()
432 && ep->nextpid == USB_PID_OUT postproc_atl_queue()
433 && !(PTD_GET_COUNT(ptd) % ep->maxpacket)) { postproc_atl_queue()
440 ep->nextpid = USB_PID_ACK; postproc_atl_queue()
449 ep->nextpid = USB_PID_ACK; postproc_atl_queue()
452 ep->nextpid = USB_PID_OUT; postproc_atl_queue()
455 ep->nextpid = USB_PID_IN; postproc_atl_queue()
463 ep->nextpid = 0; postproc_atl_queue()
471 finish_request(isp116x, ep, urb, status); postproc_atl_queue()
481 struct isp116x_ep *last_ep = NULL, *ep; start_atl_transfers() local
516 list_for_each_entry(ep, &isp116x->async, schedule) { start_atl_transfers()
517 urb = container_of(ep->hep->urb_list.next, start_atl_transfers()
523 if (ep->nextpid == USB_PID_SETUP) { start_atl_transfers()
525 } else if (ep->nextpid == USB_PID_ACK) { start_atl_transfers()
541 len -= len % ep->maxpacket; start_atl_transfers()
554 ep->active = NULL; start_atl_transfers()
555 ep->length = len; start_atl_transfers()
557 last_ep->active = ep; start_atl_transfers()
559 isp116x->atl_active = ep; start_atl_transfers()
560 last_ep = ep; start_atl_transfers()
690 struct usb_host_endpoint *hep = urb->ep; isp116x_urb_enqueue()
691 struct isp116x_ep *ep = NULL; isp116x_urb_enqueue() local
705 ep = kzalloc(sizeof *ep, mem_flags); isp116x_urb_enqueue()
706 if (!ep) isp116x_urb_enqueue()
712 kfree(ep); isp116x_urb_enqueue()
718 kfree(ep); isp116x_urb_enqueue()
723 ep = hep->hcpriv; isp116x_urb_enqueue()
725 INIT_LIST_HEAD(&ep->schedule); isp116x_urb_enqueue()
726 ep->udev = udev; isp116x_urb_enqueue()
727 ep->epnum = epnum; isp116x_urb_enqueue()
728 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); isp116x_urb_enqueue()
732 ep->nextpid = USB_PID_SETUP; isp116x_urb_enqueue()
734 ep->nextpid = USB_PID_OUT; isp116x_urb_enqueue()
736 ep->nextpid = USB_PID_IN; isp116x_urb_enqueue()
754 ep->period = urb->interval >> 1; isp116x_urb_enqueue()
755 ep->branch = PERIODIC_SIZE; isp116x_urb_enqueue()
756 ep->load = usb_calc_bus_time(udev->speed, isp116x_urb_enqueue()
763 hep->hcpriv = ep; isp116x_urb_enqueue()
764 ep->hep = hep; isp116x_urb_enqueue()
771 if (list_empty(&ep->schedule)) isp116x_urb_enqueue()
772 list_add_tail(&ep->schedule, &isp116x->async); isp116x_urb_enqueue()
775 urb->interval = ep->period; isp116x_urb_enqueue()
776 ep->length = min_t(u32, ep->maxpacket, isp116x_urb_enqueue()
780 if (ep->branch < PERIODIC_SIZE) isp116x_urb_enqueue()
783 ep->branch = ret = balance(isp116x, ep->period, ep->load); isp116x_urb_enqueue()
789 + ep->branch; isp116x_urb_enqueue()
794 DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); isp116x_urb_enqueue()
795 for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { isp116x_urb_enqueue()
799 while (here && ep != here) { isp116x_urb_enqueue()
800 if (ep->period > here->period) isp116x_urb_enqueue()
805 if (ep != here) { isp116x_urb_enqueue()
806 ep->next = here; isp116x_urb_enqueue()
807 *prev = ep; isp116x_urb_enqueue()
809 isp116x->load[i] += ep->load; isp116x_urb_enqueue()
811 hcd->self.bandwidth_allocated += ep->load / ep->period; isp116x_urb_enqueue()
841 struct isp116x_ep *ep, *ep_act; isp116x_urb_dequeue() local
851 ep = hep->hcpriv; isp116x_urb_dequeue()
852 WARN_ON(hep != ep->hep); isp116x_urb_dequeue()
855 if (ep->hep->urb_list.next == &urb->urb_list) isp116x_urb_dequeue()
859 if (ep_act == ep) { isp116x_urb_dequeue()
867 finish_request(isp116x, ep, urb, status); isp116x_urb_dequeue()
877 struct isp116x_ep *ep = hep->hcpriv; isp116x_endpoint_disable() local
879 if (!ep) isp116x_endpoint_disable()
886 WARNING("ep %p not empty?\n", ep); isp116x_endpoint_disable()
888 kfree(ep); isp116x_endpoint_disable()
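The branch/load arithmetic in finish_request() and isp116x_urb_enqueue() above implements periodic bandwidth allocation: each interrupt endpoint claims every period-th slot of a PERIODIC_SIZE table, and balance() picks the starting slot. Here is a sketch of that selection under assumed numbers; PERIODIC_SIZE, MAX_LOAD, and the exact tie-breaking are stand-ins, not the driver's values.

#include <stdio.h>

#define PERIODIC_SIZE 32
#define MAX_LOAD      900   /* hypothetical per-slot budget */

static int balance(const int load[PERIODIC_SIZE], int period, int ep_load)
{
	int i, branch = -1;

	/* least-loaded start slot whose whole column still fits the budget */
	for (i = 0; i < period; i++) {
		if (branch < 0 || load[i] < load[branch]) {
			int j;

			for (j = i; j < PERIODIC_SIZE; j += period)
				if (load[j] + ep_load > MAX_LOAD)
					break;
			if (j >= PERIODIC_SIZE)
				branch = i;
		}
	}
	return branch;
}

int main(void)
{
	int load[PERIODIC_SIZE] = { 850 };  /* slot 0 nearly full, rest idle */

	printf("branch = %d\n", balance(load, 4, 100));  /* prints 1 */
	return 0;
}

Once a branch is chosen, the enqueue path above adds ep->load to load[i] for every i in that column, and finish_request() subtracts it again on descheduling.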
H A Dimx21-hcd.c202 etd->ep = NULL; reset_etd()
365 struct usb_host_endpoint *ep) alloc_dmem()
393 area->ep = ep; alloc_dmem()
453 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep); free_dmem()
461 static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep) free_epdmem() argument
466 if (area->ep == ep) { free_epdmem()
468 "Active DMEM %d for disabled ep=%p\n", free_epdmem()
469 area->offset, ep); free_epdmem()
511 if (list_empty(&ep_priv->ep->urb_list)) { ep_idle()
512 dev_err(imx21->dev, "No urb for queued ep!\n"); ep_idle()
516 &ep_priv->ep->urb_list, struct urb, urb_list)); ep_idle()
525 struct ep_priv *ep_priv = urb->ep->hcpriv;
538 if (list_empty(&ep_priv->ep->urb_list))
545 struct usb_host_endpoint *ep = etd->ep; nonisoc_urb_completed_for_etd() local
550 if (!list_empty(&ep->urb_list)) { nonisoc_urb_completed_for_etd()
552 &ep->urb_list, struct urb, urb_list); nonisoc_urb_completed_for_etd()
565 struct usb_host_endpoint *ep) schedule_isoc_etds()
568 struct ep_priv *ep_priv = ep->hcpriv; schedule_isoc_etds()
609 etd->ep = td->ep; schedule_isoc_etds()
639 struct usb_host_endpoint *ep = etd->ep; isoc_etd_done() local
685 etd->ep = NULL; isoc_etd_done()
690 schedule_isoc_etds(hcd, ep); isoc_etd_done()
694 struct imx21 *imx21, struct usb_host_endpoint *ep) alloc_isoc_ep()
707 ep_priv->ep = ep; alloc_isoc_ep()
708 ep->hcpriv = ep_priv; alloc_isoc_ep()
725 imx21->etd[etd_num].ep = ep_priv->ep; alloc_isoc_etds()
740 struct usb_host_endpoint *ep, imx21_hc_urb_enqueue_isoc()
766 if (ep->hcpriv == NULL) { imx21_hc_urb_enqueue_isoc()
767 ep_priv = alloc_isoc_ep(imx21, ep); imx21_hc_urb_enqueue_isoc()
773 ep_priv = ep->hcpriv; imx21_hc_urb_enqueue_isoc()
788 urb_priv->ep = ep; imx21_hc_urb_enqueue_isoc()
804 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep); imx21_hc_urb_enqueue_isoc()
847 td->ep = ep; imx21_hc_urb_enqueue_isoc()
861 schedule_isoc_etds(hcd, ep); imx21_hc_urb_enqueue_isoc()
915 struct ep_priv *ep_priv = urb_priv->ep->hcpriv; schedule_nonisoc_etd()
984 etd->ep = urb_priv->ep; schedule_nonisoc_etd()
1019 etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); schedule_nonisoc_etd()
1168 struct usb_host_endpoint *ep = urb->ep; imx21_hc_urb_enqueue() local
1176 "enqueue urb=%p ep=%p len=%d " imx21_hc_urb_enqueue()
1178 urb, ep, imx21_hc_urb_enqueue()
1184 return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags); imx21_hc_urb_enqueue()
1192 ep_priv = ep->hcpriv; imx21_hc_urb_enqueue()
1199 ep->hcpriv = ep_priv; imx21_hc_urb_enqueue()
1200 ep_priv->ep = ep; imx21_hc_urb_enqueue()
1211 urb_priv->ep = ep; imx21_hc_urb_enqueue()
1265 struct usb_host_endpoint *ep; imx21_hc_urb_dequeue() local
1278 ep = urb_priv->ep; imx21_hc_urb_dequeue()
1279 ep_priv = ep->hcpriv; imx21_hc_urb_dequeue()
1285 schedule_isoc_etds(hcd, ep); imx21_hc_urb_dequeue()
1365 "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n", process_etds()
1385 if (etd->ep == NULL || etd->urb == NULL) { process_etds()
1388 " ep=%p urb=%p\n", process_etds()
1389 etd_num, etd->ep, etd->urb); process_etds()
1429 struct usb_host_endpoint *ep) imx21_hc_endpoint_disable()
1436 if (ep == NULL) imx21_hc_endpoint_disable()
1440 ep_priv = ep->hcpriv; imx21_hc_endpoint_disable()
1441 dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv); imx21_hc_endpoint_disable()
1443 if (!list_empty(&ep->urb_list)) imx21_hc_endpoint_disable()
1444 dev_dbg(imx21->dev, "ep's URB list is not empty\n"); imx21_hc_endpoint_disable()
1455 ep->hcpriv = NULL; imx21_hc_endpoint_disable()
1459 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) { imx21_hc_endpoint_disable()
1461 "Active etd %d for disabled ep=%p!\n", i, ep); imx21_hc_endpoint_disable()
1465 free_epdmem(imx21, ep); imx21_hc_endpoint_disable()
364 alloc_dmem(struct imx21 *imx21, unsigned int size, struct usb_host_endpoint *ep) alloc_dmem() argument
564 schedule_isoc_etds(struct usb_hcd *hcd, struct usb_host_endpoint *ep) schedule_isoc_etds() argument
693 alloc_isoc_ep( struct imx21 *imx21, struct usb_host_endpoint *ep) alloc_isoc_ep() argument
739 imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct urb *urb, gfp_t mem_flags) imx21_hc_urb_enqueue_isoc() argument
1428 imx21_hc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) imx21_hc_endpoint_disable() argument
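imx21_hc_endpoint_disable() above cross-checks that no ETD and no DMEM area still points at the endpoint being torn down, warning if one does. A small user-space model of that leak scan follows; the table size, field names, and struct layout are invented for illustration.

#include <stdio.h>

#define NUM_ETD 8   /* invented size */

struct etd { int alloc; void *ep; };

static void warn_active_etds(struct etd *etds, void *ep)
{
	int i;

	for (i = 0; i < NUM_ETD; i++)
		if (etds[i].alloc && etds[i].ep == ep)
			fprintf(stderr, "Active etd %d for disabled ep=%p!\n",
				i, ep);
}

int main(void)
{
	struct etd etds[NUM_ETD] = {{ 0 }};
	int token;

	etds[3].alloc = 1;
	etds[3].ep = &token;        /* simulate a leaked transfer descriptor */
	warn_active_etds(etds, &token);
	return 0;
}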
H A Dxhci-ring.c331 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_ring_ep_doorbell() local
332 unsigned int ep_state = ep->ep_state; xhci_ring_ep_doorbell()
355 struct xhci_virt_ep *ep; ring_doorbell_for_active_rings() local
357 ep = &xhci->devs[slot_id]->eps[ep_index]; ring_doorbell_for_active_rings()
360 if (!(ep->ep_state & EP_HAS_STREAMS)) { ring_doorbell_for_active_rings()
361 if (ep->ring && !(list_empty(&ep->ring->td_list))) ring_doorbell_for_active_rings()
366 for (stream_id = 1; stream_id < ep->stream_info->num_streams; ring_doorbell_for_active_rings()
368 struct xhci_stream_info *stream_info = ep->stream_info; ring_doorbell_for_active_rings()
379 struct xhci_virt_ep *ep; xhci_triad_to_transfer_ring() local
381 ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_triad_to_transfer_ring()
383 if (!(ep->ep_state & EP_HAS_STREAMS)) xhci_triad_to_transfer_ring()
384 return ep->ring; xhci_triad_to_transfer_ring()
388 "WARN: Slot ID %u, ep index %u has streams, " xhci_triad_to_transfer_ring()
394 if (stream_id < ep->stream_info->num_streams) xhci_triad_to_transfer_ring()
395 return ep->stream_info->stream_rings[stream_id]; xhci_triad_to_transfer_ring()
398 "WARN: Slot ID %u, ep index %u has " xhci_triad_to_transfer_ring()
402 ep->stream_info->num_streams - 1, xhci_triad_to_transfer_ring()
415 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); xhci_urb_to_transfer_ring()
442 struct xhci_virt_ep *ep = &dev->eps[ep_index]; xhci_find_new_dequeue_state() local
460 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_find_new_dequeue_state()
464 if (ep->ep_state & EP_HAS_STREAMS) { xhci_find_new_dequeue_state()
466 &ep->stream_info->stream_ctx_array[stream_id]; xhci_find_new_dequeue_state()
502 if (new_deq == ep->ring->dequeue) { xhci_find_new_dequeue_state()
584 struct xhci_virt_ep *ep) xhci_stop_watchdog_timer_in_irq()
586 ep->ep_state &= ~EP_HALT_PENDING; xhci_stop_watchdog_timer_in_irq()
591 if (del_timer(&ep->stop_cmd_timer)) xhci_stop_watchdog_timer_in_irq()
592 ep->stop_cmds_pending--; xhci_stop_watchdog_timer_in_irq()
641 struct xhci_virt_ep *ep; xhci_handle_cmd_stop_ep() local
658 ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_handle_cmd_stop_ep()
660 if (list_empty(&ep->cancelled_td_list)) { xhci_handle_cmd_stop_ep()
661 xhci_stop_watchdog_timer_in_irq(xhci, ep); xhci_handle_cmd_stop_ep()
662 ep->stopped_td = NULL; xhci_handle_cmd_stop_ep()
667 /* Fix up the ep ring first, so HW stops executing cancelled TDs. xhci_handle_cmd_stop_ep()
672 list_for_each(entry, &ep->cancelled_td_list) { xhci_handle_cmd_stop_ep()
701 if (cur_td == ep->stopped_td) xhci_handle_cmd_stop_ep()
716 xhci_stop_watchdog_timer_in_irq(xhci, ep); xhci_handle_cmd_stop_ep()
721 ep->stopped_td->urb->stream_id, &deq_state); xhci_handle_cmd_stop_ep()
728 ep->stopped_td = NULL; xhci_handle_cmd_stop_ep()
737 cur_td = list_entry(ep->cancelled_td_list.next, xhci_handle_cmd_stop_ep()
775 struct xhci_virt_ep *ep; xhci_kill_endpoint_urbs() local
778 ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_kill_endpoint_urbs()
779 if ((ep->ep_state & EP_HAS_STREAMS) || xhci_kill_endpoint_urbs()
780 (ep->ep_state & EP_GETTING_NO_STREAMS)) { xhci_kill_endpoint_urbs()
783 for (stream_id = 0; stream_id < ep->stream_info->num_streams; xhci_kill_endpoint_urbs()
786 "Killing URBs for slot ID %u, ep index %u, stream %u", xhci_kill_endpoint_urbs()
789 ep->stream_info->stream_rings[stream_id]); xhci_kill_endpoint_urbs()
792 ring = ep->ring; xhci_kill_endpoint_urbs()
796 "Killing URBs for slot ID %u, ep index %u", xhci_kill_endpoint_urbs()
800 while (!list_empty(&ep->cancelled_td_list)) { xhci_kill_endpoint_urbs()
801 cur_td = list_first_entry(&ep->cancelled_td_list, xhci_kill_endpoint_urbs()
830 struct xhci_virt_ep *ep; xhci_stop_endpoint_command_watchdog() local
834 ep = (struct xhci_virt_ep *) arg; xhci_stop_endpoint_command_watchdog()
835 xhci = ep->xhci; xhci_stop_endpoint_command_watchdog()
839 ep->stop_cmds_pending--; xhci_stop_endpoint_command_watchdog()
847 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { xhci_stop_endpoint_command_watchdog()
960 struct xhci_virt_ep *ep; xhci_handle_cmd_set_deq() local
967 ep = &dev->eps[ep_index]; xhci_handle_cmd_set_deq()
989 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); xhci_handle_cmd_set_deq()
1016 if (ep->ep_state & EP_HAS_STREAMS) { xhci_handle_cmd_set_deq()
1018 &ep->stream_info->stream_ctx_array[stream_id]; xhci_handle_cmd_set_deq()
1025 if (xhci_trb_virt_to_dma(ep->queued_deq_seg, xhci_handle_cmd_set_deq()
1026 ep->queued_deq_ptr) == deq) { xhci_handle_cmd_set_deq()
1034 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", xhci_handle_cmd_set_deq()
1035 ep->queued_deq_seg, ep->queued_deq_ptr); xhci_handle_cmd_set_deq()
1057 "Ignoring reset ep completion code of %u", cmd_comp_code); xhci_handle_cmd_reset_ep()
1067 xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n"); xhci_handle_cmd_reset_ep()
1146 "Completed config ep cmd - " xhci_handle_cmd_config_ep()
1147 "last ep index = %d, state = %d", xhci_handle_cmd_config_ep()
1739 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_cleanup_halted_endpoint() local
1745 ep->ep_state |= EP_HALTED; xhci_cleanup_halted_endpoint()
1746 ep->stopped_stream = stream_id; xhci_cleanup_halted_endpoint()
1751 ep->stopped_stream = 0; xhci_cleanup_halted_endpoint()
1803 struct xhci_virt_ep *ep, int *status, bool skip) finish_td()
1818 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); finish_td()
1832 ep->stopped_td = td; finish_td()
1898 struct xhci_virt_ep *ep, int *status) process_ctrl_td()
1910 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); process_ctrl_td()
1941 return finish_td(xhci, td, event_trb, event, ep, status, false); process_ctrl_td()
1950 return finish_td(xhci, td, event_trb, event, ep, status, false); process_ctrl_td()
1969 return finish_td(xhci, td, event_trb, event, ep, status, false); process_ctrl_td()
2008 return finish_td(xhci, td, event_trb, event, ep, status, false); process_ctrl_td()
2016 struct xhci_virt_ep *ep, int *status) process_isoc_td()
2028 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); process_isoc_td()
2101 return finish_td(xhci, td, event_trb, event, ep, status, false); process_isoc_td()
2106 struct xhci_virt_ep *ep, int *status) skip_isoc_td()
2113 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); skip_isoc_td()
2129 return finish_td(xhci, td, NULL, event, ep, status, true); skip_isoc_td()
2137 struct xhci_virt_ep *ep, int *status) process_bulk_intr_td()
2144 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); process_bulk_intr_td()
2176 xhci_dbg(xhci, "ep %#x - asked for %d bytes, " process_bulk_intr_td()
2178 td->urb->ep->desc.bEndpointAddress, process_bulk_intr_td()
2248 return finish_td(xhci, td, event_trb, event, ep, status, false); process_bulk_intr_td()
2262 struct xhci_virt_ep *ep; variable in typeref:struct:xhci_virt_ep
2299 ep = &xdev->eps[ep_index];
2300 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2320 /* Count current td numbers if ep->skip is set */
2321 if (ep->skip) {
2354 ep->ep_state |= EP_HALTED;
2388 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2396 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2412 ep->skip = true;
2416 ep->skip = true;
2441 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2449 if (ep->skip) {
2450 ep->skip = false;
2458 /* We've skipped all the TDs on the ep ring when ep->skip set */
2458 /* We've skipped all the TDs on the ep ring when ep->skip is set */
2459 if (ep->skip && td_num == 0) {
2460 ep->skip = false;
2468 if (ep->skip)
2490 if (!ep->skip ||
2491 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2514 ret = skip_isoc_td(xhci, td, event, ep, &status);
2522 if (ep->skip) {
2524 ep->skip = false;
2544 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2545 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2547 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2548 ret = process_isoc_td(xhci, td, event_trb, event, ep,
2552 ep, &status);
2557 handling_skipped_tds = ep->skip &&
2579 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2596 * If ep->skip is set, it means there are missed tds on the
2816 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); prepare_ring()
2819 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); prepare_ring()
2829 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); prepare_ring()
2847 "ERROR no room on ep ring, try ring expansion"); prepare_ring()
2976 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " check_trb_math()
2978 urb->ep->desc.bEndpointAddress, num_trbs); check_trb_math()
2980 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " check_trb_math()
2983 urb->ep->desc.bEndpointAddress, check_trb_math()
3071 maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); xhci_td_remainder()
3111 usb_endpoint_maxp(&urb->ep->desc)); queue_bulk_sg_tx()
3331 usb_endpoint_maxp(&urb->ep->desc)); xhci_queue_bulk_tx()
3563 max_burst = urb->ep->ss_ep_comp.bMaxBurst; xhci_get_burst_count()
3588 max_burst = urb->ep->ss_ep_comp.bMaxBurst; xhci_get_last_burst_packet_count()
3739 usb_endpoint_maxp(&urb->ep->desc))); xhci_queue_isoc_tx()
4127 struct xhci_virt_ep *ep; xhci_queue_new_dequeue_state() local
4148 ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_queue_new_dequeue_state()
4149 if ((ep->ep_state & SET_DEQ_PENDING)) { xhci_queue_new_dequeue_state()
4162 ep->queued_deq_seg = deq_state->new_deq_seg; xhci_queue_new_dequeue_state()
4163 ep->queued_deq_ptr = deq_state->new_deq_ptr; xhci_queue_new_dequeue_state()
4180 ep->ep_state |= SET_DEQ_PENDING; xhci_queue_new_dequeue_state()
583 xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) xhci_stop_watchdog_timer_in_irq() argument
1801 finish_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *event_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status, bool skip) finish_td() argument
1896 process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *event_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) process_ctrl_td() argument
2014 process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *event_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) process_isoc_td() argument
2104 skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) skip_isoc_td() argument
2135 process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, union xhci_trb *event_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) process_bulk_intr_td() argument
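Several xhci-ring.c hits above (xhci_triad_to_transfer_ring(), ring_doorbell_for_active_rings()) show the same branching: an endpoint without streams has exactly one ring, while a streams endpoint indexes stream_rings[] and must reject stream ID 0 and out-of-range IDs. A standalone sketch of that selection, using stand-in types rather than the real xhci structures:

#include <stdio.h>

#define EP_HAS_STREAMS 0x1

struct ring { int id; };
struct stream_info { unsigned int num_streams; struct ring **stream_rings; };
struct virt_ep {
	unsigned int ep_state;
	struct ring *ring;
	struct stream_info *stream_info;
};

static struct ring *transfer_ring(struct virt_ep *ep, unsigned int stream_id)
{
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;       /* ordinary endpoint: one ring */
	if (stream_id == 0)
		return NULL;           /* streams configured but no id given */
	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];
	return NULL;                   /* id past the allocated array */
}

int main(void)
{
	struct ring r1 = { 1 }, r2 = { 2 };
	struct ring *rings[3] = { NULL, &r1, &r2 };
	struct stream_info si = { 3, rings };
	struct virt_ep ep = { EP_HAS_STREAMS, NULL, &si };

	printf("stream 2 -> ring %d\n", transfer_ring(&ep, 2)->id);
	printf("stream 7 -> %p\n", (void *)transfer_ring(&ep, 7));
	return 0;
}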
H A Dxhci-mem.c561 /* increment ep index by offset of start of ep ctx array */ xhci_get_ep_ctx()
620 struct xhci_virt_ep *ep, xhci_dma_to_transfer_ring()
623 if (ep->ep_state & EP_HAS_STREAMS) xhci_dma_to_transfer_ring()
624 return radix_tree_lookup(&ep->stream_info->trb_address_map, xhci_dma_to_transfer_ring()
626 return ep->ring; xhci_dma_to_transfer_ring()
634 struct xhci_virt_ep *ep = &dev->eps[ep_index]; xhci_stream_id_to_ring() local
637 return ep->ring; xhci_stream_id_to_ring()
638 if (!ep->stream_info) xhci_stream_id_to_ring()
641 if (stream_id > ep->stream_info->num_streams) xhci_stream_id_to_ring()
643 return ep->stream_info->stream_rings[stream_id]; xhci_stream_id_to_ring()
788 struct xhci_virt_ep *ep) xhci_setup_no_streams_ep_input_ctx()
792 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); xhci_setup_no_streams_ep_input_ctx()
793 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); xhci_setup_no_streams_ep_input_ctx()
833 struct xhci_virt_ep *ep) xhci_init_endpoint_timer()
835 setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog, xhci_init_endpoint_timer()
836 (unsigned long)ep); xhci_init_endpoint_timer()
837 ep->xhci = xhci; xhci_init_endpoint_timer()
1000 /* Initialize the cancellation list and watchdog timers for each ep */ xhci_alloc_virt_device()
1211 struct usb_host_endpoint *ep) xhci_parse_exponent_interval()
1215 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; xhci_parse_exponent_interval()
1216 if (interval != ep->desc.bInterval - 1) xhci_parse_exponent_interval()
1218 "ep %#x - rounding interval to %d %sframes\n", xhci_parse_exponent_interval()
1219 ep->desc.bEndpointAddress, xhci_parse_exponent_interval()
1240 struct usb_host_endpoint *ep, unsigned int desc_interval, xhci_microframes_to_exponent()
1249 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", xhci_microframes_to_exponent()
1250 ep->desc.bEndpointAddress, xhci_microframes_to_exponent()
1258 struct usb_host_endpoint *ep) xhci_parse_microframe_interval()
1260 if (ep->desc.bInterval == 0) xhci_parse_microframe_interval()
1262 return xhci_microframes_to_exponent(udev, ep, xhci_parse_microframe_interval()
1263 ep->desc.bInterval, 0, 15); xhci_parse_microframe_interval()
1268 struct usb_host_endpoint *ep) xhci_parse_frame_interval()
1270 return xhci_microframes_to_exponent(udev, ep, xhci_parse_frame_interval()
1271 ep->desc.bInterval * 8, 3, 10); xhci_parse_frame_interval()
1283 struct usb_host_endpoint *ep) xhci_get_endpoint_interval()
1290 if (usb_endpoint_xfer_control(&ep->desc) || xhci_get_endpoint_interval()
1291 usb_endpoint_xfer_bulk(&ep->desc)) { xhci_get_endpoint_interval()
1292 interval = xhci_parse_microframe_interval(udev, ep); xhci_get_endpoint_interval()
1298 if (usb_endpoint_xfer_int(&ep->desc) || xhci_get_endpoint_interval()
1299 usb_endpoint_xfer_isoc(&ep->desc)) { xhci_get_endpoint_interval()
1300 interval = xhci_parse_exponent_interval(udev, ep); xhci_get_endpoint_interval()
1305 if (usb_endpoint_xfer_isoc(&ep->desc)) { xhci_get_endpoint_interval()
1306 interval = xhci_parse_exponent_interval(udev, ep); xhci_get_endpoint_interval()
1316 if (usb_endpoint_xfer_int(&ep->desc) || xhci_get_endpoint_interval()
1317 usb_endpoint_xfer_isoc(&ep->desc)) { xhci_get_endpoint_interval()
1319 interval = xhci_parse_frame_interval(udev, ep); xhci_get_endpoint_interval()
1335 struct usb_host_endpoint *ep) xhci_get_endpoint_mult()
1338 !usb_endpoint_xfer_isoc(&ep->desc)) xhci_get_endpoint_mult()
1340 return ep->ss_ep_comp.bmAttributes; xhci_get_endpoint_mult()
1343 static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep) xhci_get_endpoint_type() argument
1348 in = usb_endpoint_dir_in(&ep->desc); xhci_get_endpoint_type()
1349 if (usb_endpoint_xfer_control(&ep->desc)) { xhci_get_endpoint_type()
1351 } else if (usb_endpoint_xfer_bulk(&ep->desc)) { xhci_get_endpoint_type()
1356 } else if (usb_endpoint_xfer_isoc(&ep->desc)) { xhci_get_endpoint_type()
1361 } else if (usb_endpoint_xfer_int(&ep->desc)) { xhci_get_endpoint_type()
1377 struct usb_host_endpoint *ep) xhci_get_max_esit_payload()
1383 if (usb_endpoint_xfer_control(&ep->desc) || xhci_get_max_esit_payload()
1384 usb_endpoint_xfer_bulk(&ep->desc)) xhci_get_max_esit_payload()
1388 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); xhci_get_max_esit_payload()
1390 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); xhci_get_max_esit_payload()
1391 max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; xhci_get_max_esit_payload()
1402 struct usb_host_endpoint *ep, xhci_endpoint_init()
1414 ep_index = xhci_get_endpoint_index(&ep->desc); xhci_endpoint_init()
1417 endpoint_type = xhci_get_endpoint_type(ep); xhci_endpoint_init()
1422 type = usb_endpoint_type(&ep->desc); xhci_endpoint_init()
1441 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep) xhci_endpoint_init()
1442 | EP_MULT(xhci_get_endpoint_mult(udev, ep))); xhci_endpoint_init()
1444 /* FIXME dig Mult and streams info out of ep companion desc */ xhci_endpoint_init()
1449 if (!usb_endpoint_xfer_isoc(&ep->desc)) xhci_endpoint_init()
1455 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); xhci_endpoint_init()
1459 /* dig out max burst from ep companion desc */ xhci_endpoint_init()
1460 max_burst = ep->ss_ep_comp.bMaxBurst; xhci_endpoint_init()
1464 if (usb_endpoint_xfer_bulk(&ep->desc)) xhci_endpoint_init()
1469 if (usb_endpoint_xfer_isoc(&ep->desc) || xhci_endpoint_init()
1470 usb_endpoint_xfer_int(&ep->desc)) { xhci_endpoint_init()
1471 max_burst = (usb_endpoint_maxp(&ep->desc) xhci_endpoint_init()
1483 max_esit_payload = xhci_get_max_esit_payload(udev, ep); xhci_endpoint_init()
1504 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100) xhci_endpoint_init()
1516 struct usb_host_endpoint *ep) xhci_endpoint_zero()
1521 ep_index = xhci_get_endpoint_index(&ep->desc); xhci_endpoint_zero()
1822 struct list_head *ep = &bwt->interval_bw[j].endpoints; xhci_mem_cleanup() local
1823 while (!list_empty(ep)) xhci_mem_cleanup()
1824 list_del_init(ep->next); xhci_mem_cleanup()
619 xhci_dma_to_transfer_ring( struct xhci_virt_ep *ep, u64 address) xhci_dma_to_transfer_ring() argument
787 xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx, struct xhci_virt_ep *ep) xhci_setup_no_streams_ep_input_ctx() argument
832 xhci_init_endpoint_timer(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) xhci_init_endpoint_timer() argument
1210 xhci_parse_exponent_interval(struct usb_device *udev, struct usb_host_endpoint *ep) xhci_parse_exponent_interval() argument
1239 xhci_microframes_to_exponent(struct usb_device *udev, struct usb_host_endpoint *ep, unsigned int desc_interval, unsigned int min_exponent, unsigned int max_exponent) xhci_microframes_to_exponent() argument
1257 xhci_parse_microframe_interval(struct usb_device *udev, struct usb_host_endpoint *ep) xhci_parse_microframe_interval() argument
1267 xhci_parse_frame_interval(struct usb_device *udev, struct usb_host_endpoint *ep) xhci_parse_frame_interval() argument
1282 xhci_get_endpoint_interval(struct usb_device *udev, struct usb_host_endpoint *ep) xhci_get_endpoint_interval() argument
1334 xhci_get_endpoint_mult(struct usb_device *udev, struct usb_host_endpoint *ep) xhci_get_endpoint_mult() argument
1376 xhci_get_max_esit_payload(struct usb_device *udev, struct usb_host_endpoint *ep) xhci_get_max_esit_payload() argument
1399 xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags) xhci_endpoint_init() argument
1514 xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep) xhci_endpoint_zero() argument
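The interval helpers above convert a descriptor's bInterval into the exponent stored in the endpoint context: clamp_val(bInterval, 1, 16) - 1 for high-speed periodic endpoints, and a times-8 scaling with power-of-two rounding (bounds 3..10) for full-speed interrupt endpoints. A worked model follows; fls() and clamp_val() are re-coded for a standalone build, and the round-down-via-fls step is an assumption inferred from how the quoted bounds are used.

#include <stdio.h>

static unsigned int clampv(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* position of the highest set bit, 1-based, like the kernel's fls() */
static unsigned int fls_u(unsigned int v)
{
	unsigned int r = 0;

	while (v) {
		r++;
		v >>= 1;
	}
	return r;
}

/* high-speed int/isoc: context stores bInterval - 1, clamped to 0..15 */
static unsigned int exponent_interval(unsigned int bInterval)
{
	return clampv(bInterval, 1, 16) - 1;
}

/* full-speed int: frames * 8 microframes, rounded down to 2^n, n in 3..10 */
static unsigned int frames_to_exponent(unsigned int bInterval)
{
	return clampv(fls_u(bInterval * 8) - 1, 3, 10);
}

int main(void)
{
	printf("HS bInterval 4  -> 2^%u microframes\n", exponent_interval(4));
	printf("FS bInterval 10 -> 2^%u microframes\n", frames_to_exponent(10));
	return 0;
}

The second case rounds 10 frames (80 microframes) down to 2^6 = 64 microframes, which is the kind of adjustment the "rounding interval" debug messages above report.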
H A Dmax3421-hcd.c166 struct usb_host_endpoint *ep; member in struct:max3421_ep
169 u16 last_active; /* frame # this ep was last active */
320 #define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
322 #define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
323 #define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
324 #define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
610 max3421_ep = urb->ep->hcpriv; max3421_next_transfer()
667 struct usb_host_endpoint *ep; max3421_select_and_start_urb() local
680 ep = max3421_ep->ep; max3421_select_and_start_urb()
682 switch (usb_endpoint_type(&ep->desc)) { max3421_select_and_start_urb()
698 if (list_empty(&ep->urb_list)) max3421_select_and_start_urb()
700 urb = list_first_entry(&ep->urb_list, struct urb, max3421_select_and_start_urb()
712 switch (usb_endpoint_type(&ep->desc)) { max3421_select_and_start_urb()
750 /* move current ep to tail: */ max3421_select_and_start_urb()
762 epnum = usb_endpoint_num(&urb->ep->desc); max3421_select_and_start_urb()
768 if (usb_endpoint_xfer_control(&ep->desc)) { max3421_select_and_start_urb()
802 struct usb_host_endpoint *ep; max3421_check_unlink() local
810 ep = max3421_ep->ep; max3421_check_unlink()
811 list_for_each_safe(upos, next_upos, &ep->urb_list) { max3421_check_unlink()
839 max3421_ep = urb->ep->hcpriv; max3421_slow_retransmit()
885 struct max3421_ep *max3421_ep = urb->ep->hcpriv; max3421_handle_error()
1060 max3421_ep = urb->ep->hcpriv; max3421_host_transfer_done()
1186 struct usb_host_endpoint *ep; dump_eps() local
1196 ep = max3421_ep->ep; dump_eps()
1201 list_for_each(upos, &ep->urb_list) { dump_eps()
1213 epnum = usb_endpoint_num(&ep->desc); dump_eps()
1548 max3421_ep = urb->ep->hcpriv; max3421_urb_enqueue()
1556 max3421_ep->ep = urb->ep; max3421_urb_enqueue()
1558 urb->ep->hcpriv = max3421_ep; max3421_urb_enqueue()
1598 max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) max3421_endpoint_disable() argument
1605 if (ep->hcpriv) { max3421_endpoint_disable()
1606 struct max3421_ep *max3421_ep = ep->hcpriv; max3421_endpoint_disable()
1612 ep->hcpriv = NULL; max3421_endpoint_disable()
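The MAX3421_HXFR_* macros quoted at the top of the max3421-hcd.c hits pack the transfer type into the high bits of the HXFR launch value and the endpoint number into the low bits. A quick standalone check of those encodings; the macro bodies below are copied from the listing.

#include <stdio.h>

#define MAX3421_HXFR_BULK_IN(ep)  (0x00 | (ep))   /* bulk or interrupt */
#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep))   /* bulk or interrupt */
#define MAX3421_HXFR_ISO_IN(ep)   (0x40 | (ep))
#define MAX3421_HXFR_ISO_OUT(ep)  (0x60 | (ep))

int main(void)
{
	printf("bulk-in  ep1: 0x%02x\n", MAX3421_HXFR_BULK_IN(1));   /* 0x01 */
	printf("iso-out  ep3: 0x%02x\n", MAX3421_HXFR_ISO_OUT(3));   /* 0x63 */
	return 0;
}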
H A Dimx21-dbg.c174 static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize) format_ep() argument
176 if (ep) format_ep()
178 ep->desc.bEndpointAddress, format_ep()
179 usb_endpoint_type(&ep->desc), format_ep()
180 ep); format_ep()
189 "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d", format_etd_dword0()
274 "ep=%s\n", debug_dmem_show()
276 format_ep(dmem->ep, ep_text, sizeof(ep_text))); debug_dmem_show()
305 "ep: %s\n" debug_etd_show()
314 format_ep(etd->ep, buf, sizeof(buf)), debug_etd_show()
H A Dxhci.c1187 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
1199 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, xhci_check_args()
1204 if (!hcd || (check_ep && !ep) || !udev) { xhci_check_args()
1262 "Max Packet Size for ep 0 changed."); xhci_check_maxpacket()
1333 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, xhci_urb_enqueue()
1338 ep_index = xhci_get_endpoint_index(&urb->ep->desc); xhci_urb_enqueue()
1347 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) xhci_urb_enqueue()
1349 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && xhci_urb_enqueue()
1352 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) xhci_urb_enqueue()
1377 if (usb_endpoint_xfer_control(&urb->ep->desc)) { xhci_urb_enqueue()
1402 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { xhci_urb_enqueue()
1408 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " xhci_urb_enqueue()
1413 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " xhci_urb_enqueue()
1424 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { xhci_urb_enqueue()
1448 urb->ep->desc.bEndpointAddress, urb); xhci_urb_enqueue()
1467 struct xhci_virt_ep *ep; xhci_urb_to_transfer_ring() local
1470 ep_index = xhci_get_endpoint_index(&urb->ep->desc); xhci_urb_to_transfer_ring()
1472 ep = &xhci->devs[slot_id]->eps[ep_index]; xhci_urb_to_transfer_ring()
1474 if (!(ep->ep_state & EP_HAS_STREAMS)) xhci_urb_to_transfer_ring()
1475 return ep->ring; xhci_urb_to_transfer_ring()
1479 "WARN: Slot ID %u, ep index %u has streams, " xhci_urb_to_transfer_ring()
1485 if (stream_id < ep->stream_info->num_streams) xhci_urb_to_transfer_ring()
1486 return ep->stream_info->stream_rings[stream_id]; xhci_urb_to_transfer_ring()
1489 "WARN: Slot ID %u, ep index %u has " xhci_urb_to_transfer_ring()
1493 ep->stream_info->num_streams - 1, xhci_urb_to_transfer_ring()
1539 struct xhci_virt_ep *ep; xhci_urb_dequeue() local
1574 urb->ep->desc.bEndpointAddress, urb); xhci_urb_dequeue()
1583 ep_index = xhci_get_endpoint_index(&urb->ep->desc); xhci_urb_dequeue()
1584 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; xhci_urb_dequeue()
1595 "Cancel URB %p, dev %s, ep 0x%x, " xhci_urb_dequeue()
1598 urb->ep->desc.bEndpointAddress, xhci_urb_dequeue()
1605 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); xhci_urb_dequeue()
1611 if (!(ep->ep_state & EP_HALT_PENDING)) { xhci_urb_dequeue()
1617 ep->ep_state |= EP_HALT_PENDING; xhci_urb_dequeue()
1618 ep->stop_cmds_pending++; xhci_urb_dequeue()
1619 ep->stop_cmd_timer.expires = jiffies + xhci_urb_dequeue()
1621 add_timer(&ep->stop_cmd_timer); xhci_urb_dequeue()
1645 struct usb_host_endpoint *ep) xhci_drop_endpoint()
1656 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); xhci_drop_endpoint()
1664 drop_flag = xhci_get_endpoint_flag(&ep->desc); xhci_drop_endpoint()
1666 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", xhci_drop_endpoint()
1680 ep_index = xhci_get_endpoint_index(&ep->desc); xhci_drop_endpoint()
1688 xhci_get_endpoint_flag(&ep->desc)) { xhci_drop_endpoint()
1691 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", xhci_drop_endpoint()
1692 __func__, ep); xhci_drop_endpoint()
1702 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); xhci_drop_endpoint()
1704 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", xhci_drop_endpoint()
1705 (unsigned int) ep->desc.bEndpointAddress, xhci_drop_endpoint()
1726 struct usb_host_endpoint *ep) xhci_add_endpoint()
1737 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); xhci_add_endpoint()
1739 /* So we won't queue a reset ep command for a root hub */ xhci_add_endpoint()
1740 ep->hcpriv = NULL; xhci_add_endpoint()
1747 added_ctxs = xhci_get_endpoint_flag(&ep->desc); xhci_add_endpoint()
1753 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", xhci_add_endpoint()
1767 ep_index = xhci_get_endpoint_index(&ep->desc); xhci_add_endpoint()
1775 (unsigned int) ep->desc.bEndpointAddress); xhci_add_endpoint()
1783 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", xhci_add_endpoint()
1784 __func__, ep); xhci_add_endpoint()
1793 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { xhci_add_endpoint()
1794 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", xhci_add_endpoint()
1795 __func__, ep->desc.bEndpointAddress); xhci_add_endpoint()
1811 ep->hcpriv = udev; xhci_add_endpoint()
1813 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", xhci_add_endpoint()
1814 (unsigned int) ep->desc.bEndpointAddress, xhci_add_endpoint()
2012 "Not enough ep ctxs: " xhci_reserve_host_resources()
2020 "Adding %u ep ctxs, %u now active.", added_eps, xhci_reserve_host_resources()
2039 "Removing %u failed ep ctxs, %u now active.", xhci_free_host_resources()
2059 "Removing %u dropped ep ctxs, %u now active.", xhci_finish_resource_reservation()
2511 /* Add the new ep before the smaller endpoint */ xhci_add_ep_to_interval_table()
2722 struct xhci_virt_ep *ep = &vdev->eps[i]; xhci_check_bw_drop_ep_streams() local
2724 if (ep->ep_state & EP_HAS_STREAMS) { xhci_check_bw_drop_ep_streams()
2727 xhci_free_stream_info(xhci, ep->stream_info); xhci_check_bw_drop_ep_streams()
2728 ep->stream_info = NULL; xhci_check_bw_drop_ep_streams()
2729 ep->ep_state &= ~EP_HAS_STREAMS; xhci_check_bw_drop_ep_streams()
2810 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); xhci_check_bandwidth()
2909 xhci_warn(xhci, "WARN Cannot submit config ep after " xhci_setup_input_ctx_for_quirk()
2910 "reset ep command\n"); xhci_setup_input_ctx_for_quirk()
2928 struct xhci_virt_ep *ep; xhci_cleanup_stalled_ring() local
2933 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; xhci_cleanup_stalled_ring()
2938 ep_index, ep->stopped_stream, td, &deq_state); xhci_cleanup_stalled_ring()
2950 ep_index, ep->stopped_stream, &deq_state); xhci_cleanup_stalled_ring()
2974 struct usb_host_endpoint *ep) xhci_endpoint_reset()
2981 * We might need to implement the config ep cmd in xhci 4.8.1 note: xhci_endpoint_reset()
2990 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", xhci_endpoint_reset()
2991 ep->desc.bEndpointAddress); xhci_endpoint_reset()
2995 struct usb_device *udev, struct usb_host_endpoint *ep, xhci_check_streams_endpoint()
3002 if (!ep) xhci_check_streams_endpoint()
3004 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); xhci_check_streams_endpoint()
3007 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_check_streams_endpoint()
3009 " descriptor for ep 0x%x does not support streams\n", xhci_check_streams_endpoint()
3010 ep->desc.bEndpointAddress); xhci_check_streams_endpoint()
3014 ep_index = xhci_get_endpoint_index(&ep->desc); xhci_check_streams_endpoint()
3020 ep->desc.bEndpointAddress); xhci_check_streams_endpoint()
3028 ep->desc.bEndpointAddress); xhci_check_streams_endpoint()
3277 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", xhci_alloc_streams()
3418 "Dropped %u ep ctxs, flags = 0x%x, " xhci_free_device_endpoint_resources()
3565 struct xhci_virt_ep *ep = &virt_dev->eps[i]; xhci_discover_or_reset_device() local
3567 if (ep->ep_state & EP_HAS_STREAMS) { xhci_discover_or_reset_device()
3570 xhci_free_stream_info(xhci, ep->stream_info); xhci_discover_or_reset_device()
3571 ep->stream_info = NULL; xhci_discover_or_reset_device()
3572 ep->ep_state &= ~EP_HAS_STREAMS; xhci_discover_or_reset_device()
3575 if (ep->ring) { xhci_discover_or_reset_device()
3681 "Not enough ep ctxs: " xhci_reserve_host_control_ep_resources()
3688 "Adding 1 ep ctx, %u now active.", xhci_reserve_host_control_ep_resources()
4863 /* XHCI controllers don't stop the ep queue on short packets :| */ xhci_gen_setup()
1198 xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, const char *func) xhci_check_args() argument
1644 xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep) xhci_drop_endpoint() argument
1725 xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep) xhci_add_endpoint() argument
2973 xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) xhci_endpoint_reset() argument
2994 xhci_check_streams_endpoint(struct xhci_hcd *xhci, struct usb_device *udev, struct usb_host_endpoint *ep, unsigned int slot_id) xhci_check_streams_endpoint() argument
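The comment in the first xhci.c hit states that ep 1 IN (address 0x81) yields added_ctxs = 0b1000. The arithmetic below reconstructs that result; the body of xhci_get_endpoint_index() is not quoted in these results, so treat the formula (non-control endpoints map to num*2 + dir_in - 1, and the context flag is 1 << (index + 1) because bit 0 belongs to the slot context) as an inference rather than a quotation.

#include <stdio.h>

/* non-control: index = num*2 + dir_in - 1; control: index = num*2 */
static unsigned int ep_index(unsigned int addr, int is_control)
{
	unsigned int num = addr & 0x0f;
	unsigned int in  = (addr & 0x80) ? 1 : 0;

	return is_control ? num * 2 : num * 2 + in - 1;
}

int main(void)
{
	unsigned int idx  = ep_index(0x81, 0);   /* ep 1 IN */
	unsigned int flag = 1u << (idx + 1);     /* bit 0 is the slot context */

	printf("ep 0x81 -> index %u, added_ctxs 0x%x\n", idx, flag);
	/* prints: index 2, added_ctxs 0x8 (0b1000, matching the comment) */
	return 0;
}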
H A Dimx21-hcd.h332 struct usb_host_endpoint *ep; member in struct:urb_priv
342 struct usb_host_endpoint *ep; member in struct:ep_priv
353 struct usb_host_endpoint *ep; member in struct:td
363 struct usb_host_endpoint *ep; member in struct:etd_priv
386 struct usb_host_endpoint *ep; member in struct:imx21_dmem_area
/linux-4.4.14/drivers/video/fbdev/omap2/dss/
H A Ddss-of.c75 struct device_node *ep = NULL; omapdss_of_get_next_endpoint() local
81 ep = of_get_next_child(parent, prev); omapdss_of_get_next_endpoint()
82 if (!ep) omapdss_of_get_next_endpoint()
84 prev = ep; omapdss_of_get_next_endpoint()
85 } while (of_node_cmp(ep->name, "endpoint") != 0); omapdss_of_get_next_endpoint()
87 return ep; omapdss_of_get_next_endpoint()
143 struct device_node *port, *ep; omapdss_of_get_first_endpoint() local
150 ep = omapdss_of_get_next_endpoint(port, NULL); omapdss_of_get_first_endpoint()
154 return ep; omapdss_of_get_first_endpoint()
161 struct device_node *ep; omapdss_of_find_source_for_first_ep() local
165 ep = omapdss_of_get_first_endpoint(node); omapdss_of_find_source_for_first_ep()
166 if (!ep) omapdss_of_find_source_for_first_ep()
169 src_port = omapdss_of_get_remote_port(ep); omapdss_of_find_source_for_first_ep()
171 of_node_put(ep); omapdss_of_find_source_for_first_ep()
175 of_node_put(ep); omapdss_of_find_source_for_first_ep()
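omapdss_of_get_next_endpoint() above steps through a port node's children until it finds one named "endpoint". A user-space model of that walk follows; struct node stands in for struct device_node, and the of_node_get()/of_node_put() refcounting visible in the callers is deliberately left out.

#include <stdio.h>
#include <string.h>

struct node { const char *name; struct node *child, *sibling; };

static struct node *next_child(struct node *parent, struct node *prev)
{
	return prev ? prev->sibling : parent->child;
}

static struct node *get_next_endpoint(struct node *parent, struct node *prev)
{
	struct node *ep = prev;

	do {
		ep = next_child(parent, ep);
		if (!ep)
			return NULL;
	} while (strcmp(ep->name, "endpoint") != 0);
	return ep;
}

int main(void)
{
	struct node ep   = { "endpoint", NULL, NULL };
	struct node cfg  = { "config",   NULL, &ep };
	struct node port = { "port",     &cfg, NULL };
	struct node *found = get_next_endpoint(&port, NULL);

	printf("found: %s\n", found ? found->name : "(none)");
	return 0;
}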
H A Dhdmi_common.c11 int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, hdmi_parse_lanes_of() argument
17 prop = of_find_property(ep, "lanes", &len); hdmi_parse_lanes_of()
26 r = of_property_read_u32_array(ep, "lanes", lanes, hdmi_parse_lanes_of()
H A Dsdi.c416 struct device_node *ep; sdi_init_port() local
420 ep = omapdss_of_get_next_endpoint(port, NULL); sdi_init_port()
421 if (!ep) sdi_init_port()
424 r = of_property_read_u32(ep, "datapairs", &datapairs); sdi_init_port()
432 of_node_put(ep); sdi_init_port()
443 of_node_put(ep); sdi_init_port()
/linux-4.4.14/drivers/scsi/bnx2i/
H A Dbnx2i_hwi.c25 * bnx2i_get_cid_num - get cid from ep
26 * @ep: endpoint pointer
30 static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) bnx2i_get_cid_num() argument
34 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) bnx2i_get_cid_num()
35 cid = ep->ep_cid; bnx2i_get_cid_num()
37 cid = GET_CID_NUM(ep->ep_cid); bnx2i_get_cid_num()
131 * @ep: endpoint (transport identifier) structure
138 int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) bnx2i_arm_cq_event_coalescing() argument
146 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) bnx2i_arm_cq_event_coalescing()
153 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; bnx2i_arm_cq_event_coalescing()
160 num_active_cmds = atomic_read(&ep->num_active_cmds); bnx2i_arm_cq_event_coalescing()
164 next_index = num_active_cmds >> ep->ec_shift; bnx2i_arm_cq_event_coalescing()
170 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; bnx2i_arm_cq_event_coalescing()
171 if (cq_index > ep->qp.cqe_size * 2) bnx2i_arm_cq_event_coalescing()
172 cq_index -= ep->qp.cqe_size * 2; bnx2i_arm_cq_event_coalescing()
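The arming computation above spaces completion notifications by the number of in-flight commands: the next interesting CQ index is the expected sequence number plus num_active_cmds >> ec_shift, wrapped over a sequence space twice the CQ size (so producer and consumer phase stay distinguishable). A small self-contained model of that arithmetic; the minimum-of-one clamps and the demo values are illustrative, since the excerpt shows only the shift and the wrap:

    #include <stdio.h>

    /* Model of the bnx2i CQ arming index: sequence numbers run over
     * 2 * cqe_size so a full ring and an empty ring look different. */
    static unsigned int arm_cq_index(unsigned int cqe_exp_seq_sn,
                                     unsigned int num_active_cmds,
                                     unsigned int ec_shift,
                                     unsigned int cqe_size)
    {
        unsigned int next_index = num_active_cmds >> ec_shift;
        unsigned int cq_index;

        if (!next_index)            /* coalesce at least one completion */
            next_index = 1;
        cq_index = cqe_exp_seq_sn + next_index - 1;
        if (cq_index > cqe_size * 2)
            cq_index -= cqe_size * 2;
        if (!cq_index)
            cq_index = 1;
        return cq_index;
    }

    int main(void)
    {
        /* 200 active commands, coalescing shift of 4, 128-entry CQ. */
        printf("arm at CQ index %u\n", arm_cq_index(250, 200, 4, 128));
        return 0;
    }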
195 if (!bnx2i_conn->ep->qp.rqe_left) bnx2i_get_rq_buf()
198 bnx2i_conn->ep->qp.rqe_left--; bnx2i_get_rq_buf()
199 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); bnx2i_get_rq_buf()
200 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { bnx2i_get_rq_buf()
201 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; bnx2i_get_rq_buf()
202 bnx2i_conn->ep->qp.rq_cons_idx = 0; bnx2i_get_rq_buf()
204 bnx2i_conn->ep->qp.rq_cons_qe++; bnx2i_get_rq_buf()
205 bnx2i_conn->ep->qp.rq_cons_idx++; bnx2i_get_rq_buf()
220 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); bnx2i_ring_577xx_doorbell()
234 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); bnx2i_put_rq_buf()
235 struct bnx2i_endpoint *ep = bnx2i_conn->ep; bnx2i_put_rq_buf() local
237 ep->qp.rqe_left += count; bnx2i_put_rq_buf()
238 ep->qp.rq_prod_idx &= 0x7FFF; bnx2i_put_rq_buf()
239 ep->qp.rq_prod_idx += count; bnx2i_put_rq_buf()
241 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { bnx2i_put_rq_buf()
242 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; bnx2i_put_rq_buf()
244 ep->qp.rq_prod_idx |= 0x8000; bnx2i_put_rq_buf()
246 ep->qp.rq_prod_idx |= hi_bit; bnx2i_put_rq_buf()
248 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { bnx2i_put_rq_buf()
249 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; bnx2i_put_rq_buf()
250 rq_db->prod_idx = ep->qp.rq_prod_idx; bnx2i_put_rq_buf()
253 writew(ep->qp.rq_prod_idx, bnx2i_put_rq_buf()
254 ep->qp.ctx_base + CNIC_RECV_DOORBELL); bnx2i_put_rq_buf()
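bnx2i_put_rq_buf() keeps the RQ producer index in the low 15 bits and uses bit 15 as a phase flag that flips each time the index wraps past max_rqes. A compact model of that update, assuming a 16-bit index and an arbitrary ring size (the 0x8000 starting value matches what bnx2i_alloc_qp_resc() writes later in this listing):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the bnx2i RQ producer update: low 15 bits index the ring,
     * bit 15 toggles on every wrap (phase bit). */
    static uint16_t rq_advance(uint16_t prod_idx, unsigned int count,
                               unsigned int max_rqes)
    {
        uint16_t hi_bit = prod_idx & 0x8000;

        prod_idx &= 0x7FFF;
        prod_idx += count;
        if (prod_idx > max_rqes) {
            prod_idx %= max_rqes;
            prod_idx |= 0x8000 & ~hi_bit;   /* wrapped: flip the phase bit */
        } else {
            prod_idx |= hi_bit;             /* no wrap: keep the phase bit */
        }
        return prod_idx;
    }

    int main(void)
    {
        uint16_t idx = 0x8000;              /* driver's initial value */

        idx = rq_advance(idx, 20, 16);      /* wraps a 16-entry ring */
        printf("prod_idx = 0x%04x\n", idx);
        return 0;
    }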
272 struct bnx2i_endpoint *ep = bnx2i_conn->ep; bnx2i_ring_sq_dbell() local
274 atomic_inc(&ep->num_active_cmds); bnx2i_ring_sq_dbell()
276 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { bnx2i_ring_sq_dbell()
277 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; bnx2i_ring_sq_dbell()
278 sq_db->prod_idx = ep->qp.sq_prod_idx; bnx2i_ring_sq_dbell()
281 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); bnx2i_ring_sq_dbell()
300 if (bnx2i_conn->ep->qp.sq_prod_qe == bnx2i_ring_dbell_update_sq_params()
301 bnx2i_conn->ep->qp.sq_last_qe) bnx2i_ring_dbell_update_sq_params()
302 bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_ring_dbell_update_sq_params()
303 bnx2i_conn->ep->qp.sq_first_qe; bnx2i_ring_dbell_update_sq_params()
305 bnx2i_conn->ep->qp.sq_prod_qe++; bnx2i_ring_dbell_update_sq_params()
307 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= bnx2i_ring_dbell_update_sq_params()
308 bnx2i_conn->ep->qp.sq_last_qe) bnx2i_ring_dbell_update_sq_params()
309 bnx2i_conn->ep->qp.sq_prod_qe += count; bnx2i_ring_dbell_update_sq_params()
311 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - bnx2i_ring_dbell_update_sq_params()
312 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_ring_dbell_update_sq_params()
313 bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_ring_dbell_update_sq_params()
314 &bnx2i_conn->ep->qp.sq_first_qe[count - bnx2i_ring_dbell_update_sq_params()
318 bnx2i_conn->ep->qp.sq_prod_idx += count; bnx2i_ring_dbell_update_sq_params()
320 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); bnx2i_ring_dbell_update_sq_params()
343 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_login()
402 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_tmf()
473 text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_text()
519 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_scsicmd()
543 struct bnx2i_endpoint *ep = bnx2i_conn->ep; bnx2i_send_iscsi_nopout() local
550 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; bnx2i_send_iscsi_nopout()
558 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { bnx2i_send_iscsi_nopout()
613 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_logout()
632 bnx2i_conn->ep->state = EP_STATE_LOGOUT_SENT; bnx2i_send_iscsi_logout()
660 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) bnx2i_update_iscsi_conn()
661 update_wqe->context_id = bnx2i_conn->ep->ep_cid; bnx2i_update_iscsi_conn()
663 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); bnx2i_update_iscsi_conn()
703 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; bnx2i_ep_ofld_timer() local
705 if (ep->state == EP_STATE_OFLD_START) { bnx2i_ep_ofld_timer()
707 ep->state = EP_STATE_OFLD_FAILED; bnx2i_ep_ofld_timer()
708 } else if (ep->state == EP_STATE_DISCONN_START) { bnx2i_ep_ofld_timer()
710 ep->state = EP_STATE_DISCONN_TIMEDOUT; bnx2i_ep_ofld_timer()
711 } else if (ep->state == EP_STATE_CLEANUP_START) { bnx2i_ep_ofld_timer()
713 ep->state = EP_STATE_CLEANUP_FAILED; bnx2i_ep_ofld_timer()
716 wake_up_interruptible(&ep->ofld_wait); bnx2i_ep_ofld_timer()
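The timer handler maps whichever blocking phase the endpoint was stuck in to its corresponding failure state, then wakes the waiter. A reduced model of the pattern, useful wherever a bounded wait needs a distinct per-phase timeout result; state names mirror the excerpt, everything else is illustrative:

    #include <stdio.h>

    enum ep_state {
        EP_STATE_OFLD_START,
        EP_STATE_OFLD_FAILED,
        EP_STATE_DISCONN_START,
        EP_STATE_DISCONN_TIMEDOUT,
        EP_STATE_CLEANUP_START,
        EP_STATE_CLEANUP_FAILED,
    };

    /* Model of bnx2i_ep_ofld_timer(): on expiry, translate the phase
     * still pending into its timeout/failure state, then wake the
     * waiter (modelled here as a flag). */
    static void ofld_timer(enum ep_state *state, int *woken)
    {
        if (*state == EP_STATE_OFLD_START)
            *state = EP_STATE_OFLD_FAILED;
        else if (*state == EP_STATE_DISCONN_START)
            *state = EP_STATE_DISCONN_TIMEDOUT;
        else if (*state == EP_STATE_CLEANUP_START)
            *state = EP_STATE_CLEANUP_FAILED;

        *woken = 1;     /* stands in for wake_up_interruptible() */
    }

    int main(void)
    {
        enum ep_state st = EP_STATE_DISCONN_START;
        int woken = 0;

        ofld_timer(&st, &woken);
        printf("state=%d woken=%d\n", st, woken);
        return 0;
    }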
747 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; bnx2i_send_cmd_cleanup_req()
761 * @ep: endpoint (transport identifier) structure
766 int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_send_conn_destroy() argument
778 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) bnx2i_send_conn_destroy()
779 conn_cleanup.context_id = ep->ep_cid; bnx2i_send_conn_destroy()
781 conn_cleanup.context_id = (ep->ep_cid >> 7); bnx2i_send_conn_destroy()
783 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; bnx2i_send_conn_destroy()
796 * @ep: endpoint (transport identifier) structure
801 struct bnx2i_endpoint *ep) bnx2i_570x_send_conn_ofld_req()
815 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; bnx2i_570x_send_conn_ofld_req()
817 dma_addr = ep->qp.sq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req()
821 dma_addr = ep->qp.cq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req()
829 dma_addr = ep->qp.rq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req()
833 ptbl = (u32 *) ep->qp.sq_pgtbl_virt; bnx2i_570x_send_conn_ofld_req()
838 ptbl = (u32 *) ep->qp.cq_pgtbl_virt; bnx2i_570x_send_conn_ofld_req()
856 * @ep: endpoint (transport identifier) structure
861 struct bnx2i_endpoint *ep) bnx2i_5771x_send_conn_ofld_req()
876 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; bnx2i_5771x_send_conn_ofld_req()
878 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req()
882 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req()
890 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req()
894 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req()
898 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req()
907 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req()
925 * @ep: endpoint (transport identifier) structure
929 int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_send_conn_ofld_req() argument
934 rc = bnx2i_5771x_send_conn_ofld_req(hba, ep); bnx2i_send_conn_ofld_req()
936 rc = bnx2i_570x_send_conn_ofld_req(hba, ep); bnx2i_send_conn_ofld_req()
944 * @ep: endpoint (transport identifier) structure
950 static void setup_qp_page_tables(struct bnx2i_endpoint *ep) setup_qp_page_tables() argument
957 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) setup_qp_page_tables()
963 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); setup_qp_page_tables()
964 num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables()
965 page = ep->qp.sq_phys; setup_qp_page_tables()
968 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); setup_qp_page_tables()
970 ptbl = (u32 *) ep->qp.sq_pgtbl_virt; setup_qp_page_tables()
991 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); setup_qp_page_tables()
992 num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables()
993 page = ep->qp.rq_phys; setup_qp_page_tables()
996 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); setup_qp_page_tables()
998 ptbl = (u32 *) ep->qp.rq_pgtbl_virt; setup_qp_page_tables()
1019 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); setup_qp_page_tables()
1020 num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables()
1021 page = ep->qp.cq_phys; setup_qp_page_tables()
1024 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); setup_qp_page_tables()
1026 ptbl = (u32 *) ep->qp.cq_pgtbl_virt; setup_qp_page_tables()
1051 * @ep: endpoint (transport identifier) structure
1058 int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_alloc_qp_resc() argument
1062 ep->hba = hba; bnx2i_alloc_qp_resc()
1063 ep->conn = NULL; bnx2i_alloc_qp_resc()
1064 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0; bnx2i_alloc_qp_resc()
1067 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; bnx2i_alloc_qp_resc()
1068 ep->qp.sq_mem_size = bnx2i_alloc_qp_resc()
1069 (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1070 ep->qp.sq_pgtbl_size = bnx2i_alloc_qp_resc()
1071 (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc()
1072 ep->qp.sq_pgtbl_size = bnx2i_alloc_qp_resc()
1073 (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1075 ep->qp.sq_pgtbl_virt = bnx2i_alloc_qp_resc()
1076 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, bnx2i_alloc_qp_resc()
1077 &ep->qp.sq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1078 if (!ep->qp.sq_pgtbl_virt) { bnx2i_alloc_qp_resc()
1080 ep->qp.sq_pgtbl_size); bnx2i_alloc_qp_resc()
1085 ep->qp.sq_virt = bnx2i_alloc_qp_resc()
1086 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, bnx2i_alloc_qp_resc()
1087 &ep->qp.sq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1088 if (!ep->qp.sq_virt) { bnx2i_alloc_qp_resc()
1090 ep->qp.sq_mem_size); bnx2i_alloc_qp_resc()
1094 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size); bnx2i_alloc_qp_resc()
1095 ep->qp.sq_first_qe = ep->qp.sq_virt; bnx2i_alloc_qp_resc()
1096 ep->qp.sq_prod_qe = ep->qp.sq_first_qe; bnx2i_alloc_qp_resc()
1097 ep->qp.sq_cons_qe = ep->qp.sq_first_qe; bnx2i_alloc_qp_resc()
1098 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; bnx2i_alloc_qp_resc()
1099 ep->qp.sq_prod_idx = 0; bnx2i_alloc_qp_resc()
1100 ep->qp.sq_cons_idx = 0; bnx2i_alloc_qp_resc()
1101 ep->qp.sqe_left = hba->max_sqes; bnx2i_alloc_qp_resc()
1104 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; bnx2i_alloc_qp_resc()
1105 ep->qp.cq_mem_size = bnx2i_alloc_qp_resc()
1106 (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1107 ep->qp.cq_pgtbl_size = bnx2i_alloc_qp_resc()
1108 (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc()
1109 ep->qp.cq_pgtbl_size = bnx2i_alloc_qp_resc()
1110 (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1112 ep->qp.cq_pgtbl_virt = bnx2i_alloc_qp_resc()
1113 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, bnx2i_alloc_qp_resc()
1114 &ep->qp.cq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1115 if (!ep->qp.cq_pgtbl_virt) { bnx2i_alloc_qp_resc()
1117 ep->qp.cq_pgtbl_size); bnx2i_alloc_qp_resc()
1122 ep->qp.cq_virt = bnx2i_alloc_qp_resc()
1123 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, bnx2i_alloc_qp_resc()
1124 &ep->qp.cq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1125 if (!ep->qp.cq_virt) { bnx2i_alloc_qp_resc()
1127 ep->qp.cq_mem_size); bnx2i_alloc_qp_resc()
1130 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size); bnx2i_alloc_qp_resc()
1132 ep->qp.cq_first_qe = ep->qp.cq_virt; bnx2i_alloc_qp_resc()
1133 ep->qp.cq_prod_qe = ep->qp.cq_first_qe; bnx2i_alloc_qp_resc()
1134 ep->qp.cq_cons_qe = ep->qp.cq_first_qe; bnx2i_alloc_qp_resc()
1135 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; bnx2i_alloc_qp_resc()
1136 ep->qp.cq_prod_idx = 0; bnx2i_alloc_qp_resc()
1137 ep->qp.cq_cons_idx = 0; bnx2i_alloc_qp_resc()
1138 ep->qp.cqe_left = hba->max_cqes; bnx2i_alloc_qp_resc()
1139 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; bnx2i_alloc_qp_resc()
1140 ep->qp.cqe_size = hba->max_cqes; bnx2i_alloc_qp_resc()
1143 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; bnx2i_alloc_qp_resc()
1147 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; bnx2i_alloc_qp_resc()
1148 ep->qp.rq_mem_size = bnx2i_alloc_qp_resc()
1149 (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1150 ep->qp.rq_pgtbl_size = bnx2i_alloc_qp_resc()
1151 (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc()
1152 ep->qp.rq_pgtbl_size = bnx2i_alloc_qp_resc()
1153 (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1155 ep->qp.rq_pgtbl_virt = bnx2i_alloc_qp_resc()
1156 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, bnx2i_alloc_qp_resc()
1157 &ep->qp.rq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1158 if (!ep->qp.rq_pgtbl_virt) { bnx2i_alloc_qp_resc()
1160 ep->qp.rq_pgtbl_size); bnx2i_alloc_qp_resc()
1165 ep->qp.rq_virt = bnx2i_alloc_qp_resc()
1166 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, bnx2i_alloc_qp_resc()
1167 &ep->qp.rq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1168 if (!ep->qp.rq_virt) { bnx2i_alloc_qp_resc()
1170 ep->qp.rq_mem_size); bnx2i_alloc_qp_resc()
1174 ep->qp.rq_first_qe = ep->qp.rq_virt; bnx2i_alloc_qp_resc()
1175 ep->qp.rq_prod_qe = ep->qp.rq_first_qe; bnx2i_alloc_qp_resc()
1176 ep->qp.rq_cons_qe = ep->qp.rq_first_qe; bnx2i_alloc_qp_resc()
1177 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; bnx2i_alloc_qp_resc()
1178 ep->qp.rq_prod_idx = 0x8000; bnx2i_alloc_qp_resc()
1179 ep->qp.rq_cons_idx = 0; bnx2i_alloc_qp_resc()
1180 ep->qp.rqe_left = hba->max_rqes; bnx2i_alloc_qp_resc()
1182 setup_qp_page_tables(ep); bnx2i_alloc_qp_resc()
1187 bnx2i_free_qp_resc(hba, ep); bnx2i_alloc_qp_resc()
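All three queues in bnx2i_alloc_qp_resc() are sized the same way: element count times WQE size rounded up to whole pages, plus a page table holding one pointer-sized entry per page, itself rounded up to a page. A standalone model of that rounding, assuming a power-of-two page size as the mask arithmetic requires (CNIC_PAGE_SIZE stands in for the 4096 here):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Round a byte count up to a whole number of pages. */
    static unsigned long round_to_pages(unsigned long size)
    {
        return (size + (PAGE_SIZE - 1)) & PAGE_MASK;
    }

    int main(void)
    {
        unsigned long max_sqes = 128, wqe_size = 64;

        /* Queue memory: element count * element size, page aligned. */
        unsigned long mem_size = round_to_pages(max_sqes * wqe_size);

        /* Page table: one pointer per page of queue memory, page aligned. */
        unsigned long pgtbl_size =
            round_to_pages((mem_size / PAGE_SIZE) * sizeof(void *));

        printf("mem %lu bytes, pgtbl %lu bytes\n", mem_size, pgtbl_size);
        return 0;
    }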
1196 * @ep: endpoint (transport identifier) structure
1200 void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_free_qp_resc() argument
1202 if (ep->qp.ctx_base) { bnx2i_free_qp_resc()
1203 iounmap(ep->qp.ctx_base); bnx2i_free_qp_resc()
1204 ep->qp.ctx_base = NULL; bnx2i_free_qp_resc()
1207 if (ep->qp.sq_pgtbl_virt) { bnx2i_free_qp_resc()
1208 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, bnx2i_free_qp_resc()
1209 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); bnx2i_free_qp_resc()
1210 ep->qp.sq_pgtbl_virt = NULL; bnx2i_free_qp_resc()
1211 ep->qp.sq_pgtbl_phys = 0; bnx2i_free_qp_resc()
1213 if (ep->qp.sq_virt) { bnx2i_free_qp_resc()
1214 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, bnx2i_free_qp_resc()
1215 ep->qp.sq_virt, ep->qp.sq_phys); bnx2i_free_qp_resc()
1216 ep->qp.sq_virt = NULL; bnx2i_free_qp_resc()
1217 ep->qp.sq_phys = 0; bnx2i_free_qp_resc()
1221 if (ep->qp.rq_pgtbl_virt) { bnx2i_free_qp_resc()
1222 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, bnx2i_free_qp_resc()
1223 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); bnx2i_free_qp_resc()
1224 ep->qp.rq_pgtbl_virt = NULL; bnx2i_free_qp_resc()
1225 ep->qp.rq_pgtbl_phys = 0; bnx2i_free_qp_resc()
1227 if (ep->qp.rq_virt) { bnx2i_free_qp_resc()
1228 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, bnx2i_free_qp_resc()
1229 ep->qp.rq_virt, ep->qp.rq_phys); bnx2i_free_qp_resc()
1230 ep->qp.rq_virt = NULL; bnx2i_free_qp_resc()
1231 ep->qp.rq_phys = 0; bnx2i_free_qp_resc()
1235 if (ep->qp.cq_pgtbl_virt) { bnx2i_free_qp_resc()
1236 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, bnx2i_free_qp_resc()
1237 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); bnx2i_free_qp_resc()
1238 ep->qp.cq_pgtbl_virt = NULL; bnx2i_free_qp_resc()
1239 ep->qp.cq_pgtbl_phys = 0; bnx2i_free_qp_resc()
1241 if (ep->qp.cq_virt) { bnx2i_free_qp_resc()
1242 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, bnx2i_free_qp_resc()
1243 ep->qp.cq_virt, ep->qp.cq_phys); bnx2i_free_qp_resc()
1244 ep->qp.cq_virt = NULL; bnx2i_free_qp_resc()
1245 ep->qp.cq_phys = 0; bnx2i_free_qp_resc()
1650 bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD; bnx2i_process_logout_resp()
1986 if (bnx2i_conn->ep == NULL) bnx2i_process_new_cqes()
1989 qp = &bnx2i_conn->ep->qp; bnx2i_process_new_cqes()
2069 if (!atomic_read(&bnx2i_conn->ep->num_active_cmds)) bnx2i_process_new_cqes()
2075 atomic_dec(&bnx2i_conn->ep->num_active_cmds); bnx2i_process_new_cqes()
2121 if (!bnx2i_conn->ep) { bnx2i_fastpath_notification()
2122 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); bnx2i_fastpath_notification()
2127 nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, bnx2i_fastpath_notification()
2130 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP); bnx2i_fastpath_notification()
2154 if (!conn->ep) { bnx2i_process_update_conn_cmpl()
2155 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); bnx2i_process_update_conn_cmpl()
2161 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED; bnx2i_process_update_conn_cmpl()
2163 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; bnx2i_process_update_conn_cmpl()
2165 wake_up_interruptible(&conn->ep->ofld_wait); bnx2i_process_update_conn_cmpl()
2392 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid, bnx2i_process_iscsi_error()
2393 bnx2i_conn->ep->ep_cid); bnx2i_process_iscsi_error()
2415 struct bnx2i_endpoint *ep; bnx2i_process_conn_destroy_cmpl() local
2417 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); bnx2i_process_conn_destroy_cmpl()
2418 if (!ep) { bnx2i_process_conn_destroy_cmpl()
2424 if (hba != ep->hba) { bnx2i_process_conn_destroy_cmpl()
2431 ep->state = EP_STATE_CLEANUP_FAILED; bnx2i_process_conn_destroy_cmpl()
2433 ep->state = EP_STATE_CLEANUP_CMPL; bnx2i_process_conn_destroy_cmpl()
2434 wake_up_interruptible(&ep->ofld_wait); bnx2i_process_conn_destroy_cmpl()
2450 struct bnx2i_endpoint *ep; bnx2i_process_ofld_cmpl() local
2453 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id); bnx2i_process_ofld_cmpl()
2454 if (!ep) { bnx2i_process_ofld_cmpl()
2459 if (hba != ep->hba) { bnx2i_process_ofld_cmpl()
2465 ep->state = EP_STATE_OFLD_FAILED; bnx2i_process_ofld_cmpl()
2478 ep->state = EP_STATE_OFLD_FAILED_CID_BUSY; bnx2i_process_ofld_cmpl()
2484 ep->state = EP_STATE_OFLD_COMPL; bnx2i_process_ofld_cmpl()
2486 cid_num = bnx2i_get_cid_num(ep); bnx2i_process_ofld_cmpl()
2487 ep->ep_cid = cid_addr; bnx2i_process_ofld_cmpl()
2488 ep->qp.ctx_base = NULL; bnx2i_process_ofld_cmpl()
2490 wake_up_interruptible(&ep->ofld_wait); bnx2i_process_ofld_cmpl()
2596 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; bnx2i_cm_connect_cmpl() local
2598 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) bnx2i_cm_connect_cmpl()
2599 ep->state = EP_STATE_CONNECT_FAILED; bnx2i_cm_connect_cmpl()
2601 ep->state = EP_STATE_CONNECT_COMPL; bnx2i_cm_connect_cmpl()
2603 ep->state = EP_STATE_CONNECT_FAILED; bnx2i_cm_connect_cmpl()
2605 wake_up_interruptible(&ep->ofld_wait); bnx2i_cm_connect_cmpl()
2618 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; bnx2i_cm_close_cmpl() local
2620 ep->state = EP_STATE_DISCONN_COMPL; bnx2i_cm_close_cmpl()
2621 wake_up_interruptible(&ep->ofld_wait); bnx2i_cm_close_cmpl()
2634 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; bnx2i_cm_abort_cmpl() local
2636 ep->state = EP_STATE_DISCONN_COMPL; bnx2i_cm_abort_cmpl()
2637 wake_up_interruptible(&ep->ofld_wait); bnx2i_cm_abort_cmpl()
2651 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; bnx2i_cm_remote_close() local
2653 ep->state = EP_STATE_TCP_FIN_RCVD; bnx2i_cm_remote_close()
2654 if (ep->conn) bnx2i_cm_remote_close()
2655 bnx2i_recovery_que_add_conn(ep->hba, ep->conn); bnx2i_cm_remote_close()
2668 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; bnx2i_cm_remote_abort() local
2669 u32 old_state = ep->state; bnx2i_cm_remote_abort()
2671 ep->state = EP_STATE_TCP_RST_RCVD; bnx2i_cm_remote_abort()
2673 wake_up_interruptible(&ep->ofld_wait); bnx2i_cm_remote_abort()
2675 if (ep->conn) bnx2i_cm_remote_abort()
2676 bnx2i_recovery_que_add_conn(ep->hba, ep->conn); bnx2i_cm_remote_abort()
2723 * @ep: bnx2i endpoint
2729 int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) bnx2i_map_ep_dbell_regs() argument
2738 cid_num = bnx2i_get_cid_num(ep); bnx2i_map_ep_dbell_regs()
2740 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { bnx2i_map_ep_dbell_regs()
2741 reg_base = pci_resource_start(ep->hba->pcidev, bnx2i_map_ep_dbell_regs()
2744 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); bnx2i_map_ep_dbell_regs()
2748 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && bnx2i_map_ep_dbell_regs()
2749 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { bnx2i_map_ep_dbell_regs()
2750 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); bnx2i_map_ep_dbell_regs()
2763 ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, bnx2i_map_ep_dbell_regs()
2765 if (!ep->qp.ctx_base) bnx2i_map_ep_dbell_regs()
2769 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE); bnx2i_map_ep_dbell_regs()
800 bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_570x_send_conn_ofld_req() argument
860 bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_5771x_send_conn_ofld_req() argument
H A Dbnx2i_iscsi.c374 * bnx2i_alloc_ep - allocates ep structure from global pool
383 struct iscsi_endpoint *ep; bnx2i_alloc_ep() local
387 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); bnx2i_alloc_ep()
388 if (!ep) { bnx2i_alloc_ep()
389 printk(KERN_ERR "bnx2i: Could not allocate ep\n"); bnx2i_alloc_ep()
393 bnx2i_ep = ep->dd_data; bnx2i_alloc_ep()
394 bnx2i_ep->cls_ep = ep; bnx2i_alloc_ep()
407 return ep; bnx2i_alloc_ep()
413 * @ep: pointer to iscsi endpoint structure
415 static void bnx2i_free_ep(struct iscsi_endpoint *ep) bnx2i_free_ep() argument
417 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; bnx2i_free_ep()
428 bnx2i_ep->conn->ep = NULL; bnx2i_free_ep()
434 iscsi_destroy_endpoint(ep); bnx2i_free_ep()
602 * @ep: pointer to endpoint (transport identifier) structure
607 struct bnx2i_endpoint *ep) bnx2i_ep_destroy_list_add()
610 list_add_tail(&ep->link, &hba->ep_destroy_list); bnx2i_ep_destroy_list_add()
619 * @ep: pointer to endpoint (transport identifier) structure
624 struct bnx2i_endpoint *ep) bnx2i_ep_destroy_list_del()
627 list_del_init(&ep->link); bnx2i_ep_destroy_list_del()
634 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
636 * @ep: pointer to endpoint (transport identifier) structure
641 struct bnx2i_endpoint *ep) bnx2i_ep_ofld_list_add()
644 list_add_tail(&ep->link, &hba->ep_ofld_list); bnx2i_ep_ofld_list_add()
650 * bnx2i_ep_ofld_list_del - deletes an entry from ep offload pending list bnx2i_ep_ofld_list_del()
652 * @ep: pointer to endpoint (transport identifier) structure
657 struct bnx2i_endpoint *ep) bnx2i_ep_ofld_list_del()
660 list_del_init(&ep->link); bnx2i_ep_ofld_list_del()
678 struct bnx2i_endpoint *ep; bnx2i_find_ep_in_ofld_list() local
682 ep = (struct bnx2i_endpoint *)list; bnx2i_find_ep_in_ofld_list()
684 if (ep->ep_iscsi_cid == iscsi_cid) bnx2i_find_ep_in_ofld_list()
686 ep = NULL; bnx2i_find_ep_in_ofld_list()
690 if (!ep) bnx2i_find_ep_in_ofld_list()
692 return ep; bnx2i_find_ep_in_ofld_list()
706 struct bnx2i_endpoint *ep; bnx2i_find_ep_in_destroy_list() local
710 ep = (struct bnx2i_endpoint *)list; bnx2i_find_ep_in_destroy_list()
712 if (ep->ep_iscsi_cid == iscsi_cid) bnx2i_find_ep_in_destroy_list()
714 ep = NULL; bnx2i_find_ep_in_destroy_list()
718 if (!ep) bnx2i_find_ep_in_destroy_list()
721 return ep; bnx2i_find_ep_in_destroy_list()
725 * bnx2i_ep_active_list_add - add an entry to ep active list
727 * @ep: pointer to endpoint (transport identifier) structure
732 struct bnx2i_endpoint *ep) bnx2i_ep_active_list_add()
735 list_add_tail(&ep->link, &hba->ep_active_list); bnx2i_ep_active_list_add()
741 * bnx2i_ep_active_list_del - deletes an entry from ep active list bnx2i_ep_active_list_del()
743 * @ep: pointer to endpoint (transport identifier) structure
748 struct bnx2i_endpoint *ep) bnx2i_ep_active_list_del()
751 list_del_init(&ep->link); bnx2i_ep_active_list_del()
1234 if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 > bnx2i_task_xmit()
1287 bnx2i_session_create(struct iscsi_endpoint *ep, bnx2i_session_create() argument
1296 if (!ep) { bnx2i_session_create()
1297 printk(KERN_ERR "bnx2i: missing ep.\n"); bnx2i_session_create()
1301 bnx2i_ep = ep->dd_data; bnx2i_session_create()
1378 /* 'ep' ptr will be assigned in bind() call */ bnx2i_conn_create()
1379 bnx2i_conn->ep = NULL; bnx2i_conn_create()
1396 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1416 struct iscsi_endpoint *ep; bnx2i_conn_bind() local
1419 ep = iscsi_lookup_endpoint(transport_fd); bnx2i_conn_bind()
1420 if (!ep) bnx2i_conn_bind()
1429 bnx2i_ep = ep->dd_data; bnx2i_conn_bind()
1442 "conn bind, ep=0x%p (%s) does not", bnx2i_conn_bind()
1450 bnx2i_conn->ep = bnx2i_ep; bnx2i_conn_bind()
1463 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); bnx2i_conn_bind()
1514 * bnx2i_ep_get_param - return iscsi ep parameter to caller
1515 * @ep: pointer to iscsi endpoint
1519 * returns iSCSI ep parameters
1521 static int bnx2i_ep_get_param(struct iscsi_endpoint *ep, bnx2i_ep_get_param() argument
1524 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; bnx2i_ep_get_param()
1607 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; bnx2i_conn_start()
1614 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; bnx2i_conn_start()
1615 bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_conn_start()
1616 bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; bnx2i_conn_start()
1617 add_timer(&bnx2i_conn->ep->ofld_timer); bnx2i_conn_start()
1619 wait_event_interruptible(bnx2i_conn->ep->ofld_wait, bnx2i_conn_start()
1620 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); bnx2i_conn_start()
1624 del_timer_sync(&bnx2i_conn->ep->ofld_timer); bnx2i_conn_start()
1702 * @ep: endpoint (transport identifier) structure
1707 struct bnx2i_endpoint *ep) bnx2i_tear_down_conn()
1709 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk) bnx2i_tear_down_conn()
1710 hba->cnic->cm_destroy(ep->cm_sk); bnx2i_tear_down_conn()
1713 ep->state == EP_STATE_DISCONN_TIMEDOUT) { bnx2i_tear_down_conn()
1714 if (ep->conn && ep->conn->cls_conn && bnx2i_tear_down_conn()
1715 ep->conn->cls_conn->dd_data) { bnx2i_tear_down_conn()
1716 struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; bnx2i_tear_down_conn()
1718 /* Must suspend all rx queue activity for this ep */ bnx2i_tear_down_conn()
1731 ep->state = EP_STATE_CLEANUP_START; bnx2i_tear_down_conn()
1732 init_timer(&ep->ofld_timer); bnx2i_tear_down_conn()
1733 ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; bnx2i_tear_down_conn()
1734 ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_tear_down_conn()
1735 ep->ofld_timer.data = (unsigned long) ep; bnx2i_tear_down_conn()
1736 add_timer(&ep->ofld_timer); bnx2i_tear_down_conn()
1738 bnx2i_ep_destroy_list_add(hba, ep); bnx2i_tear_down_conn()
1741 if (bnx2i_send_conn_destroy(hba, ep)) bnx2i_tear_down_conn()
1742 ep->state = EP_STATE_CLEANUP_CMPL; bnx2i_tear_down_conn()
1744 wait_event_interruptible(ep->ofld_wait, bnx2i_tear_down_conn()
1745 (ep->state != EP_STATE_CLEANUP_START)); bnx2i_tear_down_conn()
1749 del_timer_sync(&ep->ofld_timer); bnx2i_tear_down_conn()
1751 bnx2i_ep_destroy_list_del(hba, ep); bnx2i_tear_down_conn()
1753 if (ep->state != EP_STATE_CLEANUP_CMPL) bnx2i_tear_down_conn()
1784 struct iscsi_endpoint *ep; bnx2i_ep_connect() local
1808 ep = bnx2i_alloc_ep(hba); bnx2i_ep_connect()
1809 if (!ep) { bnx2i_ep_connect()
1813 bnx2i_ep = ep->dd_data; bnx2i_ep_connect()
1821 bnx2i_free_ep(ep); bnx2i_ep_connect()
1916 return ep; bnx2i_ep_connect()
1928 bnx2i_free_ep(ep); bnx2i_ep_connect()
1938 * @ep: TCP connection (endpoint) handle
1943 static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) bnx2i_ep_poll() argument
1948 bnx2i_ep = ep->dd_data; bnx2i_ep_poll()
1978 * @ep: endpoint pointer
2023 * @ep: TCP connection (bnx2i endpoint) handle
2117 * @ep: TCP connection (iscsi endpoint) handle
2121 static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) bnx2i_ep_disconnect() argument
2128 bnx2i_ep = ep->dd_data; bnx2i_ep_disconnect()
2168 bnx2i_conn->ep = NULL; bnx2i_ep_disconnect()
2170 bnx2i_free_ep(ep); bnx2i_ep_disconnect()
606 bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_ep_destroy_list_add() argument
623 bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_ep_destroy_list_del() argument
640 bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_ep_ofld_list_add() argument
656 bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_ep_ofld_list_del() argument
731 bnx2i_ep_active_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_ep_active_list_add() argument
747 bnx2i_ep_active_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_ep_active_list_del() argument
1706 bnx2i_tear_down_conn(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) bnx2i_tear_down_conn() argument
/linux-4.4.14/drivers/usb/core/
H A Dendpoint.c39 struct ep_device *ep = to_ep_device(dev); \
40 return sprintf(buf, format_string, ep->desc->field); \
52 struct ep_device *ep = to_ep_device(dev); wMaxPacketSize_show() local
54 usb_endpoint_maxp(ep->desc) & 0x07ff); wMaxPacketSize_show()
61 struct ep_device *ep = to_ep_device(dev); type_show() local
64 switch (usb_endpoint_type(ep->desc)) { type_show()
85 struct ep_device *ep = to_ep_device(dev); interval_show() local
90 in = (ep->desc->bEndpointAddress & USB_DIR_IN); interval_show()
92 switch (usb_endpoint_type(ep->desc)) { interval_show()
94 if (ep->udev->speed == USB_SPEED_HIGH) interval_show()
96 interval = ep->desc->bInterval; interval_show()
100 interval = 1 << (ep->desc->bInterval - 1); interval_show()
104 if (ep->udev->speed == USB_SPEED_HIGH && !in) interval_show()
106 interval = ep->desc->bInterval; interval_show()
110 if (ep->udev->speed == USB_SPEED_HIGH) interval_show()
111 interval = 1 << (ep->desc->bInterval - 1); interval_show()
113 interval = ep->desc->bInterval; interval_show()
116 interval *= (ep->udev->speed == USB_SPEED_HIGH) ? 125 : 1000; interval_show()
131 struct ep_device *ep = to_ep_device(dev); direction_show() local
134 if (usb_endpoint_xfer_control(ep->desc)) direction_show()
136 else if (usb_endpoint_dir_in(ep->desc)) direction_show()
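interval_show() above normalizes bInterval into microseconds: for interrupt and isochronous endpoints on a high-speed device it is an exponent (2^(bInterval-1) microframes), otherwise a linear count, and the frame unit is 125 us at high speed versus 1 ms at full/low speed. A compact model of the same decode, with the transfer-type and speed encodings reduced to enums:

    #include <stdio.h>

    enum xfer  { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };
    enum speed { SPEED_FULL, SPEED_HIGH };

    /* Model of interval_show(): turn bInterval into microseconds. */
    static unsigned int ep_interval_us(enum xfer type, enum speed spd,
                                       int dir_in, unsigned int bInterval)
    {
        unsigned int interval = 0;

        switch (type) {
        case XFER_CONTROL:
            if (spd == SPEED_HIGH)              /* NAK rate on HS control */
                interval = bInterval;
            break;
        case XFER_ISOC:
            interval = 1 << (bInterval - 1);
            break;
        case XFER_BULK:
            if (spd == SPEED_HIGH && !dir_in)   /* NAK rate on HS bulk OUT */
                interval = bInterval;
            break;
        case XFER_INT:
            if (spd == SPEED_HIGH)
                interval = 1 << (bInterval - 1);
            else
                interval = bInterval;
            break;
        }
        return interval * (spd == SPEED_HIGH ? 125 : 1000);
    }

    int main(void)
    {
        /* HS interrupt endpoint, bInterval 4 -> 2^3 microframes = 1000 us. */
        printf("%u us\n", ep_interval_us(XFER_INT, SPEED_HIGH, 1, 4));
        return 0;
    }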
H A Dconfig.c47 int inum, int asnum, struct usb_host_endpoint *ep, usb_parse_ss_endpoint_companion()
60 " interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
62 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
71 ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE; usb_parse_ss_endpoint_companion()
72 ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; usb_parse_ss_endpoint_companion()
73 if (usb_endpoint_xfer_isoc(&ep->desc) || usb_parse_ss_endpoint_companion()
74 usb_endpoint_xfer_int(&ep->desc)) usb_parse_ss_endpoint_companion()
75 ep->ss_ep_comp.wBytesPerInterval = usb_parse_ss_endpoint_companion()
76 ep->desc.wMaxPacketSize; usb_parse_ss_endpoint_companion()
80 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); usb_parse_ss_endpoint_companion()
83 if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { usb_parse_ss_endpoint_companion()
85 "config %d interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
87 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
88 ep->ss_ep_comp.bMaxBurst = 0; usb_parse_ss_endpoint_companion()
91 "config %d interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
93 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
94 ep->ss_ep_comp.bMaxBurst = 15; usb_parse_ss_endpoint_companion()
97 if ((usb_endpoint_xfer_control(&ep->desc) || usb_parse_ss_endpoint_companion()
98 usb_endpoint_xfer_int(&ep->desc)) && usb_parse_ss_endpoint_companion()
101 "config %d interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
103 usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk", usb_parse_ss_endpoint_companion()
105 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
106 ep->ss_ep_comp.bmAttributes = 0; usb_parse_ss_endpoint_companion()
107 } else if (usb_endpoint_xfer_bulk(&ep->desc) && usb_parse_ss_endpoint_companion()
110 "config %d interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
112 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
113 ep->ss_ep_comp.bmAttributes = 16; usb_parse_ss_endpoint_companion()
114 } else if (usb_endpoint_xfer_isoc(&ep->desc) && usb_parse_ss_endpoint_companion()
117 "config %d interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
120 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
121 ep->ss_ep_comp.bmAttributes = 2; usb_parse_ss_endpoint_companion()
124 if (usb_endpoint_xfer_isoc(&ep->desc)) usb_parse_ss_endpoint_companion()
127 usb_endpoint_maxp(&ep->desc); usb_parse_ss_endpoint_companion()
128 else if (usb_endpoint_xfer_int(&ep->desc)) usb_parse_ss_endpoint_companion()
129 max_tx = usb_endpoint_maxp(&ep->desc) * usb_parse_ss_endpoint_companion()
135 "config %d interface %d altsetting %d ep %d: " usb_parse_ss_endpoint_companion()
137 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", usb_parse_ss_endpoint_companion()
139 cfgno, inum, asnum, ep->desc.bEndpointAddress, usb_parse_ss_endpoint_companion()
141 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); usb_parse_ss_endpoint_companion()
46 usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_endpoint *ep, unsigned char *buffer, int size) usb_parse_ss_endpoint_companion() argument
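When the SuperSpeed companion descriptor is missing, the parser above fabricates a conservative one: zero burst, zero extra attributes, and wBytesPerInterval copied from wMaxPacketSize for periodic endpoints. A small model of that fallback, with the struct reduced to the three fields the excerpt touches and endianness handling omitted:

    #include <stdio.h>
    #include <stdint.h>

    struct ss_ep_comp {          /* reduced usb_ss_ep_comp_descriptor */
        uint8_t  bMaxBurst;
        uint8_t  bmAttributes;
        uint16_t wBytesPerInterval;
    };

    /* Model of the no-companion fallback in usb_parse_ss_endpoint_companion():
     * periodic endpoints (isoc/int) get wBytesPerInterval = wMaxPacketSize,
     * everything else stays zero. */
    static void default_ss_companion(struct ss_ep_comp *comp,
                                     int is_periodic, uint16_t wMaxPacketSize)
    {
        comp->bMaxBurst = 0;
        comp->bmAttributes = 0;
        comp->wBytesPerInterval = is_periodic ? wMaxPacketSize : 0;
    }

    int main(void)
    {
        struct ss_ep_comp comp;

        default_ss_companion(&comp, 1, 1024);
        printf("burst=%u attrs=%u bytes/intvl=%u\n",
               comp.bMaxBurst, comp.bmAttributes, comp.wBytesPerInterval);
        return 0;
    }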
H A Ddevio.c364 int ep; snoop_urb() local
370 ep = usb_pipeendpoint(pipe); snoop_urb()
376 dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " snoop_urb()
378 userurb, ep, t, d, length); snoop_urb()
380 dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " snoop_urb()
382 userurb, ep, t, d, length, snoop_urb()
386 dev_info(&udev->dev, "ep%d %s-%s, length %u, " snoop_urb()
388 ep, t, d, length, timeout_or_status); snoop_urb()
390 dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, " snoop_urb()
392 ep, t, d, length, timeout_or_status); snoop_urb()
689 static int findintfep(struct usb_device *dev, unsigned int ep) findintfep() argument
696 if (ep & ~(USB_DIR_IN|0xf)) findintfep()
706 if (endpt->bEndpointAddress == ep) findintfep()
758 "%s: process %i (%s) requesting ep %02x but needs %02x\n", check_ctrlrecip()
774 unsigned char ep) ep_to_host_endpoint()
776 if (ep & USB_ENDPOINT_DIR_MASK) ep_to_host_endpoint()
777 return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK]; ep_to_host_endpoint()
779 return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK]; ep_to_host_endpoint()
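ep_to_host_endpoint() is the canonical decode of a USB endpoint address byte: bit 7 selects direction, the low four bits select the endpoint number, and each direction has its own 16-slot table. A self-contained model with the device struct reduced to the two tables:

    #include <stdio.h>

    #define USB_ENDPOINT_DIR_MASK    0x80
    #define USB_ENDPOINT_NUMBER_MASK 0x0f

    struct host_ep { int num; };     /* stand-in for usb_host_endpoint */

    struct usb_dev {                 /* stand-in for struct usb_device */
        struct host_ep *ep_in[16];
        struct host_ep *ep_out[16];
    };

    /* Model of ep_to_host_endpoint(): direction bit picks the table,
     * endpoint number picks the slot. */
    static struct host_ep *ep_lookup(struct usb_dev *dev, unsigned char ep)
    {
        if (ep & USB_ENDPOINT_DIR_MASK)
            return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK];
        return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK];
    }

    int main(void)
    {
        struct host_ep in1 = { 1 };
        struct usb_dev dev = { 0 };

        dev.ep_in[1] = &in1;
        printf("0x81 -> ep %d\n", ep_lookup(&dev, 0x81)->num);
        return 0;
    }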
792 unsigned char ep; parse_usbdevfs_streams() local
811 if (get_user(ep, &streams->eps[i])) { parse_usbdevfs_streams()
815 eps[i] = ep_to_host_endpoint(ps->dev, ep); parse_usbdevfs_streams()
822 ifnum = findintfep(ps->dev, ep); parse_usbdevfs_streams()
1067 ret = findintfep(ps->dev, bulk.ep); proc_bulk()
1073 if (bulk.ep & USB_DIR_IN) proc_bulk()
1074 pipe = usb_rcvbulkpipe(dev, bulk.ep & 0x7f); proc_bulk()
1076 pipe = usb_sndbulkpipe(dev, bulk.ep & 0x7f); proc_bulk()
1077 if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN))) proc_bulk()
1091 if (bulk.ep & 0x80) { proc_bulk()
1134 struct usb_host_endpoint *ep; check_reset_of_active_ep() local
1137 ep = eps[epnum & 0x0f]; check_reset_of_active_ep()
1138 if (ep && !list_empty(&ep->urb_list)) check_reset_of_active_ep()
1146 unsigned int ep; proc_resetep() local
1149 if (get_user(ep, (unsigned int __user *)arg)) proc_resetep()
1151 ret = findintfep(ps->dev, ep); proc_resetep()
1157 check_reset_of_active_ep(ps->dev, ep, "RESETEP"); proc_resetep()
1158 usb_reset_endpoint(ps->dev, ep); proc_resetep()
1164 unsigned int ep; proc_clearhalt() local
1168 if (get_user(ep, (unsigned int __user *)arg)) proc_clearhalt()
1170 ret = findintfep(ps->dev, ep); proc_clearhalt()
1176 check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT"); proc_clearhalt()
1177 if (ep & USB_DIR_IN) proc_clearhalt()
1178 pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f); proc_clearhalt()
1180 pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f); proc_clearhalt()
1291 struct usb_host_endpoint *ep; proc_do_submiturb() local
1318 ep = ep_to_host_endpoint(ps->dev, uurb->endpoint); proc_do_submiturb()
1319 if (!ep) proc_do_submiturb()
1326 if (!usb_endpoint_xfer_control(&ep->desc)) proc_do_submiturb()
1366 switch (usb_endpoint_type(&ep->desc)) { proc_do_submiturb()
1378 if (ep->streams) proc_do_submiturb()
1383 if (!usb_endpoint_xfer_int(&ep->desc)) proc_do_submiturb()
1393 if (!usb_endpoint_xfer_isoc(&ep->desc)) proc_do_submiturb()
1534 as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); proc_do_submiturb()
1536 as->urb->interval = ep->desc.bInterval; proc_do_submiturb()
1565 if (usb_endpoint_xfer_bulk(&ep->desc)) { proc_do_submiturb()
1572 as->bulk_addr = usb_endpoint_num(&ep->desc) | proc_do_submiturb()
1573 ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) proc_do_submiturb()
1667 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { processcompl()
1760 if (get_user(n, &p32->ep) || put_user(n, &p->ep) || proc_bulk_compat()
1836 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { processcompl_compat()
773 ep_to_host_endpoint(struct usb_device *dev, unsigned char ep) ep_to_host_endpoint() argument
H A Dhcd.c811 if (usb_endpoint_xfer_int(&urb->ep->desc)) rh_urb_enqueue()
813 if (usb_endpoint_xfer_control(&urb->ep->desc)) rh_urb_enqueue()
833 if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */ usb_rh_urb_dequeue()
1237 if (unlikely(!urb->ep->enabled)) { usb_hcd_link_urb_to_ep()
1253 list_add_tail(&urb->urb_list, &urb->ep->urb_list); usb_hcd_link_urb_to_ep()
1289 list_for_each(tmp, &urb->ep->urb_list) { usb_hcd_check_unlink_urb()
1492 if (usb_endpoint_xfer_control(&urb->ep->desc)) { usb_hcd_map_urb_for_dma()
1526 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { usb_hcd_map_urb_for_dma()
1765 bh->completing_ep = urb->ep; usb_giveback_urb_bh()
1838 struct usb_host_endpoint *ep) usb_hcd_flush_endpoint()
1843 if (!ep) usb_hcd_flush_endpoint()
1851 list_for_each_entry (urb, &ep->urb_list, urb_list) { usb_hcd_flush_endpoint()
1863 "shutdown urb %p ep%d%s%s\n", usb_hcd_flush_endpoint()
1864 urb, usb_endpoint_num(&ep->desc), usb_hcd_flush_endpoint()
1868 switch (usb_endpoint_type(&ep->desc)) { usb_hcd_flush_endpoint()
1889 while (!list_empty (&ep->urb_list)) { usb_hcd_flush_endpoint()
1894 if (!list_empty (&ep->urb_list)) { usb_hcd_flush_endpoint()
1895 urb = list_entry (ep->urb_list.prev, struct urb, usb_hcd_flush_endpoint()
1938 struct usb_host_endpoint *ep; usb_hcd_alloc_bandwidth() local
1947 ep = udev->ep_out[i]; usb_hcd_alloc_bandwidth()
1948 if (ep) usb_hcd_alloc_bandwidth()
1949 hcd->driver->drop_endpoint(hcd, udev, ep); usb_hcd_alloc_bandwidth()
1950 ep = udev->ep_in[i]; usb_hcd_alloc_bandwidth()
1951 if (ep) usb_hcd_alloc_bandwidth()
1952 hcd->driver->drop_endpoint(hcd, udev, ep); usb_hcd_alloc_bandwidth()
1968 ep = udev->ep_out[i]; usb_hcd_alloc_bandwidth()
1969 if (ep) { usb_hcd_alloc_bandwidth()
1970 ret = hcd->driver->drop_endpoint(hcd, udev, ep); usb_hcd_alloc_bandwidth()
1974 ep = udev->ep_in[i]; usb_hcd_alloc_bandwidth()
1975 if (ep) { usb_hcd_alloc_bandwidth()
1976 ret = hcd->driver->drop_endpoint(hcd, udev, ep); usb_hcd_alloc_bandwidth()
2048 * example: a qh stored in ep->hcpriv, holding state related to endpoint
2052 struct usb_host_endpoint *ep) usb_hcd_disable_endpoint()
2059 hcd->driver->endpoint_disable(hcd, ep); usb_hcd_disable_endpoint()
2065 * @ep: the endpoint to reset.
2071 struct usb_host_endpoint *ep) usb_hcd_reset_endpoint()
2076 hcd->driver->endpoint_reset(hcd, ep); usb_hcd_reset_endpoint()
2078 int epnum = usb_endpoint_num(&ep->desc); usb_hcd_reset_endpoint()
2079 int is_out = usb_endpoint_dir_out(&ep->desc); usb_hcd_reset_endpoint()
2080 int is_control = usb_endpoint_xfer_control(&ep->desc); usb_hcd_reset_endpoint()
1837 usb_hcd_flush_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep) usb_hcd_flush_endpoint() argument
2051 usb_hcd_disable_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep) usb_hcd_disable_endpoint() argument
2070 usb_hcd_reset_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep) usb_hcd_reset_endpoint() argument
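usb_hcd_alloc_bandwidth() starts its deconfigure path by asking the HCD driver to drop every non-zero endpoint in both directions. A minimal model of that sweep over the two endpoint tables, with the drop callback reduced to a counter:

    #include <stdio.h>

    struct host_ep { int num; };

    struct usb_dev {
        struct host_ep *ep_in[16];
        struct host_ep *ep_out[16];
    };

    static int dropped;

    static void drop_endpoint(struct host_ep *ep)
    {
        (void)ep;
        dropped++;          /* stands in for hcd->driver->drop_endpoint() */
    }

    /* Model of the deconfigure sweep: endpoint 0 is never dropped. */
    static void drop_all(struct usb_dev *dev)
    {
        int i;

        for (i = 1; i < 16; i++) {
            if (dev->ep_out[i])
                drop_endpoint(dev->ep_out[i]);
            if (dev->ep_in[i])
                drop_endpoint(dev->ep_in[i]);
        }
    }

    int main(void)
    {
        struct host_ep a = { 1 }, b = { 2 };
        struct usb_dev dev = { 0 };

        dev.ep_in[1] = &a;
        dev.ep_out[2] = &b;
        drop_all(&dev);
        printf("dropped %d endpoints\n", dropped);
        return 0;
    }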
/linux-4.4.14/drivers/net/ethernet/freescale/fs_enet/
H A Dmac-fcc.c100 fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1); do_pd_setup()
101 if (!fep->fcc.ep) do_pd_setup()
120 iounmap(fep->fcc.ep); do_pd_setup()
191 fcc_enet_t __iomem *ep = fep->fcc.ep; set_multicast_start() local
193 W32(ep, fen_gaddrh, 0); set_multicast_start()
194 W32(ep, fen_gaddrl, 0); set_multicast_start()
200 fcc_enet_t __iomem *ep = fep->fcc.ep; set_multicast_one() local
207 W16(ep, fen_taddrh, taddrh); set_multicast_one()
208 W16(ep, fen_taddrm, taddrm); set_multicast_one()
209 W16(ep, fen_taddrl, taddrl); set_multicast_one()
217 fcc_enet_t __iomem *ep = fep->fcc.ep; set_multicast_finish() local
226 W32(ep, fen_gaddrh, 0xffffffff); set_multicast_finish()
227 W32(ep, fen_gaddrl, 0xffffffff); set_multicast_finish()
231 fep->fcc.gaddrh = R32(ep, fen_gaddrh); set_multicast_finish()
232 fep->fcc.gaddrl = R32(ep, fen_gaddrl); set_multicast_finish()
254 fcc_enet_t __iomem *ep = fep->fcc.ep; restart() local
263 for (i = 0; i < sizeof(*ep); i++) restart()
264 out_8((u8 __iomem *)ep + i, 0); restart()
271 W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys); restart()
272 W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys); restart()
277 W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE); restart()
279 W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24); restart()
280 W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24); restart()
288 W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset); restart()
289 W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32); restart()
291 W16(ep, fen_padptr, fpi->dpram_offset + 64); restart()
296 W32(ep, fen_genfcc.fcc_rbptr, 0); restart()
297 W32(ep, fen_genfcc.fcc_tbptr, 0); restart()
298 W32(ep, fen_genfcc.fcc_rcrc, 0); restart()
299 W32(ep, fen_genfcc.fcc_tcrc, 0); restart()
300 W16(ep, fen_genfcc.fcc_res1, 0); restart()
301 W32(ep, fen_genfcc.fcc_res2, 0); restart()
304 W32(ep, fen_camptr, 0); restart()
307 W32(ep, fen_cmask, 0xdebb20e3); restart()
308 W32(ep, fen_cpres, 0xffffffff); restart()
310 W32(ep, fen_crcec, 0); /* CRC Error counter */ restart()
311 W32(ep, fen_alec, 0); /* alignment error counter */ restart()
312 W32(ep, fen_disfc, 0); /* discard frame counter */ restart()
313 W16(ep, fen_retlim, 15); /* Retry limit threshold */ restart()
314 W16(ep, fen_pper, 0); /* Normal persistence */ restart()
317 W32(ep, fen_gaddrh, fep->fcc.gaddrh); restart()
318 W32(ep, fen_gaddrl, fep->fcc.gaddrl); restart()
321 W32(ep, fen_iaddrh, 0); restart()
322 W32(ep, fen_iaddrl, 0); restart()
325 W16(ep, fen_tfcstat, 0); restart()
326 W16(ep, fen_tfclen, 0); restart()
327 W32(ep, fen_tfcptr, 0); restart()
329 W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */ restart()
330 W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ restart()
338 W16(ep, fen_paddrh, paddrh); restart()
339 W16(ep, fen_paddrm, paddrm); restart()
340 W16(ep, fen_paddrl, paddrl); restart()
342 W16(ep, fen_taddrh, 0); restart()
343 W16(ep, fen_taddrm, 0); restart()
344 W16(ep, fen_taddrl, 0); restart()
346 W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */ restart()
347 W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */ restart()
350 W32(ep, fen_octc, 0); restart()
351 W32(ep, fen_colc, 0); restart()
352 W32(ep, fen_broc, 0); restart()
353 W32(ep, fen_mulc, 0); restart()
354 W32(ep, fen_uspc, 0); restart()
355 W32(ep, fen_frgc, 0); restart()
356 W32(ep, fen_ospc, 0); restart()
357 W32(ep, fen_jbrc, 0); restart()
358 W32(ep, fen_p64c, 0); restart()
359 W32(ep, fen_p65c, 0); restart()
360 W32(ep, fen_p128c, 0); restart()
361 W32(ep, fen_p256c, 0); restart()
362 W32(ep, fen_p512c, 0); restart()
363 W32(ep, fen_p1024c, 0); restart()
365 W16(ep, fen_rfthr, 0); /* Suggested by manual */ restart()
366 W16(ep, fen_rfcnt, 0); restart()
367 W16(ep, fen_cftype, 0); restart()
521 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); get_regs()
549 fcc_enet_t __iomem *ep = fep->fcc.ep; tx_restart() local
559 ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + tx_restart()
578 W32(ep, fen_genfcc.fcc_tbptr, tx_restart()
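restart() above clears the whole FCC parameter RAM block one byte at a time with out_8() rather than memset(), since ordinary string operations are not guaranteed to be safe on __iomem mappings. A host-side model of the same pattern, using a volatile byte pointer in place of the MMIO accessor (the struct layout is a made-up stand-in):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct param_ram {          /* stand-in for fcc_enet_t */
        uint32_t gaddrh, gaddrl;
        uint16_t retlim, pper;
    };

    /* Model of the byte-wise clear: write each byte through a volatile
     * pointer, as the driver does with out_8() on the __iomem block. */
    static void clear_param_ram(volatile struct param_ram *ep)
    {
        volatile uint8_t *p = (volatile uint8_t *)ep;
        size_t i;

        for (i = 0; i < sizeof(*ep); i++)
            p[i] = 0;
    }

    int main(void)
    {
        struct param_ram pr = { 0xffffffff, 0xffffffff, 15, 1 };

        clear_param_ram(&pr);
        printf("gaddrh=%u retlim=%u\n", (unsigned)pr.gaddrh, pr.retlim);
        return 0;
    }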
H A Dmac-scc.c109 fep->scc.ep = of_iomap(ofdev->dev.of_node, 1); do_pd_setup()
110 if (!fep->scc.ep) { do_pd_setup()
182 scc_enet_t __iomem *ep = fep->scc.ep; set_multicast_start() local
184 W16(ep, sen_gaddr1, 0); set_multicast_start()
185 W16(ep, sen_gaddr2, 0); set_multicast_start()
186 W16(ep, sen_gaddr3, 0); set_multicast_start()
187 W16(ep, sen_gaddr4, 0); set_multicast_start()
193 scc_enet_t __iomem *ep = fep->scc.ep; set_multicast_one() local
200 W16(ep, sen_taddrh, taddrh); set_multicast_one()
201 W16(ep, sen_taddrm, taddrm); set_multicast_one()
202 W16(ep, sen_taddrl, taddrl); set_multicast_one()
210 scc_enet_t __iomem *ep = fep->scc.ep; set_multicast_finish() local
219 W16(ep, sen_gaddr1, 0xffff); set_multicast_finish()
220 W16(ep, sen_gaddr2, 0xffff); set_multicast_finish()
221 W16(ep, sen_gaddr3, 0xffff); set_multicast_finish()
222 W16(ep, sen_gaddr4, 0xffff); set_multicast_finish()
248 scc_enet_t __iomem *ep = fep->scc.ep; restart() local
257 for (i = 0; i < sizeof(*ep); i++) restart()
258 __fs_out8((u8 __iomem *)ep + i, 0); restart()
261 W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); restart()
262 W16(ep, sen_genscc.scc_tbase, restart()
268 W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL); restart()
269 W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL); restart()
271 W8(ep, sen_genscc.scc_rfcr, SCC_EB); restart()
272 W8(ep, sen_genscc.scc_tfcr, SCC_EB); restart()
279 W16(ep, sen_genscc.scc_mrblr, 0x5f0); restart()
283 W32(ep, sen_cpres, 0xffffffff); restart()
284 W32(ep, sen_cmask, 0xdebb20e3); restart()
286 W32(ep, sen_crcec, 0); /* CRC Error counter */ restart()
287 W32(ep, sen_alec, 0); /* alignment error counter */ restart()
288 W32(ep, sen_disfc, 0); /* discard frame counter */ restart()
290 W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */ restart()
291 W16(ep, sen_retlim, 15); /* Retry limit threshold */ restart()
293 W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */ restart()
295 W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ restart()
297 W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */ restart()
298 W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */ restart()
302 W16(ep, sen_gaddr1, 0); restart()
303 W16(ep, sen_gaddr2, 0); restart()
304 W16(ep, sen_gaddr3, 0); restart()
305 W16(ep, sen_gaddr4, 0); restart()
306 W16(ep, sen_iaddr1, 0); restart()
307 W16(ep, sen_iaddr2, 0); restart()
308 W16(ep, sen_iaddr3, 0); restart()
309 W16(ep, sen_iaddr4, 0); restart()
318 W16(ep, sen_paddrh, paddrh); restart()
319 W16(ep, sen_paddrm, paddrm); restart()
320 W16(ep, sen_paddrl, paddrl); restart()
322 W16(ep, sen_pper, 0); restart()
323 W16(ep, sen_taddrl, 0); restart()
324 W16(ep, sen_taddrm, 0); restart()
325 W16(ep, sen_taddrh, 0); restart()
473 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t)); get_regs()
/linux-4.4.14/arch/arm/mach-pxa/include/mach/
H A Dpxa25x-udc.h121 #define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
122 #define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
123 #define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
124 #define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
125 #define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
126 #define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
127 #define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
128 #define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
132 #define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
133 #define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
134 #define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
135 #define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
136 #define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
137 #define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
138 #define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
139 #define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
143 #define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
144 #define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
145 #define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
146 #define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
147 #define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
148 #define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
149 #define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
150 #define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
154 #define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
155 #define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
156 #define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
157 #define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
158 #define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
159 #define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
160 #define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
161 #define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
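The sixteen endpoint interrupts are split across two 8-bit mask registers (UICR0/UICR1) and two status registers (USIR0/USIR1), one bit per endpoint. The register/bit arithmetic for endpoint n, as a standalone sketch with the register pair modelled as a two-byte array:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the UICR0/UICR1 (or USIR0/USIR1) split: endpoint n lives
     * in register n >> 3, bit n & 7. */
    static void ep_irq_mask(uint8_t uicr[2], unsigned int ep, int masked)
    {
        uint8_t bit = 1u << (ep & 7);

        if (masked)
            uicr[ep >> 3] |= bit;
        else
            uicr[ep >> 3] &= (uint8_t)~bit;
    }

    static int ep_irq_pending(const uint8_t usir[2], unsigned int ep)
    {
        return (usir[ep >> 3] >> (ep & 7)) & 1;
    }

    int main(void)
    {
        uint8_t uicr[2] = { 0, 0 };
        uint8_t usir[2] = { 0x00, 0x40 };   /* bit 6 of USIR1: ep 14 */

        ep_irq_mask(uicr, 11, 1);           /* mask ep 11 -> UICR1_IM11 */
        printf("UICR1=0x%02x ep14 pending=%d\n",
               uicr[1], ep_irq_pending(usir, 14));
        return 0;
    }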
/linux-4.4.14/drivers/net/wireless/ath/ath10k/
H A Dhtc.c60 static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, ath10k_htc_notify_tx_completion() argument
63 struct ath10k *ar = ep->htc->ar; ath10k_htc_notify_tx_completion()
65 ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, ath10k_htc_notify_tx_completion()
66 ep->eid, skb); ath10k_htc_notify_tx_completion()
68 ath10k_htc_restore_tx_skb(ep->htc, skb); ath10k_htc_notify_tx_completion()
70 if (!ep->ep_ops.ep_tx_complete) { ath10k_htc_notify_tx_completion()
71 ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid); ath10k_htc_notify_tx_completion()
76 ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); ath10k_htc_notify_tx_completion()
79 static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, ath10k_htc_prepare_tx_skb() argument
86 hdr->eid = ep->eid; ath10k_htc_prepare_tx_skb()
91 spin_lock_bh(&ep->htc->tx_lock); ath10k_htc_prepare_tx_skb()
92 hdr->seq_no = ep->seq_no++; ath10k_htc_prepare_tx_skb()
93 spin_unlock_bh(&ep->htc->tx_lock); ath10k_htc_prepare_tx_skb()
101 struct ath10k_htc_ep *ep = &htc->endpoint[eid]; ath10k_htc_send() local
118 if (ep->tx_credit_flow_enabled) { ath10k_htc_send()
121 if (ep->tx_credits < credits) { ath10k_htc_send()
126 ep->tx_credits -= credits; ath10k_htc_send()
128 "htc ep %d consumed %d credits (total %d)\n", ath10k_htc_send()
129 eid, credits, ep->tx_credits); ath10k_htc_send()
133 ath10k_htc_prepare_tx_skb(ep, skb); ath10k_htc_send()
143 sg_item.transfer_id = ep->eid; ath10k_htc_send()
149 ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1); ath10k_htc_send()
158 if (ep->tx_credit_flow_enabled) { ath10k_htc_send()
160 ep->tx_credits += credits; ath10k_htc_send()
162 "htc ep %d reverted %d credits back (total %d)\n", ath10k_htc_send()
163 eid, credits, ep->tx_credits); ath10k_htc_send()
166 if (ep->ep_ops.ep_tx_credits) ath10k_htc_send()
167 ep->ep_ops.ep_tx_credits(htc->ar); ath10k_htc_send()
178 struct ath10k_htc_ep *ep; ath10k_htc_tx_completion_handler() local
184 ep = &htc->endpoint[skb_cb->eid]; ath10k_htc_tx_completion_handler()
186 ath10k_htc_notify_tx_completion(ep, skb); ath10k_htc_tx_completion_handler()
202 struct ath10k_htc_ep *ep; ath10k_htc_process_credit_report() local
215 ep = &htc->endpoint[report->eid]; ath10k_htc_process_credit_report()
216 ep->tx_credits += report->credits; ath10k_htc_process_credit_report()
218 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", ath10k_htc_process_credit_report()
219 report->eid, report->credits, ep->tx_credits); ath10k_htc_process_credit_report()
221 if (ep->ep_ops.ep_tx_credits) { ath10k_htc_process_credit_report()
223 ep->ep_ops.ep_tx_credits(htc->ar); ath10k_htc_process_credit_report()
300 struct ath10k_htc_ep *ep; ath10k_htc_rx_completion_handler() local
319 ep = &htc->endpoint[eid]; ath10k_htc_rx_completion_handler()
381 * sending unsolicited messages on the ep 0 ath10k_htc_rx_completion_handler()
407 ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", ath10k_htc_rx_completion_handler()
409 ep->ep_ops.ep_rx_complete(ar, skb); ath10k_htc_rx_completion_handler()
463 struct ath10k_htc_ep *ep; ath10k_htc_reset_endpoint_states() local
467 ep = &htc->endpoint[i]; ath10k_htc_reset_endpoint_states()
468 ep->service_id = ATH10K_HTC_SVC_ID_UNUSED; ath10k_htc_reset_endpoint_states()
469 ep->max_ep_message_len = 0; ath10k_htc_reset_endpoint_states()
470 ep->max_tx_queue_depth = 0; ath10k_htc_reset_endpoint_states()
471 ep->eid = i; ath10k_htc_reset_endpoint_states()
472 ep->htc = htc; ath10k_htc_reset_endpoint_states()
473 ep->tx_credit_flow_enabled = true; ath10k_htc_reset_endpoint_states()
608 struct ath10k_htc_ep *ep; ath10k_htc_connect_service() local
689 "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n", ath10k_htc_connect_service()
714 ep = &htc->endpoint[assigned_eid]; ath10k_htc_connect_service()
715 ep->eid = assigned_eid; ath10k_htc_connect_service()
717 if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED) ath10k_htc_connect_service()
725 ep->service_id = conn_req->service_id; ath10k_htc_connect_service()
726 ep->max_tx_queue_depth = conn_req->max_send_queue_depth; ath10k_htc_connect_service()
727 ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size); ath10k_htc_connect_service()
728 ep->tx_credits = tx_alloc; ath10k_htc_connect_service()
729 ep->tx_credit_size = htc->target_credit_size; ath10k_htc_connect_service()
730 ep->tx_credits_per_max_message = ep->max_ep_message_len / ath10k_htc_connect_service()
733 if (ep->max_ep_message_len % htc->target_credit_size) ath10k_htc_connect_service()
734 ep->tx_credits_per_max_message++; ath10k_htc_connect_service()
737 ep->ep_ops = conn_req->ep_ops; ath10k_htc_connect_service()
740 ep->service_id, ath10k_htc_connect_service()
741 &ep->ul_pipe_id, ath10k_htc_connect_service()
742 &ep->dl_pipe_id); ath10k_htc_connect_service()
748 htc_service_name(ep->service_id), ep->ul_pipe_id, ath10k_htc_connect_service()
749 ep->dl_pipe_id, ep->eid); ath10k_htc_connect_service()
751 if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ath10k_htc_connect_service()
752 ep->tx_credit_flow_enabled = false; ath10k_htc_connect_service()
755 htc_service_name(ep->service_id), assigned_eid); ath10k_htc_connect_service()
810 struct ath10k_htc_ep *ep = NULL; ath10k_htc_init() local
820 ep = &htc->endpoint[ATH10K_HTC_EP_0]; ath10k_htc_init()
822 ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id); ath10k_htc_init()
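ath10k's HTC layer charges each send DIV_ROUND_UP(len, credit_size) credits up front and refunds them if the HIF submit fails; connect time also precomputes how many credits a maximum-sized message costs. A standalone model of both computations; names follow the excerpt, while the queue, locking, and demo values are illustrative:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct htc_ep {
        int tx_credits;
        int tx_credit_size;
        int tx_credits_per_max_message;
    };

    /* Model of the send-side charge: fail with -1 if not enough credits. */
    static int charge_credits(struct htc_ep *ep, int skb_len)
    {
        int credits = DIV_ROUND_UP(skb_len, ep->tx_credit_size);

        if (ep->tx_credits < credits)
            return -1;
        ep->tx_credits -= credits;
        return credits;             /* caller refunds this on HIF failure */
    }

    int main(void)
    {
        struct htc_ep ep = { .tx_credits = 4, .tx_credit_size = 1792 };
        int max_msg_len = 4096;
        int charged;

        /* Connect-time precomputation from ath10k_htc_connect_service(). */
        ep.tx_credits_per_max_message = max_msg_len / ep.tx_credit_size;
        if (max_msg_len % ep.tx_credit_size)
            ep.tx_credits_per_max_message++;

        charged = charge_credits(&ep, 2000);
        printf("max msg costs %d credits; charged %d, %d left\n",
               ep.tx_credits_per_max_message, charged, ep.tx_credits);
        return 0;
    }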
/linux-4.4.14/drivers/net/wireless/ath/ath6kl/
H A Dhtc_pipe.c37 static void do_send_completion(struct htc_endpoint *ep, do_send_completion() argument
47 if (ep->ep_cb.tx_comp_multi != NULL) { do_send_completion()
49 "%s: calling ep %d, send complete multiple callback (%d pkts)\n", do_send_completion()
50 __func__, ep->eid, do_send_completion()
56 ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate); do_send_completion()
70 "%s: calling ep %d send complete callback on packet 0x%p\n", do_send_completion()
71 __func__, ep->eid, packet); do_send_completion()
72 ep->ep_cb.tx_complete(ep->target, packet); do_send_completion()
80 struct htc_endpoint *ep = &target->endpoint[packet->endpoint]; send_packet_completion() local
88 do_send_completion(ep, &container); send_packet_completion()
92 struct htc_endpoint *ep, get_htc_packet_credit_based()
106 if (list_empty(&ep->txq)) get_htc_packet_credit_based()
110 packet = list_first_entry(&ep->txq, struct htc_packet, list); get_htc_packet_credit_based()
114 __func__, packet, get_queue_depth(&ep->txq)); get_htc_packet_credit_based()
130 __func__, credits_required, ep->cred_dist.credits); get_htc_packet_credit_based()
132 if (ep->eid == ENDPOINT_0) { get_htc_packet_credit_based()
140 if (ep->cred_dist.credits < credits_required) get_htc_packet_credit_based()
143 ep->cred_dist.credits -= credits_required; get_htc_packet_credit_based()
144 ep->ep_st.cred_cosumd += credits_required; get_htc_packet_credit_based()
147 if (ep->cred_dist.credits < get_htc_packet_credit_based()
148 ep->cred_dist.cred_per_msg) { get_htc_packet_credit_based()
151 ep->ep_st.cred_low_indicate += 1; get_htc_packet_credit_based()
159 packet = list_first_entry(&ep->txq, struct htc_packet, list); get_htc_packet_credit_based()
166 packet->info.tx.seqno = ep->seqno; get_htc_packet_credit_based()
167 ep->seqno++; get_htc_packet_credit_based()
174 struct htc_endpoint *ep, get_htc_packet()
183 if (list_empty(&ep->txq)) get_htc_packet()
186 packet = list_first_entry(&ep->txq, struct htc_packet, list); get_htc_packet()
191 __func__, packet, get_queue_depth(&ep->txq)); get_htc_packet()
192 packet->info.tx.seqno = ep->seqno; get_htc_packet()
195 ep->seqno++; get_htc_packet()
204 struct htc_endpoint *ep, htc_issue_packets()
251 list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue); htc_issue_packets()
252 ep->ep_st.tx_issued += 1; htc_issue_packets()
256 ep->pipe.pipeid_ul, NULL, skb); htc_issue_packets()
273 ep->cred_dist.credits += packet->info.tx.cred_used; htc_issue_packets()
302 struct htc_endpoint *ep, htc_try_send()
331 txqueue_depth = get_queue_depth(&ep->txq); htc_try_send()
334 if (txqueue_depth >= ep->max_txq_depth) { htc_try_send()
342 overflow -= ep->max_txq_depth; htc_try_send()
349 __func__, ep->eid, overflow, txqueue_depth, htc_try_send()
350 ep->max_txq_depth); htc_try_send()
353 (ep->ep_cb.tx_full == NULL)) { htc_try_send()
388 action = ep->ep_cb.tx_full(ep->target, packet); list_for_each_entry_safe()
391 ep->ep_st.tx_dropped += 1; list_for_each_entry_safe()
411 if (!ep->pipe.tx_credit_flow_enabled) {
414 ep->pipe.pipeid_ul);
422 list_splice_tail_init(&send_queue, &ep->txq);
432 ep->tx_proc_cnt++;
434 if (ep->tx_proc_cnt > 1) {
440 ep->tx_proc_cnt--;
452 if (get_queue_depth(&ep->txq) == 0)
455 if (ep->pipe.tx_credit_flow_enabled) {
463 get_htc_packet_credit_based(target, ep, &send_queue);
469 get_htc_packet(target, ep, &send_queue, tx_resources);
483 htc_issue_packets(target, ep, &send_queue);
485 if (!ep->pipe.tx_credit_flow_enabled) {
486 pipeid = ep->pipe.pipeid_ul;
495 ep->tx_proc_cnt = 0;
661 struct htc_endpoint *ep; htc_process_credit_report() local
673 ep = &target->endpoint[rpt->eid]; htc_process_credit_report()
674 ep->cred_dist.credits += rpt->credits; htc_process_credit_report()
676 if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) { htc_process_credit_report()
678 htc_try_send(target, ep, NULL); htc_process_credit_report()
693 struct htc_endpoint *ep, u16 tag) htc_flush_tx_endpoint()
698 while (get_queue_depth(&ep->txq)) { htc_flush_tx_endpoint()
699 packet = list_first_entry(&ep->txq, struct htc_packet, list); htc_flush_tx_endpoint()
715 struct htc_endpoint *ep, htc_lookup_tx_packet()
727 list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue, htc_lookup_tx_packet()
747 struct htc_endpoint *ep; ath6kl_htc_pipe_tx_complete() local
758 ep = &target->endpoint[ep_id]; ath6kl_htc_pipe_tx_complete()
760 packet = htc_lookup_tx_packet(target, ep, skb); ath6kl_htc_pipe_tx_complete()
771 if (!ep->pipe.tx_credit_flow_enabled) { ath6kl_htc_pipe_tx_complete()
777 htc_try_send(target, ep, NULL); ath6kl_htc_pipe_tx_complete()
786 struct htc_endpoint *ep; htc_send_packets_multiple() local
792 /* get first packet to find out which ep the packets will go into */ htc_send_packets_multiple()
799 ep = &target->endpoint[packet->endpoint]; htc_send_packets_multiple()
801 htc_try_send(target, ep, pkt_queue); htc_send_packets_multiple()
809 do_send_completion(ep, pkt_queue);
919 static void do_recv_completion(struct htc_endpoint *ep, do_recv_completion() argument
934 ep->ep_cb.rx(ep->target, packet); do_recv_completion()
941 struct htc_endpoint *ep, recv_packet_completion()
949 do_recv_completion(ep, &container); recv_packet_completion()
960 struct htc_endpoint *ep; ath6kl_htc_pipe_rx_complete() local
992 ep = &target->endpoint[htc_hdr->eid]; ath6kl_htc_pipe_rx_complete()
1093 recv_packet_completion(target, ep, packet); ath6kl_htc_pipe_rx_complete()
1106 struct htc_endpoint *ep) htc_flush_rx_queue()
1114 if (list_empty(&ep->rx_bufq)) htc_flush_rx_queue()
1117 packet = list_first_entry(&ep->rx_bufq, htc_flush_rx_queue()
1126 "Flushing RX packet:0x%p, length:%d, ep:%d\n", htc_flush_rx_queue()
1134 do_recv_completion(ep, &container); htc_flush_rx_queue()
1184 struct htc_endpoint *ep; reset_endpoint_states() local
1188 ep = &target->endpoint[i]; reset_endpoint_states()
1189 ep->svc_id = 0; reset_endpoint_states()
1190 ep->len_max = 0; reset_endpoint_states()
1191 ep->max_txq_depth = 0; reset_endpoint_states()
1192 ep->eid = i; reset_endpoint_states()
1193 INIT_LIST_HEAD(&ep->txq); reset_endpoint_states()
1194 INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue); reset_endpoint_states()
1195 INIT_LIST_HEAD(&ep->rx_bufq); reset_endpoint_states()
1196 ep->target = target; reset_endpoint_states()
1197 ep->pipe.tx_credit_flow_enabled = true; reset_endpoint_states()
1239 struct htc_endpoint *ep; ath6kl_htc_pipe_conn_service() local
1292 /* tell target desired recv alloc for this ep */ ath6kl_htc_pipe_conn_service()
1332 "%s: service 0x%X conn resp: status: %d ep: %d\n", ath6kl_htc_pipe_conn_service()
1363 ep = &target->endpoint[assigned_epid]; ath6kl_htc_pipe_conn_service()
1364 ep->eid = assigned_epid; ath6kl_htc_pipe_conn_service()
1365 if (ep->svc_id != 0) { ath6kl_htc_pipe_conn_service()
1376 ep->svc_id = conn_req->svc_id; /* this marks ep in use */ ath6kl_htc_pipe_conn_service()
1377 ep->max_txq_depth = conn_req->max_txq_depth; ath6kl_htc_pipe_conn_service()
1378 ep->len_max = max_msg_size; ath6kl_htc_pipe_conn_service()
1379 ep->cred_dist.credits = tx_alloc; ath6kl_htc_pipe_conn_service()
1380 ep->cred_dist.cred_sz = target->tgt_cred_sz; ath6kl_htc_pipe_conn_service()
1381 ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz; ath6kl_htc_pipe_conn_service()
1383 ep->cred_dist.cred_per_msg++; ath6kl_htc_pipe_conn_service()
1386 ep->ep_cb = conn_req->ep_cb; ath6kl_htc_pipe_conn_service()
1389 ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM; ath6kl_htc_pipe_conn_service()
1391 status = ath6kl_hif_pipe_map_service(ar, ep->svc_id, ath6kl_htc_pipe_conn_service()
1392 &ep->pipe.pipeid_ul, ath6kl_htc_pipe_conn_service()
1393 &ep->pipe.pipeid_dl); ath6kl_htc_pipe_conn_service()
1399 ep->svc_id, ep->pipe.pipeid_ul, ath6kl_htc_pipe_conn_service()
1400 ep->pipe.pipeid_dl, ep->eid); ath6kl_htc_pipe_conn_service()
1402 if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) { ath6kl_htc_pipe_conn_service()
1403 ep->pipe.tx_credit_flow_enabled = false; ath6kl_htc_pipe_conn_service()
1405 "SVC: 0x%4.4X ep:%d TX flow control off\n", ath6kl_htc_pipe_conn_service()
1406 ep->svc_id, assigned_epid); ath6kl_htc_pipe_conn_service()
1419 struct htc_endpoint *ep = NULL; ath6kl_htc_pipe_create() local
1454 ep = &target->endpoint[ENDPOINT_0]; ath6kl_htc_pipe_create()
1456 ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul, ath6kl_htc_pipe_create()
1457 &ep->pipe.pipeid_dl); ath6kl_htc_pipe_create()
1526 struct htc_endpoint *ep; ath6kl_htc_pipe_stop() local
1530 ep = &target->endpoint[i]; ath6kl_htc_pipe_stop()
1531 htc_flush_rx_queue(target, ep); ath6kl_htc_pipe_stop()
1532 htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL); ath6kl_htc_pipe_stop()
1623 struct htc_endpoint *ep = &target->endpoint[endpoint]; ath6kl_htc_pipe_flush_txep() local
1625 if (ep->svc_id == 0) { ath6kl_htc_pipe_flush_txep()
1631 htc_flush_tx_endpoint(target, ep, tag); ath6kl_htc_pipe_flush_txep()
1638 struct htc_endpoint *ep; ath6kl_htc_pipe_add_rxbuf_multiple() local
1655 ep = &target->endpoint[first->endpoint]; ath6kl_htc_pipe_add_rxbuf_multiple()
1660 list_splice_tail_init(pkt_queue, &ep->rx_bufq); ath6kl_htc_pipe_add_rxbuf_multiple()
1670 do_recv_completion(ep, pkt_queue);
1677 enum htc_endpoint_id ep, ath6kl_htc_pipe_activity_changed()
1699 "htc rx flush pkt 0x%p len %d ep %d\n", ath6kl_htc_pipe_flush_rx_buf()
91 get_htc_packet_credit_based(struct htc_target *target, struct htc_endpoint *ep, struct list_head *queue) get_htc_packet_credit_based() argument
173 get_htc_packet(struct htc_target *target, struct htc_endpoint *ep, struct list_head *queue, int resources) get_htc_packet() argument
203 htc_issue_packets(struct htc_target *target, struct htc_endpoint *ep, struct list_head *pkt_queue) htc_issue_packets() argument
301 htc_try_send(struct htc_target *target, struct htc_endpoint *ep, struct list_head *txq) htc_try_send() argument
692 htc_flush_tx_endpoint(struct htc_target *target, struct htc_endpoint *ep, u16 tag) htc_flush_tx_endpoint() argument
714 htc_lookup_tx_packet(struct htc_target *target, struct htc_endpoint *ep, struct sk_buff *skb) htc_lookup_tx_packet() argument
940 recv_packet_completion(struct htc_target *target, struct htc_endpoint *ep, struct htc_packet *packet) recv_packet_completion() argument
1105 htc_flush_rx_queue(struct htc_target *target, struct htc_endpoint *ep) htc_flush_rx_queue() argument
1676 ath6kl_htc_pipe_activity_changed(struct htc_target *target, enum htc_endpoint_id ep, bool active) ath6kl_htc_pipe_activity_changed() argument
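Aside: get_htc_packet_credit_based() above dequeues a packet only when the endpoint still holds enough credits, and otherwise stalls until a credit report replenishes the pool. A hedged standalone sketch of that gating pattern (struct pkt and its field names are illustrative):

    #include <stdbool.h>

    struct pkt { int credits_required; bool queued; };

    /* Move one packet from "queued" to "owned by caller" only if the
     * credit pool can pay for it; otherwise leave it queued. */
    static bool try_take_packet(int *credits, struct pkt *p)
    {
        if (!p->queued)
            return false;                  /* nothing waiting */
        if (*credits < p->credits_required)
            return false;                  /* stall until credits return */
        *credits -= p->credits_required;
        p->queued = false;
        return true;
    }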
htc_mbox.c 44 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n", ath6kl_credit_deposit()
104 * it uses list_for_each_entry_reverse to walk the whole ep list.
135 "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n", list_for_each_entry()
165 /* reduce an ep's credits back to a set limit */ ath6kl_credit_reduce()
172 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n", ath6kl_credit_reduce()
418 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n", htc_tx_comp_update()
445 "htc tx complete ep %d pkts %d\n", htc_tx_complete()
547 struct htc_endpoint *ep, u8 *flags, htc_check_credits()
555 *req_cred, ep->cred_dist.credits); htc_check_credits()
557 if (ep->cred_dist.credits < *req_cred) { htc_check_credits()
562 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; htc_check_credits()
564 ath6kl_credit_seek(target->credit_info, &ep->cred_dist); htc_check_credits()
566 ep->cred_dist.seek_cred = 0; htc_check_credits()
568 if (ep->cred_dist.credits < *req_cred) { htc_check_credits()
570 "credit not found for ep %d\n", htc_check_credits()
576 ep->cred_dist.credits -= *req_cred; htc_check_credits()
577 ep->ep_st.cred_cosumd += *req_cred; htc_check_credits()
580 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { htc_check_credits()
581 ep->cred_dist.seek_cred = htc_check_credits()
582 ep->cred_dist.cred_per_msg - ep->cred_dist.credits; htc_check_credits()
584 ath6kl_credit_seek(target->credit_info, &ep->cred_dist); htc_check_credits()
587 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { htc_check_credits()
590 ep->ep_st.cred_low_indicate += 1; htc_check_credits()
649 struct htc_endpoint *ep) htc_get_credit_padding()
659 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN)) htc_get_credit_padding()
984 "htc tx overflow ep %d depth %d max %d\n", ath6kl_htc_tx_try()
1022 "htc creds ep %d credits %d pkts %d\n", htc_chk_ep_txq()
1098 int i, ep; ath6kl_htc_set_credit_dist() local
1106 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) { ath6kl_htc_set_credit_dist()
1107 endpoint = &target->endpoint[ep]; ath6kl_htc_set_credit_dist()
1114 if (ep >= ENDPOINT_MAX) { ath6kl_htc_set_credit_dist()
1128 "htc tx ep id %d buf 0x%p len %d\n", ath6kl_htc_mbox_tx()
1179 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n", ath6kl_htc_mbox_flush_txep()
1280 struct htc_endpoint *ep) htc_reclaim_rxbuf()
1285 ep->ep_cb.rx(ep->target, packet); htc_reclaim_rxbuf()
1367 struct htc_endpoint *ep, ath6kl_htc_rx_setup()
1381 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) { ath6kl_htc_rx_setup()
1388 ep_cb = ep->ep_cb; ath6kl_htc_rx_setup()
1399 ep->ep_st.rx_alloc_thresh_hit += 1; ath6kl_htc_rx_setup()
1400 ep->ep_st.rxalloc_thresh_byte += ath6kl_htc_rx_setup()
1406 packet = ep_cb.rx_allocthresh(ep->target, ep->eid, ath6kl_htc_rx_setup()
1411 if (list_empty(&ep->rx_bufq)) { ath6kl_htc_rx_setup()
1414 ep_cb.rx_refill(ep->target, ep->eid); ath6kl_htc_rx_setup()
1419 if (list_empty(&ep->rx_bufq)) { ath6kl_htc_rx_setup()
1422 packet = list_first_entry(&ep->rx_bufq, ath6kl_htc_rx_setup()
1430 target->ep_waiting = ep->eid; ath6kl_htc_rx_setup()
1485 ath6kl_err("invalid ep in look-ahead: %d\n", ath6kl_htc_rx_alloc()
1492 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n", ath6kl_htc_rx_alloc()
1507 ath6kl_err("ep %d is not connected !\n", htc_hdr->eid); ath6kl_htc_rx_alloc()
1613 "credit report ep %d credits %d\n", htc_proc_cred_rpt()
1846 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n", ath6kl_htc_rx_process_hdr()
1904 "htc rx complete ep %d packet 0x%p\n", ath6kl_htc_rx_complete()
2009 struct htc_endpoint *ep; ath6kl_htc_rx_process_packets() local
2013 ep = &target->endpoint[packet->endpoint]; list_for_each_entry_safe()
2033 ep, packet); list_for_each_entry_safe()
2042 ath6kl_htc_rx_update_stats(ep, *n_lk_ahd); list_for_each_entry_safe()
2045 ep->ep_st.rx_bundl += 1; list_for_each_entry_safe()
2047 ath6kl_htc_rx_complete(ep, packet); list_for_each_entry_safe()
2338 "htc rx add multiple ep id %d cnt %d len %d\n", ath6kl_htc_mbox_add_rxbuf_multiple()
2364 "htc rx blocked on ep %d, unblocking\n",
2399 "htc rx flush pkt 0x%p len %d ep %d\n", ath6kl_htc_mbox_flush_rx_buf()
546 htc_check_credits(struct htc_target *target, struct htc_endpoint *ep, u8 *flags, enum htc_endpoint_id eid, unsigned int len, int *req_cred) htc_check_credits() argument
648 htc_get_credit_padding(unsigned int cred_sz, int *len, struct htc_endpoint *ep) htc_get_credit_padding() argument
1278 htc_reclaim_rxbuf(struct htc_target *target, struct htc_packet *packet, struct htc_endpoint *ep) htc_reclaim_rxbuf() argument
1366 ath6kl_htc_rx_setup(struct htc_target *target, struct htc_endpoint *ep, u32 *lk_ahds, struct list_head *queue, int n_msg) ath6kl_htc_rx_setup() argument
/linux-4.4.14/drivers/usb/gadget/
epautoconf.c 24 * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
25 * descriptor and ep companion descriptor
59 * the bmAttribute field in the ep companion descriptor is
63 * assigning ep->claimed to true.
73 struct usb_ep *ep; usb_ep_autoconfig_ss() local
79 ep = gadget->ops->match_ep(gadget, desc, ep_comp); usb_ep_autoconfig_ss()
80 if (ep) usb_ep_autoconfig_ss()
85 list_for_each_entry (ep, &gadget->ep_list, ep_list) { usb_ep_autoconfig_ss()
86 if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp)) usb_ep_autoconfig_ss()
99 desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit); usb_ep_autoconfig_ss()
103 if (isdigit(ep->name[2])) { usb_ep_autoconfig_ss()
104 u8 num = simple_strtoul(&ep->name[2], NULL, 10); usb_ep_autoconfig_ss()
118 int size = ep->maxpacket_limit; usb_ep_autoconfig_ss()
126 ep->address = desc->bEndpointAddress; usb_ep_autoconfig_ss()
127 ep->desc = NULL; usb_ep_autoconfig_ss()
128 ep->comp_desc = NULL; usb_ep_autoconfig_ss()
129 ep->claimed = true; usb_ep_autoconfig_ss()
130 return ep; usb_ep_autoconfig_ss()
161 * by assigning ep->claimed to true.
176 * @ep: endpoint which should be released
184 void usb_ep_autoconfig_release(struct usb_ep *ep) usb_ep_autoconfig_release() argument
186 ep->claimed = false; usb_ep_autoconfig_release()
187 ep->driver_data = NULL; usb_ep_autoconfig_release()
197 * state such as ep->claimed and the record of assigned endpoints
202 struct usb_ep *ep; usb_ep_autoconfig_reset() local
204 list_for_each_entry (ep, &gadget->ep_list, ep_list) { usb_ep_autoconfig_reset()
205 ep->claimed = false; usb_ep_autoconfig_reset()
206 ep->driver_data = NULL; usb_ep_autoconfig_reset()
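Aside: the epautoconf.c hits above implement the endpoint auto-configuration API that gadget function drivers call at bind time. A hedged sketch of typical usage, assuming kernel context (struct my_func, example_bind_eps(), and the error value are illustrative):

    #include <linux/usb/gadget.h>

    static struct usb_endpoint_descriptor bulk_in_desc = {
        .bLength          = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType  = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes     = USB_ENDPOINT_XFER_BULK,
    };

    struct my_func { struct usb_ep *in_ep; };   /* hypothetical wrapper */

    static int example_bind_eps(struct usb_gadget *gadget, struct my_func *f)
    {
        /* Claims a matching hardware endpoint, fills in the descriptor's
         * address, and marks the ep with ep->claimed = true (see above).
         * NULL means no suitable endpoint was free. */
        f->in_ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
        if (!f->in_ep)
            return -ENODEV;
        return 0;
    }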
u_f.c 17 struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len) alloc_ep_req() argument
21 req = usb_ep_alloc_request(ep, GFP_ATOMIC); alloc_ep_req()
26 usb_ep_free_request(ep, req); alloc_ep_req()
u_f.h 48 struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len);
/linux-4.4.14/net/sunrpc/xprtrdma/
verbs.c 103 struct rpcrdma_ep *ep = context; rpcrdma_qp_async_error_upcall() local
105 pr_err("RPC: %s: %s on device %s ep %p\n", rpcrdma_qp_async_error_upcall()
108 if (ep->rep_connected == 1) { rpcrdma_qp_async_error_upcall()
109 ep->rep_connected = -EIO; rpcrdma_qp_async_error_upcall()
110 rpcrdma_conn_func(ep); rpcrdma_qp_async_error_upcall()
111 wake_up_all(&ep->rep_connect_wait); rpcrdma_qp_async_error_upcall()
118 struct rpcrdma_ep *ep = context; rpcrdma_cq_async_error_upcall() local
120 pr_err("RPC: %s: %s on device %s ep %p\n", rpcrdma_cq_async_error_upcall()
123 if (ep->rep_connected == 1) { rpcrdma_cq_async_error_upcall()
124 ep->rep_connected = -EIO; rpcrdma_cq_async_error_upcall()
125 rpcrdma_conn_func(ep); rpcrdma_cq_async_error_upcall()
126 wake_up_all(&ep->rep_connect_wait); rpcrdma_cq_async_error_upcall()
265 rpcrdma_flush_cqs(struct rpcrdma_ep *ep) rpcrdma_flush_cqs() argument
269 while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0) rpcrdma_flush_cqs()
271 while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0) rpcrdma_flush_cqs()
280 struct rpcrdma_ep *ep = &xprt->rx_ep; rpcrdma_conn_upcall() local
282 struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr; rpcrdma_conn_upcall()
296 dprintk("RPC: %s: CM address resolution error, ep 0x%p\n", rpcrdma_conn_upcall()
297 __func__, ep); rpcrdma_conn_upcall()
302 dprintk("RPC: %s: CM route resolution error, ep 0x%p\n", rpcrdma_conn_upcall()
303 __func__, ep); rpcrdma_conn_upcall()
333 ep->rep_connected = connstate; rpcrdma_conn_upcall()
334 rpcrdma_conn_func(ep); rpcrdma_conn_upcall()
335 wake_up_all(&ep->rep_connect_wait); rpcrdma_conn_upcall()
338 dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n", rpcrdma_conn_upcall()
339 __func__, sap, rpc_get_port(sap), ep, rpcrdma_conn_upcall()
347 int tird = ep->rep_remote_cma.responder_resources; rpcrdma_conn_upcall()
566 rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, rpcrdma_ep_create() argument
592 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; rpcrdma_ep_create()
593 ep->rep_attr.qp_context = ep; rpcrdma_ep_create()
594 ep->rep_attr.srq = NULL; rpcrdma_ep_create()
595 ep->rep_attr.cap.max_send_wr = cdata->max_requests; rpcrdma_ep_create()
596 ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS; rpcrdma_ep_create()
597 rc = ia->ri_ops->ro_open(ia, ep, cdata); rpcrdma_ep_create()
600 ep->rep_attr.cap.max_recv_wr = cdata->max_requests; rpcrdma_ep_create()
601 ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS; rpcrdma_ep_create()
602 ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS; rpcrdma_ep_create()
603 ep->rep_attr.cap.max_recv_sge = 1; rpcrdma_ep_create()
604 ep->rep_attr.cap.max_inline_data = 0; rpcrdma_ep_create()
605 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; rpcrdma_ep_create()
606 ep->rep_attr.qp_type = IB_QPT_RC; rpcrdma_ep_create()
607 ep->rep_attr.port_num = ~0; rpcrdma_ep_create()
612 ep->rep_attr.cap.max_send_wr, rpcrdma_ep_create()
613 ep->rep_attr.cap.max_recv_wr, rpcrdma_ep_create()
614 ep->rep_attr.cap.max_send_sge, rpcrdma_ep_create()
615 ep->rep_attr.cap.max_recv_sge); rpcrdma_ep_create()
618 ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1; rpcrdma_ep_create()
619 if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS) rpcrdma_ep_create()
620 ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS; rpcrdma_ep_create()
621 else if (ep->rep_cqinit <= 2) rpcrdma_ep_create()
622 ep->rep_cqinit = 0; rpcrdma_ep_create()
623 INIT_CQCOUNT(ep); rpcrdma_ep_create()
624 init_waitqueue_head(&ep->rep_connect_wait); rpcrdma_ep_create()
625 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); rpcrdma_ep_create()
627 cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1; rpcrdma_ep_create()
644 cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1; rpcrdma_ep_create()
662 ep->rep_attr.send_cq = sendcq; rpcrdma_ep_create()
663 ep->rep_attr.recv_cq = recvcq; rpcrdma_ep_create()
668 ep->rep_remote_cma.private_data = NULL; rpcrdma_ep_create()
669 ep->rep_remote_cma.private_data_len = 0; rpcrdma_ep_create()
672 ep->rep_remote_cma.initiator_depth = 0; rpcrdma_ep_create()
674 ep->rep_remote_cma.responder_resources = 32; rpcrdma_ep_create()
676 ep->rep_remote_cma.responder_resources = rpcrdma_ep_create()
679 ep->rep_remote_cma.retry_count = 7; rpcrdma_ep_create()
680 ep->rep_remote_cma.flow_control = 0; rpcrdma_ep_create()
681 ep->rep_remote_cma.rnr_retry_count = 0; rpcrdma_ep_create()
700 * valid operations on the ep are to free it (if dynamically
704 rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) rpcrdma_ep_destroy() argument
709 __func__, ep->rep_connected); rpcrdma_ep_destroy()
711 cancel_delayed_work_sync(&ep->rep_connect_worker); rpcrdma_ep_destroy()
714 rpcrdma_ep_disconnect(ep, ia); rpcrdma_ep_destroy()
716 rpcrdma_clean_cq(ep->rep_attr.recv_cq); rpcrdma_ep_destroy()
717 rpcrdma_clean_cq(ep->rep_attr.send_cq); rpcrdma_ep_destroy()
724 rc = ib_destroy_cq(ep->rep_attr.recv_cq); rpcrdma_ep_destroy()
729 rc = ib_destroy_cq(ep->rep_attr.send_cq); rpcrdma_ep_destroy()
745 rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) rpcrdma_ep_connect() argument
751 if (ep->rep_connected != 0) { rpcrdma_ep_connect()
756 rpcrdma_ep_disconnect(ep, ia); rpcrdma_ep_connect()
757 rpcrdma_flush_cqs(ep); rpcrdma_ep_connect()
781 rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr); rpcrdma_ep_connect()
799 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr); rpcrdma_ep_connect()
803 /* do not update ep->rep_connected */ rpcrdma_ep_connect()
808 ep->rep_connected = 0; rpcrdma_ep_connect()
810 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma); rpcrdma_ep_connect()
817 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); rpcrdma_ep_connect()
825 if (ep->rep_connected == -ECONNREFUSED && rpcrdma_ep_connect()
830 if (ep->rep_connected <= 0) { rpcrdma_ep_connect()
834 (ep->rep_remote_cma.responder_resources == 0 || rpcrdma_ep_connect()
835 ep->rep_remote_cma.initiator_depth != rpcrdma_ep_connect()
836 ep->rep_remote_cma.responder_resources)) { rpcrdma_ep_connect()
837 if (ep->rep_remote_cma.responder_resources == 0) rpcrdma_ep_connect()
838 ep->rep_remote_cma.responder_resources = 1; rpcrdma_ep_connect()
839 ep->rep_remote_cma.initiator_depth = rpcrdma_ep_connect()
840 ep->rep_remote_cma.responder_resources; rpcrdma_ep_connect()
843 rc = ep->rep_connected; rpcrdma_ep_connect()
864 ep->rep_connected = rc; rpcrdma_ep_connect()
878 rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) rpcrdma_ep_disconnect() argument
882 rpcrdma_flush_cqs(ep); rpcrdma_ep_disconnect()
886 wait_event_interruptible(ep->rep_connect_wait, rpcrdma_ep_disconnect()
887 ep->rep_connected != 1); rpcrdma_ep_disconnect()
889 (ep->rep_connected == 1) ? "still " : "dis"); rpcrdma_ep_disconnect()
892 ep->rep_connected = rc; rpcrdma_ep_disconnect()
1254 struct rpcrdma_ep *ep, rpcrdma_ep_post()
1264 rc = rpcrdma_ep_post_recv(ia, ep, rep); rpcrdma_ep_post()
1282 if (DECR_CQCOUNT(ep) > 0) rpcrdma_ep_post()
1285 INIT_CQCOUNT(ep); rpcrdma_ep_post()
1302 struct rpcrdma_ep *ep, rpcrdma_ep_post_recv()
1338 struct rpcrdma_ep *ep = &r_xprt->rx_ep; rpcrdma_ep_post_extra_recv() local
1350 rc = rpcrdma_ep_post_recv(ia, ep, rep); rpcrdma_ep_post_extra_recv()
1253 rpcrdma_ep_post(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_req *req) rpcrdma_ep_post() argument
1301 rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_rep *rep) rpcrdma_ep_post_recv() argument
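Aside: rpcrdma_ep_connect() above issues the connect and then sleeps on rep_connect_wait until the CM upcall flips rep_connected and wakes the waiter. The waitqueue skeleton of that handshake, reduced to a hedged kernel-context sketch (struct conn is illustrative; error handling trimmed):

    #include <linux/wait.h>

    struct conn {
        wait_queue_head_t wait;     /* init_waitqueue_head() at setup */
        int connected;              /* 0 = pending, 1 = up, <0 = error */
    };

    static int wait_for_connect(struct conn *c)
    {
        c->connected = 0;
        /* ... rdma_connect() or equivalent would be issued here ... */
        wait_event_interruptible(c->wait, c->connected != 0);
        return c->connected < 0 ? c->connected : 0;
    }

    /* The event handler's side (cf. rpcrdma_conn_upcall above): */
    static void on_established(struct conn *c)
    {
        c->connected = 1;
        wake_up_all(&c->wait);
    }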
/linux-4.4.14/include/linux/usb/
atmel_usba_udc.h 20 struct usba_ep_data ep[0]; member in struct:usba_platform_data
musb.h 43 #define MUSB_EP_FIFO(ep, st, m, pkt) \
45 .hw_ep_num = ep, \
51 #define MUSB_EP_FIFO_SINGLE(ep, st, pkt) \
52 MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt)
54 #define MUSB_EP_FIFO_DOUBLE(ep, st, pkt) \
55 MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt)
72 unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */
73 unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */
74 unsigned high_iso_tx:1; /* Tx ep required for HB iso */
75 unsigned high_iso_rx:1; /* Rx ep required for HB iso */
cdc-wdm.h 17 struct usb_endpoint_descriptor *ep,
gadget.h 105 void (*complete)(struct usb_ep *ep,
124 int (*enable) (struct usb_ep *ep,
126 int (*disable) (struct usb_ep *ep);
128 struct usb_request *(*alloc_request) (struct usb_ep *ep,
130 void (*free_request) (struct usb_ep *ep, struct usb_request *req);
132 int (*queue) (struct usb_ep *ep, struct usb_request *req,
134 int (*dequeue) (struct usb_ep *ep, struct usb_request *req);
136 int (*set_halt) (struct usb_ep *ep, int value);
137 int (*set_wedge) (struct usb_ep *ep);
139 int (*fifo_status) (struct usb_ep *ep);
140 void (*fifo_flush) (struct usb_ep *ep);
183 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
233 * @ep:the endpoint being configured
239 static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, usb_ep_set_maxpacket_limit() argument
242 ep->maxpacket_limit = maxpacket_limit; usb_ep_set_maxpacket_limit()
243 ep->maxpacket = maxpacket_limit; usb_ep_set_maxpacket_limit()
248 * @ep:the endpoint being configured. may not be the endpoint named "ep0".
261 * configurable, with more generic names like "ep-a". (remember that for
266 static inline int usb_ep_enable(struct usb_ep *ep) usb_ep_enable() argument
270 if (ep->enabled) usb_ep_enable()
273 ret = ep->ops->enable(ep, ep->desc); usb_ep_enable()
277 ep->enabled = true; usb_ep_enable()
284 * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
294 static inline int usb_ep_disable(struct usb_ep *ep) usb_ep_disable() argument
298 if (!ep->enabled) usb_ep_disable()
301 ret = ep->ops->disable(ep); usb_ep_disable()
305 ep->enabled = false; usb_ep_disable()
312 * @ep:the endpoint to be used with the request
324 static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, usb_ep_alloc_request() argument
327 return ep->ops->alloc_request(ep, gfp_flags); usb_ep_alloc_request()
332 * @ep:the endpoint associated with the request
339 static inline void usb_ep_free_request(struct usb_ep *ep, usb_ep_free_request() argument
342 ep->ops->free_request(ep, req); usb_ep_free_request()
347 * @ep:the endpoint associated with the request
402 static inline int usb_ep_queue(struct usb_ep *ep, usb_ep_queue() argument
405 return ep->ops->queue(ep, req, gfp_flags); usb_ep_queue()
410 * @ep:the endpoint associated with the request
423 static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) usb_ep_dequeue() argument
425 return ep->ops->dequeue(ep, req); usb_ep_dequeue()
430 * @ep: the non-isochronous endpoint being stalled
449 static inline int usb_ep_set_halt(struct usb_ep *ep) usb_ep_set_halt() argument
451 return ep->ops->set_halt(ep, 1); usb_ep_set_halt()
456 * @ep:the bulk or interrupt endpoint being reset
467 static inline int usb_ep_clear_halt(struct usb_ep *ep) usb_ep_clear_halt() argument
469 return ep->ops->set_halt(ep, 0); usb_ep_clear_halt()
474 * @ep: the endpoint being wedged
483 usb_ep_set_wedge(struct usb_ep *ep) usb_ep_set_wedge() argument
485 if (ep->ops->set_wedge) usb_ep_set_wedge()
486 return ep->ops->set_wedge(ep); usb_ep_set_wedge()
488 return ep->ops->set_halt(ep, 1); usb_ep_set_wedge()
493 * @ep: the endpoint whose fifo status is being checked.
506 static inline int usb_ep_fifo_status(struct usb_ep *ep) usb_ep_fifo_status() argument
508 if (ep->ops->fifo_status) usb_ep_fifo_status()
509 return ep->ops->fifo_status(ep); usb_ep_fifo_status()
516 * @ep: the endpoint whose fifo is being flushed.
523 static inline void usb_ep_fifo_flush(struct usb_ep *ep) usb_ep_fifo_flush() argument
525 if (ep->ops->fifo_flush) usb_ep_fifo_flush()
526 ep->ops->fifo_flush(ep); usb_ep_fifo_flush()
580 * @out_epnum: last used out ep number
581 * @in_epnum: last used in ep number
666 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
669 * @ep: the endpoint whose maxpacketsize is used to align @len
670 * @len: buffer size's length to align to @ep's maxpacketsize
673 * align a buffer's size to an ep's maxpacketsize.
676 usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len) usb_ep_align_maybe() argument
679 round_up(len, (size_t)ep->desc->wMaxPacketSize); usb_ep_align_maybe()
1224 extern void usb_gadget_giveback_request(struct usb_ep *ep,
1239 struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
ehci-dbgp.h 42 #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
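Aside: the gadget.h inlines above wrap the usb_ep_ops vtable. A hedged sketch of the enable/alloc/queue/complete flow they imply, assuming kernel context and that ep->desc has already been set by the caller (my_complete() and example_submit() are illustrative names):

    #include <linux/usb/gadget.h>

    static void my_complete(struct usb_ep *ep, struct usb_request *req)
    {
        /* req->status carries the result, req->actual the bytes moved. */
    }

    static int example_submit(struct usb_ep *ep, void *buf, unsigned int len)
    {
        struct usb_request *req;
        int ret;

        ret = usb_ep_enable(ep);        /* no-op if already enabled */
        if (ret)
            return ret;

        req = usb_ep_alloc_request(ep, GFP_KERNEL);
        if (!req)
            return -ENOMEM;

        req->buf      = buf;
        req->length   = len;
        req->complete = my_complete;    /* called from the UDC's context */

        ret = usb_ep_queue(ep, req, GFP_KERNEL);
        if (ret)
            usb_ep_free_request(ep, req);
        return ret;
    }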
/linux-4.4.14/drivers/char/xillybus/
xillybus_of.c 42 static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep, xilly_dma_sync_single_for_cpu_of() argument
47 dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction); xilly_dma_sync_single_for_cpu_of()
50 static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep, xilly_dma_sync_single_for_device_of() argument
55 dma_sync_single_for_device(ep->dev, dma_handle, size, direction); xilly_dma_sync_single_for_device_of()
58 static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep, xilly_dma_sync_single_nop() argument
75 static int xilly_map_single_of(struct xilly_endpoint *ep, xilly_map_single_of() argument
90 addr = dma_map_single(ep->dev, ptr, size, direction); xilly_map_single_of()
92 if (dma_mapping_error(ep->dev, addr)) { xilly_map_single_of()
97 this->device = ep->dev; xilly_map_single_of()
104 rc = devm_add_action(ep->dev, xilly_of_unmap, this); xilly_map_single_of()
107 dma_unmap_single(ep->dev, addr, size, direction); xilly_map_single_of()
xillybus_pcie.c 53 static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep, xilly_dma_sync_single_for_cpu_pci() argument
58 pci_dma_sync_single_for_cpu(ep->pdev, xilly_dma_sync_single_for_cpu_pci()
64 static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep, xilly_dma_sync_single_for_device_pci() argument
69 pci_dma_sync_single_for_device(ep->pdev, xilly_dma_sync_single_for_device_pci()
91 static int xilly_map_single_pci(struct xilly_endpoint *ep, xilly_map_single_pci() argument
109 addr = pci_map_single(ep->pdev, ptr, size, pci_direction); xilly_map_single_pci()
111 if (pci_dma_mapping_error(ep->pdev, addr)) { xilly_map_single_pci()
116 this->device = ep->pdev; xilly_map_single_pci()
123 rc = devm_add_action(ep->dev, xilly_pci_unmap, this); xilly_map_single_pci()
125 pci_unmap_single(ep->pdev, addr, size, pci_direction); xilly_map_single_pci()
xillybus_core.c 128 struct xilly_endpoint *ep = data; xillybus_isr() local
136 buf = ep->msgbuf_addr; xillybus_isr()
137 buf_size = ep->msg_buf_size/sizeof(u32); xillybus_isr()
139 ep->ephw->hw_sync_sgl_for_cpu(ep, xillybus_isr()
140 ep->msgbuf_dma_addr, xillybus_isr()
141 ep->msg_buf_size, xillybus_isr()
145 if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { xillybus_isr()
146 malformed_message(ep, &buf[i]); xillybus_isr()
147 dev_warn(ep->dev, xillybus_isr()
150 ep->msg_counter, xillybus_isr()
153 if (++ep->failed_messages > 10) { xillybus_isr()
154 dev_err(ep->dev, xillybus_isr()
157 ep->ephw->hw_sync_sgl_for_device( xillybus_isr()
158 ep, xillybus_isr()
159 ep->msgbuf_dma_addr, xillybus_isr()
160 ep->msg_buf_size, xillybus_isr()
164 ep->registers + fpga_msg_ctrl_reg); xillybus_isr()
172 dev_err(ep->dev, "Bad interrupt message. Stopping.\n"); xillybus_isr()
188 if ((msg_channel > ep->num_channels) || xillybus_isr()
190 malformed_message(ep, &buf[i]); xillybus_isr()
194 channel = ep->channels[msg_channel]; xillybus_isr()
198 malformed_message(ep, &buf[i]); xillybus_isr()
215 malformed_message(ep, &buf[i]); xillybus_isr()
234 if ((msg_channel > ep->num_channels) || xillybus_isr()
236 !ep->channels[msg_channel]->wr_supports_nonempty) { xillybus_isr()
237 malformed_message(ep, &buf[i]); xillybus_isr()
241 channel = ep->channels[msg_channel]; xillybus_isr()
244 malformed_message(ep, &buf[i]); xillybus_isr()
256 ep->idtlen = msg_data; xillybus_isr()
257 wake_up_interruptible(&ep->ep_wait); xillybus_isr()
261 if ((msg_channel > ep->num_channels) || xillybus_isr()
263 !ep->channels[msg_channel]->num_wr_buffers) { xillybus_isr()
264 malformed_message(ep, &buf[i]); xillybus_isr()
267 channel = ep->channels[msg_channel]; xillybus_isr()
281 ep->fatal_error = 1; xillybus_isr()
282 wake_up_interruptible(&ep->ep_wait); /* For select() */ xillybus_isr()
283 dev_err(ep->dev, xillybus_isr()
287 malformed_message(ep, &buf[i]); xillybus_isr()
292 ep->ephw->hw_sync_sgl_for_device(ep, xillybus_isr()
293 ep->msgbuf_dma_addr, xillybus_isr()
294 ep->msg_buf_size, xillybus_isr()
297 ep->msg_counter = (ep->msg_counter + 1) & 0xf; xillybus_isr()
298 ep->failed_messages = 0; xillybus_isr()
299 iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */ xillybus_isr()
321 static int xilly_get_dma_buffers(struct xilly_endpoint *ep, xilly_get_dma_buffers() argument
328 struct device *dev = ep->dev; xilly_get_dma_buffers()
347 dev_err(ep->dev, xilly_get_dma_buffers()
372 rc = ep->ephw->map_single(ep, s->salami, xilly_get_dma_buffers()
379 ep->registers + fpga_dma_bufaddr_lowaddr_reg); xilly_get_dma_buffers()
381 ep->registers + fpga_dma_bufaddr_highaddr_reg); xilly_get_dma_buffers()
389 ep->registers + fpga_dma_bufno_reg); xilly_get_dma_buffers()
391 ep->msgbuf_addr = s->salami; xilly_get_dma_buffers()
392 ep->msgbuf_dma_addr = dma_addr; xilly_get_dma_buffers()
393 ep->msg_buf_size = bytebufsize; xilly_get_dma_buffers()
396 ep->registers + fpga_dma_bufno_reg); xilly_get_dma_buffers()
405 static int xilly_setupchannels(struct xilly_endpoint *ep, xilly_setupchannels() argument
409 struct device *dev = ep->dev; xilly_setupchannels()
434 channel = devm_kcalloc(dev, ep->num_channels, xilly_setupchannels()
439 ep->channels = devm_kcalloc(dev, ep->num_channels + 1, xilly_setupchannels()
442 if (!ep->channels) xilly_setupchannels()
445 ep->channels[0] = NULL; /* Channel 0 is message buf. */ xilly_setupchannels()
449 for (i = 1; i <= ep->num_channels; i++) { xilly_setupchannels()
477 channel->endpoint = ep; xilly_setupchannels()
482 ep->channels[i] = channel++; xilly_setupchannels()
499 if ((channelnum > ep->num_channels) || xilly_setupchannels()
501 dev_err(ep->dev, xilly_setupchannels()
506 channel = ep->channels[channelnum]; /* NULL for msg channel */ xilly_setupchannels()
532 rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers, xilly_setupchannels()
545 rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers, xilly_setupchannels()
548 rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL, xilly_setupchannels()
558 dev_err(ep->dev, xilly_setupchannels()
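Aside: xilly_map_single_of() and xilly_map_single_pci() above wrap the streaming-DMA mapping discipline: map, check for mapping failure, and arrange a matching unmap on teardown. A hedged kernel-context sketch of that core pattern (example_map() is an illustrative name):

    #include <linux/dma-mapping.h>

    static int example_map(struct device *dev, void *ptr, size_t size,
                           enum dma_data_direction dir, dma_addr_t *out)
    {
        dma_addr_t addr = dma_map_single(dev, ptr, size, dir);

        /* Mapping can fail (e.g. IOMMU space exhausted); always check
         * with dma_mapping_error() before using the handle. */
        if (dma_mapping_error(dev, addr))
            return -ENOMEM;

        *out = addr;    /* pair with dma_unmap_single() on teardown */
        return 0;
    }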
/linux-4.4.14/drivers/hwtracing/coresight/
of_coresight.c 59 struct device_node *ep = NULL; of_coresight_get_ports() local
63 ep = of_graph_get_next_endpoint(node, ep); of_coresight_get_ports()
64 if (!ep) of_coresight_get_ports()
67 if (of_property_read_bool(ep, "slave-mode")) of_coresight_get_ports()
72 } while (ep); of_coresight_get_ports()
113 struct device_node *ep = NULL; of_get_coresight_platform_data() local
135 ep = of_graph_get_next_endpoint(node, ep); of_get_coresight_platform_data()
136 if (!ep) of_get_coresight_platform_data()
143 if (of_find_property(ep, "slave-mode", NULL)) of_get_coresight_platform_data()
147 ret = of_graph_parse_endpoint(ep, &endpoint); of_get_coresight_platform_data()
159 rparent = of_graph_get_remote_port_parent(ep); of_get_coresight_platform_data()
160 rport = of_graph_get_remote_port(ep); of_get_coresight_platform_data()
176 } while (ep); of_get_coresight_platform_data()
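Aside: both coresight functions above walk a node's OF-graph endpoints with of_graph_get_next_endpoint(), which drops the previous endpoint's reference, takes one on the next, and returns NULL at the end of the list. A hedged kernel-context sketch of the same loop (count_endpoints() is illustrative):

    #include <linux/of_graph.h>

    static int count_endpoints(struct device_node *node)
    {
        struct device_node *ep = NULL;
        int n = 0;

        while ((ep = of_graph_get_next_endpoint(node, ep)))
            n++;            /* ref on ep is dropped by the next call */
        return n;
    }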
/linux-4.4.14/tools/perf/util/
strfilter.c 77 const char **ep) strfilter_node__new()
141 *ep = s; strfilter_node__new()
146 *ep = s; strfilter_node__new()
153 * Return NULL on failure; *ep == NULL means memory allocation failed. strfilter__new()
158 const char *ep = NULL; strfilter__new() local
161 filter->root = strfilter_node__new(rules, &ep); strfilter__new()
163 if (!filter || !filter->root || *ep != '\0') { strfilter__new()
165 *err = ep; strfilter__new()
177 const char *ep = NULL; strfilter__append() local
182 right = strfilter_node__new(rules, &ep); strfilter__append()
183 if (!right || *ep != '\0') { strfilter__append()
185 *err = ep; strfilter__append()
190 ep = NULL; strfilter__append()
199 return ep ? -EINVAL : -ENOMEM; strfilter__append()
76 strfilter_node__new(const char *s, const char **ep) strfilter_node__new() argument
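Aside: per the hits above, strfilter__new() reports parse errors through its second argument: on failure the pointer is left aimed into the rule string near the offending token, or stays NULL when allocation failed. A hedged sketch of caller-side handling (build_filter() is illustrative; this is perf-internal API, not standalone code):

    static struct strfilter *build_filter(const char *rules)
    {
        const char *err = NULL;
        struct strfilter *filter = strfilter__new(rules, &err);

        if (!filter) {
            if (err)
                pr_err("parse error near '%s'\n", err);
            else
                pr_err("out of memory\n");
        }
        return filter;
    }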
/linux-4.4.14/drivers/usb/chipidea/
udc.c 207 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
364 u32 mul = hwreq->req.length / hwep->ep.maxpacket; add_td_to_list()
367 || hwreq->req.length % hwep->ep.maxpacket) add_td_to_list()
399 * @ep: endpoint
401 static inline u8 _usb_addr(struct ci_hw_ep *ep) _usb_addr() argument
403 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num; _usb_addr()
449 && (hwreq->req.length % hwep->ep.maxpacket == 0)) _hardware_enqueue()
494 u32 mul = hwreq->req.length / hwep->ep.maxpacket; _hardware_enqueue()
497 || hwreq->req.length % hwep->ep.maxpacket) _hardware_enqueue()
502 wmb(); /* synchronize before ep prime */ _hardware_enqueue()
530 /* Synchronize before ep prime */ reprime_dtd()
648 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
659 static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer) _ep_set_halt() argument
661 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); _ep_set_halt()
665 if (ep == NULL || hwep->ep.desc == NULL) _ep_set_halt()
668 if (usb_endpoint_xfer_isoc(hwep->ep.desc)) _ep_set_halt()
675 !usb_endpoint_xfer_control(hwep->ep.desc)) { _ep_set_halt()
705 struct usb_ep *ep; _gadget_stop_activity() local
716 gadget_for_each_ep(ep, gadget) { gadget_for_each_ep()
717 usb_ep_fifo_flush(ep); gadget_for_each_ep()
719 usb_ep_fifo_flush(&ci->ep0out->ep);
720 usb_ep_fifo_flush(&ci->ep0in->ep);
723 gadget_for_each_ep(ep, gadget) { gadget_for_each_ep()
724 usb_ep_disable(ep); gadget_for_each_ep()
728 usb_ep_free_request(&ci->ep0in->ep, ci->status);
762 ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
775 * @ep: endpoint
780 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req) isr_get_status_complete() argument
782 if (ep == NULL || req == NULL) isr_get_status_complete()
786 usb_ep_free_request(ep, req); isr_get_status_complete()
794 static int _ep_queue(struct usb_ep *ep, struct usb_request *req, _ep_queue() argument
797 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); _ep_queue()
802 if (ep == NULL || req == NULL || hwep->ep.desc == NULL) _ep_queue()
817 if (usb_endpoint_xfer_isoc(hwep->ep.desc) && _ep_queue()
818 hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) { _ep_queue()
864 req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
889 retval = _ep_queue(&hwep->ep, req, gfp_flags);
899 usb_ep_free_request(&hwep->ep, req);
906 * @ep: endpoint
913 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) isr_setup_status_complete() argument
946 retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC); isr_setup_status_phase()
977 usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
1042 &ci->ci_hw_ep[num].ep);
1092 err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
1158 if (_ep_set_halt(&hwep->ep, 1, false))
1180 if (hwep->ep.desc == NULL)
1190 if (_ep_set_halt(&hwep->ep, 1, false))
1213 static int ep_enable(struct usb_ep *ep, ep_enable() argument
1216 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_enable()
1221 if (ep == NULL || desc == NULL) ep_enable()
1234 hwep->ep.desc = desc; ep_enable()
1240 hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff; ep_enable()
1241 hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc)); ep_enable()
1247 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; ep_enable()
1281 static int ep_disable(struct usb_ep *ep) ep_disable() argument
1283 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_disable()
1287 if (ep == NULL) ep_disable()
1289 else if (hwep->ep.desc == NULL) ep_disable()
1306 hwep->ep.desc = NULL; ep_disable()
1317 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) ep_alloc_request() argument
1321 if (ep == NULL) ep_alloc_request()
1338 static void ep_free_request(struct usb_ep *ep, struct usb_request *req) ep_free_request() argument
1340 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_free_request()
1345 if (ep == NULL || req == NULL) { ep_free_request()
1371 static int ep_queue(struct usb_ep *ep, struct usb_request *req, ep_queue() argument
1374 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_queue()
1378 if (ep == NULL || req == NULL || hwep->ep.desc == NULL) ep_queue()
1382 retval = _ep_queue(ep, req, gfp_flags); ep_queue()
1392 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) ep_dequeue() argument
1394 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_dequeue()
1399 if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || ep_dequeue()
1400 hwep->ep.desc == NULL || list_empty(&hwreq->queue) || ep_dequeue()
1423 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); ep_dequeue()
1436 static int ep_set_halt(struct usb_ep *ep, int value) ep_set_halt() argument
1438 return _ep_set_halt(ep, value, true); ep_set_halt()
1446 static int ep_set_wedge(struct usb_ep *ep) ep_set_wedge() argument
1448 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_set_wedge()
1451 if (ep == NULL || hwep->ep.desc == NULL) ep_set_wedge()
1458 return usb_ep_set_halt(ep); ep_set_wedge()
1466 static void ep_fifo_flush(struct usb_ep *ep) ep_fifo_flush() argument
1468 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); ep_fifo_flush()
1471 if (ep == NULL) { ep_fifo_flush()
1629 scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i, init_eps()
1636 hwep->ep.name = hwep->name; init_eps()
1637 hwep->ep.ops = &usb_ep_ops; init_eps()
1640 hwep->ep.caps.type_control = true; init_eps()
1642 hwep->ep.caps.type_iso = true; init_eps()
1643 hwep->ep.caps.type_bulk = true; init_eps()
1644 hwep->ep.caps.type_int = true; init_eps()
1648 hwep->ep.caps.dir_in = true; init_eps()
1650 hwep->ep.caps.dir_out = true; init_eps()
1657 usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0); init_eps()
1677 usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX); init_eps()
1681 list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list); init_eps()
1718 ci->ep0out->ep.desc = &ctrl_endpt_out_desc; ci_udc_start()
1719 retval = usb_ep_enable(&ci->ep0out->ep); ci_udc_start()
1723 ci->ep0in->ep.desc = &ctrl_endpt_in_desc; ci_udc_start()
1724 retval = usb_ep_enable(&ci->ep0in->ep); ci_udc_start()
1908 ci->gadget.ep0 = &ci->ep0in->ep; udc_start()
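Aside: the chipidea driver above repeatedly recovers its private ci_hw_ep from the embedded generic usb_ep via container_of(). A standalone, runnable demonstration of that idiom (the types here are stand-ins, not the kernel's):

    #include <stddef.h>

    struct generic_ep { int dummy; };

    struct hw_ep {
        int num;
        struct generic_ep ep;   /* generic part embedded in the private one */
    };

    /* Same arithmetic as the kernel's container_of(): step back from
     * the member's address by its offset within the container. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct hw_ep *to_hw_ep(struct generic_ep *ep)
    {
        return container_of(ep, struct hw_ep, ep);
    }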
/linux-4.4.14/fs/
eventpoll.c 51 * 2) ep->mtx (mutex)
52 * 3) ep->lock (spinlock)
55 * We need a spinlock (ep->lock) because we manipulate objects
62 * mutex (ep->mtx). It is acquired during the event transfer loop,
77 * It is necessary to acquire multiple "ep->mtx"es at once in the
86 * It is possible to drop the "ep->mtx" and to use the global
87 * mutex "epmutex" (together with "ep->lock") to have it working,
88 * but having "ep->mtx" will make the interface more scalable.
90 * normal operations the epoll private "ep->mtx" will guarantee
163 struct eventpoll *ep; member in struct:epitem
372 * @ep: Pointer to the eventpoll context.
377 static inline int ep_events_available(struct eventpoll *ep) ep_events_available() argument
379 return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; ep_events_available()
533 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) ep_unregister_pollwait() argument
547 /* call only when ep->mtx is held */ ep_wakeup_source()
550 return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx)); ep_wakeup_source()
553 /* call only when ep->mtx is held */ ep_pm_stay_awake()
567 /* call when ep->mtx cannot be held (ep_poll_callback) */ ep_pm_stay_awake_rcu()
584 * @ep: Pointer to the epoll private data structure.
588 * @ep_locked: caller already holds ep->mtx
592 static int ep_scan_ready_list(struct eventpoll *ep, ep_scan_ready_list() argument
608 mutex_lock_nested(&ep->mtx, depth); ep_scan_ready_list()
612 * empty list. Also, set ep->ovflist to NULL so that events ep_scan_ready_list()
614 * have the poll callback to queue directly on ep->rdllist, ep_scan_ready_list()
618 spin_lock_irqsave(&ep->lock, flags); ep_scan_ready_list()
619 list_splice_init(&ep->rdllist, &txlist); ep_scan_ready_list()
620 ep->ovflist = NULL; ep_scan_ready_list()
621 spin_unlock_irqrestore(&ep->lock, flags); ep_scan_ready_list()
626 error = (*sproc)(ep, &txlist, priv); ep_scan_ready_list()
628 spin_lock_irqsave(&ep->lock, flags); ep_scan_ready_list()
634 for (nepi = ep->ovflist; (epi = nepi) != NULL; ep_scan_ready_list()
643 list_add_tail(&epi->rdllink, &ep->rdllist); ep_scan_ready_list()
648 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after ep_scan_ready_list()
650 * ep->rdllist. ep_scan_ready_list()
652 ep->ovflist = EP_UNACTIVE_PTR; ep_scan_ready_list()
657 list_splice(&txlist, &ep->rdllist); ep_scan_ready_list()
658 __pm_relax(ep->ws); ep_scan_ready_list()
660 if (!list_empty(&ep->rdllist)) { ep_scan_ready_list()
665 if (waitqueue_active(&ep->wq)) ep_scan_ready_list()
666 wake_up_locked(&ep->wq); ep_scan_ready_list()
667 if (waitqueue_active(&ep->poll_wait)) ep_scan_ready_list()
670 spin_unlock_irqrestore(&ep->lock, flags); ep_scan_ready_list()
673 mutex_unlock(&ep->mtx); ep_scan_ready_list()
677 ep_poll_safewake(&ep->poll_wait); ep_scan_ready_list()
692 static int ep_remove(struct eventpoll *ep, struct epitem *epi) ep_remove() argument
699 * the "ep->lock" otherwise a deadlock might occur. This because of the ep_remove()
700 * sequence of the lock acquisition. Here we do "ep->lock" then the wait ep_remove()
703 * that will try to get "ep->lock". ep_remove()
705 ep_unregister_pollwait(ep, epi); ep_remove()
712 rb_erase(&epi->rbn, &ep->rbr); ep_remove()
714 spin_lock_irqsave(&ep->lock, flags); ep_remove()
717 spin_unlock_irqrestore(&ep->lock, flags); ep_remove()
724 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make ep_remove()
729 atomic_long_dec(&ep->user->epoll_watches); ep_remove()
734 static void ep_free(struct eventpoll *ep) ep_free() argument
740 if (waitqueue_active(&ep->poll_wait)) ep_free()
741 ep_poll_safewake(&ep->poll_wait); ep_free()
746 * We do not need to hold "ep->mtx" here because the epoll file ep_free()
756 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { ep_free()
759 ep_unregister_pollwait(ep, epi); ep_free()
767 * us during this operation. So we can avoid the lock on "ep->lock". ep_free()
768 * We do not need to lock ep->mtx, either, we only do it to prevent ep_free()
771 mutex_lock(&ep->mtx); ep_free()
772 while ((rbp = rb_first(&ep->rbr)) != NULL) { ep_free()
774 ep_remove(ep, epi); ep_free()
777 mutex_unlock(&ep->mtx); ep_free()
780 mutex_destroy(&ep->mtx); ep_free()
781 free_uid(ep->user); ep_free()
782 wakeup_source_unregister(ep->ws); ep_free()
783 kfree(ep); ep_free()
788 struct eventpoll *ep = file->private_data; ep_eventpoll_release() local
790 if (ep) ep_eventpoll_release()
791 ep_free(ep); ep_eventpoll_release()
803 static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head, ep_read_events_proc() argument
832 struct eventpoll *ep; member in struct:readyevents_arg
840 return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL, ep_poll_readyevents_proc()
847 struct eventpoll *ep = file->private_data; ep_eventpoll_poll() local
851 * During ep_insert() we already hold the ep->mtx for the tfile. ep_eventpoll_poll()
855 arg.ep = ep; ep_eventpoll_poll()
858 poll_wait(file, &ep->poll_wait, wait); ep_eventpoll_poll()
867 ep_poll_readyevents_proc, &arg, ep, current); ep_eventpoll_poll()
875 struct eventpoll *ep = f->private_data; ep_show_fdinfo() local
878 mutex_lock(&ep->mtx); ep_show_fdinfo()
879 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { ep_show_fdinfo()
888 mutex_unlock(&ep->mtx); ep_show_fdinfo()
909 struct eventpoll *ep; eventpoll_release_file() local
920 * "ep->mtx" after "epmutex" because ep_remove() requires it when called eventpoll_release_file()
927 ep = epi->ep; eventpoll_release_file()
928 mutex_lock_nested(&ep->mtx, 0); eventpoll_release_file()
929 ep_remove(ep, epi); eventpoll_release_file()
930 mutex_unlock(&ep->mtx); eventpoll_release_file()
939 struct eventpoll *ep; ep_alloc() local
943 ep = kzalloc(sizeof(*ep), GFP_KERNEL); ep_alloc()
944 if (unlikely(!ep)) ep_alloc()
947 spin_lock_init(&ep->lock); ep_alloc()
948 mutex_init(&ep->mtx); ep_alloc()
949 init_waitqueue_head(&ep->wq); ep_alloc()
950 init_waitqueue_head(&ep->poll_wait); ep_alloc()
951 INIT_LIST_HEAD(&ep->rdllist); ep_alloc()
952 ep->rbr = RB_ROOT; ep_alloc()
953 ep->ovflist = EP_UNACTIVE_PTR; ep_alloc()
954 ep->user = user; ep_alloc()
956 *pep = ep; ep_alloc()
970 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) ep_find() argument
978 for (rbp = ep->rbr.rb_node; rbp; ) { ep_find()
1004 struct eventpoll *ep = epi->ep; ep_poll_callback() local
1017 spin_lock_irqsave(&ep->lock, flags); ep_poll_callback()
1041 * chained in ep->ovflist and requeued later on. ep_poll_callback()
1043 if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) { ep_poll_callback()
1045 epi->next = ep->ovflist; ep_poll_callback()
1046 ep->ovflist = epi; ep_poll_callback()
1049 * Activate ep->ws since epi->ws may get ep_poll_callback()
1052 __pm_stay_awake(ep->ws); ep_poll_callback()
1061 list_add_tail(&epi->rdllink, &ep->rdllist); ep_poll_callback()
1069 if (waitqueue_active(&ep->wq)) ep_poll_callback()
1070 wake_up_locked(&ep->wq); ep_poll_callback()
1071 if (waitqueue_active(&ep->poll_wait)) ep_poll_callback()
1075 spin_unlock_irqrestore(&ep->lock, flags); ep_poll_callback()
1079 ep_poll_safewake(&ep->poll_wait); ep_poll_callback()
1107 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) ep_rbtree_insert() argument
1110 struct rb_node **p = &ep->rbr.rb_node, *parent = NULL; ep_rbtree_insert()
1123 rb_insert_color(&epi->rbn, &ep->rbr); ep_rbtree_insert()
1134 * uncontrolled wakeup storms. The common use case should be a single ep which
1172 child_file = epi->ep->file; reverse_path_check_proc()
1190 "file is not an ep!\n"); reverse_path_check_proc()
1229 if (!epi->ep->ws) { ep_create_wakeup_source()
1230 epi->ep->ws = wakeup_source_register("eventpoll"); ep_create_wakeup_source()
1231 if (!epi->ep->ws) ep_create_wakeup_source()
1264 static int ep_insert(struct eventpoll *ep, struct epoll_event *event, ep_insert() argument
1273 user_watches = atomic_long_read(&ep->user->epoll_watches); ep_insert()
1283 epi->ep = ep; ep_insert()
1327 ep_rbtree_insert(ep, epi); ep_insert()
1335 spin_lock_irqsave(&ep->lock, flags); ep_insert()
1339 list_add_tail(&epi->rdllink, &ep->rdllist); ep_insert()
1343 if (waitqueue_active(&ep->wq)) ep_insert()
1344 wake_up_locked(&ep->wq); ep_insert()
1345 if (waitqueue_active(&ep->poll_wait)) ep_insert()
1349 spin_unlock_irqrestore(&ep->lock, flags); ep_insert()
1351 atomic_long_inc(&ep->user->epoll_watches); ep_insert()
1355 ep_poll_safewake(&ep->poll_wait); ep_insert()
1364 rb_erase(&epi->rbn, &ep->rbr); ep_insert()
1367 ep_unregister_pollwait(ep, epi); ep_insert()
1371 * allocated wait queue. Note that we don't care about the ep->ovflist ep_insert()
1375 spin_lock_irqsave(&ep->lock, flags); ep_insert()
1378 spin_unlock_irqrestore(&ep->lock, flags); ep_insert()
1392 static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event) ep_modify() argument
1420 * We need this because we did not take ep->lock while ep_modify()
1422 * ep->lock). ep_modify()
1445 spin_lock_irq(&ep->lock); ep_modify()
1447 list_add_tail(&epi->rdllink, &ep->rdllist); ep_modify()
1451 if (waitqueue_active(&ep->wq)) ep_modify()
1452 wake_up_locked(&ep->wq); ep_modify()
1453 if (waitqueue_active(&ep->poll_wait)) ep_modify()
1456 spin_unlock_irq(&ep->lock); ep_modify()
1461 ep_poll_safewake(&ep->poll_wait); ep_modify()
1466 static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, ep_send_events_proc() argument
1489 * Activate ep->ws before deactivating epi->ws to prevent ep_send_events_proc()
1500 __pm_stay_awake(ep->ws); ep_send_events_proc()
1532 * into ep->rdllist besides us. The epoll_ctl() ep_send_events_proc()
1535 * poll callback will queue them in ep->ovflist. ep_send_events_proc()
1537 list_add_tail(&epi->rdllink, &ep->rdllist); ep_send_events_proc()
1546 static int ep_send_events(struct eventpoll *ep, ep_send_events() argument
1554 return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false); ep_send_events()
1572 * @ep: Pointer to the eventpoll context.
1585 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, ep_poll() argument
1606 spin_lock_irqsave(&ep->lock, flags); ep_poll()
1611 spin_lock_irqsave(&ep->lock, flags); ep_poll()
1613 if (!ep_events_available(ep)) { ep_poll()
1620 __add_wait_queue_exclusive(&ep->wq, &wait); ep_poll()
1629 if (ep_events_available(ep) || timed_out) ep_poll()
1636 spin_unlock_irqrestore(&ep->lock, flags); ep_poll()
1640 spin_lock_irqsave(&ep->lock, flags); ep_poll()
1643 __remove_wait_queue(&ep->wq, &wait); ep_poll()
1648 eavail = ep_events_available(ep); ep_poll()
1650 spin_unlock_irqrestore(&ep->lock, flags); ep_poll()
1658 !(res = ep_send_events(ep, events, maxevents)) && !timed_out) ep_poll()
1677 * structure @ep does not violate the constraints, or -1 otherwise.
1683 struct eventpoll *ep = file->private_data; ep_loop_check_proc() local
1688 mutex_lock_nested(&ep->mtx, call_nests + 1); ep_loop_check_proc()
1689 ep->visited = 1; ep_loop_check_proc()
1690 list_add(&ep->visited_list_link, &visited_list); ep_loop_check_proc()
1691 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { ep_loop_check_proc()
1705 * an ep, then we need to check if the newly added ep_loop_check_proc()
1716 mutex_unlock(&ep->mtx); ep_loop_check_proc()
1723 * another epoll file (represented by @ep) does not create
1726 * @ep: Pointer to the epoll private data structure.
1730 * structure @ep does not violate the constraints, or -1 otherwise.
1732 static int ep_loop_check(struct eventpoll *ep, struct file *file) ep_loop_check() argument
1738 ep_loop_check_proc, file, ep, current); ep_loop_check()
1767 struct eventpoll *ep = NULL; SYSCALL_DEFINE1() local
1778 error = ep_alloc(&ep); SYSCALL_DEFINE1()
1790 file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep, SYSCALL_DEFINE1()
1796 ep->file = file; SYSCALL_DEFINE1()
1803 ep_free(ep); SYSCALL_DEFINE1()
1826 struct eventpoll *ep; SYSCALL_DEFINE4() local
1868 ep = f.file->private_data; SYSCALL_DEFINE4()
1885 mutex_lock_nested(&ep->mtx, 0); SYSCALL_DEFINE4()
1890 mutex_unlock(&ep->mtx); SYSCALL_DEFINE4()
1894 if (ep_loop_check(ep, tf.file) != 0) { SYSCALL_DEFINE4()
1901 mutex_lock_nested(&ep->mtx, 0); SYSCALL_DEFINE4()
1914 epi = ep_find(ep, tf.file, fd); SYSCALL_DEFINE4()
1921 error = ep_insert(ep, &epds, tf.file, fd, full_check); SYSCALL_DEFINE4()
1929 error = ep_remove(ep, epi); SYSCALL_DEFINE4()
1936 error = ep_modify(ep, epi, &epds); SYSCALL_DEFINE4()
1943 mutex_unlock(&ep->mtx); SYSCALL_DEFINE4()
1966 struct eventpoll *ep; SYSCALL_DEFINE4() local
1993 ep = f.file->private_data; SYSCALL_DEFINE4()
1996 error = ep_poll(ep, events, maxevents, timeout); SYSCALL_DEFINE4()
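Aside: the eventpoll.c machinery above is what backs the epoll(7) syscalls; ep_insert() services EPOLL_CTL_ADD and ep_poll() services epoll_wait(). A minimal, runnable userspace counterpart:

    #include <sys/epoll.h>
    #include <unistd.h>

    /* Block until fd is readable; returns epoll_wait()'s count or -1. */
    static int wait_readable(int fd)
    {
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
        struct epoll_event out;
        int n = -1;
        int epfd = epoll_create1(0);        /* allocates an eventpoll */

        if (epfd < 0)
            return -1;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == 0)  /* ep_insert() */
            n = epoll_wait(epfd, &out, 1, -1);             /* ep_poll() */
        close(epfd);
        return n;
    }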
/linux-4.4.14/drivers/usb/wusbcore/
wa-rpipe.c 175 if (rpipe->ep) rpipe_destroy()
176 rpipe->ep->hcpriv = NULL; rpipe_destroy()
264 struct device *dev, struct usb_host_endpoint *ep) rpipe_epc_find()
271 if (ep->desc.bEndpointAddress == 0) { rpipe_epc_find()
275 itr = ep->extra; rpipe_epc_find()
276 itr_size = ep->extralen; rpipe_epc_find()
280 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors " rpipe_epc_find()
282 ep->desc.bEndpointAddress, rpipe_epc_find()
283 itr - (void *) ep->extra, itr_size); rpipe_epc_find()
292 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor " rpipe_epc_find()
295 ep->desc.bEndpointAddress, rpipe_epc_find()
296 itr - (void *) ep->extra, hdr->bDescriptorType, rpipe_epc_find()
314 struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp) rpipe_aim()
323 epcd = rpipe_epc_find(dev, ep); rpipe_aim()
325 dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", rpipe_aim()
326 ep->desc.bEndpointAddress); rpipe_aim()
334 /* FIXME: compute so seg_size > ep->maxpktsize */ rpipe_aim()
337 if (usb_endpoint_xfer_isoc(&ep->desc)) rpipe_aim()
340 rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize; rpipe_aim()
350 dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", rpipe_aim()
357 rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress; rpipe_aim()
367 rpipe->descr.bInterval = ep->desc.bInterval; rpipe_aim()
368 if (usb_endpoint_xfer_isoc(&ep->desc)) rpipe_aim()
373 rpipe->descr.bmAttribute = (ep->desc.bmAttributes & rpipe_aim()
397 const struct usb_host_endpoint *ep, rpipe_check_aim()
418 AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)"); rpipe_check_aim()
419 AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)"); rpipe_check_aim()
420 AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)"); rpipe_check_aim()
435 * Attaches the rpipe to ep->hcpriv and sets rpipe->ep to ep.
437 int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, rpipe_get_by_ep() argument
446 rpipe = ep->hcpriv; rpipe_get_by_ep()
449 result = rpipe_check_aim(rpipe, wa, ep, urb, gfp); rpipe_get_by_ep()
454 dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n", rpipe_get_by_ep()
455 ep->desc.bEndpointAddress, rpipe_get_by_ep()
460 eptype = ep->desc.bmAttributes & 0x03; rpipe_get_by_ep()
464 result = rpipe_aim(rpipe, wa, ep, urb, gfp); rpipe_get_by_ep()
469 ep->hcpriv = rpipe; rpipe_get_by_ep()
470 rpipe->ep = ep; rpipe_get_by_ep()
471 __rpipe_get(rpipe); /* for caching into ep->hcpriv */ rpipe_get_by_ep()
472 dev_dbg(dev, "ep 0x%02x: using rpipe %u\n", rpipe_get_by_ep()
473 ep->desc.bEndpointAddress, rpipe_get_by_ep()
511 * __rpipe_destroy() will clean up ep->hcpriv.
516 void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) rpipe_ep_disable() argument
521 rpipe = ep->hcpriv; rpipe_ep_disable()
537 void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep) rpipe_clear_feature_stalled() argument
542 rpipe = ep->hcpriv; rpipe_clear_feature_stalled()
263 rpipe_epc_find( struct device *dev, struct usb_host_endpoint *ep) rpipe_epc_find() argument
313 rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp) rpipe_aim() argument
396 rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa, const struct usb_host_endpoint *ep, const struct urb *urb, gfp_t gfp) rpipe_check_aim() argument
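
rpipe_get_by_ep() above follows a common HCD caching idiom: the per-endpoint private pointer ep->hcpriv caches the pipe, and the cache itself holds one reference (the __rpipe_get() "for caching into ep->hcpriv"). A hedged sketch of the idiom, where my_pipe, my_pipe_new() and my_pipe_get() are hypothetical stand-ins:

    static struct my_pipe *pipe_get_by_ep(struct usb_host_endpoint *ep)
    {
        struct my_pipe *pipe = ep->hcpriv;  /* fast path: reuse the cache */

        if (!pipe) {
            pipe = my_pipe_new();           /* slow path: allocate + configure */
            if (!pipe)
                return NULL;
            ep->hcpriv = pipe;
            my_pipe_get(pipe);              /* extra ref owned by the cache */
        }
        return pipe;
    }

The endpoint-disable path then drops that cached reference, which is why rpipe_ep_disable() and __rpipe_destroy() both start from ep->hcpriv.
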
/linux-4.4.14/drivers/gpu/drm/
H A Ddrm_of.c47 struct device_node *remote_port, *ep; drm_of_find_possible_crtcs() local
50 for_each_endpoint_of_node(port, ep) { for_each_endpoint_of_node()
51 remote_port = of_graph_get_remote_port(ep); for_each_endpoint_of_node()
53 of_node_put(ep); for_each_endpoint_of_node()
83 struct device_node *ep, *port, *remote; drm_of_component_probe() local
131 for_each_child_of_node(port, ep) { for_each_child_of_node()
132 remote = of_graph_get_remote_port_parent(ep); for_each_child_of_node()
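
drm_of_find_possible_crtcs() iterates the port's OF-graph endpoints; every node returned by of_graph_get_remote_port() must be balanced with of_node_put(), and the endpoint itself must be put on early exit because the iterator holds a reference. A condensed sketch of that pattern (kernel context with <linux/of_graph.h> assumed; the CRTC mapping is elided):

    static u32 find_possible_crtcs(struct device_node *port)
    {
        struct device_node *remote, *ep;
        u32 mask = 0;

        for_each_endpoint_of_node(port, ep) {
            remote = of_graph_get_remote_port(ep);
            if (!remote) {
                of_node_put(ep);       /* drop the iterator's reference */
                return 0;
            }
            /* ... map the remote port to a CRTC bit in mask ... */
            of_node_put(remote);       /* balance of_graph_get_remote_port() */
        }
        return mask;
    }
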
/linux-4.4.14/include/net/sctp/
H A Dauth.h89 int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
92 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
107 int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id);
108 int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
110 int sctp_auth_set_key(struct sctp_endpoint *ep,
113 int sctp_auth_set_active_key(struct sctp_endpoint *ep,
116 int sctp_auth_del_key_id(struct sctp_endpoint *ep,
/linux-4.4.14/drivers/usb/dwc3/
H A Dgadget.h27 #define to_dwc3_ep(ep) (container_of(ep, struct dwc3_ep, endpoint))
85 int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
86 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
87 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
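
to_dwc3_ep() is the usual container_of() accessor: the gadget core only ever hands the driver the embedded struct usb_ep, and subtracting the member offset recovers the enclosing dwc3_ep. A freestanding sketch of the same trick with hypothetical types:

    #include <stddef.h>

    struct usb_ep_like { const char *name; };
    struct my_ep {
        int index;
        struct usb_ep_like endpoint;    /* embedded, as usb_ep is in dwc3_ep */
    };

    /* walk back from a pointer to the member to its parent structure */
    #define to_my_ep(ep) \
        ((struct my_ep *)((char *)(ep) - offsetof(struct my_ep, endpoint)))

    /* usage: struct my_ep e; to_my_ep(&e.endpoint)->index == e.index */
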
/linux-4.4.14/drivers/usb/renesas_usbhs/
H A Dmod_gadget.c38 struct usb_ep ep; member in struct:usbhsg_uep
100 #define usbhsg_ep_to_uep(e) container_of(e, struct usbhsg_uep, ep)
139 usb_gadget_giveback_request(&uep->ep, &ureq->req); __usbhsg_queue_pop()
309 static void __usbhsg_recip_send_complete(struct usb_ep *ep, __usbhsg_recip_send_complete() argument
316 usb_ep_free_request(ep, req); __usbhsg_recip_send_complete()
329 req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC); __usbhsg_recip_send_status()
338 usb_ep_free_request(&dcp->ep, req); __usbhsg_recip_send_status()
581 static int usbhsg_ep_enable(struct usb_ep *ep, usbhsg_ep_enable() argument
584 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); usbhsg_ep_enable()
628 static int usbhsg_ep_disable(struct usb_ep *ep) usbhsg_ep_disable() argument
630 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); usbhsg_ep_disable()
645 static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep, usbhsg_ep_alloc_request() argument
659 static void usbhsg_ep_free_request(struct usb_ep *ep, usbhsg_ep_free_request() argument
668 static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req, usbhsg_ep_queue() argument
671 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); usbhsg_ep_queue()
687 static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) usbhsg_ep_dequeue() argument
689 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); usbhsg_ep_dequeue()
705 static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) __usbhsg_ep_set_halt_wedge() argument
707 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); __usbhsg_ep_set_halt_wedge()
738 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) usbhsg_ep_set_halt() argument
740 return __usbhsg_ep_set_halt_wedge(ep, value, 0); usbhsg_ep_set_halt()
743 static int usbhsg_ep_set_wedge(struct usb_ep *ep) usbhsg_ep_set_wedge() argument
745 return __usbhsg_ep_set_halt_wedge(ep, 1, 1); usbhsg_ep_set_wedge()
882 usbhsg_ep_disable(&dcp->ep); usbhsg_try_stop()
1061 dev_err(dev, "Could not allocate ep\n"); usbhs_mod_gadget_probe()
1107 snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i); usbhsg_for_each_uep_with_dcp()
1109 uep->ep.name = uep->ep_name; usbhsg_for_each_uep_with_dcp()
1110 uep->ep.ops = &usbhsg_ep_ops; usbhsg_for_each_uep_with_dcp()
1111 INIT_LIST_HEAD(&uep->ep.ep_list); usbhsg_for_each_uep_with_dcp()
1115 gpriv->gadget.ep0 = &uep->ep; usbhsg_for_each_uep_with_dcp()
1116 usb_ep_set_maxpacket_limit(&uep->ep, 64); usbhsg_for_each_uep_with_dcp()
1117 uep->ep.caps.type_control = true; usbhsg_for_each_uep_with_dcp()
1121 usb_ep_set_maxpacket_limit(&uep->ep, 512); usbhsg_for_each_uep_with_dcp()
1122 uep->ep.caps.type_iso = true; usbhsg_for_each_uep_with_dcp()
1123 uep->ep.caps.type_bulk = true; usbhsg_for_each_uep_with_dcp()
1124 uep->ep.caps.type_int = true; usbhsg_for_each_uep_with_dcp()
1125 list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list); usbhsg_for_each_uep_with_dcp()
1127 uep->ep.caps.dir_in = true; usbhsg_for_each_uep_with_dcp()
1128 uep->ep.caps.dir_out = true; usbhsg_for_each_uep_with_dcp()
H A Dmod_host.c86 struct usb_host_endpoint *ep; member in struct:usbhsh_ep
135 #define usbhsh_uep_to_ep(u) ((u)->ep)
217 int maxp = usb_endpoint_maxp(&urb->ep->desc); usbhsh_endpoint_sequence_save()
261 struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep); usbhsh_pipe_attach()
264 struct usb_endpoint_descriptor *desc = &urb->ep->desc; usbhsh_pipe_attach()
358 struct usb_host_endpoint *ep = usbhsh_uep_to_ep(uep); usbhsh_pipe_detach() local
367 usb_endpoint_num(&ep->desc), usbhsh_pipe_detach()
384 struct usb_host_endpoint *ep = urb->ep; usbhsh_endpoint_attach() local
387 struct usb_endpoint_descriptor *desc = &ep->desc; usbhsh_endpoint_attach()
407 usbhsh_uep_to_ep(uep) = ep; usbhsh_endpoint_attach()
408 usbhsh_ep_to_uep(ep) = uep; usbhsh_endpoint_attach()
421 struct usb_host_endpoint *ep) usbhsh_endpoint_detach()
425 struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep); usbhsh_endpoint_detach()
433 usb_endpoint_num(&ep->desc)); usbhsh_endpoint_detach()
446 usbhsh_ep_to_uep(ep) = NULL; usbhsh_endpoint_detach()
668 usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep)); usbhsh_queue_done()
679 struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep); usbhsh_queue_push()
885 struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep); usbhsh_dcp_queue_push()
969 struct usb_host_endpoint *ep = urb->ep; usbhsh_urb_enqueue() local
1005 if (!usbhsh_ep_to_uep(ep)) { usbhsh_urb_enqueue()
1034 usbhsh_endpoint_detach(hpriv, ep); usbhsh_urb_enqueue()
1062 struct usb_host_endpoint *ep) usbhsh_endpoint_disable()
1064 struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep); usbhsh_endpoint_disable()
1069 * this function might be called many times by the same hcd/ep usbhsh_endpoint_disable()
1070 * in-endpoint == out-endpoint if ep == dcp. usbhsh_endpoint_disable()
1078 usbhsh_endpoint_detach(hpriv, ep); usbhsh_endpoint_disable()
420 usbhsh_endpoint_detach(struct usbhsh_hpriv *hpriv, struct usb_host_endpoint *ep) usbhsh_endpoint_detach() argument
1061 usbhsh_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) usbhsh_endpoint_disable() argument
/linux-4.4.14/drivers/usb/musb/
H A Dmusb_gadget.c110 struct musb_ep *musb_ep = request->ep; unmap_dma_buffer()
147 struct musb_ep *ep,
150 __releases(ep->musb->lock)
151 __acquires(ep->musb->lock)
155 int busy = ep->busy;
164 ep->busy = 1;
172 ep->end_point.name, request,
176 ep->end_point.name, request,
179 usb_gadget_giveback_request(&req->ep->end_point, &req->request);
181 ep->busy = busy;
188 * caller locked controller and blocked irqs, and selected this ep.
190 static void nuke(struct musb_ep *ep, const int status) nuke() argument
192 struct musb *musb = ep->musb; nuke()
194 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; nuke()
196 ep->busy = 1; nuke()
198 if (is_dma_capable() && ep->dma) { nuke()
199 struct dma_controller *c = ep->musb->dma_controller; nuke()
202 if (ep->is_in) { nuke()
219 value = c->channel_abort(ep->dma); nuke()
221 ep->name, value); nuke()
222 c->channel_release(ep->dma); nuke()
223 ep->dma = NULL; nuke()
226 while (!list_empty(&ep->req_list)) { nuke()
227 req = list_first_entry(&ep->req_list, struct musb_request, list); nuke()
228 musb_g_giveback(ep, &req->request, status); nuke()
241 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) max_ep_writesize() argument
243 if (can_bulk_split(musb, ep->type)) max_ep_writesize()
244 return ep->hw_ep->max_packet_sz_tx; max_ep_writesize()
246 return ep->packet_sz; max_ep_writesize()
251 * the IRQ routine or from ep.queue() to kickstart a request on an
265 musb_ep = req->ep; txstate()
269 dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", txstate()
475 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", musb_g_tx()
583 dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", rxstate()
953 static int musb_gadget_enable(struct usb_ep *ep, musb_gadget_enable() argument
967 if (!ep || !desc) musb_gadget_enable()
970 musb_ep = to_musb_ep(ep); musb_gadget_enable()
1140 static int musb_gadget_disable(struct usb_ep *ep) musb_gadget_disable() argument
1149 musb_ep = to_musb_ep(ep); musb_gadget_disable()
1187 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) musb_alloc_request() argument
1189 struct musb_ep *musb_ep = to_musb_ep(ep); musb_alloc_request()
1201 request->ep = musb_ep; musb_alloc_request()
1210 void musb_free_request(struct usb_ep *ep, struct usb_request *req) musb_free_request() argument
1240 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, musb_gadget_queue() argument
1249 if (!ep || !req) musb_gadget_queue()
1254 musb_ep = to_musb_ep(ep); musb_gadget_queue()
1260 if (request->ep != musb_ep) musb_gadget_queue()
1263 dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); musb_gadget_queue()
1275 /* don't queue if the ep is down */ musb_gadget_queue()
1277 dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", musb_gadget_queue()
1278 req, ep->name, "disabled"); musb_gadget_queue()
1296 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) musb_gadget_dequeue() argument
1298 struct musb_ep *musb_ep = to_musb_ep(ep); musb_gadget_dequeue()
1305 if (!ep || !request || to_musb_request(request)->ep != musb_ep) musb_gadget_dequeue()
1315 dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); musb_gadget_dequeue()
1353 static int musb_gadget_set_halt(struct usb_ep *ep, int value) musb_gadget_set_halt() argument
1355 struct musb_ep *musb_ep = to_musb_ep(ep); musb_gadget_set_halt()
1365 if (!ep) musb_gadget_set_halt()
1382 ep->name); musb_gadget_set_halt()
1390 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); musb_gadget_set_halt()
1399 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); musb_gadget_set_halt()
1438 static int musb_gadget_set_wedge(struct usb_ep *ep) musb_gadget_set_wedge() argument
1440 struct musb_ep *musb_ep = to_musb_ep(ep); musb_gadget_set_wedge()
1442 if (!ep) musb_gadget_set_wedge()
1447 return usb_ep_set_halt(ep); musb_gadget_set_wedge()
1450 static int musb_gadget_fifo_status(struct usb_ep *ep) musb_gadget_fifo_status() argument
1452 struct musb_ep *musb_ep = to_musb_ep(ep); musb_gadget_fifo_status()
1473 static void musb_gadget_fifo_flush(struct usb_ep *ep) musb_gadget_fifo_flush() argument
1475 struct musb_ep *musb_ep = to_musb_ep(ep); musb_gadget_fifo_flush()
1689 struct usb_ep *ep = NULL; musb_match_ep() local
1695 ep = gadget_find_ep_by_name(g, "ep5in"); musb_match_ep()
1697 ep = gadget_find_ep_by_name(g, "ep6out"); musb_match_ep()
1701 ep = gadget_find_ep_by_name(g, "ep1in"); musb_match_ep()
1703 ep = gadget_find_ep_by_name(g, "ep2out"); musb_match_ep()
1709 if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp)) musb_match_ep()
1710 return ep; musb_match_ep()
1744 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) init_peripheral_ep() argument
1748 memset(ep, 0, sizeof *ep); init_peripheral_ep()
1750 ep->current_epnum = epnum; init_peripheral_ep()
1751 ep->musb = musb; init_peripheral_ep()
1752 ep->hw_ep = hw_ep; init_peripheral_ep()
1753 ep->is_in = is_in; init_peripheral_ep()
1755 INIT_LIST_HEAD(&ep->req_list); init_peripheral_ep()
1757 sprintf(ep->name, "ep%d%s", epnum, init_peripheral_ep()
1760 ep->end_point.name = ep->name; init_peripheral_ep()
1761 INIT_LIST_HEAD(&ep->end_point.ep_list); init_peripheral_ep()
1763 usb_ep_set_maxpacket_limit(&ep->end_point, 64); init_peripheral_ep()
1764 ep->end_point.caps.type_control = true; init_peripheral_ep()
1765 ep->end_point.ops = &musb_g_ep0_ops; init_peripheral_ep()
1766 musb->g.ep0 = &ep->end_point; init_peripheral_ep()
1769 usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx); init_peripheral_ep()
1771 usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx); init_peripheral_ep()
1772 ep->end_point.caps.type_iso = true; init_peripheral_ep()
1773 ep->end_point.caps.type_bulk = true; init_peripheral_ep()
1774 ep->end_point.caps.type_int = true; init_peripheral_ep()
1775 ep->end_point.ops = &musb_ep_ops; init_peripheral_ep()
1776 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); init_peripheral_ep()
1780 ep->end_point.caps.dir_in = true; init_peripheral_ep()
1781 ep->end_point.caps.dir_out = true; init_peripheral_ep()
1783 ep->end_point.caps.dir_in = true; init_peripheral_ep()
1785 ep->end_point.caps.dir_out = true; init_peripheral_ep()
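
nuke() above shows the standard teardown idiom for a gadget endpoint: abort any DMA channel, then pop requests off ep->req_list one at a time and give each back with the abort status. A simplified drain loop in that style (kernel <linux/list.h> context assumed; complete_req() is a hypothetical giveback helper):

    struct my_req {
        struct list_head list;
        /* ... request state ... */
    };

    static void drain_queue(struct list_head *queue, int status)
    {
        struct my_req *req;

        while (!list_empty(queue)) {
            req = list_first_entry(queue, struct my_req, list);
            list_del_init(&req->list);   /* unlink before completing */
            complete_req(req, status);   /* give back with -ESHUTDOWN etc. */
        }
    }
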
H A Dmusb_gadget.h81 struct musb_ep *ep; member in struct:musb_request
94 musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
95 extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
127 static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) to_musb_ep() argument
129 return ep ? container_of(ep, struct musb_ep, end_point) : NULL; to_musb_ep()
132 static inline struct musb_request *next_request(struct musb_ep *ep) next_request() argument
134 struct list_head *queue = &ep->req_list; next_request()
H A Dmusb_host.c110 static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) musb_h_tx_flush_fifo() argument
112 struct musb *musb = ep->musb; musb_h_tx_flush_fifo()
113 void __iomem *epio = ep->regs; musb_h_tx_flush_fifo()
139 ep->epnum, csr)) musb_h_tx_flush_fifo()
144 static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) musb_h_ep0_flush_fifo() argument
146 void __iomem *epio = ep->regs; musb_h_ep0_flush_fifo()
161 ep->epnum, csr); musb_h_ep0_flush_fifo()
171 static inline void musb_h_tx_start(struct musb_hw_ep *ep) musb_h_tx_start() argument
176 if (ep->epnum) { musb_h_tx_start()
177 txcsr = musb_readw(ep->regs, MUSB_TXCSR); musb_h_tx_start()
179 musb_writew(ep->regs, MUSB_TXCSR, txcsr); musb_h_tx_start()
182 musb_writew(ep->regs, MUSB_CSR0, txcsr); musb_h_tx_start()
187 static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) musb_h_tx_dma_start() argument
192 txcsr = musb_readw(ep->regs, MUSB_TXCSR); musb_h_tx_dma_start()
194 if (is_cppi_enabled(ep->musb)) musb_h_tx_dma_start()
196 musb_writew(ep->regs, MUSB_TXCSR, txcsr); musb_h_tx_dma_start()
199 static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) musb_ep_set_qh() argument
201 if (is_in != 0 || ep->is_shared_fifo) musb_ep_set_qh()
202 ep->in_qh = qh; musb_ep_set_qh()
203 if (is_in == 0 || ep->is_shared_fifo) musb_ep_set_qh()
204 ep->out_qh = qh; musb_ep_set_qh()
207 static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) musb_ep_get_qh() argument
209 return is_in ? ep->in_qh : ep->out_qh; musb_ep_get_qh()
257 dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", musb_start_urb()
318 "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
363 struct musb_hw_ep *ep = qh->hw_ep; musb_advance_schedule() local
393 ep->rx_reinit = 1; musb_advance_schedule()
394 if (ep->rx_channel) { musb_advance_schedule()
395 dma->channel_release(ep->rx_channel); musb_advance_schedule()
396 ep->rx_channel = NULL; musb_advance_schedule()
399 ep->tx_reinit = 1; musb_advance_schedule()
400 if (ep->tx_channel) { musb_advance_schedule()
401 dma->channel_release(ep->tx_channel); musb_advance_schedule()
402 ep->tx_channel = NULL; musb_advance_schedule()
407 musb_ep_set_qh(ep, is_in, NULL); musb_advance_schedule()
438 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", musb_advance_schedule()
570 struct musb_hw_ep *ep = musb->endpoints + epnum; musb_rx_reinit() local
579 if (ep->is_shared_fifo) { musb_rx_reinit()
580 csr = musb_readw(ep->regs, MUSB_TXCSR); musb_rx_reinit()
582 musb_h_tx_flush_fifo(ep); musb_rx_reinit()
583 csr = musb_readw(ep->regs, MUSB_TXCSR); musb_rx_reinit()
584 musb_writew(ep->regs, MUSB_TXCSR, musb_rx_reinit()
593 musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); musb_rx_reinit()
594 musb_writew(ep->regs, MUSB_TXCSR, 0); musb_rx_reinit()
598 csr = musb_readw(ep->regs, MUSB_RXCSR); musb_rx_reinit()
600 WARNING("rx%d, packet/%d ready?\n", ep->epnum, musb_rx_reinit()
601 musb_readw(ep->regs, MUSB_RXCOUNT)); musb_rx_reinit()
603 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); musb_rx_reinit()
615 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); musb_rx_reinit()
616 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); musb_rx_reinit()
622 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); musb_rx_reinit()
624 musb_writew(ep->regs, MUSB_RXMAXP, musb_rx_reinit()
627 ep->rx_reinit = 0; musb_rx_reinit()
756 dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " musb_ep_program()
940 ERR("broken !rx_reinit, ep%d csr %04x\n", musb_ep_program()
984 static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, musb_bulk_nak_timeout() argument
990 void __iomem *epio = ep->regs; musb_bulk_nak_timeout()
994 musb_ep_select(mbase, ep->epnum); musb_bulk_nak_timeout()
996 dma = is_dma_capable() ? ep->rx_channel : NULL; musb_bulk_nak_timeout()
1006 dma = is_dma_capable() ? ep->tx_channel : NULL; musb_bulk_nak_timeout()
1034 ep->rx_reinit = 1; musb_bulk_nak_timeout()
1043 ep->tx_reinit = 1; musb_bulk_nak_timeout()
1310 dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); musb_host_tx()
1318 "NAK timeout on TX%d ep\n", epnum); musb_host_tx()
1971 "ep %d dma %s, rxcsr %04x, rxcount %d\n", musb_host_rx()
2151 /* use bulk reserved ep1 if no other ep is free */ musb_schedule()
2198 struct usb_host_endpoint *hep = urb->ep; musb_urb_enqueue()
2382 struct musb_hw_ep *ep = qh->hw_ep; musb_cleanup_urb() local
2383 struct musb *musb = ep->musb; musb_cleanup_urb()
2384 void __iomem *epio = ep->regs; musb_cleanup_urb()
2385 unsigned hw_end = ep->epnum; musb_cleanup_urb()
2386 void __iomem *regs = ep->musb->mregs; musb_cleanup_urb()
2396 dma = is_in ? ep->rx_channel : ep->tx_channel; musb_cleanup_urb()
2398 status = ep->musb->dma_controller->channel_abort(dma); musb_cleanup_urb()
2401 is_in ? 'R' : 'T', ep->epnum, musb_cleanup_urb()
2408 if (ep->epnum && is_in) { musb_cleanup_urb()
2410 csr = musb_h_flush_rxfifo(ep, 0); musb_cleanup_urb()
2416 } else if (ep->epnum) { musb_cleanup_urb()
2417 musb_h_tx_flush_fifo(ep); musb_cleanup_urb()
2431 musb_h_ep0_flush_fifo(ep); musb_cleanup_urb()
2434 musb_advance_schedule(ep->musb, urb, ep, is_in); musb_cleanup_urb()
2446 dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, musb_urb_dequeue()
H A Dmusb_gadget_ep0.c106 struct musb_ep *ep; service_tx_status_request() local
119 ep = &musb->endpoints[epnum].ep_in; service_tx_status_request()
121 ep = &musb->endpoints[epnum].ep_out; service_tx_status_request()
125 if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { service_tx_status_request()
260 struct musb_hw_ep *ep; variable in typeref:struct:musb_hw_ep
270 ep = musb->endpoints + epnum;
271 regs = ep->regs;
274 musb_ep = &ep->ep_in;
276 musb_ep = &ep->ep_out;
425 struct musb_hw_ep *ep; variable in typeref:struct:musb_hw_ep
434 ep = musb->endpoints + epnum;
435 regs = ep->regs;
438 musb_ep = &ep->ep_in;
440 musb_ep = &ep->ep_out;
915 musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) musb_g_ep0_enable() argument
930 struct musb_ep *ep; musb_g_ep0_queue() local
940 ep = to_musb_ep(e); musb_g_ep0_queue()
941 musb = ep->musb; musb_g_ep0_queue()
948 req->tx = ep->is_in; musb_g_ep0_queue()
952 if (!list_empty(&ep->req_list)) { musb_g_ep0_queue()
971 list_add_tail(&req->list, &ep->req_list); musb_g_ep0_queue()
974 ep->name, ep->is_in ? "IN/TX" : "OUT/RX", musb_g_ep0_queue()
992 musb_g_ep0_giveback(ep->musb, r); musb_g_ep0_queue()
1009 static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) musb_g_ep0_dequeue() argument
1017 struct musb_ep *ep; musb_g_ep0_halt() local
1027 ep = to_musb_ep(e); musb_g_ep0_halt()
1028 musb = ep->musb; musb_g_ep0_halt()
1035 if (!list_empty(&ep->req_list)) { musb_g_ep0_halt()
H A Dtusb6010.h181 /* Offsets from each ep base register */
204 #define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
H A Dblackfin.h80 #define USB_DMA_REG(ep, reg) (USB_DMA_BASE + 0x20 * ep + reg)
H A Dtusb6010_omap.c76 dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n", tusb_omap_use_shared_dmareq()
96 printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n", tusb_omap_free_shared_dmareq()
130 dev_dbg(musb->controller, "ep%i %s dma callback ch: %i status: %x\n", tusb_omap_dma_cb()
270 dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum); tusb_omap_dma_program()
313 dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", tusb_omap_dma_program()
357 dev_dbg(musb->controller, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", tusb_omap_dma_program()
572 dev_dbg(musb->controller, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", tusb_omap_dma_allocate()
585 dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum); tusb_omap_dma_allocate()
598 dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum, chdat->ch); tusb_omap_dma_release()
/linux-4.4.14/drivers/usb/gadget/legacy/
H A Ddbgp.c80 static void __disable_ep(struct usb_ep *ep) __disable_ep() argument
82 usb_ep_disable(ep); __disable_ep()
91 static void dbgp_complete(struct usb_ep *ep, struct usb_request *req) dbgp_complete() argument
97 if (ep == dbgp.i_ep) { dbgp_complete()
110 err = usb_ep_queue(ep, req, GFP_ATOMIC); dbgp_complete()
127 static int dbgp_enable_ep_req(struct usb_ep *ep) dbgp_enable_ep_req() argument
132 req = usb_ep_alloc_request(ep, GFP_KERNEL); dbgp_enable_ep_req()
148 err = usb_ep_queue(ep, req, GFP_ATOMIC); dbgp_enable_ep_req()
162 "enable ep req: failure (%d:%d)\n", stp, err); dbgp_enable_ep_req()
166 static int __enable_ep(struct usb_ep *ep, struct usb_endpoint_descriptor *desc) __enable_ep() argument
169 ep->desc = desc; __enable_ep()
170 err = usb_ep_enable(ep); __enable_ep()
203 dev_dbg(&dbgp.gadget->dev, "enable ep: failure (%d:%d)\n", stp, err); dbgp_enable_ep()
272 dev_dbg(&dbgp.gadget->dev, "ep config: failure (%d)\n", stp); dbgp_configure_endpoints()
329 static void dbgp_setup_complete(struct usb_ep *ep, dbgp_setup_complete() argument
H A Dinode.c57 * called when each /dev/gadget/ep* file is configured (by writing
195 /* must hold dev->lock before accessing ep or req */
196 struct usb_ep *ep; member in struct:ep_data
272 static void epio_complete (struct usb_ep *ep, struct usb_request *req) epio_complete() argument
274 struct ep_data *epdata = ep->driver_data; epio_complete()
286 * still need dev->lock to use epdata->ep.
321 pr_debug ("%s: ep %p not available, state %d\n", get_ready_ep()
335 if (likely (epdata->ep != NULL)) { ep_io()
342 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC); ep_io()
351 if (likely (epdata->ep != NULL)) { ep_io()
354 usb_ep_dequeue (epdata->ep, epdata->req); ep_io()
387 usb_ep_disable(data->ep); ep_release()
403 if (likely (data->ep != NULL)) { ep_ioctl()
406 status = usb_ep_fifo_status (data->ep); ep_ioctl()
409 usb_ep_fifo_flush (data->ep); ep_ioctl()
412 status = usb_ep_clear_halt (data->ep); ep_ioctl()
449 if (likely(epdata && epdata->ep && priv->req)) ep_aio_cancel()
450 value = usb_ep_dequeue (epdata->ep, priv->req); ep_aio_cancel()
480 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) ep_aio_complete() argument
508 ep->name, req->status, req->actual); ep_aio_complete()
517 usb_ep_free_request(ep, req); ep_aio_complete()
544 if (unlikely(epdata->ep)) ep_aio()
547 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); ep_aio()
557 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); ep_aio()
559 usb_ep_free_request(epdata->ep, req); ep_aio()
594 if (likely(epdata->ep != NULL)) ep_read_iter()
595 usb_ep_set_halt(epdata->ep); ep_read_iter()
656 if (likely(epdata->ep != NULL)) ep_write_iter()
657 usb_ep_set_halt(epdata->ep); ep_write_iter()
723 struct usb_ep *ep; ep_config() local
773 ep = data->ep; ep_config()
774 if (ep == NULL) { ep_config()
782 ep->desc = &data->desc; ep_config()
786 ep->desc = &data->hs_desc; ep_config()
794 value = usb_ep_enable(ep); ep_config()
852 static void clean_req (struct usb_ep *ep, struct usb_request *req) clean_req() argument
854 struct dev_data *dev = ep->driver_data; clean_req()
864 static void ep0_complete (struct usb_ep *ep, struct usb_request *req) ep0_complete() argument
866 struct dev_data *dev = ep->driver_data; ep0_complete()
882 clean_req (ep, req); ep0_complete()
887 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len) setup_req() argument
889 struct dev_data *dev = ep->driver_data; setup_req()
937 struct usb_ep *ep = dev->gadget->ep0; ep0_read() local
940 if ((retval = setup_req (ep, req, 0)) == 0) ep0_read()
941 retval = usb_ep_queue (ep, req, GFP_ATOMIC); ep0_read()
1502 struct ep_data *ep; destroy_ep_files() local
1507 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles); destroy_ep_files()
1508 list_del_init (&ep->epfiles); destroy_ep_files()
1509 dentry = ep->dentry; destroy_ep_files()
1510 ep->dentry = NULL; destroy_ep_files()
1514 if (ep->state == STATE_EP_ENABLED) destroy_ep_files()
1515 (void) usb_ep_disable (ep->ep); destroy_ep_files()
1516 ep->state = STATE_EP_UNBOUND; destroy_ep_files()
1517 usb_ep_free_request (ep->ep, ep->req); destroy_ep_files()
1518 ep->ep = NULL; destroy_ep_files()
1519 wake_up (&ep->wait); destroy_ep_files()
1520 put_ep (ep); destroy_ep_files()
1542 struct usb_ep *ep; activate_ep_files() local
1545 gadget_for_each_ep (ep, dev->gadget) { activate_ep_files()
1554 strncpy (data->name, ep->name, sizeof (data->name) - 1); activate_ep_files()
1559 data->ep = ep; activate_ep_files()
1560 ep->driver_data = data; activate_ep_files()
1562 data->req = usb_ep_alloc_request (ep, GFP_KERNEL); activate_ep_files()
1575 usb_ep_free_request (ep, data->req); activate_ep_files()
H A Dtcm_usb_gadget.c55 static void bot_status_complete(struct usb_ep *ep, struct usb_request *req) bot_status_complete() argument
94 static void bot_err_compl(struct usb_ep *ep, struct usb_request *req) bot_err_compl() argument
103 if (cmd->data_len > ep->maxpacket) { bot_err_compl()
104 req->length = ep->maxpacket; bot_err_compl()
105 cmd->data_len -= ep->maxpacket; bot_err_compl()
111 usb_ep_queue(ep, req, GFP_ATOMIC); bot_err_compl()
122 struct usb_ep *ep; bot_send_bad_status() local
128 ep = fu->ep_in; bot_send_bad_status()
131 ep = fu->ep_out; bot_send_bad_status()
136 req->length = ep->maxpacket; bot_send_bad_status()
137 cmd->data_len -= ep->maxpacket; bot_send_bad_status()
145 usb_ep_queue(ep, req, GFP_KERNEL); bot_send_bad_status()
197 static void bot_read_compl(struct usb_ep *ep, struct usb_request *req) bot_read_compl() argument
295 static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req) bot_cmd_complete() argument
515 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
570 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req) uasp_status_data_cmpl() argument
726 static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req) uasp_cmd_complete() argument
958 static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req) usbg_data_write_cmpl() argument
2019 struct usb_ep *ep; usbg_bind() local
2030 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc, usbg_bind()
2032 if (!ep) usbg_bind()
2034 fu->ep_in = ep; usbg_bind()
2036 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc, usbg_bind()
2038 if (!ep) usbg_bind()
2040 fu->ep_out = ep; usbg_bind()
2042 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc, usbg_bind()
2044 if (!ep) usbg_bind()
2046 fu->ep_status = ep; usbg_bind()
2048 ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc, usbg_bind()
2050 if (!ep) usbg_bind()
2052 fu->ep_cmd = ep; usbg_bind()
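
usbg_bind() shows the canonical bind-time pattern repeated by the f_* functions below: for each endpoint descriptor, usb_ep_autoconfig() (or the _ss variant) picks a matching hardware endpoint, fills in bEndpointAddress, and the function caches the returned usb_ep. A hedged sketch of that shape, where my_fu and the two descriptors are hypothetical:

    static int my_bind(struct usb_configuration *c, struct usb_function *f)
    {
        struct usb_composite_dev *cdev = c->cdev;
        struct usb_ep *ep;

        ep = usb_ep_autoconfig(cdev->gadget, &my_fs_in_desc);
        if (!ep)
            return -ENODEV;         /* no suitable hardware endpoint */
        my_fu->ep_in = ep;

        ep = usb_ep_autoconfig(cdev->gadget, &my_fs_out_desc);
        if (!ep)
            return -ENODEV;
        my_fu->ep_out = ep;
        return 0;
    }
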
/linux-4.4.14/drivers/usb/gadget/function/
H A Df_sourcesink.c299 static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) ss_alloc_ep_req() argument
301 struct f_sourcesink *ss = ep->driver_data; ss_alloc_ep_req()
303 return alloc_ep_req(ep, len, ss->buflen); ss_alloc_ep_req()
306 void free_ep_req(struct usb_ep *ep, struct usb_request *req) free_ep_req() argument
309 usb_ep_free_request(ep, req); free_ep_req()
312 static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) disable_ep() argument
316 value = usb_ep_disable(ep); disable_ep()
318 DBG(cdev, "disable %s --> %d\n", ep->name, value); disable_ep()
518 static void reinit_write_data(struct usb_ep *ep, struct usb_request *req) reinit_write_data() argument
522 int max_packet_size = le16_to_cpu(ep->desc->wMaxPacketSize); reinit_write_data()
523 struct f_sourcesink *ss = ep->driver_data; reinit_write_data()
538 static void source_sink_complete(struct usb_ep *ep, struct usb_request *req) source_sink_complete() argument
541 struct f_sourcesink *ss = ep->driver_data; source_sink_complete()
544 /* driver_data will be null if ep has been disabled */ source_sink_complete()
553 if (ep == ss->out_ep) { source_sink_complete()
561 case -ECONNABORTED: /* hardware forced ep reset */ source_sink_complete()
564 VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status, source_sink_complete()
566 if (ep == ss->out_ep) source_sink_complete()
568 free_ep_req(ep, req); source_sink_complete()
577 DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name, source_sink_complete()
584 status = usb_ep_queue(ep, req, GFP_ATOMIC); source_sink_complete()
587 ep->name, req->length, status); source_sink_complete()
588 usb_ep_set_halt(ep); source_sink_complete()
596 struct usb_ep *ep; source_sink_start_ep() local
616 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; source_sink_start_ep()
617 req = ss_alloc_ep_req(ep, size); source_sink_start_ep()
619 ep = is_in ? ss->in_ep : ss->out_ep; source_sink_start_ep()
620 req = ss_alloc_ep_req(ep, 0); source_sink_start_ep()
628 reinit_write_data(ep, req); source_sink_start_ep()
632 status = usb_ep_queue(ep, req, GFP_ATOMIC); source_sink_start_ep()
639 ep->name, status); source_sink_start_ep()
640 free_ep_req(ep, req); source_sink_start_ep()
666 struct usb_ep *ep; enable_source_sink() local
669 ep = ss->in_ep; enable_source_sink()
670 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
673 result = usb_ep_enable(ep); enable_source_sink()
676 ep->driver_data = ss; enable_source_sink()
681 ep = ss->in_ep; enable_source_sink()
682 usb_ep_disable(ep); enable_source_sink()
687 ep = ss->out_ep; enable_source_sink()
688 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
691 result = usb_ep_enable(ep); enable_source_sink()
694 ep->driver_data = ss; enable_source_sink()
699 ep = ss->out_ep; enable_source_sink()
700 usb_ep_disable(ep); enable_source_sink()
708 ep = ss->iso_in_ep; enable_source_sink()
709 if (ep) { enable_source_sink()
710 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
713 result = usb_ep_enable(ep); enable_source_sink()
716 ep->driver_data = ss; enable_source_sink()
721 ep = ss->iso_in_ep; enable_source_sink()
722 if (ep) enable_source_sink()
723 usb_ep_disable(ep); enable_source_sink()
729 ep = ss->iso_out_ep; enable_source_sink()
730 if (ep) { enable_source_sink()
731 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
734 result = usb_ep_enable(ep); enable_source_sink()
737 ep->driver_data = ss; enable_source_sink()
741 usb_ep_disable(ep); enable_source_sink()
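
source_sink_complete() is the canonical completion-handler shape: status 0 means recycle the request with usb_ep_queue(), while -ECONNABORTED/-ECONNRESET/-ESHUTDOWN mean the endpoint is going away and the request should be freed. A trimmed skeleton of that switch, with buffer ownership simplified to match free_ep_req() above:

    static void my_complete(struct usb_ep *ep, struct usb_request *req)
    {
        switch (req->status) {
        case 0:                      /* normal completion: requeue */
            if (usb_ep_queue(ep, req, GFP_ATOMIC) == 0)
                return;
            break;                   /* queueing failed: fall through to free */
        case -ECONNABORTED:          /* hardware forced ep reset */
        case -ECONNRESET:            /* request dequeued */
        case -ESHUTDOWN:             /* disconnect from host */
        default:
            break;
        }
        kfree(req->buf);             /* as free_ep_req() does above */
        usb_ep_free_request(ep, req);
    }
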
H A Df_loopback.c239 static void loopback_complete(struct usb_ep *ep, struct usb_request *req) loopback_complete() argument
241 struct f_loopback *loop = ep->driver_data; loopback_complete()
247 if (ep == loop->out_ep) { loopback_complete()
250 * queue it so the host can read it from our in ep loopback_complete()
256 ep = loop->in_ep; loopback_complete()
264 ep = loop->out_ep; loopback_complete()
268 status = usb_ep_queue(ep, req, GFP_ATOMIC); loopback_complete()
273 ep->name, status); loopback_complete()
279 ERROR(cdev, "%s loop complete --> %d, %d/%d\n", ep->name, loopback_complete()
288 case -ECONNABORTED: /* hardware forced ep reset */ loopback_complete()
292 usb_ep_free_request(ep == loop->in_ep ? loopback_complete()
295 free_ep_req(ep, req); loopback_complete()
309 static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len) lb_alloc_ep_req() argument
311 struct f_loopback *loop = ep->driver_data; lb_alloc_ep_req()
313 return alloc_ep_req(ep, len, loop->buflen); lb_alloc_ep_req()
367 struct f_loopback *loop, struct usb_ep *ep) enable_endpoint()
371 result = config_ep_by_speed(cdev->gadget, &(loop->function), ep); enable_endpoint()
375 result = usb_ep_enable(ep); enable_endpoint()
378 ep->driver_data = loop; enable_endpoint()
366 enable_endpoint(struct usb_composite_dev *cdev, struct f_loopback *loop, struct usb_ep *ep) enable_endpoint() argument
H A Df_fs.c112 struct usb_ep *ep; /* P: ffs->eps_lock */ member in struct:ffs_ep
124 /* Protects ep->ep and ep->req. */
129 struct ffs_ep *ep; /* P: ffs->eps_lock */ member in struct:ffs_epfile
155 struct usb_ep *ep; member in struct:ffs_io_data
198 static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req) ffs_ep0_complete() argument
637 struct ffs_ep *ep = _ep->driver_data; ffs_epfile_io_complete() local
638 ep->status = req->status ? req->status : req->actual; ffs_epfile_io_complete()
664 usb_ep_free_request(io_data->ep, io_data->req); ffs_user_copy_worker()
686 struct ffs_ep *ep; ffs_epfile_io() local
698 ep = epfile->ep; ffs_epfile_io()
699 if (!ep) { ffs_epfile_io()
705 ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep)); ffs_epfile_io()
730 if (epfile->ep != ep) { ffs_epfile_io()
740 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); ffs_epfile_io()
762 if (epfile->ep != ep) { ffs_epfile_io()
768 if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) ffs_epfile_io()
769 usb_ep_set_halt(ep->ep); ffs_epfile_io()
794 req = usb_ep_alloc_request(ep->ep, GFP_KERNEL); ffs_epfile_io()
802 io_data->ep = ep->ep; ffs_epfile_io()
809 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); ffs_epfile_io()
811 usb_ep_free_request(ep->ep, req); ffs_epfile_io()
820 req = ep->req; ffs_epfile_io()
827 ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); ffs_epfile_io()
836 usb_ep_dequeue(ep->ep, req); ffs_epfile_io()
845 ret = ep->status; ffs_epfile_io()
893 if (likely(io_data && io_data->ep && io_data->req)) ffs_aio_cancel()
894 value = usb_ep_dequeue(io_data->ep, io_data->req); ffs_aio_cancel()
1011 if (likely(epfile->ep)) { ffs_epfile_ioctl()
1014 ret = usb_ep_fifo_status(epfile->ep->ep); ffs_epfile_ioctl()
1017 usb_ep_fifo_flush(epfile->ep->ep); ffs_epfile_ioctl()
1021 ret = usb_ep_clear_halt(epfile->ep->ep); ffs_epfile_ioctl()
1024 ret = epfile->ep->num; ffs_epfile_ioctl()
1041 desc = epfile->ep->descs[desc_idx]; ffs_epfile_ioctl()
1577 sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]); ffs_epfiles_create()
1579 sprintf(epfile->name, "ep%u", i); ffs_epfiles_create()
1614 struct ffs_ep *ep = func->eps; ffs_func_eps_disable() local
1622 if (likely(ep->ep)) ffs_func_eps_disable()
1623 usb_ep_disable(ep->ep); ffs_func_eps_disable()
1624 ++ep; ffs_func_eps_disable()
1627 epfile->ep = NULL; ffs_func_eps_disable()
1637 struct ffs_ep *ep = func->eps; ffs_func_eps_enable() local
1657 ds = ep->descs[desc_idx]; ffs_func_eps_enable()
1665 ep->ep->driver_data = ep; ffs_func_eps_enable()
1666 ep->ep->desc = ds; ffs_func_eps_enable()
1667 ret = usb_ep_enable(ep->ep); ffs_func_eps_enable()
1669 epfile->ep = ep; ffs_func_eps_enable()
1678 ++ep; ffs_func_eps_enable()
2508 ffs_dump_mem(": Original ep desc", ds, ds->bLength); __ffs_func_bind_do_descs()
2509 if (ffs_ep->ep) { __ffs_func_bind_do_descs()
2515 struct usb_ep *ep; __ffs_func_bind_do_descs() local
2524 ep = usb_ep_autoconfig(func->gadget, ds); __ffs_func_bind_do_descs()
2525 if (unlikely(!ep)) __ffs_func_bind_do_descs()
2527 ep->driver_data = func->eps + idx; __ffs_func_bind_do_descs()
2529 req = usb_ep_alloc_request(ep, GFP_KERNEL); __ffs_func_bind_do_descs()
2533 ffs_ep->ep = ep; __ffs_func_bind_do_descs()
2544 ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); __ffs_func_bind_do_descs()
2588 if (unlikely(!func->eps[idx].ep)) __ffs_func_bind_do_nums()
3219 struct ffs_ep *ep = func->eps; ffs_func_unbind() local
3235 if (ep->ep && ep->req) ffs_func_unbind()
3236 usb_ep_free_request(ep->ep, ep->req); ffs_func_unbind()
3237 ep->req = NULL; ffs_func_unbind()
3238 ++ep; ffs_func_unbind()
H A Duvc_video.c163 uvc_video_complete(struct usb_ep *ep, struct usb_request *req) uvc_video_complete() argument
196 if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) { uvc_video_complete()
198 usb_ep_set_halt(ep); uvc_video_complete()
220 usb_ep_free_request(video->ep, video->req[i]); uvc_video_free_requests()
244 req_size = video->ep->maxpacket uvc_video_alloc_requests()
245 * max_t(unsigned int, video->ep->maxburst, 1) uvc_video_alloc_requests()
246 * (video->ep->mult + 1); uvc_video_alloc_requests()
253 video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL); uvc_video_alloc_requests()
323 ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); uvcg_video_pump()
326 usb_ep_set_halt(video->ep); uvcg_video_pump()
348 if (video->ep == NULL) { uvcg_video_enable()
357 usb_ep_dequeue(video->ep, video->req[i]); uvcg_video_enable()
H A Df_uvc.c212 uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) uvc_function_ep0_complete() argument
283 return uvc->video.ep->enabled ? 1 : 0; uvc_function_get_alt()
335 if (uvc->video.ep) uvc_function_set_alt()
336 usb_ep_disable(uvc->video.ep); uvc_function_set_alt()
349 if (!uvc->video.ep) uvc_function_set_alt()
353 usb_ep_disable(uvc->video.ep); uvc_function_set_alt()
356 &(uvc->func), uvc->video.ep); uvc_function_set_alt()
359 usb_ep_enable(uvc->video.ep); uvc_function_set_alt()
385 usb_ep_disable(uvc->video.ep); uvc_function_disable()
568 uvc_streaming_header->bEndpointAddress = uvc->video.ep->address; uvc_copy_descriptors()
584 struct usb_ep *ep; uvc_function_bind() local
631 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); uvc_function_bind()
632 if (!ep) { uvc_function_bind()
636 uvc->control_ep = ep; uvc_function_bind()
639 ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep, uvc_function_bind()
642 ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep); uvc_function_bind()
644 ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep); uvc_function_bind()
646 if (!ep) { uvc_function_bind()
650 uvc->video.ep = ep; uvc_function_bind()
652 uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address; uvc_function_bind()
653 uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address; uvc_function_bind()
654 uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address; uvc_function_bind()
H A Df_midi.c198 static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, midi_alloc_ep_req() argument
201 return alloc_ep_req(ep, length, length); midi_alloc_ep_req()
204 static void free_ep_req(struct usb_ep *ep, struct usb_request *req) free_ep_req() argument
207 usb_ep_free_request(ep, req); free_ep_req()
217 static void f_midi_read_data(struct usb_ep *ep, int cable, f_midi_read_data() argument
220 struct f_midi *midi = ep->driver_data; f_midi_read_data()
233 static void f_midi_handle_out_data(struct usb_ep *ep, struct usb_request *req) f_midi_handle_out_data() argument
242 f_midi_read_data(ep, cable, &buf[i + 1], length); f_midi_handle_out_data()
247 f_midi_complete(struct usb_ep *ep, struct usb_request *req) f_midi_complete() argument
249 struct f_midi *midi = ep->driver_data; f_midi_complete()
255 if (ep == midi->out_ep) { f_midi_complete()
257 f_midi_handle_out_data(ep, req); f_midi_complete()
258 } else if (ep == midi->in_ep) { f_midi_complete()
267 case -ECONNABORTED: /* hardware forced ep reset */ f_midi_complete()
270 VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status, f_midi_complete()
272 if (ep == midi->out_ep) f_midi_complete()
273 f_midi_handle_out_data(ep, req); f_midi_complete()
275 free_ep_req(ep, req); f_midi_complete()
282 DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name, f_midi_complete()
289 status = usb_ep_queue(ep, req, GFP_ATOMIC); f_midi_complete()
292 ep->name, req->length, status); f_midi_complete()
293 usb_ep_set_halt(ep); f_midi_complete()
300 struct usb_ep *ep) f_midi_start_ep()
305 usb_ep_disable(ep); f_midi_start_ep()
307 err = config_ep_by_speed(midi->gadget, f, ep); f_midi_start_ep()
309 ERROR(cdev, "can't configure %s: %d\n", ep->name, err); f_midi_start_ep()
313 err = usb_ep_enable(ep); f_midi_start_ep()
315 ERROR(cdev, "can't start %s: %d\n", ep->name, err); f_midi_start_ep()
319 ep->driver_data = midi; f_midi_start_ep()
516 struct usb_ep *ep = midi->in_ep; f_midi_transmit() local
519 if (!ep) f_midi_transmit()
523 req = midi_alloc_ep_req(ep, midi->buflen); f_midi_transmit()
549 if (req->length > 0 && ep->enabled) { f_midi_transmit()
552 err = usb_ep_queue(ep, req, GFP_ATOMIC); f_midi_transmit()
557 free_ep_req(ep, req); f_midi_transmit()
298 f_midi_start_ep(struct f_midi *midi, struct usb_function *f, struct usb_ep *ep) f_midi_start_ep() argument
H A Df_serial.c194 struct usb_ep *ep; gser_bind() local
218 ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc); gser_bind()
219 if (!ep) gser_bind()
221 gser->port.in = ep; gser_bind()
223 ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc); gser_bind()
224 if (!ep) gser_bind()
226 gser->port.out = ep; gser_bind()
H A Df_acm.c308 static void acm_complete_set_line_coding(struct usb_ep *ep, acm_complete_set_line_coding() argument
311 struct f_acm *acm = ep->driver_data; acm_complete_set_line_coding()
324 usb_ep_set_halt(ep); acm_complete_set_line_coding()
496 struct usb_ep *ep = acm->notify; acm_cdc_notify() local
521 status = usb_ep_queue(ep, req, GFP_ATOMIC); acm_cdc_notify()
553 static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) acm_cdc_notify_complete() argument
613 struct usb_ep *ep; acm_bind() local
650 ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc); acm_bind()
651 if (!ep) acm_bind()
653 acm->port.in = ep; acm_bind()
655 ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc); acm_bind()
656 if (!ep) acm_bind()
658 acm->port.out = ep; acm_bind()
660 ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc); acm_bind()
661 if (!ep) acm_bind()
663 acm->notify = ep; acm_bind()
666 acm->notify_req = gs_alloc_req(ep, acm_bind()
H A Df_phonet.c203 static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req) pn_tx_complete() argument
205 struct f_phonet *fp = ep->driver_data; pn_tx_complete()
320 static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) pn_rx_complete() argument
322 struct f_phonet *fp = ep->driver_data; pn_rx_complete()
491 struct usb_ep *ep; pn_bind() local
529 ep = usb_ep_autoconfig(gadget, &pn_fs_sink_desc); pn_bind()
530 if (!ep) pn_bind()
532 fp->out_ep = ep; pn_bind()
534 ep = usb_ep_autoconfig(gadget, &pn_fs_source_desc); pn_bind()
535 if (!ep) pn_bind()
537 fp->in_ep = ep; pn_bind()
H A Df_uac1.c321 static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) f_audio_out_ep_complete() argument
350 err = usb_ep_queue(ep, req, GFP_ATOMIC); f_audio_out_ep_complete()
352 ERROR(cdev, "%s queue req: %d\n", ep->name, err); f_audio_out_ep_complete()
358 static void f_audio_complete(struct usb_ep *ep, struct usb_request *req) f_audio_complete() argument
368 if (ep == out_ep) f_audio_complete()
369 f_audio_out_ep_complete(ep, req); f_audio_complete()
461 u16 ep = le16_to_cpu(ctrl->wIndex); audio_set_endpoint_req() local
466 ctrl->bRequest, w_value, len, ep); audio_set_endpoint_req()
497 u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); audio_get_endpoint_req() local
502 ctrl->bRequest, w_value, len, ep); audio_get_endpoint_req()
674 struct usb_ep *ep = NULL; f_audio_bind() local
715 ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc); f_audio_bind()
716 if (!ep) f_audio_bind()
718 audio->out_ep = ep; f_audio_bind()
H A Df_obex.c312 struct usb_ep *ep; obex_bind() local
347 ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_in_desc); obex_bind()
348 if (!ep) obex_bind()
350 obex->port.in = ep; obex_bind()
352 ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_out_desc); obex_bind()
353 if (!ep) obex_bind()
355 obex->port.out = ep; obex_bind()
H A Du_serial.c293 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) gs_alloc_req() argument
297 req = usb_ep_alloc_request(ep, kmalloc_flags); gs_alloc_req()
303 usb_ep_free_request(ep, req); gs_alloc_req()
317 void gs_free_req(struct usb_ep *ep, struct usb_request *req) gs_free_req() argument
320 usb_ep_free_request(ep, req); gs_free_req()
584 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) gs_read_complete() argument
586 struct gs_port *port = ep->driver_data; gs_read_complete()
595 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) gs_write_complete() argument
597 struct gs_port *port = ep->driver_data; gs_write_complete()
607 __func__, ep->name, req->status); gs_write_complete()
616 pr_vdebug("%s: %s shutdown\n", __func__, ep->name); gs_write_complete()
623 static void gs_free_requests(struct usb_ep *ep, struct list_head *head, gs_free_requests() argument
631 gs_free_req(ep, req); gs_free_requests()
637 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, gs_alloc_requests() argument
650 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); gs_alloc_requests()
673 struct usb_ep *ep = port->port_usb->out; gs_start_io() local
683 status = gs_alloc_requests(ep, head, gs_read_complete, gs_start_io()
691 gs_free_requests(ep, head, &port->read_allocated); gs_start_io()
703 gs_free_requests(ep, head, &port->read_allocated); gs_start_io()
1165 * On success, ep->driver_data will be overwritten.
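
gs_alloc_req()/gs_free_req() above pair a usb_request with a kmalloc'd buffer and unwind both on failure; most functions in this directory reuse the same allocation shape. A sketch following those hits:

    static struct usb_request *alloc_req(struct usb_ep *ep, unsigned len,
                                         gfp_t flags)
    {
        struct usb_request *req = usb_ep_alloc_request(ep, flags);

        if (!req)
            return NULL;
        req->length = len;
        req->buf = kmalloc(len, flags);
        if (!req->buf) {
            usb_ep_free_request(ep, req);   /* undo the first allocation */
            return NULL;
        }
        return req;
    }
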
H A Df_ecm.c446 static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req) ecm_notify_complete() argument
686 struct usb_ep *ep; ecm_bind() local
743 ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc); ecm_bind()
744 if (!ep) ecm_bind()
746 ecm->port.in_ep = ep; ecm_bind()
748 ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc); ecm_bind()
749 if (!ep) ecm_bind()
751 ecm->port.out_ep = ep; ecm_bind()
757 ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc); ecm_bind()
758 if (!ep) ecm_bind()
760 ecm->notify = ep; ecm_bind()
765 ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); ecm_bind()
H A Df_hid.c255 static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) f_hidg_req_complete() argument
257 struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; f_hidg_req_complete()
365 static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep, hidg_alloc_ep_req() argument
368 return alloc_ep_req(ep, length, length); hidg_alloc_ep_req()
371 static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) hidg_set_report_complete() argument
586 struct usb_ep *ep; hidg_bind() local
608 ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc); hidg_bind()
609 if (!ep) hidg_bind()
611 hidg->in_ep = ep; hidg_bind()
613 ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc); hidg_bind()
614 if (!ep) hidg_bind()
616 hidg->out_ep = ep; hidg_bind()
H A Df_eem.c251 struct usb_ep *ep; eem_bind() local
289 ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc); eem_bind()
290 if (!ep) eem_bind()
292 eem->port.in_ep = ep; eem_bind()
294 ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc); eem_bind()
295 if (!ep) eem_bind()
297 eem->port.out_ep = ep; eem_bind()
328 static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) eem_cmd_complete() argument
H A Df_uac2.c66 bool ep_enabled; /* if the ep is enabled */
171 agdev_iso_complete(struct usb_ep *ep, struct usb_request *req) agdev_iso_complete() argument
259 if (usb_ep_queue(ep, req, GFP_ATOMIC)) agdev_iso_complete()
955 free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep) free_ep() argument
967 usb_ep_dequeue(ep, prm->ureq[i].req); free_ep()
968 usb_ep_free_request(ep, prm->ureq[i].req); free_ep()
973 if (usb_ep_disable(ep)) free_ep()
1145 struct usb_ep *ep; afunc_set_alt() local
1165 ep = agdev->out_ep; afunc_set_alt()
1167 config_ep_by_speed(gadget, fn, ep); afunc_set_alt()
1175 ep = agdev->in_ep; afunc_set_alt()
1177 config_ep_by_speed(gadget, fn, ep); afunc_set_alt()
1210 free_ep(prm, ep); afunc_set_alt()
1215 usb_ep_enable(ep); afunc_set_alt()
1219 req = usb_ep_alloc_request(ep, GFP_ATOMIC); afunc_set_alt()
1233 if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC)) afunc_set_alt()
H A Dg_zero.h62 void free_ep_req(struct usb_ep *ep, struct usb_request *req);
H A Df_subset.c302 struct usb_ep *ep; geth_bind() local
342 ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc); geth_bind()
343 if (!ep) geth_bind()
345 geth->port.in_ep = ep; geth_bind()
347 ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc); geth_bind()
348 if (!ep) geth_bind()
350 geth->port.out_ep = ep; geth_bind()
/linux-4.4.14/drivers/target/tcm_fc/
H A Dtfc_io.c58 struct fc_exch *ep; ft_queue_data_in() local
83 ep = fc_seq_exch(cmd->seq); ft_queue_data_in()
84 lport = ep->lp; ft_queue_data_in()
108 __func__, ep->xid); ft_queue_data_in()
175 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, ft_queue_data_in()
182 __func__, fp, ep->xid, ft_queue_data_in()
213 struct fc_exch *ep; ft_recv_write_data() local
234 ep = fc_seq_exch(seq); ft_recv_write_data()
235 lport = ep->lp; ft_recv_write_data()
237 BUG_ON(!ep); ft_recv_write_data()
250 "not set\n", __func__, ep->xid, f_ctl, ft_recv_write_data()
345 struct fc_exch *ep = NULL; ft_invl_hw_context() local
353 ep = fc_seq_exch(seq); ft_invl_hw_context()
354 if (ep) { ft_invl_hw_context()
355 lport = ep->lp; ft_invl_hw_context()
356 if (lport && (ep->xid <= lport->lro_xid)) { ft_invl_hw_context()
362 ep->xid); ft_invl_hw_context()
368 * identified using ep->xid) ft_invl_hw_context()
H A Dtfc_cmd.c47 struct fc_exch *ep; _ft_dump_cmd() local
69 ep = fc_seq_exch(sp); _ft_dump_cmd()
72 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, _ft_dump_cmd()
73 sp->id, ep->esb_stat); _ft_dump_cmd()
123 struct fc_exch *ep; ft_queue_status() local
130 ep = fc_seq_exch(cmd->seq); ft_queue_status()
131 lport = ep->lp; ft_queue_status()
166 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, ft_queue_status()
172 "xid <0x%x>\n", __func__, fp, ep->xid); ft_queue_status()
201 struct fc_exch *ep; ft_write_pending() local
209 ep = fc_seq_exch(cmd->seq); ft_write_pending()
210 lport = ep->lp; ft_write_pending()
220 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, ft_write_pending()
231 if ((ep->xid <= lport->lro_xid) && ft_write_pending()
234 lport->tt.ddp_target(lport, ep->xid, ft_write_pending()
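
The tcm_fc hits revolve around one pattern: recover the FC exchange for a command with fc_seq_exch(), take the local port from ep->lp, and build reply frames with the source/destination IDs swapped (ep->did becomes the destination of the reply). A hedged sketch along the lines of ft_queue_status(), where cmd and len are assumed context and the f_ctl flags are abbreviated:

    struct fc_exch *ep = fc_seq_exch(cmd->seq);   /* exchange for this cmd */
    struct fc_lport *lport = ep->lp;
    struct fc_frame *fp = fc_frame_alloc(lport, len);

    if (fp)
        /* did/sid swapped: the response travels back to the initiator */
        fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid,
                       FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_LAST_SEQ, 0);
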
/linux-4.4.14/drivers/usb/dwc2/
H A Dgadget.c43 static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep) our_ep() argument
45 return container_of(ep, struct dwc2_hsotg_ep, ep); our_ep()
136 * @ep: The endpoint index
144 unsigned int ep, unsigned int dir_in, dwc2_hsotg_ctrl_epint()
148 u32 bit = 1 << ep; dwc2_hsotg_ctrl_epint()
170 unsigned int ep; dwc2_hsotg_init_fifo() local
200 for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { dwc2_hsotg_init_fifo()
201 if (!hsotg->g_tx_fifo_sz[ep]) dwc2_hsotg_init_fifo()
204 val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT; dwc2_hsotg_init_fifo()
205 WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem, dwc2_hsotg_init_fifo()
207 addr += hsotg->g_tx_fifo_sz[ep]; dwc2_hsotg_init_fifo()
209 dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep)); dwc2_hsotg_init_fifo()
242 * @ep: USB endpoint to allocate request for.
247 static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep, dwc2_hsotg_ep_alloc_request() argument
390 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc; dwc2_hsotg_write_fifo()
500 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize) get_ep_limit()
501 maxsize = maxpkt * hs_ep->ep.maxpacket; get_ep_limit()
548 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n", dwc2_hsotg_start_req()
556 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index); dwc2_hsotg_start_req()
566 int round = maxreq % hs_ep->ep.maxpacket; dwc2_hsotg_start_req()
579 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket); dwc2_hsotg_start_req()
583 if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) { dwc2_hsotg_start_req()
602 if ((ureq->length >= hs_ep->ep.maxpacket) && dwc2_hsotg_start_req()
603 !(ureq->length % hs_ep->ep.maxpacket)) dwc2_hsotg_start_req()
634 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ dwc2_hsotg_start_req()
674 /* check ep is enabled */ dwc2_hsotg_start_req()
677 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n", dwc2_hsotg_start_req()
683 /* enable ep interrupts */ dwc2_hsotg_start_req()
735 hs_ep->ep.name, req_buf, hs_req->req.length); dwc2_hsotg_handle_unaligned_buf_start()
762 hs_ep->ep.name, hs_req->req.status, hs_req->req.actual); dwc2_hsotg_handle_unaligned_buf_complete()
776 static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, dwc2_hsotg_ep_queue() argument
780 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_queue()
786 ep->name, req, req->length, req->buf, req->no_interrupt, dwc2_hsotg_ep_queue()
821 static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req, dwc2_hsotg_ep_queue_lock() argument
824 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_queue_lock()
830 ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags); dwc2_hsotg_ep_queue_lock()
836 static void dwc2_hsotg_ep_free_request(struct usb_ep *ep, dwc2_hsotg_ep_free_request() argument
846 * @ep: The endpoint the request was on.
852 static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, dwc2_hsotg_complete_oursetup() argument
855 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_complete_oursetup()
858 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req); dwc2_hsotg_complete_oursetup()
860 dwc2_hsotg_ep_free_request(ep, req); dwc2_hsotg_complete_oursetup()
874 struct dwc2_hsotg_ep *ep; ep_from_windex() local
884 ep = index_to_ep(hsotg, idx, dir); ep_from_windex()
886 if (idx && ep->dir_in != dir) ep_from_windex()
889 return ep; ep_from_windex()
921 * @ep: Endpoint 0
929 struct dwc2_hsotg_ep *ep, dwc2_hsotg_send_reply()
938 req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC); dwc2_hsotg_send_reply()
957 ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC); dwc2_hsotg_send_reply()
975 struct dwc2_hsotg_ep *ep; dwc2_hsotg_process_req_status() local
998 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); dwc2_hsotg_process_req_status()
999 if (!ep) dwc2_hsotg_process_req_status()
1002 reply = cpu_to_le16(ep->halted ? 1 : 0); dwc2_hsotg_process_req_status()
1021 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
1049 struct dwc2_hsotg_ep *ep; dwc2_hsotg_process_req_feature() local
1086 ep = ep_from_windex(hsotg, wIndex); dwc2_hsotg_process_req_feature()
1087 if (!ep) { dwc2_hsotg_process_req_feature()
1095 halted = ep->halted; dwc2_hsotg_process_req_feature()
1097 dwc2_hsotg_ep_sethalt(&ep->ep, set); dwc2_hsotg_process_req_feature()
1107 * we have to complete all requests for ep if it was dwc2_hsotg_process_req_feature()
1116 if (ep->req) { dwc2_hsotg_process_req_feature()
1117 hs_req = ep->req; dwc2_hsotg_process_req_feature()
1118 ep->req = NULL; dwc2_hsotg_process_req_feature()
1123 &ep->ep, &hs_req->req); dwc2_hsotg_process_req_feature()
1129 if (!ep->req) { dwc2_hsotg_process_req_feature()
1130 restart = !list_empty(&ep->queue); dwc2_hsotg_process_req_feature()
1132 hs_req = get_ep_head(ep); dwc2_hsotg_process_req_feature()
1133 dwc2_hsotg_start_req(hsotg, ep, dwc2_hsotg_process_req_feature()
1268 * @ep: The endpoint the request was on.
1274 static void dwc2_hsotg_complete_setup(struct usb_ep *ep, dwc2_hsotg_complete_setup() argument
1277 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_complete_setup()
1322 ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC); dwc2_hsotg_enqueue_setup()
1341 dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n", dwc2_hsotg_program_zlp()
1344 dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n", dwc2_hsotg_program_zlp()
1353 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ dwc2_hsotg_program_zlp()
1369 * Note, expects the ep to already be locked as appropriate.
1383 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n", dwc2_hsotg_complete_request()
1384 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete); dwc2_hsotg_complete_request()
1409 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req); dwc2_hsotg_complete_request()
1453 "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n", dwc2_hsotg_rx_data()
1733 * @ep: The index number of the endpoint
1740 unsigned int ep, unsigned int mps, unsigned int dir_in) dwc2_hsotg_set_ep_maxpacket()
1748 hs_ep = index_to_ep(hsotg, ep, dir_in); dwc2_hsotg_set_ep_maxpacket()
1752 if (ep == 0) { dwc2_hsotg_set_ep_maxpacket()
1757 hs_ep->ep.maxpacket = mps; dwc2_hsotg_set_ep_maxpacket()
1767 hs_ep->ep.maxpacket = mpsval; dwc2_hsotg_set_ep_maxpacket()
1771 reg = dwc2_readl(regs + DIEPCTL(ep)); dwc2_hsotg_set_ep_maxpacket()
1774 dwc2_writel(reg, regs + DIEPCTL(ep)); dwc2_hsotg_set_ep_maxpacket()
1776 reg = dwc2_readl(regs + DOEPCTL(ep)); dwc2_hsotg_set_ep_maxpacket()
1779 dwc2_writel(reg, regs + DOEPCTL(ep)); dwc2_hsotg_set_ep_maxpacket()
1785 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps); dwc2_hsotg_set_ep_maxpacket()
1846 dev_dbg(hsotg->dev, "trying to write more for ep%d\n", dwc2_hsotg_trytx()
1964 dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n", dwc2_hsotg_epint()
1969 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n", dwc2_hsotg_epint()
2050 dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n", dwc2_hsotg_epint()
2056 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n", dwc2_hsotg_epint()
2063 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n", dwc2_hsotg_epint()
2154 * @ep: The endpoint the requests may be on.
2161 struct dwc2_hsotg_ep *ep, kill_all_requests()
2167 ep->req = NULL; kill_all_requests()
2169 list_for_each_entry_safe(req, treq, &ep->queue, queue) kill_all_requests()
2170 dwc2_hsotg_complete_request(hsotg, ep, req, kill_all_requests()
2175 size = (dwc2_readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4; kill_all_requests()
2176 if (size < ep->fifo_size) kill_all_requests()
2177 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index); kill_all_requests()
2190 unsigned ep; dwc2_hsotg_disconnect() local
2198 for (ep = 0; ep < hsotg->num_of_eps; ep++) { dwc2_hsotg_disconnect()
2199 if (hsotg->eps_in[ep]) dwc2_hsotg_disconnect()
2200 kill_all_requests(hsotg, hsotg->eps_in[ep], dwc2_hsotg_disconnect()
2202 if (hsotg->eps_out[ep]) dwc2_hsotg_disconnect()
2203 kill_all_requests(hsotg, hsotg->eps_out[ep], dwc2_hsotg_disconnect()
2218 struct dwc2_hsotg_ep *ep; dwc2_hsotg_irq_fifoempty() local
2223 ep = index_to_ep(hsotg, epno, 1); dwc2_hsotg_irq_fifoempty()
2225 if (!ep) dwc2_hsotg_irq_fifoempty()
2228 if (!ep->dir_in) dwc2_hsotg_irq_fifoempty()
2231 if ((periodic && !ep->periodic) || dwc2_hsotg_irq_fifoempty()
2232 (!periodic && ep->periodic)) dwc2_hsotg_irq_fifoempty()
2235 ret = dwc2_hsotg_trytx(hsotg, ep); dwc2_hsotg_irq_fifoempty()
2358 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts dwc2_hsotg_core_init_disconnected()
2418 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | dwc2_hsotg_core_init_disconnected()
2424 dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | dwc2_hsotg_core_init_disconnected()
2519 int ep; dwc2_hsotg_irq() local
2527 for (ep = 0; ep < hsotg->num_of_eps && daint_out; dwc2_hsotg_irq()
2528 ep++, daint_out >>= 1) { dwc2_hsotg_irq()
2530 dwc2_hsotg_epint(hsotg, ep, 0); dwc2_hsotg_irq()
2533 for (ep = 0; ep < hsotg->num_of_eps && daint_in; dwc2_hsotg_irq()
2534 ep++, daint_in >>= 1) { dwc2_hsotg_irq()
2536 dwc2_hsotg_epint(hsotg, ep, 1); dwc2_hsotg_irq()
2650 * @ep: The USB endpint to configure
2655 static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dwc2_hsotg_ep_enable() argument
2658 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_enable()
2670 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n", dwc2_hsotg_ep_enable()
2671 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes, dwc2_hsotg_ep_enable()
2751 /* If fifo is already allocated for this ep */ dwc2_hsotg_ep_enable()
2753 size = hs_ep->ep.maxpacket * hs_ep->mc; dwc2_hsotg_ep_enable()
2769 size = hs_ep->ep.maxpacket*hs_ep->mc; dwc2_hsotg_ep_enable()
2816 * @ep: The endpoint to disable.
2818 static int dwc2_hsotg_ep_disable(struct usb_ep *ep) dwc2_hsotg_ep_disable() argument
2820 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_disable()
2828 dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep); dwc2_hsotg_ep_disable()
2830 if (ep == &hsotg->eps_out[0]->ep) { dwc2_hsotg_ep_disable()
2863 * @ep: The endpoint to check.
2866 static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test) on_list() argument
2870 list_for_each_entry_safe(req, treq, &ep->queue, queue) { on_list()
2925 /* Disable ep */ dwc2_hsotg_ep_stop_xfr()
2928 /* Wait for ep to be disabled */ dwc2_hsotg_ep_stop_xfr()
2953 * @ep: The endpoint to dequeue.
2956 static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) dwc2_hsotg_ep_dequeue() argument
2959 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_dequeue()
2963 dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req); dwc2_hsotg_ep_dequeue()
2984 * @ep: The endpoint to set halt.
2987 static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) dwc2_hsotg_ep_sethalt() argument
2989 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_sethalt()
2996 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value); dwc2_hsotg_ep_sethalt()
3047 * @ep: The endpoint to set halt.
3050 static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) dwc2_hsotg_ep_sethalt_lock() argument
3052 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); dwc2_hsotg_ep_sethalt_lock()
3058 ret = dwc2_hsotg_ep_sethalt(ep, value); dwc2_hsotg_ep_sethalt_lock()
3190 int ep; dwc2_hsotg_udc_stop() local
3196 for (ep = 1; ep < hsotg->num_of_eps; ep++) { dwc2_hsotg_udc_stop()
3197 if (hsotg->eps_in[ep]) dwc2_hsotg_udc_stop()
3198 dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); dwc2_hsotg_udc_stop()
3199 if (hsotg->eps_out[ep]) dwc2_hsotg_udc_stop()
3200 dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); dwc2_hsotg_udc_stop()
3351 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir); dwc2_hsotg_initep()
3354 INIT_LIST_HEAD(&hs_ep->ep.ep_list); dwc2_hsotg_initep()
3358 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list); dwc2_hsotg_initep()
3361 hs_ep->ep.name = hs_ep->name; dwc2_hsotg_initep()
3362 usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT); dwc2_hsotg_initep()
3363 hs_ep->ep.ops = &dwc2_hsotg_ep_ops; dwc2_hsotg_initep()
3366 hs_ep->ep.caps.type_control = true; dwc2_hsotg_initep()
3368 hs_ep->ep.caps.type_iso = true; dwc2_hsotg_initep()
3369 hs_ep->ep.caps.type_bulk = true; dwc2_hsotg_initep()
3370 hs_ep->ep.caps.type_int = true; dwc2_hsotg_initep()
3374 hs_ep->ep.caps.dir_in = true; dwc2_hsotg_initep()
3376 hs_ep->ep.caps.dir_out = true; dwc2_hsotg_initep()
3483 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, dwc2_hsotg_dump()
3490 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", dwc2_hsotg_dump()
3638 hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep; dwc2_gadget_init()
3642 hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep, dwc2_gadget_init()
3687 int ep; dwc2_hsotg_suspend() local
3699 for (ep = 0; ep < hsotg->num_of_eps; ep++) { dwc2_hsotg_suspend()
3700 if (hsotg->eps_in[ep]) dwc2_hsotg_suspend()
3701 dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); dwc2_hsotg_suspend()
3702 if (hsotg->eps_out[ep]) dwc2_hsotg_suspend()
3703 dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep); dwc2_hsotg_suspend()
143 dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg, unsigned int ep, unsigned int dir_in, unsigned int en) dwc2_hsotg_ctrl_epint() argument
928 dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *ep, void *buff, int length) dwc2_hsotg_send_reply() argument
1739 dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, unsigned int ep, unsigned int mps, unsigned int dir_in) dwc2_hsotg_set_ep_maxpacket() argument
2160 kill_all_requests(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *ep, int result) kill_all_requests() argument
H A Ddebugfs.c167 seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x", state_show()
259 struct dwc2_hsotg_ep *ep = seq->private; ep_show() local
260 struct dwc2_hsotg *hsotg = ep->parent; ep_show()
263 int index = ep->index; ep_show()
268 ep->index, ep->ep.name, decode_direction(ep->dir_in)); ep_show()
289 seq_printf(seq, "mps %d\n", ep->ep.maxpacket); ep_show()
290 seq_printf(seq, "total_data=%ld\n", ep->total_data); ep_show()
293 ep->queue.next, ep->queue.prev); ep_show()
297 list_for_each_entry(req, &ep->queue, queue) { ep_show()
304 req == ep->req ? '*' : ' ', ep_show()
363 struct dwc2_hsotg_ep *ep; dwc2_hsotg_create_debug() local
365 ep = hsotg->eps_out[epidx]; dwc2_hsotg_create_debug()
366 if (ep) { dwc2_hsotg_create_debug()
367 file = debugfs_create_file(ep->name, S_IRUGO, dwc2_hsotg_create_debug()
368 root, ep, &ep_fops); dwc2_hsotg_create_debug()
371 ep->name); dwc2_hsotg_create_debug()
376 struct dwc2_hsotg_ep *ep; dwc2_hsotg_create_debug() local
378 ep = hsotg->eps_in[epidx]; dwc2_hsotg_create_debug()
379 if (ep) { dwc2_hsotg_create_debug()
380 file = debugfs_create_file(ep->name, S_IRUGO, dwc2_hsotg_create_debug()
381 root, ep, &ep_fops); dwc2_hsotg_create_debug()
384 ep->name); dwc2_hsotg_create_debug()
/linux-4.4.14/drivers/media/platform/xilinx/
H A Dxilinx-vipp.c78 struct device_node *ep = NULL; xvip_graph_build_one() local
86 next = of_graph_get_next_endpoint(entity->node, ep); xvip_graph_build_one()
90 of_node_put(ep); xvip_graph_build_one()
91 ep = next; xvip_graph_build_one()
93 dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name); xvip_graph_build_one()
95 ret = v4l2_of_parse_link(ep, &link); xvip_graph_build_one()
98 ep->full_name); xvip_graph_build_one()
171 of_node_put(ep); xvip_graph_build_one()
198 struct device_node *ep = NULL; xvip_graph_build_dma() local
207 next = of_graph_get_next_endpoint(node, ep); xvip_graph_build_dma()
211 of_node_put(ep); xvip_graph_build_dma()
212 ep = next; xvip_graph_build_dma()
214 dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name); xvip_graph_build_dma()
216 ret = v4l2_of_parse_link(ep, &link); xvip_graph_build_dma()
219 ep->full_name); xvip_graph_build_dma()
285 of_node_put(ep); xvip_graph_build_dma()
353 struct device_node *ep = NULL; xvip_graph_parse_one() local
360 next = of_graph_get_next_endpoint(node, ep); xvip_graph_parse_one()
364 of_node_put(ep); xvip_graph_parse_one()
365 ep = next; xvip_graph_parse_one()
367 dev_dbg(xdev->dev, "handling endpoint %s\n", ep->full_name); xvip_graph_parse_one()
369 remote = of_graph_get_remote_port_parent(ep); xvip_graph_parse_one()
396 of_node_put(ep); xvip_graph_parse_one()
/linux-4.4.14/arch/arm/mach-ixp4xx/include/mach/
H A Dixp4xx-regs.h560 #define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
561 #define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
562 #define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
563 #define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
564 #define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
565 #define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
566 #define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
567 #define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
569 #define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
570 #define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
571 #define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
572 #define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
573 #define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
574 #define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
575 #define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
576 #define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
578 #define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
579 #define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
580 #define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
581 #define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
582 #define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
583 #define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
584 #define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
585 #define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
587 #define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
588 #define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
589 #define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
590 #define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
591 #define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
592 #define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
593 #define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
594 #define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
/linux-4.4.14/drivers/scsi/be2iscsi/
H A Dbe_iscsi.h53 struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
67 int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
84 int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
86 void beiscsi_ep_disconnect(struct iscsi_endpoint *ep);
H A Dbe_iscsi.c40 struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep, beiscsi_session_create() argument
54 if (!ep) { beiscsi_session_create()
56 "beiscsi_session_create: invalid ep\n"); beiscsi_session_create()
59 beiscsi_ep = ep->dd_data; beiscsi_session_create()
151 beiscsi_conn->ep = NULL; beiscsi_conn_create()
206 struct iscsi_endpoint *ep; beiscsi_conn_bind() local
208 ep = iscsi_lookup_endpoint(transport_fd); beiscsi_conn_bind()
209 if (!ep) beiscsi_conn_bind()
212 beiscsi_ep = ep->dd_data; beiscsi_conn_bind()
229 beiscsi_conn->ep = beiscsi_ep; beiscsi_conn_bind()
635 * @ep: pointer to iscsi ep
641 int beiscsi_ep_get_param(struct iscsi_endpoint *ep, beiscsi_ep_get_param() argument
644 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; beiscsi_ep_get_param()
991 beiscsi_ep = beiscsi_conn->ep; beiscsi_conn_start()
1065 * @ep: pointer to iscsi endpoint structure
1095 * @ep: endpoint to be used
1101 static int beiscsi_open_conn(struct iscsi_endpoint *ep, beiscsi_open_conn() argument
1105 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; beiscsi_open_conn()
1127 (beiscsi_ep->ep_cid)] = ep; beiscsi_open_conn()
1177 beiscsi_ep = ep->dd_data; beiscsi_open_conn()
1202 struct iscsi_endpoint *ep; beiscsi_ep_connect() local
1233 ep = iscsi_create_endpoint(sizeof(struct beiscsi_endpoint)); beiscsi_ep_connect()
1234 if (!ep) { beiscsi_ep_connect()
1239 beiscsi_ep = ep->dd_data; beiscsi_ep_connect()
1241 beiscsi_ep->openiscsi_ep = ep; beiscsi_ep_connect()
1242 ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking); beiscsi_ep_connect()
1249 return ep; beiscsi_ep_connect()
1252 iscsi_destroy_endpoint(ep); beiscsi_ep_connect()
1258 * @ep: endpoint to be used
1263 int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) beiscsi_ep_poll() argument
1265 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; beiscsi_ep_poll()
1303 * @ep: The iscsi endpoint
1351 * @ep: endpoint to be used
1355 void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) beiscsi_ep_disconnect() argument
1364 beiscsi_ep = ep->dd_data; beiscsi_ep_disconnect()
/linux-4.4.14/arch/s390/oprofile/
H A Dhwsampler.c68 struct hws_execute_parms *ep = parms; execute_qsi() local
70 ep->rc = qsi(ep->buffer); execute_qsi()
75 struct hws_execute_parms *ep = parms; execute_ssctl() local
77 ep->rc = lsctl(ep->buffer); execute_ssctl()
83 struct hws_execute_parms ep; smp_ctl_ssctl_stop() local
91 ep.buffer = &cb->ssctl; smp_ctl_ssctl_stop()
92 smp_call_function_single(cpu, execute_ssctl, &ep, 1); smp_ctl_ssctl_stop()
93 rc = ep.rc; smp_ctl_ssctl_stop()
99 ep.buffer = &cb->qsi; smp_ctl_ssctl_stop()
100 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_ssctl_stop()
113 struct hws_execute_parms ep; smp_ctl_ssctl_deactivate() local
121 ep.buffer = &cb->ssctl; smp_ctl_ssctl_deactivate()
122 smp_call_function_single(cpu, execute_ssctl, &ep, 1); smp_ctl_ssctl_deactivate()
123 rc = ep.rc; smp_ctl_ssctl_deactivate()
127 ep.buffer = &cb->qsi; smp_ctl_ssctl_deactivate()
128 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_ssctl_deactivate()
139 struct hws_execute_parms ep; smp_ctl_ssctl_enable_activate() local
151 ep.buffer = &cb->ssctl; smp_ctl_ssctl_enable_activate()
152 smp_call_function_single(cpu, execute_ssctl, &ep, 1); smp_ctl_ssctl_enable_activate()
153 rc = ep.rc; smp_ctl_ssctl_enable_activate()
157 ep.buffer = &cb->qsi; smp_ctl_ssctl_enable_activate()
158 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_ssctl_enable_activate()
159 if (ep.rc) smp_ctl_ssctl_enable_activate()
167 struct hws_execute_parms ep; smp_ctl_qsi() local
172 ep.buffer = &cb->qsi; smp_ctl_qsi()
173 smp_call_function_single(cpu, execute_qsi, &ep, 1); smp_ctl_qsi()
175 return ep.rc; smp_ctl_qsi()
/linux-4.4.14/tools/usb/ffs-aio-example/simple/device_app/
H A Daio_simple.c210 int ep[2]; main() local
227 ep_path = malloc(strlen(argv[1]) + 4 /* "/ep#" */ + 1 /* '\0' */); main()
249 sprintf(ep_path, "%s/ep%d", argv[1], i+1); main()
250 ep[i] = open(ep_path, O_RDWR); main()
251 if (ep[i] < 0) { main()
252 printf("unable to open ep%d: %s\n", i+1, main()
314 if (e[i].obj->aio_fildes == ep[0]) { main()
317 } else if (e[i].obj->aio_fildes == ep[1]) { main()
326 io_prep_pwrite(iocb_in, ep[0], buf_in, BUF_LEN, 0); main()
340 io_prep_pread(iocb_out, ep[1], buf_out, BUF_LEN, 0); main()
364 close(ep[i]); main()
/linux-4.4.14/drivers/net/wireless/mwifiex/
H A Dusb.c61 struct sk_buff *skb, u8 ep) mwifiex_usb_recv()
76 switch (ep) { mwifiex_usb_recv()
152 "%s: unknown endport %#x\n", __func__, ep); mwifiex_usb_recv()
180 if (card->rx_cmd_ep == context->ep) mwifiex_usb_rx_complete()
189 /* Do not free skb in case of command ep */ mwifiex_usb_rx_complete()
190 if (card->rx_cmd_ep != context->ep) mwifiex_usb_rx_complete()
199 status = mwifiex_usb_recv(adapter, skb, context->ep); mwifiex_usb_rx_complete()
211 if (card->rx_cmd_ep == context->ep) mwifiex_usb_rx_complete()
218 /* Do not free skb in case of command ep */ mwifiex_usb_rx_complete()
219 if (card->rx_cmd_ep != context->ep) mwifiex_usb_rx_complete()
231 /* Do not free skb in case of command ep */ mwifiex_usb_rx_complete()
232 if (card->rx_cmd_ep != context->ep) mwifiex_usb_rx_complete()
239 if (card->rx_cmd_ep == context->ep) mwifiex_usb_rx_complete()
244 if (card->rx_cmd_ep == context->ep) { mwifiex_usb_rx_complete()
268 if (context->ep == card->tx_cmd_ep) { mwifiex_usb_tx_complete()
278 if (context->ep == port->tx_data_ep) { mwifiex_usb_tx_complete()
302 if (card->rx_cmd_ep != ctx->ep) { mwifiex_usb_submit_rx_urb()
312 usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data, mwifiex_usb_submit_rx_urb()
315 if (card->rx_cmd_ep == ctx->ep) mwifiex_usb_submit_rx_urb()
325 if (card->rx_cmd_ep == ctx->ep) mwifiex_usb_submit_rx_urb()
657 card->tx_cmd.ep = card->tx_cmd_ep; mwifiex_usb_tx_init()
677 port->tx_data_list[j].ep = port->tx_data_ep; mwifiex_usb_tx_init()
697 card->rx_cmd.ep = card->rx_cmd_ep; mwifiex_usb_rx_init()
714 card->rx_data_list[i].ep = card->rx_data_ep; mwifiex_usb_rx_init()
731 u32 *len, u8 ep, u32 timeout) mwifiex_write_data_sync()
740 ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf, mwifiex_write_data_sync()
754 u32 *len, u8 ep, u32 timeout) mwifiex_read_data_sync()
760 ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf, mwifiex_read_data_sync()
846 static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, mwifiex_usb_host_to_card() argument
868 mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep); mwifiex_usb_host_to_card()
870 if (ep == card->tx_cmd_ep) { mwifiex_usb_host_to_card()
874 if (ep == card->port[idx].tx_data_ep) { mwifiex_usb_host_to_card()
896 context->ep = ep; mwifiex_usb_host_to_card()
900 usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep), mwifiex_usb_host_to_card()
906 if (ep == card->tx_cmd_ep) mwifiex_usb_host_to_card()
914 if (ep == card->tx_cmd_ep) { mwifiex_usb_host_to_card()
927 if (ep != card->tx_cmd_ep && mwifiex_usb_host_to_card()
939 if (ep != card->tx_cmd_ep) mwifiex_usb_host_to_card()
1146 static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep) mwifiex_submit_rx_urb() argument
1151 if ((ep == card->rx_cmd_ep) && mwifiex_submit_rx_urb()
60 mwifiex_usb_recv(struct mwifiex_adapter *adapter, struct sk_buff *skb, u8 ep) mwifiex_usb_recv() argument
730 mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, u32 *len, u8 ep, u32 timeout) mwifiex_write_data_sync() argument
753 mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, u32 *len, u8 ep, u32 timeout) mwifiex_read_data_sync() argument
/linux-4.4.14/drivers/media/usb/cx231xx/
H A Dcx231xx-pcb-cfg.c51 0, /* interrupt ep index */
79 0, /* interrupt ep index */
121 0, /* interrupt ep index */
148 0, /* interrupt ep index */
191 0, /* interrupt ep index */
218 0, /* interrupt ep index */
260 0, /* interrupt ep index */
287 0, /* interrupt ep index */
330 0, /* interrupt ep index */
357 0, /* interrupt ep index */
399 0, /* interrupt ep index */
426 0, /* interrupt ep index */
469 0, /* interrupt ep index = 2 */
496 0, /* interrupt ep index = 2 */
536 0, /* interrupt ep index */
562 0, /* interrupt ep index */
603 0, /* interrupt ep index */
629 0, /* interrupt ep index */
/linux-4.4.14/lib/mpi/
H A Dmpi-pow.c41 mpi_ptr_t rp, ep, mp, bp; mpi_powm() local
59 ep = exp->d; mpi_powm()
115 if (rp == ep || rp == mp || rp == bp) { mpi_powm()
134 if (rp == ep) { mpi_powm()
136 ep = ep_marker = mpi_alloc_limb_space(esize); mpi_powm()
137 if (!ep) mpi_powm()
139 MPN_COPY(ep, rp, esize); mpi_powm()
168 negative_result = (ep[0] & 1) && base->sign; mpi_powm()
171 e = ep[i]; mpi_powm()
259 e = ep[i]; mpi_powm()
/linux-4.4.14/drivers/usb/host/whci/
H A Dhcd.c173 struct usb_host_endpoint *ep) whc_endpoint_disable()
179 qset = ep->hcpriv; whc_endpoint_disable()
181 ep->hcpriv = NULL; whc_endpoint_disable()
182 if (usb_endpoint_xfer_bulk(&ep->desc) whc_endpoint_disable()
183 || usb_endpoint_xfer_control(&ep->desc)) whc_endpoint_disable()
191 struct usb_host_endpoint *ep) whc_endpoint_reset()
200 qset = ep->hcpriv; whc_endpoint_reset()
205 if (usb_endpoint_xfer_bulk(&ep->desc) whc_endpoint_reset()
206 || usb_endpoint_xfer_control(&ep->desc)) whc_endpoint_reset()
172 whc_endpoint_disable(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep) whc_endpoint_disable() argument
190 whc_endpoint_reset(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep) whc_endpoint_reset() argument
H A Dqset.c63 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); qset_fill_qh()
65 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; qset_fill_qh()
163 qset = urb->ep->hcpriv; get_qset()
169 qset->ep = urb->ep; get_qset()
170 urb->ep->hcpriv = qset; get_qset()
361 dma_addr_t sp, ep; qset_fill_page_list() local
372 ep = dma_addr + std->len; qset_fill_page_list()
373 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); qset_fill_page_list()
453 dma_addr_t sp, ep; qset_add_urb_sg() local
509 ep = dma_addr + dma_len; qset_add_urb_sg()
510 num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); qset_add_urb_sg()
528 prev_end = dma_addr = ep; qset_add_urb_sg()
/linux-4.4.14/drivers/gpu/drm/tilcdc/
H A Dtilcdc_external.c141 struct device_node *ep = NULL; tilcdc_get_external_components() local
144 while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) { tilcdc_get_external_components()
147 node = of_graph_get_remote_port_parent(ep); tilcdc_get_external_components()
/linux-4.4.14/drivers/md/
H A Ddm-cache-policy-mq.c330 static int epool_init(struct entry_pool *ep, unsigned nr_entries) epool_init() argument
334 ep->entries = vzalloc(sizeof(struct entry) * nr_entries); epool_init()
335 if (!ep->entries) epool_init()
338 ep->entries_end = ep->entries + nr_entries; epool_init()
340 INIT_LIST_HEAD(&ep->free); epool_init()
342 list_add(&ep->entries[i].list, &ep->free); epool_init()
344 ep->nr_allocated = 0; epool_init()
349 static void epool_exit(struct entry_pool *ep) epool_exit() argument
351 vfree(ep->entries); epool_exit()
354 static struct entry *alloc_entry(struct entry_pool *ep) alloc_entry() argument
358 if (list_empty(&ep->free)) alloc_entry()
361 e = list_entry(list_pop(&ep->free), struct entry, list); alloc_entry()
364 ep->nr_allocated++; alloc_entry()
372 static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock) alloc_particular_entry() argument
374 struct entry *e = ep->entries + from_cblock(cblock); alloc_particular_entry()
378 ep->nr_allocated++; alloc_particular_entry()
383 static void free_entry(struct entry_pool *ep, struct entry *e) free_entry() argument
385 BUG_ON(!ep->nr_allocated); free_entry()
386 ep->nr_allocated--; free_entry()
388 list_add(&e->list, &ep->free); free_entry()
394 static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock) epool_find() argument
396 struct entry *e = ep->entries + from_cblock(cblock); epool_find()
400 static bool epool_empty(struct entry_pool *ep) epool_empty() argument
402 return list_empty(&ep->free); epool_empty()
405 static bool in_pool(struct entry_pool *ep, struct entry *e) in_pool() argument
407 return e >= ep->entries && e < ep->entries_end; in_pool()
410 static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e) infer_cblock() argument
412 return to_cblock(e - ep->entries); infer_cblock()
/linux-4.4.14/arch/sparc/kernel/
H A Dmdesc.c388 struct mdesc_elem *ep = node_block(&hp->mdesc); mdesc_node_by_name() local
398 ret = ep[from_node].d.val; mdesc_node_by_name()
402 if (ep[ret].tag != MD_NODE) mdesc_node_by_name()
404 if (!strcmp(names + ep[ret].name_offset, name)) mdesc_node_by_name()
406 ret = ep[ret].d.val; mdesc_node_by_name()
420 struct mdesc_elem *ep; mdesc_get_property() local
425 ep = node_block(&hp->mdesc) + node; mdesc_get_property()
426 ep++; mdesc_get_property()
427 for (; ep->tag != MD_NODE_END; ep++) { mdesc_get_property()
431 switch (ep->tag) { mdesc_get_property()
433 val = &ep->d.val; mdesc_get_property()
439 val = data + ep->d.data.data_offset; mdesc_get_property()
440 len = ep->d.data.data_len; mdesc_get_property()
449 if (!strcmp(names + ep->name_offset, name)) { mdesc_get_property()
462 struct mdesc_elem *ep, *base = node_block(&hp->mdesc); mdesc_next_arc() local
469 ep = base + from; mdesc_next_arc()
471 ep++; mdesc_next_arc()
472 for (; ep->tag != MD_NODE_END; ep++) { mdesc_next_arc()
473 if (ep->tag != MD_PROP_ARC) mdesc_next_arc()
476 if (strcmp(names + ep->name_offset, arc_type)) mdesc_next_arc()
479 return ep - base; mdesc_next_arc()
488 struct mdesc_elem *ep, *base = node_block(&hp->mdesc); mdesc_arc_target() local
490 ep = base + arc; mdesc_arc_target()
492 return ep->d.val; mdesc_arc_target()
498 struct mdesc_elem *ep, *base = node_block(&hp->mdesc); mdesc_node_name() local
505 ep = base + node; mdesc_node_name()
506 if (ep->tag != MD_NODE) mdesc_node_name()
509 return names + ep->name_offset; mdesc_node_name()
H A Dpci_fire.c159 struct pci_msiq_entry *base, *ep; pci_fire_dequeue_msi() local
162 ep = &base[*head]; pci_fire_dequeue_msi()
164 if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0) pci_fire_dequeue_msi()
167 type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >> pci_fire_dequeue_msi()
174 *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >> pci_fire_dequeue_msi()
180 ep->word0 &= ~MSIQ_WORD0_FMT_TYPE; pci_fire_dequeue_msi()
/linux-4.4.14/drivers/infiniband/ulp/iser/
H A Discsi_iser.c466 struct iscsi_endpoint *ep; iscsi_iser_conn_bind() local
473 /* the transport ep handle comes from user space so it must be iscsi_iser_conn_bind()
475 ep = iscsi_lookup_endpoint(transport_eph); iscsi_iser_conn_bind()
476 if (!ep) { iscsi_iser_conn_bind()
481 iser_conn = ep->dd_data; iscsi_iser_conn_bind()
597 * @ep: iscsi end-point handle
606 iscsi_iser_session_create(struct iscsi_endpoint *ep, iscsi_iser_session_create() argument
629 * the leading conn's ep so this will be NULL; iscsi_iser_session_create()
631 if (ep) { iscsi_iser_session_create()
632 iser_conn = ep->dd_data; iscsi_iser_session_create()
768 static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, iscsi_iser_get_ep_param() argument
771 struct iser_conn *iser_conn = ep->dd_data; iscsi_iser_get_ep_param()
812 struct iscsi_endpoint *ep; iscsi_iser_ep_connect() local
814 ep = iscsi_create_endpoint(0); iscsi_iser_ep_connect()
815 if (!ep) iscsi_iser_ep_connect()
824 ep->dd_data = iser_conn; iscsi_iser_ep_connect()
825 iser_conn->ep = ep; iscsi_iser_ep_connect()
832 return ep; iscsi_iser_ep_connect()
834 iscsi_destroy_endpoint(ep); iscsi_iser_ep_connect()
840 * @ep: iscsi endpoint (created at ep_connect)
852 iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) iscsi_iser_ep_poll() argument
854 struct iser_conn *iser_conn = ep->dd_data; iscsi_iser_ep_poll()
880 * @ep: iscsi endpoint handle
888 iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) iscsi_iser_ep_disconnect() argument
890 struct iser_conn *iser_conn = ep->dd_data; iscsi_iser_ep_disconnect()
892 iser_info("ep %p iser conn %p\n", ep, iser_conn); iscsi_iser_ep_disconnect()
913 iscsi_destroy_endpoint(ep); iscsi_iser_ep_disconnect()
/linux-4.4.14/drivers/media/platform/exynos4-is/
H A Dmedia-dev.c197 static int __fimc_pipeline_open(struct exynos_media_pipeline *ep, __fimc_pipeline_open() argument
201 struct fimc_pipeline *p = to_fimc_pipeline(ep); __fimc_pipeline_open()
238 static int __fimc_pipeline_close(struct exynos_media_pipeline *ep) __fimc_pipeline_close() argument
240 struct fimc_pipeline *p = to_fimc_pipeline(ep); __fimc_pipeline_close()
266 static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on) __fimc_pipeline_s_stream() argument
272 struct fimc_pipeline *p = to_fimc_pipeline(ep); __fimc_pipeline_s_stream()
313 p->ep.ops = &fimc_pipeline_ops; fimc_md_pipeline_create()
314 return &p->ep; fimc_md_pipeline_create()
334 struct device_node *rem, *ep, *np; fimc_md_parse_port_node() local
338 ep = of_get_next_child(port, NULL); fimc_md_parse_port_node()
339 if (!ep) fimc_md_parse_port_node()
342 v4l2_of_parse_endpoint(ep, &endpoint); fimc_md_parse_port_node()
348 rem = of_graph_get_remote_port_parent(ep); fimc_md_parse_port_node()
349 of_node_put(ep); fimc_md_parse_port_node()
352 ep->full_name); fimc_md_parse_port_node()
472 struct exynos_media_pipeline *ep; register_fimc_lite_entity() local
482 ep = fimc_md_pipeline_create(fmd); register_fimc_lite_entity()
483 if (!ep) register_fimc_lite_entity()
486 v4l2_set_subdev_hostdata(sd, ep); register_fimc_lite_entity()
500 struct exynos_media_pipeline *ep; register_fimc_entity() local
509 ep = fimc_md_pipeline_create(fmd); register_fimc_entity()
510 if (!ep) register_fimc_entity()
513 v4l2_set_subdev_hostdata(sd, ep); register_fimc_entity()
556 struct exynos_media_pipeline *ep; register_fimc_is_entity() local
560 ep = fimc_md_pipeline_create(fmd); register_fimc_is_entity()
561 if (!ep) register_fimc_is_entity()
564 v4l2_set_subdev_hostdata(sd, ep); register_fimc_is_entity()
H A Dmedia-dev.h61 struct exynos_media_pipeline ep; member in struct:fimc_pipeline
67 #define to_fimc_pipeline(_ep) container_of(_ep, struct fimc_pipeline, ep)
199 struct exynos_media_pipeline *ep, __fimc_md_get_subdev()
202 struct fimc_pipeline *p = to_fimc_pipeline(ep); __fimc_md_get_subdev()
198 __fimc_md_get_subdev( struct exynos_media_pipeline *ep, unsigned int index) __fimc_md_get_subdev() argument
/linux-4.4.14/drivers/media/usb/uvc/
H A Duvc_status.c165 struct usb_host_endpoint *ep = dev->int_ep; uvc_status_init() local
169 if (ep == NULL) uvc_status_init()
184 pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress); uvc_status_init()
189 interval = ep->desc.bInterval; uvc_status_init()
/linux-4.4.14/drivers/usb/usbip/
H A Dstub_rx.c347 struct usb_host_endpoint *ep; get_pipe() local
351 ep = udev->ep_in[epnum & 0x7f]; get_pipe()
353 ep = udev->ep_out[epnum & 0x7f]; get_pipe()
354 if (!ep) { get_pipe()
360 epd = &ep->desc; get_pipe()
398 struct usb_host_endpoint *ep; masking_bogus_flags() local
408 ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out) masking_bogus_flags()
410 if (!ep) masking_bogus_flags()
413 xfertype = usb_endpoint_type(&ep->desc); masking_bogus_flags()
423 is_out = usb_endpoint_dir_out(&ep->desc); masking_bogus_flags()
455 int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); stub_recv_cmd_submit()
/linux-4.4.14/drivers/gpu/drm/rockchip/
H A Drockchip_drm_drv.c391 struct device_node *ep; rockchip_drm_encoder_get_mux_id() local
400 for_each_endpoint_of_node(node, ep) { for_each_endpoint_of_node()
401 port = of_graph_get_remote_port(ep); for_each_endpoint_of_node()
404 ret = of_graph_parse_endpoint(ep, &endpoint); for_each_endpoint_of_node()
405 of_node_put(ep); for_each_endpoint_of_node()
425 struct device_node *ep, *remote; rockchip_add_endpoints() local
427 for_each_child_of_node(port, ep) { for_each_child_of_node()
428 remote = of_graph_get_remote_port_parent(ep); for_each_child_of_node()
/linux-4.4.14/drivers/net/wireless/mediatek/mt7601u/
H A Ddma.c280 struct sk_buff *skb, u8 ep) mt7601u_dma_submit_tx()
283 unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]); mt7601u_dma_submit_tx()
285 struct mt7601u_tx_queue *q = &dev->tx_q[ep]; mt7601u_dma_submit_tx()
332 static enum mt76_qsel ep2dmaq(u8 ep) ep2dmaq() argument
334 if (ep == 5) ep2dmaq()
342 u8 ep = q2ep(hw_q); mt7601u_dma_enqueue_tx() local
350 ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags); mt7601u_dma_enqueue_tx()
354 ret = mt7601u_dma_submit_tx(dev, skb, ep); mt7601u_dma_enqueue_tx()
279 mt7601u_dma_submit_tx(struct mt7601u_dev *dev, struct sk_buff *skb, u8 ep) mt7601u_dma_submit_tx() argument
/linux-4.4.14/arch/c6x/kernel/
H A Dmodule.c20 long ep = (long)ip & ~31; fixup_pcr() local
21 long delta = ((long)dest - ep) >> 2; fixup_pcr()
/linux-4.4.14/drivers/usb/c67x00/
H A Dc67x00.h105 #define DEVICE_N_ENDPOINT_N_CTL_REG(dev, ep) ((dev) \
106 ? (0x0280 + (ep << 4)) \
107 : (0x0200 + (ep << 4)))
108 #define DEVICE_N_ENDPOINT_N_STAT_REG(dev, ep) ((dev) \
109 ? (0x0286 + (ep << 4)) \
110 : (0x0206 + (ep << 4)))
/linux-4.4.14/arch/x86/pci/
H A Dsta2x11-fixup.c87 int ep; sta2x11_pdev_to_instance() local
90 ep = pdev->bus->number - instance->bus0; sta2x11_pdev_to_instance()
91 if (ep >= 0 && ep < STA2X11_NR_EP) sta2x11_pdev_to_instance()
111 int ep; sta2x11_pdev_to_mapping() local
116 ep = sta2x11_pdev_to_ep(pdev); sta2x11_pdev_to_mapping()
117 return instance->map + ep; sta2x11_pdev_to_mapping()
/linux-4.4.14/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_output.c229 struct of_endpoint *ep) atmel_hlcdc_create_panel_output()
237 np = of_graph_get_remote_port_parent(ep->local_node); atmel_hlcdc_create_panel_output()
291 struct of_endpoint ep; atmel_hlcdc_create_outputs() local
304 ret = of_graph_parse_endpoint(np, &ep); atmel_hlcdc_create_outputs()
311 return atmel_hlcdc_create_panel_output(dev, &ep); atmel_hlcdc_create_outputs()
228 atmel_hlcdc_create_panel_output(struct drm_device *dev, struct of_endpoint *ep) atmel_hlcdc_create_panel_output() argument
/linux-4.4.14/drivers/gpu/drm/exynos/
H A Dexynos_drm_dpi.c246 struct device_node *np, *ep; exynos_dpi_of_find_panel_node() local
252 ep = of_graph_get_endpoint_by_reg(np, 0); exynos_dpi_of_find_panel_node()
254 if (!ep) exynos_dpi_of_find_panel_node()
257 np = of_graph_get_remote_port_parent(ep); exynos_dpi_of_find_panel_node()
258 of_node_put(ep); exynos_dpi_of_find_panel_node()
/linux-4.4.14/drivers/media/usb/gspca/
H A Dgspca.c193 struct usb_endpoint_descriptor *ep) alloc_and_submit_int_urb()
202 buffer_len = le16_to_cpu(ep->wMaxPacketSize); alloc_and_submit_int_urb()
203 interval = ep->bInterval; alloc_and_submit_int_urb()
206 ep->bEndpointAddress, buffer_len, interval); alloc_and_submit_int_urb()
223 usb_rcvintpipe(dev, ep->bEndpointAddress), alloc_and_submit_int_urb()
250 struct usb_endpoint_descriptor *ep; gspca_input_create_urb() local
257 ep = &intf_desc->endpoint[i].desc; gspca_input_create_urb()
258 if (usb_endpoint_dir_in(ep) && gspca_input_create_urb()
259 usb_endpoint_xfer_int(ep)) { gspca_input_create_urb()
261 alloc_and_submit_int_urb(gspca_dev, ep); gspca_input_create_urb()
608 * If xfer_ep is invalid, return the first valid ep found, otherwise
609 * look for exactly the ep with address equal to xfer_ep.
614 struct usb_host_endpoint *ep; alt_xfer() local
618 ep = &alt->endpoint[i]; alt_xfer()
619 attr = ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; alt_xfer()
621 && ep->desc.wMaxPacketSize != 0 alt_xfer()
622 && usb_endpoint_dir_in(&ep->desc) alt_xfer()
623 && (xfer_ep < 0 || ep->desc.bEndpointAddress == xfer_ep)) alt_xfer()
624 return ep; alt_xfer()
680 struct usb_host_endpoint *ep; build_isoc_ep_tb() local
695 ep = alt_xfer(&intf->altsetting[j], build_isoc_ep_tb()
698 if (ep == NULL) build_isoc_ep_tb()
700 if (ep->desc.bInterval == 0) { build_isoc_ep_tb()
704 psize = le16_to_cpu(ep->desc.wMaxPacketSize); build_isoc_ep_tb()
710 bandwidth /= 1 << (ep->desc.bInterval - 1); build_isoc_ep_tb()
761 struct usb_host_endpoint *ep) create_urbs()
767 psize = le16_to_cpu(ep->desc.wMaxPacketSize); create_urbs()
817 ep->desc.bEndpointAddress); create_urbs()
820 urb->interval = 1 << (ep->desc.bInterval - 1); create_urbs()
829 ep->desc.bEndpointAddress); create_urbs()
843 struct usb_host_endpoint *ep; gspca_init_transfer() local
870 ep = alt_xfer(&intf->altsetting[gspca_dev->alt], xfer, gspca_init_transfer()
872 if (ep == NULL) { gspca_init_transfer()
192 alloc_and_submit_int_urb(struct gspca_dev *gspca_dev, struct usb_endpoint_descriptor *ep) alloc_and_submit_int_urb() argument
760 create_urbs(struct gspca_dev *gspca_dev, struct usb_host_endpoint *ep) create_urbs() argument
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_bmap.c346 xfs_bmbt_rec_t *ep; /* pointer to current extent */ xfs_bmap_check_leaf_extents() local
435 ep = XFS_BMBT_REC_ADDR(mp, block, 1); xfs_bmap_check_leaf_extents()
439 xfs_bmbt_disk_get_startoff(ep)); xfs_bmap_check_leaf_extents()
443 ASSERT(xfs_bmbt_disk_get_startoff(ep) + xfs_bmap_check_leaf_extents()
444 xfs_bmbt_disk_get_blockcount(ep) <= xfs_bmap_check_leaf_extents()
446 ep = nextp; xfs_bmap_check_leaf_extents()
449 last = *ep; xfs_bmap_check_leaf_extents()
734 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ xfs_bmap_extents_to_btree() local
829 ep = xfs_iext_get_ext(ifp, i); xfs_bmap_extents_to_btree()
830 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) { xfs_bmap_extents_to_btree()
831 arp->l0 = cpu_to_be64(ep->l0); xfs_bmap_extents_to_btree()
832 arp->l1 = cpu_to_be64(ep->l1); xfs_bmap_extents_to_btree()
903 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ xfs_bmap_local_to_extents() local
966 ep = xfs_iext_get_ext(ifp, 0); xfs_bmap_local_to_extents()
967 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); xfs_bmap_local_to_extents()
1389 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ xfs_bmap_search_multi_extents() local
1402 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); xfs_bmap_search_multi_extents()
1407 xfs_bmbt_get_all(ep, gotp); xfs_bmap_search_multi_extents()
1414 ep = NULL; xfs_bmap_search_multi_extents()
1417 return ep; xfs_bmap_search_multi_extents()
1438 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ xfs_bmap_search_extents() local
1443 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp); xfs_bmap_search_extents()
1460 return ep; xfs_bmap_search_extents()
1501 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx); xfs_bmap_first_unused() local
1502 off = xfs_bmbt_get_startoff(ep); xfs_bmap_first_unused()
1510 lastaddr = off + xfs_bmbt_get_blockcount(ep); xfs_bmap_first_unused()
1532 xfs_bmbt_rec_host_t *ep; /* pointer to last extent */ xfs_bmap_last_before() local
1552 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, xfs_bmap_last_before()
1554 if (eof || xfs_bmbt_get_startoff(ep) > bno) { xfs_bmap_last_before()
1676 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */ xfs_bmap_one_block() local
1691 ep = xfs_iext_get_ext(ifp, 0); xfs_bmap_one_block()
1692 xfs_bmbt_get_all(ep, &s); xfs_bmap_one_block()
1712 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ xfs_bmap_add_extent_delay_real() local
1746 ep = xfs_iext_get_ext(ifp, bma->idx); xfs_bmap_add_extent_delay_real()
1747 xfs_bmbt_get_all(ep, &PREV); xfs_bmap_add_extent_delay_real()
1896 xfs_bmbt_set_startblock(ep, new->br_startblock); xfs_bmap_add_extent_delay_real()
1897 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_delay_real()
1928 xfs_bmbt_set_startblock(ep, new->br_startblock); xfs_bmap_add_extent_delay_real()
1958 xfs_bmbt_set_startoff(ep, xfs_bmap_add_extent_delay_real()
1964 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_add_extent_delay_real()
1985 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); xfs_bmap_add_extent_delay_real()
1997 xfs_bmbt_set_startoff(ep, new_endoff); xfs_bmap_add_extent_delay_real()
1999 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_add_extent_delay_real()
2030 ep = xfs_iext_get_ext(ifp, bma->idx + 1); xfs_bmap_add_extent_delay_real()
2031 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); xfs_bmap_add_extent_delay_real()
2042 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_add_extent_delay_real()
2070 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); xfs_bmap_add_extent_delay_real()
2083 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_add_extent_delay_real()
2114 ep = xfs_iext_get_ext(ifp, bma->idx); xfs_bmap_add_extent_delay_real()
2115 xfs_bmbt_set_startblock(ep, nullstartblock(da_new)); xfs_bmap_add_extent_delay_real()
2145 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ xfs_bmap_add_extent_delay_real()
2192 ep = xfs_iext_get_ext(ifp, bma->idx); xfs_bmap_add_extent_delay_real()
2193 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); xfs_bmap_add_extent_delay_real()
2269 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ xfs_bmap_add_extent_unwritten_real() local
2301 ep = xfs_iext_get_ext(ifp, *idx); xfs_bmap_add_extent_unwritten_real()
2302 xfs_bmbt_get_all(ep, &PREV); xfs_bmap_add_extent_unwritten_real()
2457 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_unwritten_real()
2459 xfs_bmbt_set_state(ep, newext); xfs_bmap_add_extent_unwritten_real()
2493 xfs_bmbt_set_state(ep, newext); xfs_bmap_add_extent_unwritten_real()
2520 xfs_bmbt_set_startoff(ep, xfs_bmap_add_extent_unwritten_real()
2525 xfs_bmbt_set_startblock(ep, xfs_bmap_add_extent_unwritten_real()
2527 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_unwritten_real()
2565 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); xfs_bmap_add_extent_unwritten_real()
2566 xfs_bmbt_set_startoff(ep, new_endoff); xfs_bmap_add_extent_unwritten_real()
2567 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_unwritten_real()
2569 xfs_bmbt_set_startblock(ep, xfs_bmap_add_extent_unwritten_real()
2603 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_unwritten_real()
2645 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_unwritten_real()
2686 xfs_bmbt_set_blockcount(ep, xfs_bmap_add_extent_unwritten_real()
4738 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */ xfs_bmap_del_extent() local
4764 ep = xfs_iext_get_ext(ifp, *idx); xfs_bmap_del_extent()
4765 xfs_bmbt_get_all(ep, &got); xfs_bmap_del_extent()
4861 xfs_bmbt_set_startoff(ep, del_endoff); xfs_bmap_del_extent()
4863 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_del_extent()
4867 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); xfs_bmap_del_extent()
4872 xfs_bmbt_set_startblock(ep, del_endblock); xfs_bmap_del_extent()
4890 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_del_extent()
4894 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); xfs_bmap_del_extent()
4917 xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_del_extent()
4969 xfs_bmbt_set_blockcount(ep, xfs_bmap_del_extent()
4983 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); xfs_bmap_del_extent()
4991 xfs_bmbt_set_startblock(ep, xfs_bmap_del_extent()
5060 xfs_bmbt_rec_host_t *ep; /* extent record pointer */ xfs_bunmapi() local
5110 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, xfs_bunmapi()
5118 ep = xfs_iext_get_ext(ifp, --lastx); xfs_bunmapi()
5119 xfs_bmbt_get_all(ep, &got); xfs_bunmapi()
5150 ep = xfs_iext_get_ext(ifp, lastx); xfs_bunmapi()
5151 xfs_bmbt_get_all(ep, &got); xfs_bunmapi()
5165 ASSERT(ep != NULL); xfs_bunmapi()
5248 ep = xfs_iext_get_ext(ifp, xfs_bunmapi()
5250 xfs_bmbt_get_all(ep, &got); xfs_bunmapi()
5351 ep = xfs_iext_get_ext(ifp, lastx); xfs_bunmapi()
5352 if (xfs_bmbt_get_startoff(ep) > bno) { xfs_bunmapi()
5354 ep = xfs_iext_get_ext(ifp, xfs_bunmapi()
5357 xfs_bmbt_get_all(ep, &got); xfs_bunmapi()
/linux-4.4.14/drivers/scsi/
H A Dscsi_transport_iscsi.c158 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); iscsi_endpoint_release() local
159 kfree(ep); iscsi_endpoint_release()
170 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); show_ep_handle() local
171 return sprintf(buf, "%llu\n", (unsigned long long) ep->id); show_ep_handle()
173 static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
188 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); iscsi_match_epid() local
191 return *epid == ep->id; iscsi_match_epid()
198 struct iscsi_endpoint *ep; iscsi_create_endpoint() local
216 ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); iscsi_create_endpoint()
217 if (!ep) iscsi_create_endpoint()
220 ep->id = id; iscsi_create_endpoint()
221 ep->dev.class = &iscsi_endpoint_class; iscsi_create_endpoint()
222 dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id); iscsi_create_endpoint()
223 err = device_register(&ep->dev); iscsi_create_endpoint()
227 err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); iscsi_create_endpoint()
232 ep->dd_data = &ep[1]; iscsi_create_endpoint()
233 return ep; iscsi_create_endpoint()
236 device_unregister(&ep->dev); iscsi_create_endpoint()
240 kfree(ep); iscsi_create_endpoint()
245 void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) iscsi_destroy_endpoint() argument
247 sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group); iscsi_destroy_endpoint()
248 device_unregister(&ep->dev); iscsi_destroy_endpoint()
254 struct iscsi_endpoint *ep; iscsi_lookup_endpoint() local
262 ep = iscsi_dev_to_endpoint(dev); iscsi_lookup_endpoint()
268 return ep; iscsi_lookup_endpoint()
2708 iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, iscsi_if_create_session() argument
2717 session = transport->create_session(ep, cmds_max, queue_depth, iscsi_if_create_session()
2804 struct iscsi_endpoint *ep; iscsi_if_ep_connect() local
2815 printk(KERN_ERR "ep connect failed. Could not find " iscsi_if_ep_connect()
2825 ep = transport->ep_connect(shost, dst_addr, non_blocking); iscsi_if_ep_connect()
2826 if (IS_ERR(ep)) { iscsi_if_ep_connect()
2827 err = PTR_ERR(ep); iscsi_if_ep_connect()
2831 ev->r.ep_connect_ret.handle = ep->id; iscsi_if_ep_connect()
2842 struct iscsi_endpoint *ep; iscsi_if_ep_disconnect() local
2847 ep = iscsi_lookup_endpoint(ep_handle); iscsi_if_ep_disconnect()
2848 if (!ep) iscsi_if_ep_disconnect()
2850 conn = ep->conn; iscsi_if_ep_disconnect()
2853 conn->ep = NULL; iscsi_if_ep_disconnect()
2857 transport->ep_disconnect(ep); iscsi_if_ep_disconnect()
2865 struct iscsi_endpoint *ep; iscsi_if_transport_ep() local
2877 ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle); iscsi_if_transport_ep()
2878 if (!ep) iscsi_if_transport_ep()
2881 ev->r.retcode = transport->ep_poll(ep, iscsi_if_transport_ep()
3502 struct iscsi_endpoint *ep = NULL; iscsi_if_recv_msg() local
3519 err = iscsi_if_create_session(priv, ep, ev, iscsi_if_recv_msg()
3526 ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle); iscsi_if_recv_msg()
3527 if (!ep) { iscsi_if_recv_msg()
3532 err = iscsi_if_create_session(priv, ep, ev, iscsi_if_recv_msg()
3563 if (conn && conn->ep) iscsi_if_recv_msg()
3564 iscsi_if_ep_disconnect(transport, conn->ep->id); iscsi_if_recv_msg()
3577 ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); iscsi_if_recv_msg()
3578 if (ep) { iscsi_if_recv_msg()
3579 ep->conn = conn; iscsi_if_recv_msg()
3582 conn->ep = ep; iscsi_if_recv_msg()
3586 "Could not set ep conn " iscsi_if_recv_msg()
3793 struct iscsi_endpoint *ep; \
3801 ep = conn->ep; \
3802 if (!ep && t->ep_connect) { \
3807 if (ep) \
3808 rc = t->get_ep_param(ep, param, buf); \
/linux-4.4.14/drivers/usb/atm/
H A Dxusbatm.c52 static struct usb_interface *xusbatm_find_intf(struct usb_device *usb_dev, int altsetting, u8 ep) xusbatm_find_intf() argument
61 if (alt->endpoint[j].desc.bEndpointAddress == ep) xusbatm_find_intf()
104 " rx: ep %02x padd %d alt %2d tx: ep %02x padd %d alt %2d\n", xusbatm_bind()
/linux-4.4.14/drivers/media/rc/
H A Digorplugusb.c152 struct usb_endpoint_descriptor *ep; igorplugusb_probe() local
165 ep = &idesc->endpoint[0].desc; igorplugusb_probe()
166 if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_control(ep)) { igorplugusb_probe()
/linux-4.4.14/security/tomoyo/
H A Drealpath.c156 char *ep; tomoyo_get_local_path() local
157 const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10); tomoyo_get_local_path()
158 if (*ep == '/' && pid && pid == tomoyo_get_local_path()
160 pos = ep - 5; tomoyo_get_local_path()
/linux-4.4.14/include/linux/
H A Dusbdevice_fs.h47 compat_uint_t ep; member in struct:usbdevfs_bulktransfer32
/linux-4.4.14/drivers/w1/masters/
H A Dds2490.c145 int ep[NUM_EP]; member in struct:ds_device
206 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), ds_send_control_cmd()
221 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), ds_send_control_mode()
236 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), ds_send_control()
256 dev->ep[EP_STATUS]), buf, size, &count, 1000); ds_recv_status_nodump()
259 dev->ep[EP_STATUS], err); ds_recv_status_nodump()
278 pr_info("0x%x: count=%d, status: ", dev->ep[EP_STATUS], count); ds_dump_status()
359 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]), ds_recv_data()
365 pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); ds_recv_data()
366 usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN])); ds_recv_data()
391 err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000); ds_send_data()
394 "err=%d.\n", dev->ep[EP_DATA_OUT], err); ds_send_data()
468 printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err); ds_wait_status()
1002 memset(dev->ep, 0, sizeof(dev->ep)); ds_probe()
1039 dev->ep[i+1] = endpoint->bEndpointAddress; ds_probe()
