Lines matching refs: dev — identifier cross-reference over u_ether.c (the Linux USB gadget Ethernet link layer). Each entry gives the source line number, the source line, and its context: 'argument', 'local', or the enclosing function.

121 #define DBG(dev, fmt, args...) \ argument
122 xprintk(dev , KERN_DEBUG , fmt , ## args)
124 #define DBG(dev, fmt, args...) \ argument
131 #define VDBG(dev, fmt, args...) \ argument
135 #define ERROR(dev, fmt, args...) \ argument
136 xprintk(dev , KERN_ERR , fmt , ## args)
137 #define INFO(dev, fmt, args...) \ argument
138 xprintk(dev , KERN_INFO , fmt , ## args)
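These logging wrappers (lines 121-138) all expand to a single xprintk() helper that tags each message with the net device name and a printk level; the two DBG definitions at lines 121 and 124 are the DEBUG and non-DEBUG branches of the same #ifdef. A minimal sketch of the pattern, with the xprintk body inferred from the call sites above rather than quoted from this listing:

    /* Sketch: per-device logging layered over printk(). The xprintk
     * body here is an inference from the call sites, not a quote. */
    #define xprintk(d, level, fmt, args...) \
            printk(level "%s: " fmt, (d)->net->name, ## args)

    #ifdef DEBUG
    #define DBG(dev, fmt, args...) \
            xprintk(dev, KERN_DEBUG, fmt, ## args)
    #else
    #define DBG(dev, fmt, args...) \
            do { } while (0)        /* compiled out without DEBUG */
    #endif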
146 struct eth_dev *dev = netdev_priv(net); in ueth_change_mtu() local
151 spin_lock_irqsave(&dev->lock, flags); in ueth_change_mtu()
152 if (dev->port_usb) in ueth_change_mtu()
158 spin_unlock_irqrestore(&dev->lock, flags); in ueth_change_mtu()
165 struct eth_dev *dev = netdev_priv(net); in eth_get_drvinfo() local
169 strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); in eth_get_drvinfo()
170 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); in eth_get_drvinfo()
184 static void defer_kevent(struct eth_dev *dev, int flag) in defer_kevent() argument
186 if (test_and_set_bit(flag, &dev->todo)) in defer_kevent()
188 if (!schedule_work(&dev->work)) in defer_kevent()
189 ERROR(dev, "kevent %d may have been dropped\n", flag); in defer_kevent()
191 DBG(dev, "kevent %d scheduled\n", flag); in defer_kevent()
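defer_kevent() (lines 184-191) is the usual deferred-work idiom: test_and_set_bit() makes repeat calls idempotent while a kevent is pending, and eth_work() (lines 443-451) later clears the bit and refills the RX queue from process context, where GFP_KERNEL allocation is allowed. A condensed sketch of the round trip, using only the fields the listing shows:

    static void defer_kevent(struct eth_dev *dev, int flag)
    {
            if (test_and_set_bit(flag, &dev->todo))
                    return;                 /* already scheduled */
            if (!schedule_work(&dev->work))
                    ERROR(dev, "kevent %d may have been dropped\n", flag);
            else
                    DBG(dev, "kevent %d scheduled\n", flag);
    }

    static void eth_work(struct work_struct *work)
    {
            struct eth_dev *dev = container_of(work, struct eth_dev, work);

            if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo) &&
                netif_running(dev->net))
                    rx_fill(dev, GFP_KERNEL);       /* may sleep here */
    }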
197 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) in rx_submit() argument
205 spin_lock_irqsave(&dev->lock, flags); in rx_submit()
206 if (dev->port_usb) in rx_submit()
207 out = dev->port_usb->out_ep; in rx_submit()
210 spin_unlock_irqrestore(&dev->lock, flags); in rx_submit()
228 size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; in rx_submit()
229 size += dev->port_usb->header_len; in rx_submit()
233 if (dev->port_usb->is_fixed) in rx_submit()
234 size = max_t(size_t, size, dev->port_usb->fixed_out_len); in rx_submit()
238 DBG(dev, "no rx skb\n"); in rx_submit()
256 defer_kevent(dev, WORK_RX_MEMORY); in rx_submit()
258 DBG(dev, "rx submit --> %d\n", retval); in rx_submit()
261 spin_lock_irqsave(&dev->req_lock, flags); in rx_submit()
262 list_add(&req->list, &dev->rx_reqs); in rx_submit()
263 spin_unlock_irqrestore(&dev->req_lock, flags); in rx_submit()
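The sizing arithmetic at lines 228-234 reserves room for an Ethernet header, the MTU, slack bytes (RX_EXTRA), and any link-layer protocol header, then rounds up to a whole number of USB packets so a transfer always terminates on a short packet; on allocation or submit failure (lines 238, 256-263), the request is recycled onto dev->rx_reqs and WORK_RX_MEMORY is deferred. A sketch of the calculation; the round-up step and NET_IP_ALIGN handling are recalled from mainline, not shown in this listing:

    size = sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
    size += dev->port_usb->header_len;
    size += out->maxpacket - 1;
    size -= size % out->maxpacket;  /* round up to a packet boundary */

    if (dev->port_usb->is_fixed)    /* e.g. NCM: fixed transfer length */
            size = max_t(size_t, size, dev->port_usb->fixed_out_len);

    skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
    if (skb == NULL) {
            DBG(dev, "no rx skb\n");
            goto enomem;            /* defer WORK_RX_MEMORY, recycle req */
    }
    skb_reserve(skb, NET_IP_ALIGN); /* align the IP header past ethhdr */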
271 struct eth_dev *dev = ep->driver_data; in rx_complete() local
280 if (dev->unwrap) { in rx_complete()
283 spin_lock_irqsave(&dev->lock, flags); in rx_complete()
284 if (dev->port_usb) { in rx_complete()
285 status = dev->unwrap(dev->port_usb, in rx_complete()
287 &dev->rx_frames); in rx_complete()
292 spin_unlock_irqrestore(&dev->lock, flags); in rx_complete()
294 skb_queue_tail(&dev->rx_frames, skb); in rx_complete()
298 skb2 = skb_dequeue(&dev->rx_frames); in rx_complete()
303 dev->net->stats.rx_errors++; in rx_complete()
304 dev->net->stats.rx_length_errors++; in rx_complete()
305 DBG(dev, "rx length %d\n", skb2->len); in rx_complete()
309 skb2->protocol = eth_type_trans(skb2, dev->net); in rx_complete()
310 dev->net->stats.rx_packets++; in rx_complete()
311 dev->net->stats.rx_bytes += skb2->len; in rx_complete()
318 skb2 = skb_dequeue(&dev->rx_frames); in rx_complete()
325 VDBG(dev, "rx shutdown, code %d\n", status); in rx_complete()
330 DBG(dev, "rx %s reset\n", ep->name); in rx_complete()
331 defer_kevent(dev, WORK_RX_MEMORY); in rx_complete()
338 dev->net->stats.rx_over_errors++; in rx_complete()
342 dev->net->stats.rx_errors++; in rx_complete()
343 DBG(dev, "rx status %d\n", status); in rx_complete()
349 if (!netif_running(dev->net)) { in rx_complete()
351 spin_lock(&dev->req_lock); in rx_complete()
352 list_add(&req->list, &dev->rx_reqs); in rx_complete()
353 spin_unlock(&dev->req_lock); in rx_complete()
357 rx_submit(dev, req, GFP_ATOMIC); in rx_complete()
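rx_complete() (lines 271-357) is the OUT-endpoint completion handler: on success it runs the link's optional unwrap() callback to strip protocol framing, hands each recovered frame to the stack after eth_type_trans(), and updates the rx counters. The tail of the handler, restated from lines 349-357, decides whether to park or resubmit the request:

    /* Tail of rx_complete(): park the request if the interface is
     * down, otherwise resubmit immediately to keep RX primed. */
    if (!netif_running(dev->net)) {
            spin_lock(&dev->req_lock);
            list_add(&req->list, &dev->rx_reqs);
            spin_unlock(&dev->req_lock);
            req = NULL;
    }
    if (req)
            rx_submit(dev, req, GFP_ATOMIC);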
399 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) in alloc_requests() argument
403 spin_lock(&dev->req_lock); in alloc_requests()
404 status = prealloc(&dev->tx_reqs, link->in_ep, n); in alloc_requests()
407 status = prealloc(&dev->rx_reqs, link->out_ep, n); in alloc_requests()
412 DBG(dev, "can't alloc requests\n"); in alloc_requests()
414 spin_unlock(&dev->req_lock); in alloc_requests()
418 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) in rx_fill() argument
424 spin_lock_irqsave(&dev->req_lock, flags); in rx_fill()
425 while (!list_empty(&dev->rx_reqs)) { in rx_fill()
426 req = container_of(dev->rx_reqs.next, in rx_fill()
429 spin_unlock_irqrestore(&dev->req_lock, flags); in rx_fill()
431 if (rx_submit(dev, req, gfp_flags) < 0) { in rx_fill()
432 defer_kevent(dev, WORK_RX_MEMORY); in rx_fill()
436 spin_lock_irqsave(&dev->req_lock, flags); in rx_fill()
438 spin_unlock_irqrestore(&dev->req_lock, flags); in rx_fill()
443 struct eth_dev *dev = container_of(work, struct eth_dev, work); in eth_work() local
445 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { in eth_work()
446 if (netif_running(dev->net)) in eth_work()
447 rx_fill(dev, GFP_KERNEL); in eth_work()
450 if (dev->todo) in eth_work()
451 DBG(dev, "work done, flags = 0x%lx\n", dev->todo); in eth_work()
457 struct eth_dev *dev = ep->driver_data; in tx_complete() local
461 dev->net->stats.tx_errors++; in tx_complete()
462 VDBG(dev, "tx err %d\n", req->status); in tx_complete()
468 dev->net->stats.tx_bytes += skb->len; in tx_complete()
470 dev->net->stats.tx_packets++; in tx_complete()
472 spin_lock(&dev->req_lock); in tx_complete()
473 list_add(&req->list, &dev->tx_reqs); in tx_complete()
474 spin_unlock(&dev->req_lock); in tx_complete()
477 atomic_dec(&dev->tx_qlen); in tx_complete()
478 if (netif_carrier_ok(dev->net)) in tx_complete()
479 netif_wake_queue(dev->net); in tx_complete()
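tx_complete() (lines 457-479) recycles the finished request onto dev->tx_reqs, frees the skb, and wakes the queue that eth_start_xmit() stops when the free list runs dry. A simplified sketch, assuming (as is conventional here, though not shown in the listing) that the skb travels in req->context and is freed with dev_kfree_skb_any() since this runs in interrupt context:

    static void tx_complete(struct usb_ep *ep, struct usb_request *req)
    {
            struct sk_buff *skb = req->context;    /* assumption: see above */
            struct eth_dev *dev = ep->driver_data;

            if (req->status)
                    dev->net->stats.tx_errors++;
            else
                    dev->net->stats.tx_bytes += skb->len;
            dev->net->stats.tx_packets++;

            spin_lock(&dev->req_lock);
            list_add(&req->list, &dev->tx_reqs);   /* recycle the request */
            spin_unlock(&dev->req_lock);
            dev_kfree_skb_any(skb);                /* safe in irq context */

            atomic_dec(&dev->tx_qlen);
            if (netif_carrier_ok(dev->net))
                    netif_wake_queue(dev->net);    /* resume transmission */
    }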
490 struct eth_dev *dev = netdev_priv(net); in eth_start_xmit() local
498 spin_lock_irqsave(&dev->lock, flags); in eth_start_xmit()
499 if (dev->port_usb) { in eth_start_xmit()
500 in = dev->port_usb->in_ep; in eth_start_xmit()
501 cdc_filter = dev->port_usb->cdc_filter; in eth_start_xmit()
506 spin_unlock_irqrestore(&dev->lock, flags); in eth_start_xmit()
535 spin_lock_irqsave(&dev->req_lock, flags); in eth_start_xmit()
541 if (list_empty(&dev->tx_reqs)) { in eth_start_xmit()
542 spin_unlock_irqrestore(&dev->req_lock, flags); in eth_start_xmit()
546 req = container_of(dev->tx_reqs.next, struct usb_request, list); in eth_start_xmit()
550 if (list_empty(&dev->tx_reqs)) in eth_start_xmit()
552 spin_unlock_irqrestore(&dev->req_lock, flags); in eth_start_xmit()
558 if (dev->wrap) { in eth_start_xmit()
561 spin_lock_irqsave(&dev->lock, flags); in eth_start_xmit()
562 if (dev->port_usb) in eth_start_xmit()
563 skb = dev->wrap(dev->port_usb, skb); in eth_start_xmit()
564 spin_unlock_irqrestore(&dev->lock, flags); in eth_start_xmit()
569 if (dev->port_usb->supports_multi_frame) in eth_start_xmit()
581 if (dev->port_usb->is_fixed && in eth_start_xmit()
582 length == dev->port_usb->fixed_in_len && in eth_start_xmit()
592 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) in eth_start_xmit()
598 if (gadget_is_dualspeed(dev->gadget)) in eth_start_xmit()
599 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || in eth_start_xmit()
600 dev->gadget->speed == USB_SPEED_SUPER) in eth_start_xmit()
601 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) in eth_start_xmit()
607 DBG(dev, "tx queue err %d\n", retval); in eth_start_xmit()
611 atomic_inc(&dev->tx_qlen); in eth_start_xmit()
617 dev->net->stats.tx_dropped++; in eth_start_xmit()
619 spin_lock_irqsave(&dev->req_lock, flags); in eth_start_xmit()
620 if (list_empty(&dev->tx_reqs)) in eth_start_xmit()
622 list_add(&req->list, &dev->tx_reqs); in eth_start_xmit()
623 spin_unlock_irqrestore(&dev->req_lock, flags); in eth_start_xmit()
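Two details near the end of eth_start_xmit() (lines 581-601) deserve a note: short-packet termination and interrupt throttling. Fixed-length links (is_fixed, e.g. NCM) must not be ZLP-terminated; otherwise, if the payload is an exact multiple of maxpacket and the controller cannot generate zero-length packets (dev->zlp clear), the length is bumped by one byte so the host still sees end-of-transfer. At high/super speed, a completion interrupt is requested only once every dev->qmult packets. Restated with brief comments:

    /* NCM-style fixed-length transfers must not be ZLP-terminated */
    if (dev->port_usb->is_fixed &&
        length == dev->port_usb->fixed_in_len &&
        (length % in->maxpacket) == 0)
            req->zero = 0;
    else
            req->zero = 1;

    if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
            length++;       /* pad one byte: this UDC can't send ZLPs */

    /* at high/super speed, interrupt once per qmult packets */
    if (gadget_is_dualspeed(dev->gadget))
            req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
                                 dev->gadget->speed == USB_SPEED_SUPER)
                    ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
                    : 0;

    req->length = length;
    retval = usb_ep_queue(in, req, GFP_ATOMIC);
    if (retval)
            DBG(dev, "tx queue err %d\n", retval);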
630 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) in eth_start() argument
632 DBG(dev, "%s\n", __func__); in eth_start()
635 rx_fill(dev, gfp_flags); in eth_start()
638 atomic_set(&dev->tx_qlen, 0); in eth_start()
639 netif_wake_queue(dev->net); in eth_start()
644 struct eth_dev *dev = netdev_priv(net); in eth_open() local
647 DBG(dev, "%s\n", __func__); in eth_open()
648 if (netif_carrier_ok(dev->net)) in eth_open()
649 eth_start(dev, GFP_KERNEL); in eth_open()
651 spin_lock_irq(&dev->lock); in eth_open()
652 link = dev->port_usb; in eth_open()
655 spin_unlock_irq(&dev->lock); in eth_open()
662 struct eth_dev *dev = netdev_priv(net); in eth_stop() local
665 VDBG(dev, "%s\n", __func__); in eth_stop()
668 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", in eth_stop()
669 dev->net->stats.rx_packets, dev->net->stats.tx_packets, in eth_stop()
670 dev->net->stats.rx_errors, dev->net->stats.tx_errors in eth_stop()
674 spin_lock_irqsave(&dev->lock, flags); in eth_stop()
675 if (dev->port_usb) { in eth_stop()
676 struct gether *link = dev->port_usb; in eth_stop()
697 DBG(dev, "host still using in/out endpoints\n"); in eth_stop()
704 spin_unlock_irqrestore(&dev->lock, flags); in eth_stop()
772 struct eth_dev *dev; in gether_setup_name() local
776 net = alloc_etherdev(sizeof *dev); in gether_setup_name()
780 dev = netdev_priv(net); in gether_setup_name()
781 spin_lock_init(&dev->lock); in gether_setup_name()
782 spin_lock_init(&dev->req_lock); in gether_setup_name()
783 INIT_WORK(&dev->work, eth_work); in gether_setup_name()
784 INIT_LIST_HEAD(&dev->tx_reqs); in gether_setup_name()
785 INIT_LIST_HEAD(&dev->rx_reqs); in gether_setup_name()
787 skb_queue_head_init(&dev->rx_frames); in gether_setup_name()
790 dev->net = net; in gether_setup_name()
791 dev->qmult = qmult; in gether_setup_name()
795 dev_warn(&g->dev, in gether_setup_name()
797 if (get_ether_addr(host_addr, dev->host_mac)) in gether_setup_name()
798 dev_warn(&g->dev, in gether_setup_name()
802 memcpy(ethaddr, dev->host_mac, ETH_ALEN); in gether_setup_name()
808 dev->gadget = g; in gether_setup_name()
809 SET_NETDEV_DEV(net, &g->dev); in gether_setup_name()
814 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); in gether_setup_name()
816 dev = ERR_PTR(status); in gether_setup_name()
818 INFO(dev, "MAC %pM\n", net->dev_addr); in gether_setup_name()
819 INFO(dev, "HOST MAC %pM\n", dev->host_mac); in gether_setup_name()
829 return dev; in gether_setup_name()
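Both setup paths (lines 772-858) follow the standard alloc_etherdev()/netdev_priv() idiom: struct eth_dev lives in the private area of the net_device allocation, so the single free_netdev() in gether_cleanup() (line 1025) releases everything. A sketch of the shared initialization, using only what the listing shows:

    net = alloc_etherdev(sizeof(*dev));
    if (!net)
            return ERR_PTR(-ENOMEM);

    dev = netdev_priv(net);         /* private area inside net_device */
    spin_lock_init(&dev->lock);     /* guards port_usb and wrap/unwrap */
    spin_lock_init(&dev->req_lock); /* guards the tx_reqs/rx_reqs lists */
    INIT_WORK(&dev->work, eth_work);
    INIT_LIST_HEAD(&dev->tx_reqs);
    INIT_LIST_HEAD(&dev->rx_reqs);
    skb_queue_head_init(&dev->rx_frames);
    dev->net = net;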
836 struct eth_dev *dev; in gether_setup_name_default() local
838 net = alloc_etherdev(sizeof(*dev)); in gether_setup_name_default()
842 dev = netdev_priv(net); in gether_setup_name_default()
843 spin_lock_init(&dev->lock); in gether_setup_name_default()
844 spin_lock_init(&dev->req_lock); in gether_setup_name_default()
845 INIT_WORK(&dev->work, eth_work); in gether_setup_name_default()
846 INIT_LIST_HEAD(&dev->tx_reqs); in gether_setup_name_default()
847 INIT_LIST_HEAD(&dev->rx_reqs); in gether_setup_name_default()
849 skb_queue_head_init(&dev->rx_frames); in gether_setup_name_default()
852 dev->net = net; in gether_setup_name_default()
853 dev->qmult = QMULT_DEFAULT; in gether_setup_name_default()
856 eth_random_addr(dev->dev_mac); in gether_setup_name_default()
858 eth_random_addr(dev->host_mac); in gether_setup_name_default()
872 struct eth_dev *dev; in gether_register_netdev() local
877 if (!net->dev.parent) in gether_register_netdev()
879 dev = netdev_priv(net); in gether_register_netdev()
880 g = dev->gadget; in gether_register_netdev()
883 dev_dbg(&g->dev, "register_netdev failed, %d\n", status); in gether_register_netdev()
886 INFO(dev, "HOST MAC %pM\n", dev->host_mac); in gether_register_netdev()
895 memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); in gether_register_netdev()
902 INFO(dev, "MAC %pM\n", dev->dev_mac); in gether_register_netdev()
910 struct eth_dev *dev; in gether_set_gadget() local
912 dev = netdev_priv(net); in gether_set_gadget()
913 dev->gadget = g; in gether_set_gadget()
914 SET_NETDEV_DEV(net, &g->dev); in gether_set_gadget()
920 struct eth_dev *dev; in gether_set_dev_addr() local
923 dev = netdev_priv(net); in gether_set_dev_addr()
926 memcpy(dev->dev_mac, new_addr, ETH_ALEN); in gether_set_dev_addr()
933 struct eth_dev *dev; in gether_get_dev_addr() local
935 dev = netdev_priv(net); in gether_get_dev_addr()
936 return get_ether_addr_str(dev->dev_mac, dev_addr, len); in gether_get_dev_addr()
942 struct eth_dev *dev; in gether_set_host_addr() local
945 dev = netdev_priv(net); in gether_set_host_addr()
948 memcpy(dev->host_mac, new_addr, ETH_ALEN); in gether_set_host_addr()
955 struct eth_dev *dev; in gether_get_host_addr() local
957 dev = netdev_priv(net); in gether_get_host_addr()
958 return get_ether_addr_str(dev->host_mac, host_addr, len); in gether_get_host_addr()
964 struct eth_dev *dev; in gether_get_host_addr_cdc() local
969 dev = netdev_priv(net); in gether_get_host_addr_cdc()
970 snprintf(host_addr, len, "%pm", dev->host_mac); in gether_get_host_addr_cdc()
978 struct eth_dev *dev; in gether_get_host_addr_u8() local
980 dev = netdev_priv(net); in gether_get_host_addr_u8()
981 memcpy(host_mac, dev->host_mac, ETH_ALEN); in gether_get_host_addr_u8()
987 struct eth_dev *dev; in gether_set_qmult() local
989 dev = netdev_priv(net); in gether_set_qmult()
990 dev->qmult = qmult; in gether_set_qmult()
996 struct eth_dev *dev; in gether_get_qmult() local
998 dev = netdev_priv(net); in gether_get_qmult()
999 return dev->qmult; in gether_get_qmult()
1018 void gether_cleanup(struct eth_dev *dev) in gether_cleanup() argument
1020 if (!dev) in gether_cleanup()
1023 unregister_netdev(dev->net); in gether_cleanup()
1024 flush_work(&dev->work); in gether_cleanup()
1025 free_netdev(dev->net); in gether_cleanup()
1047 struct eth_dev *dev = link->ioport; in gether_connect() local
1050 if (!dev) in gether_connect()
1053 link->in_ep->driver_data = dev; in gether_connect()
1056 DBG(dev, "enable %s --> %d\n", in gether_connect()
1061 link->out_ep->driver_data = dev; in gether_connect()
1064 DBG(dev, "enable %s --> %d\n", in gether_connect()
1070 result = alloc_requests(dev, link, qlen(dev->gadget, in gether_connect()
1071 dev->qmult)); in gether_connect()
1074 dev->zlp = link->is_zlp_ok; in gether_connect()
1075 DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult)); in gether_connect()
1077 dev->header_len = link->header_len; in gether_connect()
1078 dev->unwrap = link->unwrap; in gether_connect()
1079 dev->wrap = link->wrap; in gether_connect()
1081 spin_lock(&dev->lock); in gether_connect()
1082 dev->port_usb = link; in gether_connect()
1083 if (netif_running(dev->net)) { in gether_connect()
1090 spin_unlock(&dev->lock); in gether_connect()
1092 netif_carrier_on(dev->net); in gether_connect()
1093 if (netif_running(dev->net)) in gether_connect()
1094 eth_start(dev, GFP_ATOMIC); in gether_connect()
1106 return dev->net; in gether_connect()
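gether_connect() (lines 1047-1106) brings a configured link live: enable both endpoints, preallocate qlen() requests per direction, copy the link's framing parameters into the eth_dev, then publish dev->port_usb under dev->lock before raising the carrier. A hypothetical condensation under those assumptions; example_connect is not the exported symbol, and the real function's error unwinding and netif_running() fixup are compressed:

    static struct net_device *example_connect(struct gether *link)
    {
            struct eth_dev *dev = link->ioport;
            int result;

            link->in_ep->driver_data = dev;
            result = usb_ep_enable(link->in_ep);
            if (result != 0)
                    goto fail0;

            link->out_ep->driver_data = dev;
            result = usb_ep_enable(link->out_ep);
            if (result != 0)
                    goto fail1;

            result = alloc_requests(dev, link, qlen(dev->gadget, dev->qmult));
            if (result < 0)
                    goto fail2;

            dev->zlp = link->is_zlp_ok;     /* copy link framing parameters */
            dev->header_len = link->header_len;
            dev->unwrap = link->unwrap;
            dev->wrap = link->wrap;

            spin_lock(&dev->lock);
            dev->port_usb = link;           /* publish under dev->lock */
            spin_unlock(&dev->lock);

            netif_carrier_on(dev->net);
            if (netif_running(dev->net))
                    eth_start(dev, GFP_ATOMIC);
            return dev->net;

    fail2:
            usb_ep_disable(link->out_ep);
    fail1:
            usb_ep_disable(link->in_ep);
    fail0:
            return ERR_PTR(result);
    }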
1124 struct eth_dev *dev = link->ioport; in gether_disconnect() local
1127 WARN_ON(!dev); in gether_disconnect()
1128 if (!dev) in gether_disconnect()
1131 DBG(dev, "%s\n", __func__); in gether_disconnect()
1133 netif_stop_queue(dev->net); in gether_disconnect()
1134 netif_carrier_off(dev->net); in gether_disconnect()
1141 spin_lock(&dev->req_lock); in gether_disconnect()
1142 while (!list_empty(&dev->tx_reqs)) { in gether_disconnect()
1143 req = container_of(dev->tx_reqs.next, in gether_disconnect()
1147 spin_unlock(&dev->req_lock); in gether_disconnect()
1149 spin_lock(&dev->req_lock); in gether_disconnect()
1151 spin_unlock(&dev->req_lock); in gether_disconnect()
1155 spin_lock(&dev->req_lock); in gether_disconnect()
1156 while (!list_empty(&dev->rx_reqs)) { in gether_disconnect()
1157 req = container_of(dev->rx_reqs.next, in gether_disconnect()
1161 spin_unlock(&dev->req_lock); in gether_disconnect()
1163 spin_lock(&dev->req_lock); in gether_disconnect()
1165 spin_unlock(&dev->req_lock); in gether_disconnect()
1169 dev->header_len = 0; in gether_disconnect()
1170 dev->unwrap = NULL; in gether_disconnect()
1171 dev->wrap = NULL; in gether_disconnect()
1173 spin_lock(&dev->lock); in gether_disconnect()
1174 dev->port_usb = NULL; in gether_disconnect()
1175 spin_unlock(&dev->lock); in gether_disconnect()
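Teardown reverses this: gether_disconnect() (lines 1124-1175) stops the queue, drops the carrier, drains both request lists, clears the framing callbacks, and finally clears dev->port_usb under dev->lock. Note the locking shape at lines 1141-1165: dev->req_lock is released across each usb_ep_free_request() call and retaken for the next list operation. A sketch of the TX drain; the RX list (lines 1155-1165) is drained the same way against link->out_ep:

    spin_lock(&dev->req_lock);
    while (!list_empty(&dev->tx_reqs)) {
            req = container_of(dev->tx_reqs.next,
                               struct usb_request, list);
            list_del(&req->list);

            spin_unlock(&dev->req_lock);    /* drop lock across the free */
            usb_ep_free_request(link->in_ep, req);
            spin_lock(&dev->req_lock);
    }
    spin_unlock(&dev->req_lock);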