Lines matching refs: queue

93 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96 static void make_tx_response(struct xenvif_queue *queue,
99 static void push_tx_responses(struct xenvif_queue *queue);
101 static inline int tx_work_todo(struct xenvif_queue *queue);
103 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
110 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
113 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
116 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
119 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
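
The two fragments above come from the helpers that translate a pending-buffer index into a page frame number and then into a kernel virtual address. A minimal sketch, reconstructed from the listing; the u16 index type and the wrapped parameter line are filled in from context and are assumptions, not a verbatim copy of the source.

        /* Sketch: map a pending slot index to the pfn of its mmap'd page,
         * and from there to a kernel virtual address. Assumes mmap_pages[]
         * holds one struct page per pending slot, as the listing suggests.
         */
        static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
                                               u16 idx)
        {
                return page_to_pfn(queue->mmap_pages[idx]);
        }

        static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
                                                 u16 idx)
        {
                return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
        }
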
160 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument
165 needed = xenvif_rx_ring_slots_needed(queue->vif); in xenvif_rx_ring_slots_available()
168 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available()
169 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available()
174 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available()
180 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available()
185 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument
189 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
191 __skb_queue_tail(&queue->rx_queue, skb); in xenvif_rx_queue_tail()
193 queue->rx_queue_len += skb->len; in xenvif_rx_queue_tail()
194 if (queue->rx_queue_len > queue->rx_queue_max) in xenvif_rx_queue_tail()
195 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); in xenvif_rx_queue_tail()
197 spin_unlock_irqrestore(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
200 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) in xenvif_rx_dequeue() argument
204 spin_lock_irq(&queue->rx_queue.lock); in xenvif_rx_dequeue()
206 skb = __skb_dequeue(&queue->rx_queue); in xenvif_rx_dequeue()
208 queue->rx_queue_len -= skb->len; in xenvif_rx_dequeue()
210 spin_unlock_irq(&queue->rx_queue.lock); in xenvif_rx_dequeue()
215 static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) in xenvif_rx_queue_maybe_wake() argument
217 spin_lock_irq(&queue->rx_queue.lock); in xenvif_rx_queue_maybe_wake()
219 if (queue->rx_queue_len < queue->rx_queue_max) in xenvif_rx_queue_maybe_wake()
220 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); in xenvif_rx_queue_maybe_wake()
222 spin_unlock_irq(&queue->rx_queue.lock); in xenvif_rx_queue_maybe_wake()
226 static void xenvif_rx_queue_purge(struct xenvif_queue *queue) in xenvif_rx_queue_purge() argument
229 while ((skb = xenvif_rx_dequeue(queue)) != NULL) in xenvif_rx_queue_purge()
233 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) in xenvif_rx_queue_drop_expired() argument
238 skb = skb_peek(&queue->rx_queue); in xenvif_rx_queue_drop_expired()
243 xenvif_rx_dequeue(queue); in xenvif_rx_queue_drop_expired()
257 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, in get_next_rx_buffer() argument
263 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); in get_next_rx_buffer()
278 struct xenvif_queue *queue; member
295 struct xenvif_queue *queue = info->queue; in xenvif_setup_copy_gop() local
302 info->meta = get_next_rx_buffer(queue, npo); in xenvif_setup_copy_gop()
322 copy_gop->dest.domid = queue->vif->domid; in xenvif_setup_copy_gop()
330 if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask)) in xenvif_setup_copy_gop()
331 queue->rx.req_cons++; in xenvif_setup_copy_gop()
355 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, in xenvif_gop_frag_copy() argument
361 .queue = queue, in xenvif_gop_frag_copy()
422 struct xenvif_queue *queue) in xenvif_gop_skb() argument
446 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); in xenvif_gop_skb()
454 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); in xenvif_gop_skb()
478 xenvif_gop_frag_copy(queue, skb, npo, in xenvif_gop_skb()
484 xenvif_gop_frag_copy(queue, skb, npo, in xenvif_gop_skb()
520 static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, in xenvif_add_frag_responses() argument
541 make_rx_response(queue, meta[i].id, status, offset, in xenvif_add_frag_responses()
546 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument
548 wake_up(&queue->wq); in xenvif_kick_thread()
551 static void xenvif_rx_action(struct xenvif_queue *queue) in xenvif_rx_action() argument
564 .copy = queue->grant_copy_op, in xenvif_rx_action()
565 .meta = queue->meta, in xenvif_rx_action()
570 while (xenvif_rx_ring_slots_available(queue) in xenvif_rx_action()
571 && (skb = xenvif_rx_dequeue(queue)) != NULL) { in xenvif_rx_action()
572 queue->last_rx_time = jiffies; in xenvif_rx_action()
574 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); in xenvif_rx_action()
579 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); in xenvif_rx_action()
585 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); in xenvif_rx_action()
589 if ((1 << queue->meta[npo.meta_cons].gso_type) & in xenvif_rx_action()
590 queue->vif->gso_prefix_mask) { in xenvif_rx_action()
591 resp = RING_GET_RESPONSE(&queue->rx, in xenvif_rx_action()
592 queue->rx.rsp_prod_pvt++); in xenvif_rx_action()
596 resp->offset = queue->meta[npo.meta_cons].gso_size; in xenvif_rx_action()
597 resp->id = queue->meta[npo.meta_cons].id; in xenvif_rx_action()
605 queue->stats.tx_bytes += skb->len; in xenvif_rx_action()
606 queue->stats.tx_packets++; in xenvif_rx_action()
608 status = xenvif_check_gop(queue->vif, in xenvif_rx_action()
624 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, in xenvif_rx_action()
626 queue->meta[npo.meta_cons].size, in xenvif_rx_action()
629 if ((1 << queue->meta[npo.meta_cons].gso_type) & in xenvif_rx_action()
630 queue->vif->gso_mask) { in xenvif_rx_action()
633 RING_GET_RESPONSE(&queue->rx, in xenvif_rx_action()
634 queue->rx.rsp_prod_pvt++); in xenvif_rx_action()
638 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; in xenvif_rx_action()
639 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; in xenvif_rx_action()
647 xenvif_add_frag_responses(queue, status, in xenvif_rx_action()
648 queue->meta + npo.meta_cons + 1, in xenvif_rx_action()
651 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); in xenvif_rx_action()
661 notify_remote_via_irq(queue->rx_irq); in xenvif_rx_action()
664 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) in xenvif_napi_schedule_or_enable_events() argument
668 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
671 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
674 static void tx_add_credit(struct xenvif_queue *queue) in tx_add_credit() argument
682 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
685 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
686 if (max_credit < queue->remaining_credit) in tx_add_credit()
689 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
694 struct xenvif_queue *queue = (struct xenvif_queue *)data; in xenvif_tx_credit_callback() local
695 tx_add_credit(queue); in xenvif_tx_credit_callback()
696 xenvif_napi_schedule_or_enable_events(queue); in xenvif_tx_credit_callback()
699 static void xenvif_tx_err(struct xenvif_queue *queue, in xenvif_tx_err() argument
702 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
706 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_tx_err()
707 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); in xenvif_tx_err()
708 push_tx_responses(queue); in xenvif_tx_err()
709 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_tx_err()
712 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
714 queue->tx.req_cons = cons; in xenvif_tx_err()
726 static int xenvif_count_requests(struct xenvif_queue *queue, in xenvif_count_requests() argument
731 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
743 netdev_err(queue->vif->dev, in xenvif_count_requests()
746 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
754 netdev_err(queue->vif->dev, in xenvif_count_requests()
757 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
770 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
779 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
792 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
802 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
804 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
816 xenvif_tx_err(queue, first, cons + slots); in xenvif_count_requests()
830 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, in xenvif_tx_create_map_op() argument
835 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
836 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), in xenvif_tx_create_map_op()
838 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
840 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
861 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, in xenvif_get_requests() argument
882 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
883 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
884 xenvif_tx_create_map_op(queue, pending_idx, txp, gop); in xenvif_get_requests()
895 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
896 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
897 xenvif_tx_create_map_op(queue, pending_idx, txp, gop); in xenvif_get_requests()
908 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, in xenvif_grant_handle_set() argument
912 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
914 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
919 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
922 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, in xenvif_grant_handle_reset() argument
925 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
927 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
932 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
935 static int xenvif_tx_check_gop(struct xenvif_queue *queue, in xenvif_tx_check_gop() argument
959 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
966 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
981 xenvif_grant_handle_set(queue, in xenvif_tx_check_gop()
986 xenvif_idx_unmap(queue, pending_idx); in xenvif_tx_check_gop()
992 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
995 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
1003 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
1010 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); in xenvif_tx_check_gop()
1020 xenvif_idx_release(queue, in xenvif_tx_check_gop()
1027 xenvif_idx_unmap(queue, pending_idx); in xenvif_tx_check_gop()
1028 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
1038 xenvif_idx_unmap(queue, pending_idx); in xenvif_tx_check_gop()
1039 xenvif_idx_release(queue, pending_idx, in xenvif_tx_check_gop()
1060 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_fill_frags() argument
1078 &callback_param(queue, pending_idx); in xenvif_fill_frags()
1080 callback_param(queue, prev_pending_idx).ctx = in xenvif_fill_frags()
1081 &callback_param(queue, pending_idx); in xenvif_fill_frags()
1083 callback_param(queue, pending_idx).ctx = NULL; in xenvif_fill_frags()
1086 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
1087 page = virt_to_page(idx_to_kaddr(queue, pending_idx)); in xenvif_fill_frags()
1094 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
1098 static int xenvif_get_extras(struct xenvif_queue *queue, in xenvif_get_extras() argument
1103 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
1107 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
1108 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
1112 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
1115 queue->tx.req_cons = ++cons; in xenvif_get_extras()
1116 netdev_err(queue->vif->dev, in xenvif_get_extras()
1118 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
1123 queue->tx.req_cons = ++cons; in xenvif_get_extras()
1158 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) in checksum_setup() argument
1168 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
1180 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) in tx_credit_exceeded() argument
1183 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
1184 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
1187 if (timer_pending(&queue->credit_timeout)) in tx_credit_exceeded()
1192 queue->credit_window_start = now; in tx_credit_exceeded()
1193 tx_add_credit(queue); in tx_credit_exceeded()
1197 if (size > queue->remaining_credit) { in tx_credit_exceeded()
1198 queue->credit_timeout.data = in tx_credit_exceeded()
1199 (unsigned long)queue; in tx_credit_exceeded()
1200 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
1202 queue->credit_window_start = next_credit; in tx_credit_exceeded()
1284 static void xenvif_tx_build_gops(struct xenvif_queue *queue, in xenvif_tx_build_gops() argument
1289 struct gnttab_map_grant_ref *gop = queue->tx_map_ops; in xenvif_tx_build_gops()
1294 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
1304 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
1306 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1309 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
1311 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1315 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
1319 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
1321 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
1324 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
1325 tx_credit_exceeded(queue, txreq.size)) in xenvif_tx_build_gops()
1328 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
1331 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
1335 work_to_do = xenvif_get_extras(queue, extras, in xenvif_tx_build_gops()
1337 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
1346 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1348 make_tx_response(queue, &txreq, in xenvif_tx_build_gops()
1352 push_tx_responses(queue); in xenvif_tx_build_gops()
1360 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1362 make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY); in xenvif_tx_build_gops()
1363 push_tx_responses(queue); in xenvif_tx_build_gops()
1367 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); in xenvif_tx_build_gops()
1374 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1376 xenvif_tx_err(queue, &txreq, idx); in xenvif_tx_build_gops()
1382 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1386 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1390 index = pending_index(queue->pending_cons); in xenvif_tx_build_gops()
1391 pending_idx = queue->pending_ring[index]; in xenvif_tx_build_gops()
1399 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1401 xenvif_tx_err(queue, &txreq, idx); in xenvif_tx_build_gops()
1420 xenvif_tx_err(queue, &txreq, idx); in xenvif_tx_build_gops()
1422 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1432 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1443 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; in xenvif_tx_build_gops()
1444 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; in xenvif_tx_build_gops()
1445 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; in xenvif_tx_build_gops()
1447 queue->tx_copy_ops[*copy_ops].dest.u.gmfn = in xenvif_tx_build_gops()
1449 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; in xenvif_tx_build_gops()
1450 queue->tx_copy_ops[*copy_ops].dest.offset = in xenvif_tx_build_gops()
1453 queue->tx_copy_ops[*copy_ops].len = data_len; in xenvif_tx_build_gops()
1454 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; in xenvif_tx_build_gops()
1461 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); in xenvif_tx_build_gops()
1466 memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, in xenvif_tx_build_gops()
1470 queue->pending_cons++; in xenvif_tx_build_gops()
1472 gop = xenvif_get_requests(queue, skb, txfrags, gop, in xenvif_tx_build_gops()
1475 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1477 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1479 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || in xenvif_tx_build_gops()
1480 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) in xenvif_tx_build_gops()
1484 (*map_ops) = gop - queue->tx_map_ops; in xenvif_tx_build_gops()
1491 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_handle_frag_list() argument
1499 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1500 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1502 xenvif_fill_frags(queue, nskb); in xenvif_handle_frag_list()
1538 xenvif_skb_zerocopy_prepare(queue, nskb); in xenvif_handle_frag_list()
1546 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1558 static int xenvif_tx_submit(struct xenvif_queue *queue) in xenvif_tx_submit() argument
1560 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1561 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1565 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1571 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1574 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { in xenvif_tx_submit()
1590 callback_param(queue, pending_idx).ctx = NULL; in xenvif_tx_submit()
1597 xenvif_idx_release(queue, pending_idx, in xenvif_tx_submit()
1606 xenvif_fill_frags(queue, skb); in xenvif_tx_submit()
1609 if (xenvif_handle_frag_list(queue, skb)) { in xenvif_tx_submit()
1611 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1613 xenvif_skb_zerocopy_prepare(queue, skb); in xenvif_tx_submit()
1619 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1623 if (checksum_setup(queue, skb)) { in xenvif_tx_submit()
1624 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1628 xenvif_skb_zerocopy_prepare(queue, skb); in xenvif_tx_submit()
1649 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1650 queue->stats.rx_packets++; in xenvif_tx_submit()
1660 xenvif_skb_zerocopy_prepare(queue, skb); in xenvif_tx_submit()
1661 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
1674 struct xenvif_queue *queue = ubuf_to_queue(ubuf); in xenvif_zerocopy_callback() local
1679 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1683 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1685 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1686 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1691 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1693 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1696 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1698 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
1699 xenvif_skb_zerocopy_complete(queue); in xenvif_zerocopy_callback()
1702 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) in xenvif_tx_dealloc_action() argument
1709 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1710 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1714 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1722 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1724 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1726 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1728 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1729 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1731 idx_to_kaddr(queue, pending_idx), in xenvif_tx_dealloc_action()
1733 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1734 xenvif_grant_handle_reset(queue, pending_idx); in xenvif_tx_dealloc_action()
1738 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1740 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1742 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1744 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1746 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1747 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1749 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1750 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1751 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1753 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1763 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
1764 xenvif_idx_release(queue, pending_idx_release[i], in xenvif_tx_dealloc_action()
1770 int xenvif_tx_action(struct xenvif_queue *queue, int budget) in xenvif_tx_action() argument
1775 if (unlikely(!tx_work_todo(queue))) in xenvif_tx_action()
1778 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); in xenvif_tx_action()
1783 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1785 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1787 queue->pages_to_map, in xenvif_tx_action()
1792 work_done = xenvif_tx_submit(queue); in xenvif_tx_action()
1797 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, in xenvif_idx_release() argument
1804 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1806 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1808 make_tx_response(queue, &pending_tx_info->req, status); in xenvif_idx_release()
1814 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1815 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1817 push_tx_responses(queue); in xenvif_idx_release()
1819 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1823 static void make_tx_response(struct xenvif_queue *queue, in make_tx_response() argument
1827 RING_IDX i = queue->tx.rsp_prod_pvt; in make_tx_response()
1830 resp = RING_GET_RESPONSE(&queue->tx, i); in make_tx_response()
1835 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in make_tx_response()
1837 queue->tx.rsp_prod_pvt = ++i; in make_tx_response()
1840 static void push_tx_responses(struct xenvif_queue *queue) in push_tx_responses() argument
1844 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1846 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
1849 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, in make_rx_response() argument
1856 RING_IDX i = queue->rx.rsp_prod_pvt; in make_rx_response()
1859 resp = RING_GET_RESPONSE(&queue->rx, i); in make_rx_response()
1867 queue->rx.rsp_prod_pvt = ++i; in make_rx_response()
1872 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) in xenvif_idx_unmap() argument
1878 idx_to_kaddr(queue, pending_idx), in xenvif_idx_unmap()
1880 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1881 xenvif_grant_handle_reset(queue, pending_idx); in xenvif_idx_unmap()
1884 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1886 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1897 static inline int tx_work_todo(struct xenvif_queue *queue) in tx_work_todo() argument
1899 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1905 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) in tx_dealloc_work_todo() argument
1907 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1910 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) in xenvif_unmap_frontend_rings() argument
1912 if (queue->tx.sring) in xenvif_unmap_frontend_rings()
1913 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_rings()
1914 queue->tx.sring); in xenvif_unmap_frontend_rings()
1915 if (queue->rx.sring) in xenvif_unmap_frontend_rings()
1916 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_rings()
1917 queue->rx.sring); in xenvif_unmap_frontend_rings()
1920 int xenvif_map_frontend_rings(struct xenvif_queue *queue, in xenvif_map_frontend_rings() argument
1930 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_rings()
1936 BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); in xenvif_map_frontend_rings()
1938 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_rings()
1944 BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); in xenvif_map_frontend_rings()
1949 xenvif_unmap_frontend_rings(queue); in xenvif_map_frontend_rings()
1953 static void xenvif_queue_carrier_off(struct xenvif_queue *queue) in xenvif_queue_carrier_off() argument
1955 struct xenvif *vif = queue->vif; in xenvif_queue_carrier_off()
1957 queue->stalled = true; in xenvif_queue_carrier_off()
1968 static void xenvif_queue_carrier_on(struct xenvif_queue *queue) in xenvif_queue_carrier_on() argument
1970 struct xenvif *vif = queue->vif; in xenvif_queue_carrier_on()
1972 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ in xenvif_queue_carrier_on()
1973 queue->stalled = false; in xenvif_queue_carrier_on()
1984 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) in xenvif_rx_queue_stalled() argument
1988 prod = queue->rx.sring->req_prod; in xenvif_rx_queue_stalled()
1989 cons = queue->rx.req_cons; in xenvif_rx_queue_stalled()
1991 return !queue->stalled && prod - cons < 1 in xenvif_rx_queue_stalled()
1993 queue->last_rx_time + queue->vif->stall_timeout); in xenvif_rx_queue_stalled()
1996 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) in xenvif_rx_queue_ready() argument
2000 prod = queue->rx.sring->req_prod; in xenvif_rx_queue_ready()
2001 cons = queue->rx.req_cons; in xenvif_rx_queue_ready()
2003 return queue->stalled && prod - cons >= 1; in xenvif_rx_queue_ready()
2006 static bool xenvif_have_rx_work(struct xenvif_queue *queue) in xenvif_have_rx_work() argument
2008 return (!skb_queue_empty(&queue->rx_queue) in xenvif_have_rx_work()
2009 && xenvif_rx_ring_slots_available(queue)) in xenvif_have_rx_work()
2010 || (queue->vif->stall_timeout && in xenvif_have_rx_work()
2011 (xenvif_rx_queue_stalled(queue) in xenvif_have_rx_work()
2012 || xenvif_rx_queue_ready(queue))) in xenvif_have_rx_work()
2014 || queue->vif->disabled; in xenvif_have_rx_work()
2017 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) in xenvif_rx_queue_timeout() argument
2022 skb = skb_peek(&queue->rx_queue); in xenvif_rx_queue_timeout()
2040 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) in xenvif_wait_for_rx_work() argument
2044 if (xenvif_have_rx_work(queue)) in xenvif_wait_for_rx_work()
2050 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); in xenvif_wait_for_rx_work()
2051 if (xenvif_have_rx_work(queue)) in xenvif_wait_for_rx_work()
2053 ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); in xenvif_wait_for_rx_work()
2057 finish_wait(&queue->wq, &wait); in xenvif_wait_for_rx_work()
2062 struct xenvif_queue *queue = data; in xenvif_kthread_guest_rx() local
2063 struct xenvif *vif = queue->vif; in xenvif_kthread_guest_rx()
2066 xenvif_queue_carrier_on(queue); in xenvif_kthread_guest_rx()
2069 xenvif_wait_for_rx_work(queue); in xenvif_kthread_guest_rx()
2081 if (unlikely(vif->disabled && queue->id == 0)) { in xenvif_kthread_guest_rx()
2086 if (!skb_queue_empty(&queue->rx_queue)) in xenvif_kthread_guest_rx()
2087 xenvif_rx_action(queue); in xenvif_kthread_guest_rx()
2094 if (xenvif_rx_queue_stalled(queue)) in xenvif_kthread_guest_rx()
2095 xenvif_queue_carrier_off(queue); in xenvif_kthread_guest_rx()
2096 else if (xenvif_rx_queue_ready(queue)) in xenvif_kthread_guest_rx()
2097 xenvif_queue_carrier_on(queue); in xenvif_kthread_guest_rx()
2105 xenvif_rx_queue_drop_expired(queue); in xenvif_kthread_guest_rx()
2107 xenvif_rx_queue_maybe_wake(queue); in xenvif_kthread_guest_rx()
2113 xenvif_rx_queue_purge(queue); in xenvif_kthread_guest_rx()
2118 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) in xenvif_dealloc_kthread_should_stop() argument
2124 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
2129 struct xenvif_queue *queue = data; in xenvif_dealloc_kthread() local
2132 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
2133 tx_dealloc_work_todo(queue) || in xenvif_dealloc_kthread()
2134 xenvif_dealloc_kthread_should_stop(queue)); in xenvif_dealloc_kthread()
2135 if (xenvif_dealloc_kthread_should_stop(queue)) in xenvif_dealloc_kthread()
2138 xenvif_tx_dealloc_action(queue); in xenvif_dealloc_kthread()
2143 if (tx_dealloc_work_todo(queue)) in xenvif_dealloc_kthread()
2144 xenvif_tx_dealloc_action(queue); in xenvif_dealloc_kthread()