This source file includes the following definitions:
- xenvif_skb_zerocopy_prepare
- xenvif_skb_zerocopy_complete
- xenvif_schedulable
- xenvif_tx_interrupt
- xenvif_poll
- xenvif_rx_interrupt
- xenvif_interrupt
- xenvif_queue_stopped
- xenvif_wake_queue
- xenvif_select_queue
- xenvif_start_xmit
- xenvif_get_stats
- xenvif_up
- xenvif_down
- xenvif_open
- xenvif_close
- xenvif_change_mtu
- xenvif_fix_features
- xenvif_get_sset_count
- xenvif_get_ethtool_stats
- xenvif_get_strings
- xenvif_alloc
- xenvif_init_queue
- xenvif_carrier_on
- xenvif_connect_ctrl
- xenvif_connect_data
- xenvif_carrier_off
- xenvif_disconnect_data
- xenvif_disconnect_ctrl
- xenvif_deinit_queue
- xenvif_free
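
The sketch below is illustrative only: a hypothetical bring-up path showing the intended call order of the lifecycle functions listed above (allocation, queue setup, connection, carrier handling). The function name `example_backend_bringup` and the omitted xenbus plumbing are not part of this file.

static int example_backend_bringup(struct device *parent, domid_t domid,
                                   unsigned int handle)
{
        struct xenvif *vif;

        vif = xenvif_alloc(parent, domid, handle);
        if (IS_ERR(vif))
                return PTR_ERR(vif);

        /* Illustrative: for each queue negotiated with the frontend,
         * xenvif_init_queue() prepares it and xenvif_connect_data()
         * binds its rings and event channels; xenvif_connect_ctrl()
         * wires up the optional control ring. Teardown reverses this
         * via xenvif_disconnect_data()/xenvif_disconnect_ctrl(),
         * xenvif_deinit_queue() and finally xenvif_free().
         */

        xenvif_carrier_on(vif);
        return 0;
}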

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}

int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue; pretend there is nothing to do so that it
         * is descheduled from NAPI. The interface will be turned off
         * in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* If the queue is rate-limited, it shall be
                 * rescheduled in the timer callback.
                 */
                if (likely(!queue->rate_limited))
                        xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               struct net_device *sb_dev)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
        unsigned int num_queues;

        /* If queues are not set up internally, always return 0 as the
         * packet is going to be dropped anyway.
         */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                return 0;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return netdev_pick_tx(dev, skb, NULL) %
                       dev->real_num_tx_queues;

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[vif->hash.mapping_sel]
                                [skb_get_hash_raw(skb) % size];
}

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        /* If there is no hash algorithm configured then make sure there
         * is no hash information in the socket buffer otherwise it
         * would be incorrectly forwarded to the frontend.
         */
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u64 rx_bytes = 0;
        u64 rx_packets = 0;
        u64 tx_bytes = 0;
        u64 tx_packets = 0;
        unsigned int index;

        rcu_read_lock();
        num_queues = READ_ONCE(vif->num_queues);

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

        rcu_read_unlock();

        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};
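
/* Note: the per-queue counters above are summed across all queues by
 * xenvif_get_ethtool_stats() below and exposed through the standard
 * ethtool statistics interface; from the backend domain they can be
 * read with, e.g., "ethtool -S vif1.0" (the interface name here is
 * only an illustrative example).
 */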

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues;
        int i;
        unsigned int queue_index;

        rcu_read_lock();
        num_queues = READ_ONCE(vif->num_queues);

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;

                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }

        rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * better enable it. The long term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS,
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          { { .ctx = NULL,
                              .desc = i } } };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn)
{
        struct net_device *dev = vif->dev;
        void *addr;
        struct xen_netif_ctrl_sring *shared;
        int err;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     &ring_ref, 1, &addr);
        if (err)
                goto err;

        shared = (struct xen_netif_ctrl_sring *)addr;
        BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

        err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
        if (err < 0)
                goto err_unmap;

        vif->ctrl_irq = err;

        xenvif_init_hash(vif);

        err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
                                   IRQF_ONESHOT, "xen-netback-ctrl", vif);
        if (err) {
                pr_warn("Could not setup irq handler for %s\n", dev->name);
                goto err_deinit;
        }

        return 0;

err_deinit:
        xenvif_deinit_hash(vif);
        unbind_from_irqhandler(vif->ctrl_irq, vif);
        vif->ctrl_irq = 0;

err_unmap:
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                vif->ctrl.sring);
        vif->ctrl.sring = NULL;

err:
        return err;
}

int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
                                             rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        queue->stalled = true;

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;
        get_task_struct(task);

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
err:
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                netif_napi_del(&queue->napi);

                if (queue->task) {
                        kthread_stop(queue->task);
                        put_task_struct(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq)
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_data_rings(queue);
        }

        xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
        if (vif->ctrl_irq) {
                xenvif_deinit_hash(vif);
                unbind_from_irqhandler(vif->ctrl_irq, vif);
                vif->ctrl_irq = 0;
        }

        if (vif->ctrl.sring) {
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->ctrl.sring);
                vif->ctrl.sring = NULL;
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queues = vif->queues;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index)
                xenvif_deinit_queue(&queues[queue_index]);
        vfree(queues);

        module_put(THIS_MODULE);
}