This source file includes the following definitions:
- dev_map_create_hash
- dev_map_index_hash
- dev_map_init_map
- dev_map_alloc
- dev_map_free
- dev_map_get_next_key
- __dev_map_hash_lookup_elem
- dev_map_hash_get_next_key
- bq_xmit_all
- __dev_map_flush
- __dev_map_lookup_elem
- bq_enqueue
- dev_map_enqueue
- dev_map_generic_redirect
- dev_map_lookup_elem
- dev_map_hash_lookup_elem
- dev_map_flush_old
- __dev_map_entry_free
- dev_map_delete_elem
- dev_map_hash_delete_elem
- __dev_map_alloc_node
- __dev_map_update_elem
- dev_map_update_elem
- __dev_map_hash_update_elem
- dev_map_hash_update_elem
- dev_map_hash_remove_netdev
- dev_map_notification
- dev_map_init
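
These map types are driven from an XDP program through the bpf_redirect_map() helper; the code below then bulk-queues the redirected frames and transmits them when the driver flushes. A minimal sketch of that program side (not part of this file; the map name "tx_devices", its size, and the libbpf BTF-map syntax are illustrative assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Each value is an ifindex; index 0 is used here purely for illustration. */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 64);
} tx_devices SEC(".maps");

SEC("xdp")
int xdp_redirect_to_devmap(struct xdp_md *ctx)
{
	/* Ask the kernel to redirect this frame to the device stored at
	 * index 0; returns XDP_REDIRECT when the redirect is set up. */
	return bpf_redirect_map(&tx_devices, 0, 0);
}

char _license[] SEC("license") = "GPL";

User space populates the map with ifindex values through the bpf(2) syscall, which lands in dev_map_update_elem() below; a corresponding control-plane sketch follows the map_ops tables further down.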
/* Devmaps primarily hold references to net devices (struct net_device) so that
 * an XDP program can use bpf_redirect_map() to choose an egress device. Each
 * entry also carries a per-CPU bulk queue: redirected xdp_frames are queued by
 * bq_enqueue() and transmitted in batches when the driver flushes at the end
 * of its NAPI poll, which ends up in __dev_map_flush() below.
 */
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct bpf_dtab_netdev;

struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev_rx;
	struct bpf_dtab_netdev *obj;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
	unsigned int idx;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	struct list_head __percpu *flush_list;
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	int err, cpu;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier rejects writes from the BPF side.
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure the charged cost doesn't overflow */
	cost = (u64) sizeof(struct list_head) * num_possible_cpus();

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	dtab->flush_list = alloc_percpu(struct list_head);
	if (!dtab->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
			goto free_percpu;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_percpu;
	}

	return 0;

free_percpu:
	free_percpu(dtab->flush_list);
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point map->refcnt == 0, so all programs that used this map
	 * have been disconnected and no new redirects can target it. Wait for
	 * outstanding RCU critical sections in those programs to complete; the
	 * RCU grace period does __not__ by itself guarantee that pending flush
	 * operations are done, which is why the flush lists are drained below.
	 */
	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush list to empty on _all_ cpus. Because the above
	 * synchronize_rcu() ensures the map is disconnected from the program,
	 * we can assume no new items will be added.
	 */
	for_each_online_cpu(cpu) {
		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);

		while (!list_empty(flush_list))
			cond_resched();
	}

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				free_percpu(dev->bulkq);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		kfree(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			free_percpu(dev->bulkq);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	free_percpu(dtab->flush_list);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct bpf_dtab_netdev *obj = bq->obj;
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be invoked
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine runs either from busy_poll context or from net_rx_action signaled
 * by NET_RX_SOFTIRQ; either way it must complete before the net device can be
 * torn down. On devmap tear down we wait for the per-CPU flush lists to empty,
 * so all flush operations have completed by then.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	rcu_read_lock();
	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
	rcu_read_unlock();
}
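
/* Hedged sketch (not part of this file): how a driver's NAPI poll loop is
 * expected to interact with the flush logic above. The names my_driver_poll(),
 * my_rx_ring, my_rx_frame() and my_build_xdp_buff() are hypothetical; only
 * bpf_prog_run_xdp(), xdp_do_redirect(), xdp_do_flush_map() and the XDP
 * verdicts are real kernel APIs. bq_enqueue() below fills the per-CPU bulk
 * queues during xdp_do_redirect(), and xdp_do_flush_map() ends up in
 * __dev_map_flush() before the poll routine returns. Error handling omitted.
 */
#if 0	/* illustrative only, not compiled with this file */
static int my_driver_poll(struct napi_struct *napi, int budget)
{
	struct my_rx_ring *ring = container_of(napi, struct my_rx_ring, napi);
	int done = 0;

	while (done < budget && my_rx_frame(ring)) {
		struct xdp_buff xdp;
		u32 act;

		my_build_xdp_buff(ring, &xdp);
		act = bpf_prog_run_xdp(ring->xdp_prog, &xdp);
		if (act == XDP_REDIRECT)
			xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog);
		done++;
	}

	/* Must run before returning from poll so queued frames are sent. */
	xdp_do_flush_map();
	return done;
}
#endif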

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete or
 * update happens in parallel here, a dev_put() won't happen until after the
 * ifindex has been read.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
		}
		rcu_read_unlock();
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed before the entry is freed; readers may still be
	 * dereferencing the old entry under rcu_read_lock(), and
	 * __dev_map_entry_free() drains the per-CPU bulk queues before
	 * the net_device reference is dropped.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    u32 ifindex,
						    unsigned int idx)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev;
	struct xdp_bulk_queue *bq;
	int cpu;

	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
					sizeof(void *), gfp);
	if (!dev->bulkq) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	for_each_possible_cpu(cpu) {
		bq = per_cpu_ptr(dev->bulkq, cpu);
		bq->obj = dev;
	}

	dev->dev = dev_get_by_index(net, ifindex);
	if (!dev->dev) {
		free_percpu(dev->bulkq);
		kfree(dev);
		return ERR_PTR(-EINVAL);
	}

	dev->idx = idx;
	dev->dtab = dtab;

	return dev;
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * The driver-side flush operation will happen before the old entry's
	 * net device is released.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	if (unlikely(map_flags > BPF_EXIST || !ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint struct _bpf_dtab_netdev stays in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);