/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid       gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ		3
struct netdev_event_work_cmd {
	roce_netdev_callback	cb;
	roce_netdev_filter	filter;
	struct net_device	*ndev;
	struct net_device	*filter_ndev;
};

struct netdev_event_work {
	struct work_struct		work;
	struct netdev_event_work_cmd	cmds[ROCE_NETDEV_CALLBACK_SZ];
};

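/*
 * Apply a single GID add/del to the RoCE GID cache of the given port.
 * ib_cache_gid_add()/ib_cache_gid_del() take care of table slot
 * management and of notifying the underlying provider.
 */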
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u8 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	switch (gid_op) {
	case GID_ADD:
		ib_cache_gid_add(ib_dev, port, gid, gid_attr);
		break;
	case GID_DEL:
		ib_cache_gid_del(ib_dev, port, gid, gid_attr);
		break;
	}
}

enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE	= 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE	= 1UL << 1,
	/* No primary slave or the device isn't a slave in bonding */
	BONDING_SLAVE_STATE_NA		= 1UL << 2,
};

static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								   struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}

static bool is_upper_dev_rcu(struct net_device *dev, struct net_device *upper)
{
	struct net_device *_upper = NULL;
	struct list_head *iter;

	netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
		if (_upper == upper)
			break;

	return _upper == upper;
}

#define REQUIRED_BOND_STATES		(BONDING_SLAVE_STATE_ACTIVE |	\
					 BONDING_SLAVE_STATE_NA)
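/*
 * Filter: does this RoCE port (represented by rdma_ndev) carry traffic for
 * event_ndev?  It matches when event_ndev (or the real device behind a
 * VLAN) is rdma_ndev itself, or when event_ndev is an upper device of
 * rdma_ndev and rdma_ndev is either not enslaved or is the bond's active
 * slave.
 */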
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = (struct net_device *)cookie;
	struct net_device *real_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(event_ndev);
	if (!real_dev)
		real_dev = event_ndev;

	res = ((is_upper_dev_rcu(rdma_ndev, event_ndev) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}

static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
				      struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
		BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = (struct net_device *)cookie;
	int res;

	if (!rdma_ndev)
		return 0;

	if (rdma_ndev == event_ndev)
		return 1;

	rcu_read_lock();
	res = is_upper_dev_rcu(rdma_ndev, event_ndev);
	rcu_read_unlock();

	return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u8 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

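/*
 * Each RoCE port keeps "default" GIDs derived from the netdev itself
 * rather than from an IP address.  Only (re)set them when rdma_ndev
 * actually serves event_ndev and is not sitting behind a bond as an
 * inactive slave.
 */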
static void enum_netdev_default_gids(struct ib_device *ib_dev,
				     u8 port, struct net_device *event_ndev,
				     struct net_device *rdma_ndev)
{
	rcu_read_lock();
	if (!rdma_ndev ||
	    ((rdma_ndev != event_ndev &&
	      !is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev,
						netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u8 port,
					    struct net_device *event_ndev,
					    struct net_device *rdma_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if (is_upper_dev_rcu(rdma_ndev, event_ndev) &&
	    is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
	    BONDING_SLAVE_STATE_INACTIVE) {
		rcu_read_unlock();

		ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
					     IB_CACHE_GID_DEFAULT_MODE_DELETE);
	} else {
		rcu_read_unlock();
	}
}

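/*
 * Walk the netdev's IPv4 addresses and add a GID for each one.  The
 * addresses are first copied into a private list: the in_dev walk runs
 * under RCU and may not sleep, while the GID cache update can.
 */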
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct in_device *in_dev;
	struct sin_list {
		struct list_head	list;
		struct sockaddr_in	ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	for_ifa(in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry) {
			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
			continue;
		}
		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head	list;
		struct sockaddr_in6	sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry) {
			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
			continue;
		}

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid	gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = (struct net_device *)cookie;

	enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
	_add_netdev_ips(ib_dev, port, event_ndev);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = (struct net_device *)cookie;

	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u8 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Lock the rtnl to make sure the netdevs do not move under
	 * our feet
	 */
	rtnl_lock();
	for_each_net(net)
		for_each_netdev(net, ndev)
			if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
				add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
	rtnl_unlock();
}

/* This function will rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 */
int roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);

	return 0;
}

static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u8 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}

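/*
 * Run handle_netdev on the given netdev and on every device stacked above
 * it (bond, VLAN, ...).  The upper devices are collected under RCU with a
 * reference held on each, and then handled outside the RCU read-side
 * section, since the GID cache update may sleep.
 */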
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u8 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = (struct net_device *)cookie;
	struct upper_list {
		struct list_head list;
		struct net_device *upper;
	};
	struct net_device *upper;
	struct list_head *iter;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
		struct upper_list *entry = kmalloc(sizeof(*entry),
						   GFP_ATOMIC);

		if (!entry) {
			pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
			continue;
		}

		list_add_tail(&entry->list, &upper_list);
		dev_hold(upper);
		entry->upper = upper;
	}
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				      struct net_device *event_ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	if (master_ndev)
		dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
						rdma_ndev);
		dev_put(master_ndev);
	}
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
				   struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = (struct net_device *)cookie;

	bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */
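
/*
 * Illustrative flow (not executed here): a NETDEV_UP on eth0 turns into a
 * command array that is run from ib_wq against every RoCE port, e.g.
 *
 *	cmds[0] = bonding_default_del_cmd_join;	filter: is_eth_port_inactive_slave
 *	cmds[1] = add_cmd;			filter: is_eth_port_of_netdev
 *
 * netdevice_queue_work() holds references on the netdevs, and
 * netdevice_event_work_handler() then calls ib_enum_all_roce_netdevs()
 * once per populated command, passing cmd->ndev as the cookie to cmd->cb.
 */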

static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}

static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work) {
		pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
		return NOTIFY_DONE;
	}

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(ib_wq, &ndev_work->work);

	return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
	.cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

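/*
 * CHANGEUPPER handling: when the netdev is unlinked from its upper device
 * (e.g. removed from a bond), drop the GIDs that were programmed for the
 * upper and re-add the netdev's own GIDs; when it is linked, clear default
 * GIDs of now-inactive slaves and program GIDs for the new upper device.
 */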
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
					struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd upper_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

	if (!changeupper_info->linking) {
		cmds[0] = upper_ips_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd;
	} else {
		cmds[0] = bonding_default_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd_upper_ips;
		cmds[1].ndev = changeupper_info->upper_dev;
		cmds[1].filter_ndev = changeupper_info->upper_dev;
	}
}

static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
		.cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
	static const struct netdev_event_work_cmd default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = default_del_cmd;
		cmds[1] = add_cmd;
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		cmds[1] = bonding_default_del_cmd_join;
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}

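/*
 * Translate an inetaddr/inet6addr notification into a single-GID update.
 * The notifier may be called in atomic context (the IPv6 chain is atomic),
 * hence the GFP_ATOMIC allocation and the deferral of the actual GID table
 * change to ib_wq.  For example, adding 10.0.0.1 to eth0 becomes a GID_ADD
 * work item whose gid is built by rdma_ip2gid() and applied to every RoCE
 * port for which is_eth_port_of_netdev(..., eth0) holds.
 */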
static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
		return NOTIFY_DONE;
	}

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev   = ndev;

	queue_work(ib_wq, &work->work);

	return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in	in;
	struct net_device	*ndev;
	struct in_ifaddr	*ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6	in6;
	struct net_device	*ndev;
	struct inet6_ifaddr	*ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);
	/* We rely on the netdevice notifier to enumerate all existing
	 * devices in the system. Register this notifier last to make
	 * sure we do not miss any IP add/del callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);
	/* Ensure all gid deletion tasks complete before we go down,
	 * to avoid any reference to freed memory. By the time
	 * ib-core is removed, all physical devices have been removed,
	 * so no issue with remaining hardware contexts.
	 */
}