This source file includes the following definitions:
- rxe_mcast_get_grp
- rxe_mcast_add_grp_elem
- rxe_mcast_drop_grp_elem
- rxe_drop_all_mcast_groups
- rxe_mc_cleanup
#include "rxe.h"
#include "rxe_loc.h"

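/* Look up the multicast group for mgid, creating it if it does not yet
 * exist. On success a referenced group is returned through grp_p.
 */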
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p)
{
	int err;
	struct rxe_mc_grp *grp;

	if (rxe->attr.max_mcast_qp_attach == 0) {
		err = -EINVAL;
		goto err1;
	}

	grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
	if (grp)
		goto done;

	grp = rxe_alloc(&rxe->mc_grp_pool);
	if (!grp) {
		err = -ENOMEM;
		goto err1;
	}

	INIT_LIST_HEAD(&grp->qp_list);
	spin_lock_init(&grp->mcg_lock);
	grp->rxe = rxe;

	rxe_add_key(grp, mgid);

	err = rxe_mcast_add(rxe, mgid);
	if (err)
		goto err2;

done:
	*grp_p = grp;
	return 0;

err2:
	rxe_drop_ref(grp);
err1:
	return err;
}

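/* Attach a qp to a multicast group by adding an element that links the
 * qp and the group. Each attached qp holds a reference on the group.
 */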
int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp)
{
	int err;
	struct rxe_mc_elem *elem;

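	/* check whether the qp is already a member of the group */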
	spin_lock_bh(&qp->grp_lock);
	spin_lock_bh(&grp->mcg_lock);
	list_for_each_entry(elem, &grp->qp_list, qp_list) {
		if (elem->qp == qp) {
			err = 0;
			goto out;
		}
	}

	if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) {
		err = -ENOMEM;
		goto out;
	}

	elem = rxe_alloc(&rxe->mc_elem_pool);
	if (!elem) {
		err = -ENOMEM;
		goto out;
	}

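	/* each attached qp holds a reference on the grp */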
	rxe_add_ref(grp);

	grp->num_qp++;
	elem->qp = qp;
	elem->grp = grp;

	list_add(&elem->qp_list, &grp->qp_list);
	list_add(&elem->grp_list, &qp->grp_list);

	err = 0;
out:
	spin_unlock_bh(&grp->mcg_lock);
	spin_unlock_bh(&qp->grp_lock);
	return err;
}

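/* Detach a qp from the multicast group identified by mgid, freeing the
 * linking element and dropping the references it held.
 */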
int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid)
{
	struct rxe_mc_grp *grp;
	struct rxe_mc_elem *elem, *tmp;

	grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
	if (!grp)
		goto err1;

	spin_lock_bh(&qp->grp_lock);
	spin_lock_bh(&grp->mcg_lock);

	list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
		if (elem->qp == qp) {
			list_del(&elem->qp_list);
			list_del(&elem->grp_list);
			grp->num_qp--;

			spin_unlock_bh(&grp->mcg_lock);
			spin_unlock_bh(&qp->grp_lock);
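			/* drop the elem, the qp's reference on the grp,
			 * and the reference taken by rxe_pool_get_key() above
			 */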
			rxe_drop_ref(elem);
			rxe_drop_ref(grp);
			rxe_drop_ref(grp);
			return 0;
		}
	}

	spin_unlock_bh(&grp->mcg_lock);
	spin_unlock_bh(&qp->grp_lock);
	rxe_drop_ref(grp);
err1:
	return -EINVAL;
}

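/* Detach the qp from every multicast group it is still a member of,
 * e.g. when the qp is being torn down.
 */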
void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
{
	struct rxe_mc_grp *grp;
	struct rxe_mc_elem *elem;

	while (1) {
		spin_lock_bh(&qp->grp_lock);
		if (list_empty(&qp->grp_list)) {
			spin_unlock_bh(&qp->grp_lock);
			break;
		}
		elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
					grp_list);
		list_del(&elem->grp_list);
		spin_unlock_bh(&qp->grp_lock);

		grp = elem->grp;
		spin_lock_bh(&grp->mcg_lock);
		list_del(&elem->qp_list);
		grp->num_qp--;
		spin_unlock_bh(&grp->mcg_lock);
		rxe_drop_ref(grp);
		rxe_drop_ref(elem);
	}
}

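/* Pool cleanup callback for a multicast group, run when its last
 * reference is dropped; removes the key and the multicast address.
 */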
void rxe_mc_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
	struct rxe_dev *rxe = grp->rxe;

	rxe_drop_key(grp);
	rxe_mcast_delete(rxe, &grp->mgid);
}