This source file includes the following definitions:
- reuseport_get_id
- __reuseport_alloc
- reuseport_alloc
- reuseport_grow
- reuseport_free_rcu
- reuseport_add_sock
- reuseport_detach_sock
- run_bpf_filter
- reuseport_select_sock
- reuseport_attach_prog
- reuseport_detach_prog
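
For orientation before the listing: the grouping managed by this file is driven entirely from user space by binding several sockets to the same address with SO_REUSEPORT set. When a second socket binds the same port, the protocol's bind path calls reuseport_alloc() for the existing member and reuseport_add_sock() for the new one, and incoming packets are then distributed by reuseport_select_sock(). The sketch below shows that user-space side; it is not part of this file, and the helper name open_member(), the UDP protocol and port 7777 are illustrative choices only.

/* User-space sketch (not part of sock_reuseport.c): two UDP sockets
 * sharing port 7777 via SO_REUSEPORT.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_member(void)
{
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(7777),        /* arbitrary example port */
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        int one = 1;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;
        /* SO_REUSEPORT must be set before bind() for the group to form. */
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
            bind(fd, (struct sockaddr *)&addr, sizeof(addr))) {
                perror("reuseport member");
                close(fd);
                return -1;
        }
        return fd;
}

int main(void)
{
        int a = open_member();          /* first member: no group yet */
        int b = open_member();          /* second bind makes the kernel group
                                         * them: reuseport_alloc() for a,
                                         * reuseport_add_sock() for b
                                         */

        if (a < 0 || b < 0)
                return 1;
        /* Datagrams to port 7777 are now spread across a and b by
         * reuseport_select_sock().
         */
        close(a);
        close(b);
        return 0;
}

The kernel side of that grouping is the file reproduced below.
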
// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
        int id;

        if (reuse->reuseport_id)
                return reuse->reuseport_id;

        id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
                            /* Called under reuseport_lock */
                            GFP_ATOMIC);
        if (id < 0)
                return id;

        reuse->reuseport_id = id;

        return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
        unsigned int size = sizeof(struct sock_reuseport) +
                            sizeof(struct sock *) * max_socks;
        struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

        if (!reuse)
                return NULL;

        reuse->max_socks = max_socks;

        RCU_INIT_POINTER(reuse->prog, NULL);
        return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
        struct sock_reuseport *reuse;

        /* bh lock used since this function call may precede hlist lock in
         * soft irq of receive path or setsockopt from process context
         */
        spin_lock_bh(&reuseport_lock);

        /* Allocation attempts can occur concurrently via the setsockopt path
         * and the bind/hash path.  Nothing to do when we lose the race.
         */
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        if (reuse) {
                /* Only set reuse->bind_inany if the bind_inany is true.
                 * Otherwise, it will overwrite the reuse->bind_inany
                 * which was set by the bind/hash path.
                 */
                if (bind_inany)
                        reuse->bind_inany = bind_inany;
                goto out;
        }

        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
                return -ENOMEM;
        }

        reuse->socks[0] = sk;
        reuse->num_socks = 1;
        reuse->bind_inany = bind_inany;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
        spin_unlock_bh(&reuseport_lock);

        return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
        struct sock_reuseport *more_reuse;
        u32 more_socks_size, i;

        more_socks_size = reuse->max_socks * 2U;
        if (more_socks_size > U16_MAX)
                return NULL;

        more_reuse = __reuseport_alloc(more_socks_size);
        if (!more_reuse)
                return NULL;

        more_reuse->max_socks = more_socks_size;
        more_reuse->num_socks = reuse->num_socks;
        more_reuse->prog = reuse->prog;
        more_reuse->reuseport_id = reuse->reuseport_id;
        more_reuse->bind_inany = reuse->bind_inany;

        memcpy(more_reuse->socks, reuse->socks,
               reuse->num_socks * sizeof(struct sock *));
        more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

        for (i = 0; i < reuse->num_socks; ++i)
                rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
                                   more_reuse);

        /* Note: we use kfree_rcu here instead of reuseport_free_rcu
         * because the prog pointer and the reuseport_id were carried
         * over to more_reuse above and must not be freed here.
         */
        kfree_rcu(reuse, rcu);
        return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
        struct sock_reuseport *reuse;

        reuse = container_of(head, struct sock_reuseport, rcu);
        sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
        if (reuse->reuseport_id)
                ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
        kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
        struct sock_reuseport *old_reuse, *reuse;

        if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
                int err = reuseport_alloc(sk2, bind_inany);

                if (err)
                        return err;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                              lockdep_is_held(&reuseport_lock));
        if (old_reuse && old_reuse->num_socks != 1) {
                spin_unlock_bh(&reuseport_lock);
                return -EBUSY;
        }

        if (reuse->num_socks == reuse->max_socks) {
                reuse = reuseport_grow(reuse);
                if (!reuse) {
                        spin_unlock_bh(&reuseport_lock);
                        return -ENOMEM;
                }
        }

        reuse->socks[reuse->num_socks] = sk;
        /* paired with smp_rmb() in reuseport_select_sock() */
        smp_wmb();
        reuse->num_socks++;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        spin_unlock_bh(&reuseport_lock);

        if (old_reuse)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);
        return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

void reuseport_detach_sock(struct sock *sk)
{
        struct sock_reuseport *reuse;
        int i;

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* At least one of the sk in this reuseport group is added to
         * a bpf map.  Notify the bpf side.  The bpf map logic will
         * remove the sk if it is indeed added to a bpf map.
         */
        if (reuse->reuseport_id)
                bpf_sk_reuseport_detach(sk);

        rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

        for (i = 0; i < reuse->num_socks; i++) {
                if (reuse->socks[i] == sk) {
                        reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
                        reuse->num_socks--;
                        if (reuse->num_socks == 0)
                                call_rcu(&reuse->rcu, reuseport_free_rcu);
                        break;
                }
        }
        spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
                                   struct bpf_prog *prog, struct sk_buff *skb,
                                   int hdr_len)
{
        struct sk_buff *nskb = NULL;
        u32 index;

        if (skb_shared(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return NULL;
                skb = nskb;
        }

        /* temporarily advance data past protocol header */
        if (!pskb_pull(skb, hdr_len)) {
                kfree_skb(nskb);
                return NULL;
        }
        index = bpf_prog_run_save_cb(prog, skb);
        __skb_push(skb, hdr_len);

        consume_skb(nskb);

        if (index >= socks)
                return NULL;

        return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
                                   u32 hash,
                                   struct sk_buff *skb,
                                   int hdr_len)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *prog;
        struct sock *sk2 = NULL;
        u16 socks;

        rcu_read_lock();
        reuse = rcu_dereference(sk->sk_reuseport_cb);

        /* if memory allocation failed or add call is not yet complete */
        if (!reuse)
                goto out;

        prog = rcu_dereference(reuse->prog);
        socks = READ_ONCE(reuse->num_socks);
        if (likely(socks)) {
                /* paired with smp_wmb() in reuseport_add_sock() */
                smp_rmb();

                if (!prog || !skb)
                        goto select_by_hash;

                if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
                        sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
                else
                        sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
                /* no bpf or invalid bpf result: fall back to hash usage */
                if (!sk2) {
                        int i, j;

                        i = j = reciprocal_scale(hash, socks);
                        while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
                                i++;
                                if (i >= reuse->num_socks)
                                        i = 0;
                                if (i == j)
                                        goto out;
                        }
                        sk2 = reuse->socks[i];
                }
        }

out:
        rcu_read_unlock();
        return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        if (sk_unhashed(sk) && sk->sk_reuseport) {
                int err = reuseport_alloc(sk, false);

                if (err)
                        return err;
        } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
                /* The socket wasn't bound with SO_REUSEPORT */
                return -EINVAL;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_prog = rcu_dereference_protected(reuse->prog,
                                             lockdep_is_held(&reuseport_lock));
        rcu_assign_pointer(reuse->prog, prog);
        spin_unlock_bh(&reuseport_lock);

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

int reuseport_detach_prog(struct sock *sk)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        if (!rcu_access_pointer(sk->sk_reuseport_cb))
                return sk->sk_reuseport ? -ENOENT : -EINVAL;

        old_prog = NULL;
        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        rcu_swap_protected(reuse->prog, old_prog,
                           lockdep_is_held(&reuseport_lock));
        spin_unlock_bh(&reuseport_lock);

        if (!old_prog)
                return -ENOENT;

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
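
A note on the classic-BPF path above: run_bpf_filter() is reached when user space attaches an ordinary socket filter to a reuseport socket with SO_ATTACH_REUSEPORT_CBPF, which ends up in reuseport_attach_prog(); the filter's return value is then used as an index into reuse->socks[]. The sketch below is not part of this file; the fallback definition of SO_ATTACH_REUSEPORT_CBPF as 51 mirrors asm-generic/socket.h, and the UDP protocol and port 7777 are arbitrary.

/* User-space sketch (not part of sock_reuseport.c): attach a trivial
 * classic-BPF selector that always returns index 0, i.e. every packet
 * is steered to the first socket in the group.
 */
#include <linux/filter.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_ATTACH_REUSEPORT_CBPF
#define SO_ATTACH_REUSEPORT_CBPF 51    /* value from asm-generic/socket.h */
#endif

int main(void)
{
        /* run_bpf_filter() uses the classic-BPF return value as the
         * reuse->socks[] index; returning 0 always picks socket 0.
         */
        struct sock_filter code[] = {
                { BPF_RET | BPF_K, 0, 0, 0 },
        };
        struct sock_fprog prog = { .len = 1, .filter = code };
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(7777),        /* arbitrary example port */
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        int one = 1;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        /* SO_REUSEPORT first, otherwise reuseport_attach_prog() rejects
         * the attach with -EINVAL.
         */
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
            setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
                       &prog, sizeof(prog)) ||
            bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
                perror("reuseport cbpf setup");
        close(fd);
        return 0;
}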