This source file includes the following definitions:
- sctp_endpoint_init
- sctp_endpoint_new
- sctp_endpoint_add_asoc
- sctp_endpoint_free
- sctp_endpoint_destroy
- sctp_endpoint_hold
- sctp_endpoint_put
- sctp_endpoint_is_match
- sctp_endpoint_lookup_assoc
- sctp_endpoint_is_peeled_off
- sctp_endpoint_bh_rcv
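
Together these functions implement the endpoint life cycle: sctp_endpoint_new() allocates and initializes an endpoint for a socket, sctp_endpoint_add_asoc() attaches associations to it, and teardown is reference counted (sctp_endpoint_free() marks the endpoint dead and drops the initial reference; sctp_endpoint_destroy() runs once sctp_endpoint_put() releases the last one). The sketch below is illustrative only and is not part of this file; it assumes a kernel context with an already-created SCTP socket, and the caller name example_endpoint_lifecycle is hypothetical.

/* Illustrative sketch (hypothetical caller, not part of this file):
 * exercises the endpoint life cycle using only functions defined here.
 */
#include <net/sctp/sctp.h>

static int example_endpoint_lifecycle(struct sock *sk)
{
	struct sctp_endpoint *ep;

	/* Allocate and initialize an endpoint bound to this socket. */
	ep = sctp_endpoint_new(sk, GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	/* Take an extra reference while the endpoint is in use ... */
	sctp_endpoint_hold(ep);

	/* ... e.g. sctp_endpoint_lookup_assoc(ep, paddr, &transport) ... */

	/* ... then drop that reference again. */
	sctp_endpoint_put(ep);

	/* Teardown: mark dead, unhash, and drop the initial reference;
	 * sctp_endpoint_destroy() runs when the refcount hits zero.
	 */
	sctp_endpoint_free(ep);
	return 0;
}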
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static void sctp_endpoint_bh_rcv(struct work_struct *work);

/* Initialize an already-allocated SCTP endpoint for the given socket. */
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
						struct sock *sk,
						gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_shared_key *null_key;

	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
	if (!ep->digest)
		return NULL;

	ep->asconf_enable = net->sctp.addip_enable;
	ep->auth_enable = net->sctp.auth_enable;
	if (ep->auth_enable) {
		if (sctp_auth_init(ep, gfp))
			goto nomem;
		if (ep->asconf_enable) {
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
		}
	}

	/* Initialize the base structure. */
	ep->base.type = SCTP_EP_TYPE_SOCKET;

	/* Initialize the basic object fields. */
	refcount_set(&ep->base.refcnt, 1);
	ep->base.dead = false;

	/* Create an input queue. */
	sctp_inq_init(&ep->base.inqueue);

	/* Set its top-half handler. */
	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&ep->base.bind_addr, 0);

	/* Create the list of associations. */
	INIT_LIST_HEAD(&ep->asocs);

	/* Use SCTP-specific send buffer space queues. */
	ep->sndbuf_policy = net->sctp.sndbuf_policy;

	sk->sk_data_ready = sctp_data_ready;
	sk->sk_write_space = sctp_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	/* Get the receive buffer policy for this endpoint. */
	ep->rcvbuf_policy = net->sctp.rcvbuf_policy;

	/* Initialize the secret key used with the cookie. */
	get_random_bytes(ep->secret_key, sizeof(ep->secret_key));

	/* SCTP-AUTH: add the null key to the endpoint shared keys list. */
	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
	null_key = sctp_auth_shkey_create(0, gfp);
	if (!null_key)
		goto nomem_shkey;

	list_add(&null_key->key_list, &ep->endpoint_shared_keys);

	ep->prsctp_enable = net->sctp.prsctp_enable;
	ep->reconf_enable = net->sctp.reconf_enable;
	ep->ecn_enable = net->sctp.ecn_enable;

	/* Remember who we are attached to. */
	ep->base.sk = sk;
	ep->base.net = sock_net(sk);
	sock_hold(ep->base.sk);

	return ep;

nomem_shkey:
	sctp_auth_free(ep);
nomem:
	kfree(ep->digest);
	return NULL;
}

/* Create a new SCTP endpoint bound to the given socket. */
struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
	struct sctp_endpoint *ep;

	/* Build a local endpoint. */
	ep = kzalloc(sizeof(*ep), gfp);
	if (!ep)
		goto fail;

	if (!sctp_endpoint_init(ep, sk, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(ep);
	return ep;

fail_init:
	kfree(ep);
fail:
	return NULL;
}

/* Add an association to an endpoint. */
void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
			    struct sctp_association *asoc)
{
	struct sock *sk = ep->base.sk;

	/* If this is a temporary association, don't bother, since it will
	 * be removed shortly and should not be found by lookups anyway.
	 */
	if (asoc->temp)
		return;

	/* Now just add it to our list of asocs. */
	list_add_tail(&asoc->asocs, &ep->asocs);

	/* Increment the backlog value for a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		sk->sk_ack_backlog++;
}

/* Free the endpoint structure.  The actual cleanup is delayed until all
 * users have released their reference on the structure.
 */
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
	ep->base.dead = true;

	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);

	/* Unlink this endpoint so it can't be found again. */
	sctp_unhash_endpoint(ep);

	sctp_endpoint_put(ep);
}

/* Final destructor for the endpoint. */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
	struct sock *sk;

	if (unlikely(!ep->base.dead)) {
		WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
		return;
	}

	/* Free the digest buffer. */
	kfree(ep->digest);

	/* SCTP-AUTH: free up AUTH-related data such as the shared keys,
	 * chunk and hmac arrays that were allocated.
	 */
	sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
	sctp_auth_free(ep);

	/* Cleanup. */
	sctp_inq_free(&ep->base.inqueue);
	sctp_bind_addr_free(&ep->base.bind_addr);

	memset(ep->secret_key, 0, sizeof(ep->secret_key));

	sk = ep->base.sk;

	/* Remove and free the port. */
	if (sctp_sk(sk)->bind_hash)
		sctp_put_port(sk);

	sctp_sk(sk)->ep = NULL;

	/* Give up our hold on the sock. */
	sock_put(sk);

	kfree(ep);
	SCTP_DBG_OBJCNT_DEC(ep);
}

/* Hold a reference to an endpoint. */
void sctp_endpoint_hold(struct sctp_endpoint *ep)
{
	refcount_inc(&ep->base.refcnt);
}

/* Release a reference to an endpoint and clean up if there are no more
 * references.
 */
void sctp_endpoint_put(struct sctp_endpoint *ep)
{
	if (refcount_dec_and_test(&ep->base.refcnt))
		sctp_endpoint_destroy(ep);
}

/* Is this the endpoint we are looking for? */
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
					     struct net *net,
					     const union sctp_addr *laddr)
{
	struct sctp_endpoint *retval = NULL;

	if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
	    net_eq(sock_net(ep->base.sk), net)) {
		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
					 sctp_sk(ep->base.sk)))
			retval = ep;
	}

	return retval;
}

/* Find the association that goes with this chunk: look up the transport in
 * the hash table first, then reach the association through t->asoc.
 */
struct sctp_association *sctp_endpoint_lookup_assoc(
	const struct sctp_endpoint *ep,
	const union sctp_addr *paddr,
	struct sctp_transport **transport)
{
	struct sctp_association *asoc = NULL;
	struct sctp_transport *t;

	*transport = NULL;

	/* If the local port is not set, there cannot be any associations
	 * on this endpoint.
	 */
	if (!ep->base.bind_addr.port)
		return NULL;

	rcu_read_lock();
	t = sctp_epaddr_lookup_transport(ep, paddr);
	if (!t)
		goto out;

	*transport = t;
	asoc = t->asoc;
out:
	rcu_read_unlock();
	return asoc;
}

/* Check whether any association peeled off from this endpoint matches the
 * given peer address.
 */
bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
				 const union sctp_addr *paddr)
{
	struct sctp_sockaddr_entry *addr;
	struct sctp_bind_addr *bp;
	struct net *net = sock_net(ep->base.sk);

	bp = &ep->base.bind_addr;

	/* Walk the bound addresses and ask whether any of them has an
	 * association with the given peer address.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		if (sctp_has_association(net, &addr->a, paddr))
			return true;
	}

	return false;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv() and may run
 * in BH or task context.
 */
static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
	struct sctp_endpoint *ep =
		container_of(work, struct sctp_endpoint,
			     base.inqueue.immediate);
	struct sctp_association *asoc;
	struct sock *sk;
	struct net *net;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	union sctp_subtype subtype;
	enum sctp_state state;
	int error = 0;
	int first_time = 1;	/* is this the first time through the loop? */

	if (ep->base.dead)
		return;

	asoc = NULL;
	inqueue = &ep->base.inqueue;
	sk = ep->base.sk;
	net = sock_net(sk);

	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do the special
		 * processing specified in Section 6.3 of SCTP-AUTH.
		 */
		if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so that
			 * authentication can be done later, during
			 * COOKIE-ECHO processing.
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}
normal:
		/* We might have grown an association since we last looked,
		 * so try again.  This happens when we have just processed
		 * our COOKIE-ECHO chunk.
		 */
		if (NULL == chunk->asoc) {
			asoc = sctp_endpoint_lookup_assoc(ep,
							  sctp_source(chunk),
							  &transport);
			chunk->asoc = asoc;
			chunk->transport = transport;
		}

		state = asoc ? asoc->state : SCTP_STATE_CLOSED;
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we know
		 * where to send the SACK.
		 */
		if (asoc && sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
			if (asoc)
				asoc->stats.ictrlchunks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
				   ep, asoc, chunk, GFP_ATOMIC);

		if (error && chunk)
			chunk->pdiscard = 1;

		/* Check whether the endpoint was freed in response to the
		 * incoming chunk.  If so, get out of the while loop.
		 */
		if (!sctp_sk(sk)->ep)
			break;

		if (first_time)
			first_time = 0;
	}
}