/*
 *  net/dccp/input.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
int sysctl_dccp_sync_ratelimit	__read_mostly = HZ / 8;

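/*
 * Strip the DCCP header and append the payload to the socket's receive queue,
 * waking any process sleeping in dccp_recvmsg() via sk_data_ready().
 */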
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk);
}

static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * On receiving Close/CloseReq, both RD/WR shutdown are performed.
	 * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
	 * receiving the closing segment, but there is no guarantee that such
	 * data will be processed at all.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;
	sock_set_flag(sk, SOCK_DONE);
	dccp_enqueue_skb(sk, skb);
}

static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * We ignore Close when received in one of the following states:
	 *  - CLOSED		(may be a late or duplicate packet)
	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
	 *  - RESPOND		(already handled by dccp_check_req)
	 */
	case DCCP_CLOSING:
		/*
		 * Simultaneous-close: receiving a Close after sending one. This
		 * can happen if both client and server perform active-close and
		 * will result in an endless ping-pong of crossing and retrans-
		 * mitted Close packets, which only terminates when one of the
		 * nodes times out (min. 64 seconds). Quicker convergence can be
		 * achieved when one of the nodes acts as tie-breaker.
		 * This is ok as both ends are done with data transfer and each
		 * end is just waiting for the other to acknowledge termination.
		 */
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;
		/* fall through */
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		/* fall through */
	case DCCP_PASSIVE_CLOSE:
		/*
		 * Retransmitted Close: we have already enqueued the first one.
		 */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}

static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return queued;
	}

	/* Step 13: process relevant Client states < CLOSEREQ */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		dccp_send_close(sk, 0);
		dccp_set_state(sk, DCCP_CLOSING);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
		/* fall through */
	case DCCP_PASSIVE_CLOSEREQ:
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}

static u16 dccp_reset_code_convert(const u8 code)
{
	const u16 error_code[] = {
	[DCCP_RESET_CODE_CLOSED]	     = 0,	/* normal termination */
	[DCCP_RESET_CODE_UNSPECIFIED]	     = 0,	/* nothing known */
	[DCCP_RESET_CODE_ABORTED]	     = ECONNRESET,

	[DCCP_RESET_CODE_NO_CONNECTION]	     = ECONNREFUSED,
	[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
	[DCCP_RESET_CODE_TOO_BUSY]	     = EUSERS,
	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,

	[DCCP_RESET_CODE_PACKET_ERROR]	     = ENOMSG,
	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = EBADR,
	[DCCP_RESET_CODE_BAD_SERVICE_CODE]   = EBADRQC,
	[DCCP_RESET_CODE_OPTION_ERROR]	     = EILSEQ,
	[DCCP_RESET_CODE_MANDATORY_ERROR]    = EOPNOTSUPP,
	};

	return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}

static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
	dccp_fin(sk, skb);

	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}

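/*
 * Ack Vector bookkeeping: if this socket maintains an Ack Vector, state that
 * the peer has acknowledged (via P.ackno) is cleared first, and then the
 * sequence number of the incoming packet is recorded in the vector.
 */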
static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;

	if (av == NULL)
		return;
	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
	dccp_ackvec_input(av, skb);
}

static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);

	/* Don't deliver to RX CCID when node has shut down read end. */
	if (!(sk->sk_shutdown & RCV_SHUTDOWN))
		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	/*
	 * Until the TX queue has been drained, we cannot honour SHUT_WR, since
	 * we need received feedback as input to adjust congestion control.
	 */
	if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}

static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
		    after48(ackno, dp->dccps_gar))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;
		/*
		 *   Step 6: Check sequence numbers
		 *      Otherwise,
		 *         If P.type == Reset,
		 *            Send Sync packet acknowledging S.GSR
		 *         Otherwise,
		 *            Send Sync packet acknowledging P.seqno
		 *      Drop packet and return
		 *
		 *   These Syncs are rate-limited as per RFC 4340, 7.5.4:
		 *   at most one Sync per sysctl_dccp_sync_ratelimit jiffies
		 *   (default HZ / 8, i.e. at most 8 Syncs per second).
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return -1;

		DCCP_WARN("Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}

static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
		 * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
		 * - sk_receive_queue is full, use Code 2, "Receive Buffer"
		 */
		dccp_enqueue_skb(sk, skb);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_CLOSE:
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *    or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *    or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From RFC 4340, sec. 5.7
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}

int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned int len)
{
	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, NULL, skb))
		return 1;

	dccp_handle_ackvec_processing(sk, skb);
	dccp_deliver_input_to_ccids(sk, skb);

	return __dccp_rcv_established(sk, skb, dh, len);
discard:
	__kfree_skb(skb);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_established);

static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned int len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu\n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		/*
		 * If option processing (Step 8) failed, return 1 here so that
		 * dccp_v4_do_rcv() sends a Reset. The Reset code depends on
		 * the option type and is set in dccp_parse_options().
		 */
		if (dccp_parse_options(sk, NULL, skb))
			return 1;

		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		WARN_ON(sk->sk_send_head == NULL);
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		/*
		 * Set ISR, GSR from packet. ISS was set in dccp_v{4,6}_connect
		 * and GSS in dccp_transmit_skb(). Setting AWL/AWH and SWL/SWH
		 * is done as part of activating the feature values below, since
		 * these settings depend on the local/remote Sequence Window
		 * features, which were undefined or not confirmed until now.
		 */
		dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/*
		 * If feature negotiation was successful, activate features now;
		 * an activation failure means that this host could not activate
		 * one or more features (e.g. insufficient memory), which would
		 * leave at least one feature in an undefined state.
		 */
		if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
			goto unable_to_proceed;

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can do a similar trick as well; it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, as also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;

unable_to_proceed:
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
	/*
	 * We mark this socket as no longer usable, so that the loop in
	 * dccp_sendmsg() terminates and the application gets notified.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	sk->sk_err = ECOMM;
	return 1;
}

static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		if (sk->sk_state == DCCP_RESPOND)
			break;
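		/* fall through */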
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here, but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc. For now we're not clearing it, since sending an
		 * extra ACK when there is nothing else to do in DELACK is not
		 * a big deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
		if (likely(sample)) {
			long delta = dccp_timestamp() - sample;

			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
		}

		dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}

int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
	 *	      Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
								    skb) < 0)
				return 1;
			goto discard;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset */
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	} else if (sk->sk_state == DCCP_CLOSED) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	/* Step 6: Check sequence numbers (omitted in LISTEN/REQUEST state) */
	if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
		goto discard;

	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == Response)
	 *	    or (S.is_client and P.type == Request)
	 *	    or (S.state == RESPOND and P.type == Data),
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
	     dh->dccph_type == DCCP_PKT_RESPONSE) ||
	    (dp->dccps_role == DCCP_ROLE_CLIENT &&
	     dh->dccph_type == DCCP_PKT_REQUEST) ||
	    (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	}

	/*  Step 8: Process options */
	if (dccp_parse_options(sk, NULL, skb))
		return 1;

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		dccp_rcv_reset(sk, skb);
		return 0;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {	/* Step 13 */
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {		/* Step 14 */
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_PARTOPEN:
		/* Step 8: if using Ack Vectors, mark packet acknowledgeable */
		dccp_handle_ackvec_processing(sk, skb);
		dccp_deliver_input_to_ccids(sk, skb);
		/* fall through */
	case DCCP_RESPOND:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
			break;
		}
	} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);

/**
 *  dccp_sample_rtt  -  Validate and finalise computation of RTT sample
 *  @sk:	socket structure
 *  @delta:	number of microseconds between packet and acknowledgment
 *
 *  The routine is kept generic to work in different contexts. It should be
 *  called immediately when the ACK used for the RTT sample arrives.
 */
u32 dccp_sample_rtt(struct sock *sk, long delta)
{
	/* dccpor_elapsed_time is either zeroed out or set and > 0 */
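	/* The Elapsed Time option is in 10-microsecond units (RFC 4340, 13.2),
	 * hence the factor of 10 when converting it to microseconds below.
	 */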
	delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;

	if (unlikely(delta <= 0)) {
		DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
		return DCCP_SANE_RTT_MIN;
	}
	if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
		DCCP_WARN("RTT sample %ld too large, using max\n", delta);
		return DCCP_SANE_RTT_MAX;
	}

	return delta;
}