/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"

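/* All extant peers are kept on a single list.  rxrpc_peer_lock guards the
 * list, and rxrpc_peer_wq lets rxrpc_destroy_all_peers() wait for the list
 * to drain at module unload time.
 */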
static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
	struct rtable *rt;
	struct flowi4 fl4;

	peer->if_mtu = 1500;

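	/* Do a routing lookup to the peer's address so that the interface MTU
	 * can be read off the resulting route.  The port numbers given here
	 * merely fill out the flow key; ordinarily they don't affect which
	 * route comes back.  If the lookup fails, the Ethernet-sized default
	 * set above is kept.
	 */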
	rt = ip_route_output_ports(&init_net, &fl4, NULL,
				   peer->srx.transport.sin.sin_addr.s_addr, 0,
				   htons(7000), htons(7001),
				   IPPROTO_UDP, 0, 0);
	if (IS_ERR(rt)) {
		_leave(" [route err %ld]", PTR_ERR(rt));
		return;
	}

	peer->if_mtu = dst_mtu(&rt->dst);
	dst_release(&rt->dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * allocate a new peer
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
					   gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
		INIT_LIST_HEAD(&peer->link);
		INIT_LIST_HEAD(&peer->error_targets);
		spin_lock_init(&peer->lock);
		atomic_set(&peer->usage, 1);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&peer->srx, srx, sizeof(*srx));

		rxrpc_assess_MTU_size(peer);
		peer->mtu = peer->if_mtu;

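		/* Work out how many bytes of each packet the transport
		 * headers will consume.  Only IPv4/UDP transports are
		 * implemented here; anything else indicates a bug in the
		 * caller.
		 */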
		if (srx->transport.family == AF_INET) {
			peer->hdrsize = sizeof(struct iphdr);
			switch (srx->transport_type) {
			case SOCK_DGRAM:
				peer->hdrsize += sizeof(struct udphdr);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}

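		/* Each packet also carries an RxRPC wire header; what's left
		 * of the MTU after all the headers is the usable payload per
		 * packet.
		 */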
		peer->hdrsize += sizeof(struct rxrpc_header);
		peer->maxdata = peer->mtu - peer->hdrsize;
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%d,%d,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport_len,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* Search the peer list first.  Peers whose usage count has reached
	 * zero are skipped: they're still on the list, but are already
	 * queued for destruction.
	 */
	read_lock_bh(&rxrpc_peer_lock);
	list_for_each_entry(peer, &rxrpc_peers, link) {
		_debug("check PEER %d { u=%d t=%d l=%d }",
		       peer->debug_id,
		       atomic_read(&peer->usage),
		       peer->srx.transport_type,
		       peer->srx.transport_len);

		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_peer;
	}
	read_unlock_bh(&rxrpc_peer_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_peer(srx, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

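	/* Somebody else may have added the same peer while the lock was
	 * dropped for the allocation, so the search has to be repeated under
	 * the write lock before the candidate can be committed.
	 */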
	write_lock_bh(&rxrpc_peer_lock);

	list_for_each_entry(peer, &rxrpc_peers, link) {
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	peer = candidate;
	candidate = NULL;
	usage = atomic_read(&peer->usage);

	list_add_tail(&peer->link, &rxrpc_peers);
	write_unlock_bh(&rxrpc_peer_lock);
	new = "new";

success:
	_net("PEER %s %d {%d,%u,%pI4+%hu}",
	     new,
	     peer->debug_id,
	     peer->srx.transport_type,
	     peer->srx.transport.family,
	     &peer->srx.transport.sin.sin_addr,
	     ntohs(peer->srx.transport.sin.sin_port));

	_leave(" = %p {u=%d}", peer, usage);
	return peer;

	/* we found the peer in the list immediately */
found_extant_peer:
	usage = atomic_inc_return(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	goto success;

	/* we found the peer on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&peer->usage);
	write_unlock_bh(&rxrpc_peer_lock);
	kfree(candidate);
	goto success;
}

/*
 * find the peer associated with a packet
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
				   __be32 addr, __be16 port)
{
	struct rxrpc_peer *peer;

	_enter("");

	/* search the peer list */
	read_lock_bh(&rxrpc_peer_lock);

	if (local->srx.transport.family == AF_INET &&
	    local->srx.transport_type == SOCK_DGRAM) {
		list_for_each_entry(peer, &rxrpc_peers, link) {
			if (atomic_read(&peer->usage) > 0 &&
			    peer->srx.transport_type == SOCK_DGRAM &&
			    peer->srx.transport.family == AF_INET &&
			    peer->srx.transport.sin.sin_port == port &&
			    peer->srx.transport.sin.sin_addr.s_addr == addr)
				goto found_UDP_peer;
		}

		goto new_UDP_peer;
	}

	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EAFNOSUPPORT");
	return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
	_net("Rx UDP DGRAM from peer %d", peer->debug_id);
	atomic_inc(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = %p", peer);
	return peer;

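	/* There's no record of this peer.  -EBUSY lets the caller decide
	 * what to do about a previously unseen peer; a new record isn't
	 * created here since this runs on the packet receive path.
	 */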
new_UDP_peer:
	_net("Rx UDP DGRAM from NEW peer");
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EBUSY [new]");
	return ERR_PTR(-EBUSY);
}

/*
 * release a remote transport endpoint
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	_enter("%p{u=%d}", peer, atomic_read(&peer->usage));

	ASSERTCMP(atomic_read(&peer->usage), >, 0);

	if (likely(!atomic_dec_and_test(&peer->usage))) {
		_leave(" [in use]");
		return;
	}

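	/* That was the last reference.  Actual destruction is punted to a
	 * work item, presumably so that it needn't run in the caller's
	 * (possibly atomic) context.
	 */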
	rxrpc_queue_work(&peer->destroyer);
	_leave("");
}

/*
 * destroy a remote transport endpoint
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, destroyer);

	_enter("%p{%d}", peer, atomic_read(&peer->usage));

	write_lock_bh(&rxrpc_peer_lock);
	list_del(&peer->link);
	write_unlock_bh(&rxrpc_peer_lock);

	_net("DESTROY PEER %d", peer->debug_id);
	kfree(peer);

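	/* If that was the last peer, wake anyone waiting in
	 * rxrpc_destroy_all_peers() for the list to drain.
	 */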
	if (list_empty(&rxrpc_peers))
		wake_up_all(&rxrpc_peer_wq);
	_leave("");
}

/*
 * wait for all outstanding peer records to be discarded so that the module
 * can be unloaded safely
 */
void __exit rxrpc_destroy_all_peers(void)
{
	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_peers)) {
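		/* The task state is set before each emptiness check so that
		 * a wakeup between the check and schedule() isn't lost.
		 */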
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_peer_wq, &myself);

		while (!list_empty(&rxrpc_peers)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_peer_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}