/*
 * net/core/dst.c	Protocol-independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) To keep the spinlock held for only a short time,
 *    long-lived entries are moved to a second list that is
 *    handled by a garbage-collect task fired from a workqueue.
 * 3) That second list is guarded by a mutex,
 *    so that the gc task and dst_dev_event() can be synchronized.
 */
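
/*
 * In short: __dst_free() pushes entries onto dst_garbage.list under
 * dst_garbage.lock; dst_gc_task() later splices that list out,
 * destroys whatever is unreferenced, and parks the still-referenced
 * remainder on dst_busy_list (under dst_gc_mutex) for the next pass.
 */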

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long-lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry         *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int    delayed = 0;
	int    work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the
			 * gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when their parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so this is the best we
			 * can do for now.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * If we freed less than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * If the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds.
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
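
/*
 * Backoff arithmetic, worked through.  Assuming the usual constants
 * from <net/dst.h> (DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2,
 * DST_GC_MAX = 120*HZ) and HZ = 1000:
 *
 *	quiet pass 1: expires = 100 + 500   =  600 ms, timer_inc -> 1000 ms
 *	quiet pass 2: expires = 600 + 1000  = 1600 ms, timer_inc -> 1500 ms
 *	quiet pass 3: expires = 1600 + 1500 = 3100 ms, timer_inc -> 2000 ms
 *
 * so a mostly-idle busy list is rescanned at growing intervals,
 * capped at DST_GC_MAX; one productive pass (more than 1/10 of the
 * delayed entries freed) resets the period back to DST_GC_MIN.
 */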

int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_sk);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_sk;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);
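
/*
 * Usage sketch (hypothetical caller; "example_dst_ops" is not a real
 * kernel symbol): a protocol allocates an entry with one reference
 * held for the caller and no initial obsolescence:
 *
 *	struct dst_entry *dst;
 *
 *	dst = dst_alloc(&example_dst_ops, dev, 1, DST_OBSOLETE_NONE, 0);
 *	if (!dst)
 *		return -ENOBUFS;
 *
 * The entry starts out counted against its dst_ops (unless
 * DST_NOCOUNT is passed), with both handlers set to discard packets
 * until the protocol installs its own input/output functions.
 */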

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
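
/*
 * Typical caller sketch (hypothetical): a protocol that has just
 * unlinked an entry from its own lookup structure, while readers may
 * still hold references, hands it to the garbage lists instead of
 * freeing it directly:
 *
 *	hlist_del(&ex->hash_node);	(no new lookups can find it)
 *	__dst_free(&ex->dst);		(gc frees it once __refcnt drops)
 *
 * "ex" and its fields are illustrative names only.
 */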

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the
			 * child too.
			 */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);
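
/*
 * Note on the child chain walked above: stacked dst's (an xfrm
 * bundle, for example) form a parent->child chain whose last link is
 * a plain routing entry.  Destroying the head drops one reference per
 * level; a DST_NOHASH child whose last reference goes away here is
 * destroyed in the same pass via the "again" loop, while a hashed
 * child is left for its own table to reap.
 */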

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);
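
/*
 * Reference pairing sketch (hypothetical caller): a user that
 * detaches an entry from an skb and keeps it must balance the count
 * itself:
 *
 *	struct dst_entry *dst = skb_dst(skb);
 *
 *	dst_hold(dst);
 *	(use dst after the skb is gone)
 *	dst_release(dst);
 *
 * For a DST_NOCACHE entry the final dst_release() defers the actual
 * destruction to an RCU callback, so lockless readers that found the
 * entry under rcu_read_lock() never see freed memory.
 */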

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
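
/*
 * Worked example of the copy-on-write scheme above (the MTU value is
 * illustrative): dst->_metrics holds a pointer to a u32[RTAX_MAX]
 * array, with the low bit (DST_METRICS_READ_ONLY) set while the entry
 * still shares the read-only dst_default_metrics.  A write such as
 *
 *	dst_metric_set(dst, RTAX_MTU, 1400);
 *
 * goes through dst_metrics_write_ptr() and lands here, where the
 * shared array is cloned into a private kmalloc()ed copy published
 * with cmpxchg().  If another CPU won the race, our copy is freed and
 * we use the winner's array instead (or return NULL if the winner's
 * array is itself read-only).
 */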

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}