#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
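
/* Example (sketch, not part of this header): per-netns setup as a
 * protocol might do it. The threshold values mirror typical ipv4
 * defaults but are illustrative; names such as ipv4_frags_init_net()
 * and ip4_frags_ns_ctl_register() refer to the ipv4 implementation.
 *
 *	static int __net_init ipv4_frags_init_net(struct net *net)
 *	{
 *		net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
 *		net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
 *		net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *		inet_frags_init_net(&net->ipv4.frags);
 *		return ip4_frags_ns_ctl_register(net);
 *	}
 */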

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 * @INET_FRAG_EVICTED: frag queue is being evicted
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
	INET_FRAG_EVICTED	= BIT(3)
};

/**
 * struct inet_frag_queue - fragment queue
 *
 * @lock: spinlock protecting the queue
 * @timer: queue expiration timer
 * @list: hash bucket list
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
 * @net: namespace that this frag belongs to
 */
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct netns_frags	*net;
};
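
/* Example (sketch): a protocol typically treats reassembly as complete
 * once both INET_FRAG_FIRST_IN and INET_FRAG_LAST_IN are set and @meat
 * covers the whole datagram (@len), roughly as ipv4's ip_frag_queue()
 * decides to call its reassembly routine:
 *
 *	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    qp->q.meat == qp->q.len)
 *		return ip_frag_reasm(qp, prev, dev);
 */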

#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded-up per-entry cost (SKB_TRUESIZE(0) +
 *	       sizeof(struct ipq or struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;
	unsigned int next_bucket;
	unsigned long last_rebuild_jiffies;
	bool rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect when it
	 * needs to look up the hash chain to use again.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
};
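
/* Example (sketch): a protocol fills in the ops and registers once at
 * init time, roughly as ipv4 does in ipfrag_init(); the ip4_* names
 * refer to the ipv4 implementation:
 *
 *	ip4_frags.hashfn = ip4_hashfn;
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor = ip4_frag_free;
 *	ip4_frags.qsize = sizeof(struct ipq);
 *	ip4_frags.match = ip4_frag_match;
 *	ip4_frags.frag_expire = ip_expire;
 *	ip4_frags.frags_cache_name = ip_frag_cache_name;
 *	if (inet_frags_init(&ip4_frags))
 *		panic("IP: failed to allocate ip4_frags cache\n");
 */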

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
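
/* Example (sketch): inet_frag_find() returns an ERR_PTR when a hash
 * chain grows beyond INETFRAGS_MAXDEPTH, so callers pair it with
 * inet_frag_maybe_warn_overflow(), roughly as ipv4's ip_find() does:
 *
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */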

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}
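
/* Example (sketch): accounting typically follows skb->truesize; a
 * fragment is charged when linked into the queue and released again
 * when the queue is reassembled or destroyed:
 *
 *	add_frag_mem_limit(&qp->q, skb->truesize);	(on enqueue)
 *	sub_frag_mem_limit(&qp->q, head->truesize);	(on release)
 */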

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
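
/* Example (sketch): the exact (and more expensive) sum is meant for
 * slow paths such as /proc reporting, e.g. ipv4's ip_frag_mem():
 *
 *	int ip_frag_mem(struct net *net)
 *	{
 *		return sum_frag_mem_limit(&net->ipv4.frags);
 *	}
 */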

/* RFC 3168 support:
 * We want to check the ECN values of all fragments to detect invalid
 * combinations. In ipq->ecn, we store the OR of the ip4_frag_ecn()
 * values of all received fragments.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
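
/* Example (sketch): each fragment's ECN field is folded into one of the
 * IPFRAG_ECN_* bits (ipv4 does this with ip4_frag_ecn()), OR-ed into
 * the queue, and the table maps the accumulated set either to the ECN
 * codepoint of the reassembled packet or to 0xff for an invalid mix:
 *
 *	static u8 ip4_frag_ecn(u8 tos)
 *	{
 *		return 1 << (tos & INET_ECN_MASK);
 *	}
 *
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;
 */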

#endif