This source file includes the following definitions.
- __xfrm_transport_prep
- __xfrm_mode_tunnel_prep
- xfrm_outer_mode_prep
- validate_xmit_xfrm
- xfrm_dev_state_add
- xfrm_dev_offload_ok
- xfrm_dev_resume
- xfrm_dev_backlog
- xfrm_api_check
- xfrm_dev_register
- xfrm_dev_feat_change
- xfrm_dev_down
- xfrm_dev_event
- xfrm_dev_init
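
For orientation, here is a rough sketch of how a NIC driver hooks into this offload path: it fills in the xfrmdev_ops callbacks that xfrm_dev_state_add() and xfrm_dev_offload_ok() below invoke, and advertises the NETIF_F_HW_ESP feature that xfrm_api_check() validates. The foo_* names are hypothetical; only struct xfrmdev_ops, its fields, and the feature flags are taken from the kernel API used in this file.

#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Hypothetical driver glue -- a sketch, not a real driver. */
static int foo_xdo_dev_state_add(struct xfrm_state *x)
{
        /* Program the SA (keys, SPI, replay state) into hardware. */
        return 0;
}

static void foo_xdo_dev_state_delete(struct xfrm_state *x)
{
        /* Remove the SA from the hardware tables. */
}

static bool foo_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        /* Refuse packets the hardware cannot handle, e.g. with IP options. */
        return true;
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
        .xdo_dev_state_add    = foo_xdo_dev_state_add,
        .xdo_dev_state_delete = foo_xdo_dev_state_delete,
        .xdo_dev_offload_ok   = foo_xdo_dev_offload_ok,
};

/* Called from the driver's probe path before register_netdev(). */
static void foo_setup_esp_offload(struct net_device *netdev)
{
        netdev->xfrmdev_ops = &foo_xfrmdev_ops;
        netdev->features |= NETIF_F_HW_ESP;
        netdev->hw_enc_features |= NETIF_F_HW_ESP;
}
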
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
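/*
 * Transport mode prep for the offload xmit path: recompute mac_len, and
 * for GSO segments move the transport header back by the state's
 * header_len, then pull the skb to the transport offset plus header_len
 * so skb->data sits where the ESP transform expects it.
 */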
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb_reset_mac_len(skb);
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

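/*
 * Tunnel mode prep: for GSO segments point the transport header just past
 * the outer IP header (hsize bytes after the network header), then
 * recompute mac_len and pull the MAC header plus the state's header_len
 * off the front of the skb.
 */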
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
                                    unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
}

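/*
 * Dispatch to the per-mode prep helper based on the state's outer mode and
 * address family; BEET and the Mobile IPv6 modes need no preparation here.
 */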
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
        case XFRM_MODE_BEET:
                break;
        }
}

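/*
 * Entry point from the core xmit path for packets carrying an offloaded
 * xfrm state.  GRO and inbound-offload packets pass through untouched; if
 * the per-cpu xfrm backlog is non-empty the packet is deferred via *again;
 * GSO packets whose state is bound to a different device are segmented in
 * software; everything else is prepped and handed to the state's
 * type_offload->xmit() hook, pushing the MAC header back on success.
 */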
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct sk_buff *skb2;
        struct softnet_data *sd;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;

                if (unlikely(x->xso.dev != dev)) {
                        struct sk_buff *segs;

                        /* Packet got rerouted, fixup features and segment it. */
                        esp_features = esp_features & ~(NETIF_F_HW_ESP
                                                        | NETIF_F_GSO_ESP);

                        segs = skb_gso_segment(skb, esp_features);
                        if (IS_ERR(segs)) {
                                kfree_skb(skb);
                                atomic_long_inc(&dev->tx_dropped);
                                return NULL;
                        } else {
                                consume_skb(skb);
                                skb = segs;
                        }
                }
        }

        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                xfrm_outer_mode_prep(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb2 = skb;

        do {
                struct sk_buff *nskb = skb2->next;

                esp_features |= skb->dev->gso_partial_features;
                skb_mark_not_on_list(skb2);

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                xfrm_outer_mode_prep(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;

                        if (!skb)
                                return NULL;

                        goto skip_push;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
                skb2 = nskb;
        } while (skb2);

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

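/*
 * Bind a state to an offloading device.  The device comes from
 * xuo->ifindex or, failing that, from a route lookup on the state's
 * addresses.  States with UDP encapsulation or TFC padding, and ESN states
 * on devices lacking xdo_dev_state_advance_esn, are rejected; a device
 * without xfrmdev_ops, or a driver returning -EOPNOTSUPP, simply leaves
 * the state in software (return 0).
 */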
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation and TFC padding. */
        if (x->encap || x->tfcpad)
                return -EINVAL;

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->num_exthdrs = 0;
                xso->flags = 0;
                xso->dev = NULL;
                dev_put(dev);

                if (err != -EOPNOTSUPP)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

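/*
 * Check whether an skb can stay on the offload path: the state must have a
 * type_offload and no encapsulation, the child dst must not carry another
 * xfrm, the packet (or its GSO segments) must fit the state's MTU, and if
 * an offload device is set it must match the route's output device.  The
 * driver gets a final veto through xdo_dev_offload_ok().
 */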
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
            (!xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

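/*
 * Retransmit an skb the driver has finished processing asynchronously:
 * pick a tx queue and try dev_hard_start_xmit(); if the queue is frozen
 * or stopped, or the transmit does not complete, queue the skb on the
 * per-cpu xfrm backlog and raise NET_TX_SOFTIRQ to retry later.
 */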
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_core_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

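/*
 * Drain the per-cpu xfrm backlog (called from the NET_TX softirq), feeding
 * each queued skb back through xfrm_dev_resume().
 */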
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif

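/*
 * Validate a device's ESP offload advertisement: NETIF_F_HW_ESP_TX_CSUM
 * requires NETIF_F_HW_ESP, and NETIF_F_HW_ESP requires the mandatory
 * xfrmdev_ops state add/delete callbacks.  Without CONFIG_XFRM_OFFLOAD
 * neither feature may be set.
 */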
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

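/* Flush offloaded states bound to a device that is going down or away. */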
static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        return NOTIFY_DONE;
}

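/*
 * Netdevice notifier: validate offload features on register and feature
 * change, flush offloaded states on down and unregister.
 */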
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

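/* Register the xfrm netdevice notifier at boot. */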
void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}