This source file includes the following definitions:
- ovs_vport_init
- ovs_vport_exit
- hash_bucket
- __ovs_vport_ops_register
- ovs_vport_ops_unregister
- ovs_vport_locate
- ovs_vport_alloc
- ovs_vport_free
- ovs_vport_lookup
- ovs_vport_add
- ovs_vport_set_options
- ovs_vport_del
- ovs_vport_get_stats
- ovs_vport_get_options
- ovs_vport_set_upcall_portids
- ovs_vport_get_upcall_portids
- ovs_vport_find_upcall_portid
- ovs_vport_receive
- packet_length
- ovs_vport_send
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
#include <linux/module.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

static LIST_HEAD(vport_ops_list);

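/* Hash table of all vports, keyed by device name; buckets are iterated
 * under RCU (see ovs_vport_locate()) and modified when ports are added
 * or deleted.
 */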
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

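/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Allocates the hash table used to look up vports by name.  Returns 0 on
 * success or -ENOMEM if the table cannot be allocated.
 */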
int ovs_vport_init(void)
{
        dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dev_table)
                return -ENOMEM;

        return 0;
}

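/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Frees the vport name hash table allocated by ovs_vport_init().
 */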
void ovs_vport_exit(void)
{
        kfree(dev_table);
}

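/* Map a (net, port name) pair to its bucket in dev_table. */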
static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
        unsigned int hash = jhash(name, strlen(name), (unsigned long) net);

        return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

int __ovs_vport_ops_register(struct vport_ops *ops)
{
        int err = -EEXIST;
        struct vport_ops *o;

        ovs_lock();
        list_for_each_entry(o, &vport_ops_list, list)
                if (ops->type == o->type)
                        goto errout;

        list_add_tail(&ops->list, &vport_ops_list);
        err = 0;
errout:
        ovs_unlock();
        return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);

void ovs_vport_ops_unregister(struct vport_ops *ops)
{
        ovs_lock();
        list_del(&ops->list);
        ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);

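/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace the port belongs to
 * @name: name of the port to find
 *
 * Walks the name hash table; must be called with ovs_mutex or the RCU read
 * lock held.
 */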
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
        struct hlist_head *bucket = hash_bucket(net, name);
        struct vport *vport;

        hlist_for_each_entry_rcu(vport, bucket, hash_node)
                if (!strcmp(name, ovs_vport_name(vport)) &&
                    net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;

        return NULL;
}

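/**
 * ovs_vport_alloc - allocate and partially initialize a new vport
 *
 * @priv_size: size of the private data area to allocate after struct vport
 * @ops: vport_ops that will back the new vport
 * @parms: information about the new vport (datapath, port number, upcall
 *         portids)
 *
 * Allocates struct vport plus @priv_size bytes of implementation-private
 * data (aligned to VPORT_ALIGN) and fills in the generic fields.  Returns
 * the new vport or an ERR_PTR() on failure; the result should be released
 * with ovs_vport_free().
 */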
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
                              const struct vport_parms *parms)
{
        struct vport *vport;
        size_t alloc_size;

        alloc_size = sizeof(struct vport);
        if (priv_size) {
                alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
                alloc_size += priv_size;
        }

        vport = kzalloc(alloc_size, GFP_KERNEL);
        if (!vport)
                return ERR_PTR(-ENOMEM);

        vport->dp = parms->dp;
        vport->port_no = parms->port_no;
        vport->ops = ops;
        INIT_HLIST_NODE(&vport->dp_hash_node);

        if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
                kfree(vport);
                return ERR_PTR(-EINVAL);
        }

        return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);

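/**
 * ovs_vport_free - free memory allocated for a vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with ovs_vport_alloc(), including its upcall
 * portid array.  The vport must no longer be reachable by RCU readers when
 * this is called.
 */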
void ovs_vport_free(struct vport *vport)
{
        /* The vport is no longer visible to readers at this point, so a
         * raw dereference of upcall_portids is safe.
         */
        kfree(rcu_dereference_raw(vport->upcall_portids));
        kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);

static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
        struct vport_ops *ops;

        list_for_each_entry(ops, &vport_ops_list, list)
                if (ops->type == parms->type)
                        return ops;

        return NULL;
}

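/**
 * ovs_vport_add - create a new vport
 *
 * @parms: information about the new vport
 *
 * Creates the vport through the matching vport_ops and adds it to the name
 * hash table.  If no matching vport_ops is registered, the corresponding
 * module is requested: -EAGAIN is returned if the type is now known (the
 * caller should retry), -EAFNOSUPPORT if it is still unknown.  ovs_mutex
 * must be held.
 */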
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
        struct vport_ops *ops;
        struct vport *vport;

        ops = ovs_vport_lookup(parms);
        if (ops) {
                struct hlist_head *bucket;

                if (!try_module_get(ops->owner))
                        return ERR_PTR(-EAFNOSUPPORT);

                vport = ops->create(parms);
                if (IS_ERR(vport)) {
                        module_put(ops->owner);
                        return vport;
                }

                bucket = hash_bucket(ovs_dp_get_net(vport->dp),
                                     ovs_vport_name(vport));
                hlist_add_head_rcu(&vport->hash_node, bucket);
                return vport;
        }

        /* Unlock to attempt the module load and return -EAGAIN if the
         * module was loaded; for userspace this means the lookup should be
         * retried.
         */
        ovs_unlock();
        request_module("vport-type-%d", parms->type);
        ovs_lock();

        if (!ovs_vport_lookup(parms))
                return ERR_PTR(-EAFNOSUPPORT);
        else
                return ERR_PTR(-EAGAIN);
}

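/**
 * ovs_vport_set_options - modify an existing vport's configuration
 *
 * @vport: vport to modify
 * @options: new configuration, as a set of netlink attributes
 *
 * Hands the options to the vport implementation; returns -EOPNOTSUPP if the
 * vport type does not support options.
 */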
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
        if (!vport->ops->set_options)
                return -EOPNOTSUPP;
        return vport->ops->set_options(vport, options);
}

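/**
 * ovs_vport_del - delete an existing vport
 *
 * @vport: vport to delete
 *
 * Removes @vport from the name hash table and asks its implementation to
 * destroy it.  ovs_mutex must be held.
 */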
void ovs_vport_del(struct vport *vport)
{
        hlist_del_rcu(&vport->hash_node);
        module_put(vport->ops->owner);
        vport->ops->destroy(vport);
}

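/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store the stats
 *
 * Copies the underlying net_device's 64-bit counters into the
 * ovs_vport_stats structure reported to userspace.
 */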
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
        const struct rtnl_link_stats64 *dev_stats;
        struct rtnl_link_stats64 temp;

        dev_stats = dev_get_stats(vport->dev, &temp);
        stats->rx_errors = dev_stats->rx_errors;
        stats->tx_errors = dev_stats->tx_errors;
        stats->tx_dropped = dev_stats->tx_dropped;
        stats->rx_dropped = dev_stats->rx_dropped;

        stats->rx_bytes = dev_stats->rx_bytes;
        stats->rx_packets = dev_stats->rx_packets;
        stats->tx_bytes = dev_stats->tx_bytes;
        stats->tx_packets = dev_stats->tx_packets;
}

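/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options
 * @skb: sk_buff where the options should be appended
 *
 * Appends the vport-specific configuration as a nested
 * OVS_VPORT_ATTR_OPTIONS attribute.  Returns 0 on success or if the vport
 * has no options, -EMSGSIZE if the nest does not fit in @skb, or the error
 * returned by the vport implementation.
 */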
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
        struct nlattr *nla;
        int err;

        if (!vport->ops->get_options)
                return 0;

        nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
        if (!nla)
                return -EMSGSIZE;

        err = vport->ops->get_options(vport, skb);
        if (err) {
                nla_nest_cancel(skb, nla);
                return err;
        }

        nla_nest_end(skb, nla);
        return 0;
}

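/**
 * ovs_vport_set_upcall_portids - set the upcall portids of @vport
 *
 * @vport: vport to modify
 * @ids: new configuration: an array of u32 Netlink portids wrapped in a
 *       netlink attribute
 *
 * Replaces the vport's upcall portid array; the old array is freed after an
 * RCU grace period.  Returns -EINVAL if @ids is empty or not a multiple of
 * sizeof(u32), -ENOMEM on allocation failure.  ovs_mutex must be held (the
 * old array is fetched with ovsl_dereference()).
 */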
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
        struct vport_portids *old, *vport_portids;

        if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
                return -EINVAL;

        old = ovsl_dereference(vport->upcall_portids);

        vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
                                GFP_KERNEL);
        if (!vport_portids)
                return -ENOMEM;

        vport_portids->n_ids = nla_len(ids) / sizeof(u32);
        vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
        nla_memcpy(vport_portids->ids, ids, nla_len(ids));

        rcu_assign_pointer(vport->upcall_portids, vport_portids);

        if (old)
                kfree_rcu(old, rcu);
        return 0;
}

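/**
 * ovs_vport_get_upcall_portids - get the upcall portids of @vport
 *
 * @vport: vport from which to retrieve the portids
 * @skb: sk_buff where the portids should be appended
 *
 * If the datapath was created with OVS_DP_F_VPORT_PIDS, the whole portid
 * array is appended as OVS_VPORT_ATTR_UPCALL_PID; otherwise only the first
 * portid is reported.  Must be called with ovs_mutex or the RCU read lock.
 */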
int ovs_vport_get_upcall_portids(const struct vport *vport,
                                 struct sk_buff *skb)
{
        struct vport_portids *ids;

        ids = rcu_dereference_ovsl(vport->upcall_portids);

        if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
                return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
                               ids->n_ids * sizeof(u32), (void *)ids->ids);
        else
                return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}

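/**
 * ovs_vport_find_upcall_portid - select an upcall portid for a packet
 *
 * @vport: vport on which the missed packet was received
 * @skb: skb that missed in the flow table
 *
 * Picks one of the vport's upcall portids based on the skb flow hash, so
 * packets of the same flow are steered to the same userspace socket.
 * Returns 0 if upcalls are disabled (a single portid of 0 is configured).
 * Must be called with the RCU read lock.
 */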
u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
{
        struct vport_portids *ids;
        u32 ids_index;
        u32 hash;

        ids = rcu_dereference(vport->upcall_portids);

        /* A single portid of 0 means upcalls are disabled. */
        if (ids->n_ids == 1 && ids->ids[0] == 0)
                return 0;

        /* ids_index = hash % n_ids, computed with a reciprocal multiply. */
        hash = skb_get_hash(skb);
        ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
        return ids->ids[ids_index];
}

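/**
 * ovs_vport_receive - pass up a packet received on this vport
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel metadata for the packet, or NULL
 *
 * Extracts the flow key from @skb and hands the packet to the datapath for
 * flow lookup and action execution.  Takes ownership of @skb: it is either
 * consumed by the datapath or freed on error.
 */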
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
                      const struct ip_tunnel_info *tun_info)
{
        struct sw_flow_key key;
        int error;

        OVS_CB(skb)->input_vport = vport;
        OVS_CB(skb)->mru = 0;
        OVS_CB(skb)->cutlen = 0;
        if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
                u32 mark;

                /* Crossing a namespace boundary: scrub the skb but keep its
                 * mark, and drop the tunnel metadata.
                 */
                mark = skb->mark;
                skb_scrub_packet(skb, true);
                skb->mark = mark;
                tun_info = NULL;
        }

        /* Extract the flow from 'skb' into 'key'. */
        error = ovs_flow_key_extract(tun_info, skb, &key);
        if (unlikely(error)) {
                kfree_skb(skb);
                return error;
        }
        ovs_dp_process_packet(skb, &key);
        return 0;
}

static int packet_length(const struct sk_buff *skb,
                         struct net_device *dev)
{
        int length = skb->len - dev->hard_header_len;

        if (!skb_vlan_tag_present(skb) &&
            eth_type_vlan(skb->protocol))
                length -= VLAN_HLEN;

        /* Only a single VLAN header is accounted for here; any additional
         * tags remain part of the length that is compared against the MTU.
         */
        return length > 0 ? length : 0;
}

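/* Transmit @skb out of @vport's underlying device, fixing up the framing
 * for ARPHRD_NONE devices and dropping over-MTU packets that are not GSO.
 */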
void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
        int mtu = vport->dev->mtu;

        switch (vport->dev->type) {
        case ARPHRD_NONE:
                if (mac_proto == MAC_PROTO_ETHERNET) {
                        skb_reset_network_header(skb);
                        skb_reset_mac_len(skb);
                        skb->protocol = htons(ETH_P_TEB);
                } else if (mac_proto != MAC_PROTO_NONE) {
                        WARN_ON_ONCE(1);
                        goto drop;
                }
                break;
        case ARPHRD_ETHER:
                if (mac_proto != MAC_PROTO_ETHERNET)
                        goto drop;
                break;
        default:
                goto drop;
        }

        if (unlikely(packet_length(skb, vport->dev) > mtu &&
                     !skb_is_gso(skb))) {
                net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
                                     vport->dev->name,
                                     packet_length(skb, vport->dev), mtu);
                vport->dev->stats.tx_errors++;
                goto drop;
        }

        skb->dev = vport->dev;
        vport->ops->send(skb);
        return;

drop:
        kfree_skb(skb);
}