Lines Matching refs:vport
37 static void ovs_vport_record_error(struct vport *,
110 struct vport *ovs_vport_locate(const struct net *net, const char *name) in ovs_vport_locate()
113 struct vport *vport; in ovs_vport_locate() local
115 hlist_for_each_entry_rcu(vport, bucket, hash_node) in ovs_vport_locate()
116 if (!strcmp(name, vport->ops->get_name(vport)) && in ovs_vport_locate()
117 net_eq(ovs_dp_get_net(vport->dp), net)) in ovs_vport_locate()
118 return vport; in ovs_vport_locate()
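
A minimal caller-side sketch of the lookup above, not taken from the file: example_port_exists() is an invented helper; only ovs_vport_locate() and the locking rule implied by the RCU hash walk are real. Because the walk uses hlist_for_each_entry_rcu(), the caller must hold rcu_read_lock() (or ovs_mutex).

    #include "vport.h"

    /* Hypothetical helper: check whether a port with this name exists in the
     * given network namespace.  The RCU read-side lock satisfies the
     * requirement of the hlist_for_each_entry_rcu() walk above. */
    static bool example_port_exists(struct net *net, const char *name)
    {
            bool found;

            rcu_read_lock();
            found = ovs_vport_locate(net, name) != NULL;
            rcu_read_unlock();

            return found;
    }
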
134 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, in ovs_vport_alloc()
137 struct vport *vport; in ovs_vport_alloc() local
140 alloc_size = sizeof(struct vport); in ovs_vport_alloc()
146 vport = kzalloc(alloc_size, GFP_KERNEL); in ovs_vport_alloc()
147 if (!vport) in ovs_vport_alloc()
150 vport->dp = parms->dp; in ovs_vport_alloc()
151 vport->port_no = parms->port_no; in ovs_vport_alloc()
152 vport->ops = ops; in ovs_vport_alloc()
153 INIT_HLIST_NODE(&vport->dp_hash_node); in ovs_vport_alloc()
155 if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) { in ovs_vport_alloc()
156 kfree(vport); in ovs_vport_alloc()
160 vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); in ovs_vport_alloc()
161 if (!vport->percpu_stats) { in ovs_vport_alloc()
162 kfree(vport); in ovs_vport_alloc()
166 return vport; in ovs_vport_alloc()
180 void ovs_vport_free(struct vport *vport) in ovs_vport_free() argument
185 kfree(rcu_dereference_raw(vport->upcall_portids)); in ovs_vport_free()
186 free_percpu(vport->percpu_stats); in ovs_vport_free()
187 kfree(vport); in ovs_vport_free()
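
A sketch of how a vport implementation typically drives ovs_vport_alloc() from its ->create() hook, assuming the vport.h of this kernel vintage. struct example_priv, example_vport_ops and example_create() are invented for illustration; ovs_vport_alloc(), vport_priv() and IS_ERR() are real. A matching ->destroy() sketch appears further down, next to ovs_vport_deferred_free().

    #include "vport.h"

    struct example_priv {                           /* hypothetical per-port state */
            u32 flags;
    };

    static struct vport_ops example_vport_ops;      /* hypothetical ops table,
                                                     * fields omitted in this sketch */

    static struct vport *example_create(const struct vport_parms *parms)
    {
            struct vport *vport;
            struct example_priv *priv;

            /* priv_size reserves room behind struct vport; vport_priv()
             * (from vport.h) returns a pointer to that area. */
            vport = ovs_vport_alloc(sizeof(struct example_priv),
                                    &example_vport_ops, parms);
            if (IS_ERR(vport))
                    return vport;                   /* ERR_PTR() from the allocator */

            priv = vport_priv(vport);
            priv->flags = 0;

            return vport;
    }
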
210 struct vport *ovs_vport_add(const struct vport_parms *parms) in ovs_vport_add()
213 struct vport *vport; in ovs_vport_add() local
222 vport = ops->create(parms); in ovs_vport_add()
223 if (IS_ERR(vport)) { in ovs_vport_add()
225 return vport; in ovs_vport_add()
228 bucket = hash_bucket(ovs_dp_get_net(vport->dp), in ovs_vport_add()
229 vport->ops->get_name(vport)); in ovs_vport_add()
230 hlist_add_head_rcu(&vport->hash_node, bucket); in ovs_vport_add()
231 return vport; in ovs_vport_add()
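
Caller-side sketch of port creation, mirroring how the datapath layer fills a struct vport_parms and calls ovs_vport_add() under ovs_mutex. example_add_internal_port() is invented; the parameter-block members, OVS_VPORT_TYPE_INTERNAL and ASSERT_OVSL() (from datapath.h) are real for kernels of this vintage. ovs_vport_del(), shown a few matches below, is the mirror-image removal and runs under the same lock.

    #include "datapath.h"
    #include "vport.h"

    static struct vport *example_add_internal_port(struct datapath *dp,
                                                   const char *name, u16 port_no,
                                                   struct nlattr *upcall_portids)
    {
            struct vport_parms parms = {
                    .name           = name,
                    .type           = OVS_VPORT_TYPE_INTERNAL,
                    .options        = NULL,
                    .dp             = dp,
                    .port_no        = port_no,
                    .upcall_portids = upcall_portids,
            };

            ASSERT_OVSL();                  /* ovs_mutex must be held */
            return ovs_vport_add(&parms);   /* ERR_PTR() on failure */
    }
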
257 int ovs_vport_set_options(struct vport *vport, struct nlattr *options) in ovs_vport_set_options() argument
259 if (!vport->ops->set_options) in ovs_vport_set_options()
261 return vport->ops->set_options(vport, options); in ovs_vport_set_options()
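
A small caller-side sketch for the pass-through above; example_change_options() is invented. In mainline the hook-less branch (elided from the matches because it does not mention vport) reports -EOPNOTSUPP, so callers can treat that value as "this port type takes no options".

    #include "vport.h"

    static int example_change_options(struct vport *vport, struct nlattr *options)
    {
            int err = ovs_vport_set_options(vport, options);

            if (err == -EOPNOTSUPP)         /* port type takes no options */
                    pr_debug("ovs: options not supported on this port type\n");
            return err;
    }
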
272 void ovs_vport_del(struct vport *vport) in ovs_vport_del() argument
276 hlist_del_rcu(&vport->hash_node); in ovs_vport_del()
277 module_put(vport->ops->owner); in ovs_vport_del()
278 vport->ops->destroy(vport); in ovs_vport_del()
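
Teardown sketch pairing with the add sketch above; example_remove_port() is invented and ASSERT_OVSL() comes from datapath.h. Removal must run under ovs_mutex so the RCU hash unlink is serialized against ovs_vport_add(); the ->destroy() hook invoked on the last matched line is expected to defer the actual kfree() past an RCU grace period (see ovs_vport_deferred_free() further down).

    #include "datapath.h"
    #include "vport.h"

    static void example_remove_port(struct vport *vport)
    {
            ASSERT_OVSL();          /* same lock that protects ovs_vport_add() */
            ovs_vport_del(vport);   /* unhash, drop module ref, call ->destroy() */
    }
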
291 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) in ovs_vport_get_stats() argument
306 stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors); in ovs_vport_get_stats()
307 stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors); in ovs_vport_get_stats()
308 stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped); in ovs_vport_get_stats()
309 stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped); in ovs_vport_get_stats()
316 percpu_stats = per_cpu_ptr(vport->percpu_stats, i); in ovs_vport_get_stats()
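
Caller-side sketch: ovs_vport_get_stats() fills a struct ovs_vport_stats (the uapi structure reported over netlink), folding the atomic error counters above together with the per-CPU rx/tx packet and byte counts. example_tx_error_count() is invented.

    #include <linux/openvswitch.h>
    #include "vport.h"

    static u64 example_tx_error_count(struct vport *vport)
    {
            struct ovs_vport_stats stats;

            ovs_vport_get_stats(vport, &stats);
            return stats.tx_errors;         /* mirrors err_stats.tx_errors */
    }
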
346 int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) in ovs_vport_get_options() argument
351 if (!vport->ops->get_options) in ovs_vport_get_options()
358 err = vport->ops->get_options(vport, skb); in ovs_vport_get_options()
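
Caller-side sketch for the dump path; example_dump_options() is invented. In mainline the wrapper itself opens and closes the nested OVS_VPORT_ATTR_OPTIONS attribute around the ->get_options() call, and ports without the hook simply contribute nothing.

    #include "vport.h"

    static int example_dump_options(const struct vport *vport, struct sk_buff *reply)
    {
            /* Non-zero means the reply skb ran out of room or the
             * implementation rejected the dump; 0 covers both "options
             * appended" and "this port type has none". */
            return ovs_vport_get_options(vport, reply);
    }
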
381 int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids) in ovs_vport_set_upcall_portids() argument
388 old = ovsl_dereference(vport->upcall_portids); in ovs_vport_set_upcall_portids()
399 rcu_assign_pointer(vport->upcall_portids, vport_portids); in ovs_vport_set_upcall_portids()
420 int ovs_vport_get_upcall_portids(const struct vport *vport, in ovs_vport_get_upcall_portids() argument
425 ids = rcu_dereference_ovsl(vport->upcall_portids); in ovs_vport_get_upcall_portids()
427 if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS) in ovs_vport_get_upcall_portids()
445 u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb) in ovs_vport_find_upcall_portid() argument
451 ids = rcu_dereference(vport->upcall_portids); in ovs_vport_find_upcall_portid()
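
The three routines above manage the per-port array of Netlink portids used for upcalls: the setter swaps the array under ovs_mutex (hence ovsl_dereference), the getter appends it to a reply, and ovs_vport_find_upcall_portid() hashes a packet over the array on the fast path. A minimal fast-path sketch follows, with the invented name example_pick_upcall_portid(); a returned portid of 0 conventionally means no userspace listener is configured.

    #include "vport.h"

    static u32 example_pick_upcall_portid(const struct vport *vport,
                                          struct sk_buff *missed_skb)
    {
            u32 portid;

            /* The portid array is RCU-protected; the packet path normally
             * already runs inside rcu_read_lock(), and nesting is harmless. */
            rcu_read_lock();
            portid = ovs_vport_find_upcall_portid(vport, missed_skb);
            rcu_read_unlock();

            return portid;
    }
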
471 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, in ovs_vport_receive() argument
478 stats = this_cpu_ptr(vport->percpu_stats); in ovs_vport_receive()
485 OVS_CB(skb)->input_vport = vport; in ovs_vport_receive()
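
Rx-side sketch: a hypothetical receive hook (example_rx(), invented) hands an incoming packet to the datapath. The third argument of ovs_vport_receive() carries tunnel metadata for tunnel ports and is NULL for plain ports; its exact type changed across kernel versions, so no assumption is made about it beyond passing NULL.

    #include "vport.h"

    static void example_rx(struct vport *vport, struct sk_buff *skb)
    {
            /* Charges rx_packets/rx_bytes to the per-CPU stats and records
             * the input port in OVS_CB(skb) before the flow lookup, as the
             * matched lines above show.  NULL = no tunnel metadata. */
            ovs_vport_receive(vport, skb, NULL);
    }
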
506 int ovs_vport_send(struct vport *vport, struct sk_buff *skb) in ovs_vport_send() argument
508 int sent = vport->ops->send(vport, skb); in ovs_vport_send()
513 stats = this_cpu_ptr(vport->percpu_stats); in ovs_vport_send()
520 ovs_vport_record_error(vport, VPORT_E_TX_ERROR); in ovs_vport_send()
522 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); in ovs_vport_send()
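
Tx-side sketch: in kernels of this vintage ovs_vport_send() returns the byte count from ->send() or a negative errno, and as the matches show the wrapper itself charges the per-CPU tx counters on success and the dropped/error counters otherwise, so a caller only needs the return value if it wants to log it. example_output() is invented.

    #include "vport.h"

    static void example_output(struct vport *out_port, struct sk_buff *skb)
    {
            int sent = ovs_vport_send(out_port, skb);   /* consumes the skb */

            if (sent <= 0)
                    pr_debug("ovs: transmit failed (%d)\n", sent);
    }
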
536 static void ovs_vport_record_error(struct vport *vport, in ovs_vport_record_error() argument
541 atomic_long_inc(&vport->err_stats.rx_dropped); in ovs_vport_record_error()
545 atomic_long_inc(&vport->err_stats.rx_errors); in ovs_vport_record_error()
549 atomic_long_inc(&vport->err_stats.tx_dropped); in ovs_vport_record_error()
553 atomic_long_inc(&vport->err_stats.tx_errors); in ovs_vport_record_error()
561 struct vport *vport = container_of(rcu, struct vport, rcu); in free_vport_rcu() local
563 ovs_vport_free(vport); in free_vport_rcu()
566 void ovs_vport_deferred_free(struct vport *vport) in ovs_vport_deferred_free() argument
568 if (!vport) in ovs_vport_deferred_free()
571 call_rcu(&vport->rcu, free_vport_rcu); in ovs_vport_deferred_free()
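
Usage sketch, continuing the ->create() example from earlier: a hypothetical tunnel-style ->destroy() hook (example_destroy(), invented). By the time it runs the port has been unhashed by ovs_vport_del(), but RCU readers on the receive path may still hold the pointer, so the free is deferred past a grace period instead of calling ovs_vport_free() directly.

    #include "vport.h"

    static void example_destroy(struct vport *vport)
    {
            /* Release anything kept in vport_priv(vport) here, then let the
             * RCU callback above run ovs_vport_free() after the grace period. */
            ovs_vport_deferred_free(vport);
    }
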
619 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, in ovs_vport_get_egress_tun_info() argument
623 if (unlikely(!vport->ops->get_egress_tun_info)) in ovs_vport_get_egress_tun_info()
626 return vport->ops->get_egress_tun_info(vport, skb, info); in ovs_vport_get_egress_tun_info()
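
Caller-side sketch: tunnel ports fill in egress tunnel metadata for a packet (used when userspace asks for egress tunnel info on an upcall); non-tunnel ports lack the hook and the wrapper rejects the request, as the unlikely() check implies. example_has_egress_tun_info() is invented, and struct ovs_tunnel_info matches kernels of this vintage (newer trees pass struct ip_tunnel_info instead).

    #include "vport.h"

    static bool example_has_egress_tun_info(struct vport *vport,
                                            struct sk_buff *skb,
                                            struct ovs_tunnel_info *info)
    {
            return ovs_vport_get_egress_tun_info(vport, skb, info) == 0;
    }
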