tun 595 drivers/infiniband/hw/mlx4/cq.c wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
tun 596 drivers/infiniband/hw/mlx4/cq.c wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
tun 597 drivers/infiniband/hw/mlx4/cq.c wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
tun 602 drivers/infiniband/hw/mlx4/cq.c wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
tun 603 drivers/infiniband/hw/mlx4/cq.c memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
tun 604 drivers/infiniband/hw/mlx4/cq.c memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
tun 607 drivers/infiniband/hw/mlx4/cq.c wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
tun 608 drivers/infiniband/hw/mlx4/cq.c wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
tun 526 drivers/infiniband/hw/mlx4/mad.c tun_ctx = dev->sriov.demux[port-1].tun[slave];
tun 1972 drivers/infiniband/hw/mlx4/mad.c if (dev->sriov.demux[port - 1].tun[slave]) {
tun 1973 drivers/infiniband/hw/mlx4/mad.c kfree(dev->sriov.demux[port - 1].tun[slave]);
tun 1974 drivers/infiniband/hw/mlx4/mad.c dev->sriov.demux[port - 1].tun[slave] = NULL;
tun 2126 drivers/infiniband/hw/mlx4/mad.c dev->sriov.demux[port - 1].tun[slave], 1);
tun 2132 drivers/infiniband/hw/mlx4/mad.c dev->sriov.demux[port - 1].tun[slave]);
tun 2160 drivers/infiniband/hw/mlx4/mad.c ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
tun 2162 drivers/infiniband/hw/mlx4/mad.c if (!ctx->tun)
tun 2179 drivers/infiniband/hw/mlx4/mad.c ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
tun 2219 drivers/infiniband/hw/mlx4/mad.c kfree(ctx->tun);
tun 2220 drivers/infiniband/hw/mlx4/mad.c ctx->tun = NULL;
tun 2252 drivers/infiniband/hw/mlx4/mad.c if (!ctx->tun[i])
tun 2254 drivers/infiniband/hw/mlx4/mad.c if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
tun 2255 drivers/infiniband/hw/mlx4/mad.c ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
tun 2259 drivers/infiniband/hw/mlx4/mad.c destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
tun 2262 drivers/infiniband/hw/mlx4/mad.c kfree(ctx->tun);
tun 273 drivers/infiniband/hw/mlx4/mlx4_ib.h struct mlx4_rcv_tunnel_hdr tun;
tun 479 drivers/infiniband/hw/mlx4/mlx4_ib.h struct mlx4_ib_demux_pv_ctx **tun;
tun 115 drivers/media/tuners/tuner-simple.c struct tunertype *tun;
tun 239 drivers/media/tuners/tuner-simple.c struct tunertype *tun = priv->tun;
tun 242 drivers/media/tuners/tuner-simple.c for (i = 0; i < tun->count; i++)
tun 243 drivers/media/tuners/tuner-simple.c if (desired_type == tun->params[i].type)
tun 247 drivers/media/tuners/tuner-simple.c if (i == tun->count) {
tun 254 drivers/media/tuners/tuner-simple.c tuner_param_name(tun->params[i].type));
tun 256 drivers/media/tuners/tuner-simple.c return &tun->params[i];
tun 667 drivers/media/tuners/tuner-simple.c struct tunertype *tun;
tun 676 drivers/media/tuners/tuner-simple.c tun = priv->tun;
tun 678 drivers/media/tuners/tuner-simple.c for (j = tun->count-1; j > 0; j--)
tun 679 drivers/media/tuners/tuner-simple.c if (tun->params[j].type == TUNER_PARAM_TYPE_RADIO)
tun 682 drivers/media/tuners/tuner-simple.c t_params = &tun->params[j];
tun 850 drivers/media/tuners/tuner-simple.c struct tunertype *tun = priv->tun;
tun 857 drivers/media/tuners/tuner-simple.c if (!tun->stepsize) {
tun 861 drivers/media/tuners/tuner-simple.c priv->type, priv->tun->name);
tun 871 drivers/media/tuners/tuner-simple.c tun->stepsize/2) / tun->stepsize;
tun 881 drivers/media/tuners/tuner-simple.c tun->name, div, buf[0], buf[1], buf[2], buf[3]);
tun 884 drivers/media/tuners/tuner-simple.c return (div * tun->stepsize) - t_params->iffreq;
tun 967 drivers/media/tuners/tuner-simple.c if (priv->tun->initdata) {
tun 974 drivers/media/tuners/tuner-simple.c priv->tun->initdata + 1,
tun 975 drivers/media/tuners/tuner-simple.c priv->tun->initdata[0]);
tun 976 drivers/media/tuners/tuner-simple.c if (ret != priv->tun->initdata[0])
tun 990 drivers/media/tuners/tuner-simple.c if (priv->tun->sleepdata) {
tun 997 drivers/media/tuners/tuner-simple.c priv->tun->sleepdata + 1,
tun 998 drivers/media/tuners/tuner-simple.c priv->tun->sleepdata[0]);
tun 999 drivers/media/tuners/tuner-simple.c if (ret != priv->tun->sleepdata[0])
tun 1098 drivers/media/tuners/tuner-simple.c priv->tun = &tuners[type];
tun 1113 drivers/media/tuners/tuner-simple.c type, priv->type, priv->tun->name);
tun 1116 drivers/media/tuners/tuner-simple.c priv->type, priv->tun->name);
tun 1134 drivers/media/tuners/tuner-simple.c strscpy(fe->ops.tuner_ops.info.name, priv->tun->name,
tun 12 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c struct ip_tunnel *tun = netdev_priv(ol_dev);
tun 14 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c return tun->parms;
tun 20 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c struct ip6_tnl *tun = netdev_priv(ol_dev);
tun 22 drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c return tun->parms;
tun 984 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct ip_tunnel *tun = netdev_priv(ol_dev);
tun 987 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return __dev_get_by_index(net, tun->parms.link);
tun 297 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct ip_tunnel *tun = netdev_priv(to_dev);
tun 308 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c 0, 0, parms.link, tun->fwmark, 0);
tun 310 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c rt = ip_route_output_key(tun->net, &fl4);
tun 284 drivers/net/ethernet/netronome/nfp/flower/action.c const struct ip_tunnel_info *tun = act->tunnel;
tun 293 drivers/net/ethernet/netronome/nfp/flower/action.c switch (tun->key.tp_dst) {
tun 185 drivers/net/ethernet/netronome/nfp/flower/main.h struct nfp_fl_tunnel_offloads tun;
tun 768 drivers/net/ethernet/netronome/nfp/flower/offload.c struct nfp_fl_set_ipv4_tun *tun;
tun 776 drivers/net/ethernet/netronome/nfp/flower/offload.c tun = (struct nfp_fl_set_ipv4_tun *)a;
tun 777 drivers/net/ethernet/netronome/nfp/flower/offload.c tun->outer_vlan_tpid = vlan->vlan_tpid;
tun 778 drivers/net/ethernet/netronome/nfp/flower/offload.c tun->outer_vlan_tci = vlan->vlan_tci;
tun 233 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_lock_bh(&priv->tun.neigh_off_lock);
tun 234 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
tun 237 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_unlock_bh(&priv->tun.neigh_off_lock);
tun 241 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_unlock_bh(&priv->tun.neigh_off_lock);
tun 251 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_lock_bh(&priv->tun.neigh_off_lock);
tun 252 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
tun 255 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_unlock_bh(&priv->tun.neigh_off_lock);
tun 261 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_unlock_bh(&priv->tun.neigh_off_lock);
tun 267 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_add_tail(&entry->list, &priv->tun.neigh_off_list);
tun 268 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_unlock_bh(&priv->tun.neigh_off_lock);
tun 277 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_lock_bh(&priv->tun.neigh_off_lock);
tun 278 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
tun 286 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_unlock_bh(&priv->tun.neigh_off_lock);
tun 351 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
tun 433 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_lock(&priv->tun.ipv4_off_lock);
tun 435 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
tun 437 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_unlock(&priv->tun.ipv4_off_lock);
tun 445 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_unlock(&priv->tun.ipv4_off_lock);
tun 458 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_lock(&priv->tun.ipv4_off_lock);
tun 459 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
tun 463 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_unlock(&priv->tun.ipv4_off_lock);
tun 470 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_unlock(&priv->tun.ipv4_off_lock);
tun 476 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
tun 477 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_unlock(&priv->tun.ipv4_off_lock);
tun 488 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_lock(&priv->tun.ipv4_off_lock);
tun 489 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
tun 500 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_unlock(&priv->tun.ipv4_off_lock);
tun 559 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
tun 611 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
tun 638 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
tun 661 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
tun 667 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
tun 732 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
tun 740 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
tun 746 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
tun 1009 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c err = rhashtable_init(&priv->tun.offloaded_macs,
tun 1014 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ida_init(&priv->tun.mac_off_ids);
tun 1017 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c mutex_init(&priv->tun.ipv4_off_lock);
tun 1018 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
tun 1021 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c spin_lock_init(&priv->tun.neigh_off_lock);
tun 1022 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c INIT_LIST_HEAD(&priv->tun.neigh_off_list);
tun 1023 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
tun 1025 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c err = register_netevent_notifier(&priv->tun.neigh_nb);
tun 1027 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
tun 1042 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c unregister_netevent_notifier(&priv->tun.neigh_nb);
tun 1044 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ida_destroy(&priv->tun.mac_off_ids);
tun 1047 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
tun 1054 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
tun 1062 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
tun 2639 drivers/net/ethernet/qlogic/qed/qed_l2.c struct qed_tunnel_info *tun;
tun 2641 drivers/net/ethernet/qlogic/qed/qed_l2.c tun = &hwfn->cdev->tunnel;
tun 2662 drivers/net/ethernet/qlogic/qed/qed_l2.c vxlan_port = tun->vxlan_port.port;
tun 2663 drivers/net/ethernet/qlogic/qed/qed_l2.c geneve_port = tun->geneve_port.port;
tun 249 drivers/net/ethernet/qlogic/qed/qed_main.c struct qed_tunnel_info *tun = &cdev->tunnel;
tun 254 drivers/net/ethernet/qlogic/qed/qed_main.c if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
tun 255 drivers/net/ethernet/qlogic/qed/qed_main.c tun->vxlan.b_mode_enabled)
tun 258 drivers/net/ethernet/qlogic/qed/qed_main.c if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
tun 259 drivers/net/ethernet/qlogic/qed/qed_main.c tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
tun 260 drivers/net/ethernet/qlogic/qed/qed_main.c tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
tun 263 drivers/net/ethernet/qlogic/qed/qed_main.c if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
tun 264 drivers/net/ethernet/qlogic/qed/qed_main.c tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
tun 265 drivers/net/ethernet/qlogic/qed/qed_main.c tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
tun 2248 drivers/net/ethernet/qlogic/qed/qed_sriov.c static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
tun 2250 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (tun->b_update_mode && !tun->b_mode_enabled) {
tun 2251 drivers/net/ethernet/qlogic/qed/qed_sriov.c tun->b_update_mode = false;
tun 2262 drivers/net/ethernet/qlogic/qed/qed_sriov.c struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
tun 2268 drivers/net/ethernet/qlogic/qed/qed_sriov.c bultn_vxlan_port = tun->vxlan_port.port;
tun 2269 drivers/net/ethernet/qlogic/qed/qed_sriov.c bultn_geneve_port = tun->geneve_port.port;
tun 2288 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
tun 2297 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (tun_src->geneve_port.port == tun->geneve_port.port) {
tun 1330 drivers/net/hyperv/hyperv_net.h u8 tun;
tun 84 drivers/net/tun.c #define tun_debug(level, tun, fmt, args...) \
tun 86 drivers/net/tun.c if (tun->debug) \
tun 87 drivers/net/tun.c netdev_printk(level, tun->dev, fmt, ##args); \
tun 95 drivers/net/tun.c #define tun_debug(level, tun, fmt, args...) \
tun 98 drivers/net/tun.c netdev_printk(level, tun->dev, fmt, ##args); \
tun 163 drivers/net/tun.c struct tun_struct __rcu *tun;
tun 189 drivers/net/tun.c struct tun_struct *tun;
tun 310 drivers/net/tun.c static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
tun 316 drivers/net/tun.c netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
tun 340 drivers/net/tun.c static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
tun 342 drivers/net/tun.c return tun->flags & TUN_VNET_BE ? false :
tun 346 drivers/net/tun.c static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
tun 348 drivers/net/tun.c int be = !!(tun->flags & TUN_VNET_BE);
tun 356 drivers/net/tun.c static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
tun 364 drivers/net/tun.c tun->flags |= TUN_VNET_BE;
tun 366 drivers/net/tun.c tun->flags &= ~TUN_VNET_BE;
tun 371 drivers/net/tun.c static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
tun 376 drivers/net/tun.c static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
tun 381 drivers/net/tun.c static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
tun 387 drivers/net/tun.c static inline bool tun_is_little_endian(struct tun_struct *tun)
tun 389 drivers/net/tun.c return tun->flags & TUN_VNET_LE ||
tun 390 drivers/net/tun.c tun_legacy_is_little_endian(tun);
tun 393 drivers/net/tun.c static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
tun 395 drivers/net/tun.c return __virtio16_to_cpu(tun_is_little_endian(tun), val);
tun 398 drivers/net/tun.c static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
tun 400 drivers/net/tun.c return __cpu_to_virtio16(tun_is_little_endian(tun), val);
tun 419 drivers/net/tun.c static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
tun 426 drivers/net/tun.c tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
tun 432 drivers/net/tun.c e->tun = tun;
tun 434 drivers/net/tun.c ++tun->flow_count;
tun 439 drivers/net/tun.c static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
tun 441 drivers/net/tun.c tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
tun 445 drivers/net/tun.c --tun->flow_count;
tun 448 drivers/net/tun.c static void tun_flow_flush(struct tun_struct *tun)
tun 452 drivers/net/tun.c spin_lock_bh(&tun->lock);
tun 457 drivers/net/tun.c hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
tun 458 drivers/net/tun.c tun_flow_delete(tun, e);
tun 460 drivers/net/tun.c spin_unlock_bh(&tun->lock);
tun 463 drivers/net/tun.c static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
tun 467 drivers/net/tun.c spin_lock_bh(&tun->lock);
tun 472 drivers/net/tun.c hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
tun 474 drivers/net/tun.c tun_flow_delete(tun, e);
tun 477 drivers/net/tun.c spin_unlock_bh(&tun->lock);
tun 482 drivers/net/tun.c struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
tun 483 drivers/net/tun.c unsigned long delay = tun->ageing_time;
tun 488 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
tun 490 drivers/net/tun.c spin_lock(&tun->lock);
tun 495 drivers/net/tun.c hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
tun 500 drivers/net/tun.c tun_flow_delete(tun, e);
tun 510 drivers/net/tun.c mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
tun 511 drivers/net/tun.c spin_unlock(&tun->lock);
tun 514 drivers/net/tun.c static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
tun 519 drivers/net/tun.c unsigned long delay = tun->ageing_time;
tun 522 drivers/net/tun.c head = &tun->flows[tun_hashfn(rxhash)];
tun 535 drivers/net/tun.c spin_lock_bh(&tun->lock);
tun 537 drivers/net/tun.c tun->flow_count < MAX_TAP_FLOWS)
tun 538 drivers/net/tun.c tun_flow_create(tun, head, rxhash, queue_index);
tun 540 drivers/net/tun.c if (!timer_pending(&tun->flow_gc_timer))
tun 541 drivers/net/tun.c mod_timer(&tun->flow_gc_timer,
tun 543 drivers/net/tun.c spin_unlock_bh(&tun->lock);
tun 565 drivers/net/tun.c static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
tun 571 drivers/net/tun.c numqueues = READ_ONCE(tun->numqueues);
tun 574 drivers/net/tun.c e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
tun 586 drivers/net/tun.c static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
tun 592 drivers/net/tun.c numqueues = READ_ONCE(tun->numqueues);
tun 596 drivers/net/tun.c prog = rcu_dereference(tun->steering_prog);
tun 606 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 610 drivers/net/tun.c if (rcu_dereference(tun->steering_prog))
tun 611 drivers/net/tun.c ret = tun_ebpf_select_queue(tun, skb);
tun 613 drivers/net/tun.c ret = tun_automq_select_queue(tun, skb);
tun 619 drivers/net/tun.c static inline bool tun_not_capable(struct tun_struct *tun)
tun 622 drivers/net/tun.c struct net *net = dev_net(tun->dev);
tun 624 drivers/net/tun.c return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
tun 625 drivers/net/tun.c (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
tun 629 drivers/net/tun.c static void tun_set_real_num_queues(struct tun_struct *tun)
tun 631 drivers/net/tun.c netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
tun 632 drivers/net/tun.c netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
tun 635 drivers/net/tun.c static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
tun 637 drivers/net/tun.c tfile->detached = tun;
tun 638 drivers/net/tun.c list_add_tail(&tfile->next, &tun->disabled);
tun 639 drivers/net/tun.c ++tun->numdisabled;
tun 644 drivers/net/tun.c struct tun_struct *tun = tfile->detached;
tun 648 drivers/net/tun.c --tun->numdisabled;
tun 649 drivers/net/tun.c return tun;
tun 680 drivers/net/tun.c struct tun_struct *tun;
tun 682 drivers/net/tun.c tun = rtnl_dereference(tfile->tun);
tun 684 drivers/net/tun.c if (tun && clean) {
tun 689 drivers/net/tun.c if (tun && !tfile->detached) {
tun 691 drivers/net/tun.c BUG_ON(index >= tun->numqueues);
tun 693 drivers/net/tun.c rcu_assign_pointer(tun->tfiles[index],
tun 694 drivers/net/tun.c tun->tfiles[tun->numqueues - 1]);
tun 695 drivers/net/tun.c ntfile = rtnl_dereference(tun->tfiles[index]);
tun 697 drivers/net/tun.c rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
tun 700 drivers/net/tun.c --tun->numqueues;
tun 702 drivers/net/tun.c RCU_INIT_POINTER(tfile->tun, NULL);
tun 705 drivers/net/tun.c tun_disable_queue(tun, tfile);
tun 708 drivers/net/tun.c tun_flow_delete_by_queue(tun, tun->numqueues + 1);
tun 711 drivers/net/tun.c tun_set_real_num_queues(tun);
tun 713 drivers/net/tun.c tun = tun_enable_queue(tfile);
tun 718 drivers/net/tun.c if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
tun 719 drivers/net/tun.c netif_carrier_off(tun->dev);
tun 721 drivers/net/tun.c if (!(tun->flags & IFF_PERSIST) &&
tun 722 drivers/net/tun.c tun->dev->reg_state == NETREG_REGISTERED)
tun 723 drivers/net/tun.c unregister_netdevice(tun->dev);
tun 725 drivers/net/tun.c if (tun)
tun 734 drivers/net/tun.c struct tun_struct *tun;
tun 738 drivers/net/tun.c tun = rtnl_dereference(tfile->tun);
tun 739 drivers/net/tun.c dev = tun ? tun->dev : NULL;
tun 748 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 750 drivers/net/tun.c int i, n = tun->numqueues;
tun 753 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 758 drivers/net/tun.c RCU_INIT_POINTER(tfile->tun, NULL);
tun 759 drivers/net/tun.c --tun->numqueues;
tun 761 drivers/net/tun.c list_for_each_entry(tfile, &tun->disabled, next) {
tun 764 drivers/net/tun.c RCU_INIT_POINTER(tfile->tun, NULL);
tun 766 drivers/net/tun.c BUG_ON(tun->numqueues != 0);
tun 770 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 777 drivers/net/tun.c list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun 783 drivers/net/tun.c BUG_ON(tun->numdisabled != 0);
tun 785 drivers/net/tun.c if (tun->flags & IFF_PERSIST)
tun 789 drivers/net/tun.c static int tun_attach(struct tun_struct *tun, struct file *file,
tun 794 drivers/net/tun.c struct net_device *dev = tun->dev;
tun 797 drivers/net/tun.c err = security_tun_dev_attach(tfile->socket.sk, tun->security);
tun 802 drivers/net/tun.c if (rtnl_dereference(tfile->tun) && !tfile->detached)
tun 806 drivers/net/tun.c if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
tun 811 drivers/net/tun.c tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
tun 817 drivers/net/tun.c if (!skip_filter && (tun->filter_attached == true)) {
tun 819 drivers/net/tun.c err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
tun 832 drivers/net/tun.c tfile->queue_index = tun->numqueues;
tun 844 drivers/net/tun.c tun->dev, tfile->queue_index);
tun 860 drivers/net/tun.c tun_napi_init(tun, tfile, napi, napi_frags);
tun 863 drivers/net/tun.c if (rtnl_dereference(tun->xdp_prog))
tun 875 drivers/net/tun.c rcu_assign_pointer(tfile->tun, tun);
tun 876 drivers/net/tun.c rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun 877 drivers/net/tun.c tun->numqueues++;
tun 878 drivers/net/tun.c tun_set_real_num_queues(tun);
tun 885 drivers/net/tun.c struct tun_struct *tun;
tun 888 drivers/net/tun.c tun = rcu_dereference(tfile->tun);
tun 889 drivers/net/tun.c if (tun)
tun 890 drivers/net/tun.c dev_hold(tun->dev);
tun 893 drivers/net/tun.c return tun;
tun 896 drivers/net/tun.c static void tun_put(struct tun_struct *tun)
tun 898 drivers/net/tun.c dev_put(tun->dev);
tun 1031 drivers/net/tun.c static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
tun 1034 drivers/net/tun.c if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
tun 1042 drivers/net/tun.c e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
tun 1049 drivers/net/tun.c static unsigned int run_ebpf_filter(struct tun_struct *tun,
tun 1053 drivers/net/tun.c struct tun_prog *prog = rcu_dereference(tun->filter_prog);
tun 1064 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1070 drivers/net/tun.c tfile = rcu_dereference(tun->tfiles[txq]);
tun 1076 drivers/net/tun.c if (!rcu_dereference(tun->steering_prog))
tun 1077 drivers/net/tun.c tun_automq_xmit(tun, skb);
tun 1079 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
tun 1086 drivers/net/tun.c if (!check_filter(&tun->txflt, skb))
tun 1093 drivers/net/tun.c len = run_ebpf_filter(tun, skb, len);
tun 1121 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->tx_dropped);
tun 1140 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1142 drivers/net/tun.c return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
tun 1147 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1152 drivers/net/tun.c tun->align = new_hr;
tun 1159 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1167 drivers/net/tun.c p = per_cpu_ptr(tun->pcpu_stats, i);
tun 1194 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1199 drivers/net/tun.c old_prog = rtnl_dereference(tun->xdp_prog);
tun 1200 drivers/net/tun.c rcu_assign_pointer(tun->xdp_prog, prog);
tun 1204 drivers/net/tun.c for (i = 0; i < tun->numqueues; i++) {
tun 1205 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 1211 drivers/net/tun.c list_for_each_entry(tfile, &tun->disabled, next) {
tun 1223 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1226 drivers/net/tun.c xdp_prog = rtnl_dereference(tun->xdp_prog);
tun 1249 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1251 drivers/net/tun.c if (!tun->numqueues)
tun 1284 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1297 drivers/net/tun.c numqueues = READ_ONCE(tun->numqueues);
tun 1303 drivers/net/tun.c tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
tun 1317 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->tx_dropped);
tun 1359 drivers/net/tun.c static void tun_flow_init(struct tun_struct *tun)
tun 1364 drivers/net/tun.c INIT_HLIST_HEAD(&tun->flows[i]);
tun 1366 drivers/net/tun.c tun->ageing_time = TUN_FLOW_EXPIRE;
tun 1367 drivers/net/tun.c timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
tun 1368 drivers/net/tun.c mod_timer(&tun->flow_gc_timer,
tun 1369 drivers/net/tun.c round_jiffies_up(jiffies + tun->ageing_time));
tun 1372 drivers/net/tun.c static void tun_flow_uninit(struct tun_struct *tun)
tun 1374 drivers/net/tun.c del_timer_sync(&tun->flow_gc_timer);
tun 1375 drivers/net/tun.c tun_flow_flush(tun);
tun 1384 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 1386 drivers/net/tun.c switch (tun->flags & TUN_TYPE_MASK) {
tun 1416 drivers/net/tun.c static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
tun 1420 drivers/net/tun.c return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
tun 1429 drivers/net/tun.c struct tun_struct *tun = tun_get(tfile);
tun 1433 drivers/net/tun.c if (!tun)
tun 1438 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
tun 1450 drivers/net/tun.c if (tun_sock_writeable(tun, tfile) ||
tun 1452 drivers/net/tun.c tun_sock_writeable(tun, tfile)))
tun 1455 drivers/net/tun.c if (tun->dev->reg_state != NETREG_REGISTERED)
tun 1458 drivers/net/tun.c tun_put(tun);
tun 1542 drivers/net/tun.c static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
tun 1547 drivers/net/tun.c u32 rx_batched = tun->rx_batched;
tun 1582 drivers/net/tun.c static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
tun 1585 drivers/net/tun.c if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
tun 1623 drivers/net/tun.c static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
tun 1630 drivers/net/tun.c err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
tun 1635 drivers/net/tun.c err = tun_xdp_tx(tun->dev, xdp);
tun 1645 drivers/net/tun.c trace_xdp_exception(tun->dev, xdp_prog, act);
tun 1648 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_dropped);
tun 1655 drivers/net/tun.c static struct sk_buff *tun_build_skb(struct tun_struct *tun,
tun 1670 drivers/net/tun.c xdp_prog = rcu_dereference(tun->xdp_prog);
tun 1701 drivers/net/tun.c xdp_prog = rcu_dereference(tun->xdp_prog);
tun 1717 drivers/net/tun.c err = tun_xdp_act(tun, xdp_prog, &xdp, act);
tun 1744 drivers/net/tun.c static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
tun 1751 drivers/net/tun.c size_t len = total_len, align = tun->align, linear;
tun 1762 drivers/net/tun.c if (!(tun->flags & IFF_NO_PI)) {
tun 1771 drivers/net/tun.c if (tun->flags & IFF_VNET_HDR) {
tun 1772 drivers/net/tun.c int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
tun 1782 drivers/net/tun.c tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
tun 1783 drivers/net/tun.c gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
tun 1785 drivers/net/tun.c if (tun16_to_cpu(tun, gso.hdr_len) > len)
tun 1790 drivers/net/tun.c if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
tun 1793 drivers/net/tun.c (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
tun 1806 drivers/net/tun.c copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
tun 1815 drivers/net/tun.c if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
tun 1820 drivers/net/tun.c skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
tun 1822 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_dropped);
tun 1830 drivers/net/tun.c if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
tun 1833 drivers/net/tun.c linear = tun16_to_cpu(tun, gso.hdr_len);
tun 1851 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_dropped);
tun 1865 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_dropped);
tun 1876 drivers/net/tun.c if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
tun 1877 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
tun 1887 drivers/net/tun.c switch (tun->flags & TUN_TYPE_MASK) {
tun 1889 drivers/net/tun.c if (tun->flags & IFF_NO_PI) {
tun 1900 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_dropped);
tun 1908 drivers/net/tun.c skb->dev = tun->dev;
tun 1915 drivers/net/tun.c skb->protocol = eth_type_trans(skb, tun->dev);
tun 1939 drivers/net/tun.c xdp_prog = rcu_dereference(tun->xdp_prog);
tun 1960 drivers/net/tun.c if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
tun 1965 drivers/net/tun.c if (unlikely(!(tun->dev->flags & IFF_UP))) {
tun 1976 drivers/net/tun.c headlen = eth_get_headlen(tun->dev, skb->data,
tun 1980 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_dropped);
tun 2006 drivers/net/tun.c tun_rx_batched(tun, tfile, skb, more);
tun 2012 drivers/net/tun.c stats = get_cpu_ptr(tun->pcpu_stats);
tun 2020 drivers/net/tun.c tun_flow_update(tun, rxhash, tfile);
tun 2029 drivers/net/tun.c struct tun_struct *tun = tun_get(tfile);
tun 2032 drivers/net/tun.c if (!tun)
tun 2035 drivers/net/tun.c result = tun_get_user(tun, tfile, NULL, from,
tun 2038 drivers/net/tun.c tun_put(tun);
tun 2042 drivers/net/tun.c static ssize_t tun_put_user_xdp(struct tun_struct *tun,
tun 2052 drivers/net/tun.c if (tun->flags & IFF_VNET_HDR) {
tun 2055 drivers/net/tun.c vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
tun 2066 drivers/net/tun.c stats = get_cpu_ptr(tun->pcpu_stats);
tun 2071 drivers/net/tun.c put_cpu_ptr(tun->pcpu_stats);
tun 2077 drivers/net/tun.c static ssize_t tun_put_user(struct tun_struct *tun,
tun 2092 drivers/net/tun.c if (tun->flags & IFF_VNET_HDR)
tun 2093 drivers/net/tun.c vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
tun 2097 drivers/net/tun.c if (!(tun->flags & IFF_NO_PI)) {
tun 2118 drivers/net/tun.c tun_is_little_endian(tun), true,
tun 2123 drivers/net/tun.c sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun 2124 drivers/net/tun.c tun16_to_cpu(tun, gso.hdr_len));
tun 2128 drivers/net/tun.c min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
tun 2161 drivers/net/tun.c stats = get_cpu_ptr(tun->pcpu_stats);
tun 2166 drivers/net/tun.c put_cpu_ptr(tun->pcpu_stats);
tun 2212 drivers/net/tun.c static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun 2219 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_do_read\n");
tun 2236 drivers/net/tun.c ret = tun_put_user_xdp(tun, tfile, xdpf, to);
tun 2241 drivers/net/tun.c ret = tun_put_user(tun, tfile, skb, to);
tun 2255 drivers/net/tun.c struct tun_struct *tun = tun_get(tfile);
tun 2258 drivers/net/tun.c if (!tun)
tun 2260 drivers/net/tun.c ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
tun 2264 drivers/net/tun.c tun_put(tun);
tun 2276 drivers/net/tun.c static int __tun_set_ebpf(struct tun_struct *tun,
tun 2289 drivers/net/tun.c spin_lock_bh(&tun->lock);
tun 2291 drivers/net/tun.c lockdep_is_held(&tun->lock));
tun 2293 drivers/net/tun.c spin_unlock_bh(&tun->lock);
tun 2303 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 2305 drivers/net/tun.c BUG_ON(!(list_empty(&tun->disabled)));
tun 2306 drivers/net/tun.c free_percpu(tun->pcpu_stats);
tun 2307 drivers/net/tun.c tun_flow_uninit(tun);
tun 2308 drivers/net/tun.c security_tun_dev_free_security(tun->security);
tun 2309 drivers/net/tun.c __tun_set_ebpf(tun, &tun->steering_prog, NULL);
tun 2310 drivers/net/tun.c __tun_set_ebpf(tun, &tun->filter_prog, NULL);
tun 2315 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 2317 drivers/net/tun.c tun->owner = INVALID_UID;
tun 2318 drivers/net/tun.c tun->group = INVALID_GID;
tun 2319 drivers/net/tun.c tun_default_link_ksettings(dev, &tun->link_ksettings);
tun 2358 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 2360 drivers/net/tun.c if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
tun 2362 drivers/net/tun.c if (uid_valid(tun->owner) &&
tun 2364 drivers/net/tun.c from_kuid_munged(current_user_ns(), tun->owner)))
tun 2366 drivers/net/tun.c if (gid_valid(tun->group) &&
tun 2368 drivers/net/tun.c from_kgid_munged(current_user_ns(), tun->group)))
tun 2370 drivers/net/tun.c if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
tun 2372 drivers/net/tun.c if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
tun 2374 drivers/net/tun.c if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
tun 2377 drivers/net/tun.c !!(tun->flags & IFF_MULTI_QUEUE)))
tun 2379 drivers/net/tun.c if (tun->flags & IFF_MULTI_QUEUE) {
tun 2380 drivers/net/tun.c if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
tun 2383 drivers/net/tun.c tun->numdisabled))
tun 2428 drivers/net/tun.c static int tun_xdp_one(struct tun_struct *tun,
tun 2445 drivers/net/tun.c xdp_prog = rcu_dereference(tun->xdp_prog);
tun 2455 drivers/net/tun.c err = tun_xdp_act(tun, xdp_prog, xdp, act);
tun 2492 drivers/net/tun.c if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
tun 2493 drivers/net/tun.c this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
tun 2499 drivers/net/tun.c skb->protocol = eth_type_trans(skb, tun->dev);
tun 2510 drivers/net/tun.c if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
tun 2519 drivers/net/tun.c stats = this_cpu_ptr(tun->pcpu_stats);
tun 2526 drivers/net/tun.c tun_flow_update(tun, rxhash, tfile);
tun 2536 drivers/net/tun.c struct tun_struct *tun = tun_get(tfile);
tun 2540 drivers/net/tun.c if (!tun)
tun 2555 drivers/net/tun.c tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
tun 2570 drivers/net/tun.c ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
tun 2574 drivers/net/tun.c tun_put(tun);
tun 2582 drivers/net/tun.c struct tun_struct *tun = tun_get(tfile);
tun 2586 drivers/net/tun.c if (!tun) {
tun 2600 drivers/net/tun.c ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
tun 2606 drivers/net/tun.c tun_put(tun);
tun 2610 drivers/net/tun.c tun_put(tun);
tun 2633 drivers/net/tun.c struct tun_struct *tun;
tun 2636 drivers/net/tun.c tun = tun_get(tfile);
tun 2637 drivers/net/tun.c if (!tun)
tun 2641 drivers/net/tun.c tun_put(tun);
tun 2659 drivers/net/tun.c static int tun_flags(struct tun_struct *tun)
tun 2661 drivers/net/tun.c return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
tun 2667 drivers/net/tun.c struct tun_struct *tun = netdev_priv(to_net_dev(dev));
tun 2668 drivers/net/tun.c return sprintf(buf, "0x%x\n", tun_flags(tun));
tun 2674 drivers/net/tun.c struct tun_struct *tun = netdev_priv(to_net_dev(dev));
tun 2675 drivers/net/tun.c return uid_valid(tun->owner)?
tun 2677 drivers/net/tun.c from_kuid_munged(current_user_ns(), tun->owner)):
tun 2684 drivers/net/tun.c struct tun_struct *tun = netdev_priv(to_net_dev(dev));
tun 2685 drivers/net/tun.c return gid_valid(tun->group) ?
tun 2687 drivers/net/tun.c from_kgid_munged(current_user_ns(), tun->group)):
tun 2708 drivers/net/tun.c struct tun_struct *tun;
tun 2730 drivers/net/tun.c tun = netdev_priv(dev);
tun 2732 drivers/net/tun.c tun = netdev_priv(dev);
tun 2737 drivers/net/tun.c !!(tun->flags & IFF_MULTI_QUEUE))
tun 2740 drivers/net/tun.c if (tun_not_capable(tun))
tun 2742 drivers/net/tun.c err = security_tun_dev_open(tun->security);
tun 2746 drivers/net/tun.c err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
tun 2752 drivers/net/tun.c if (tun->flags & IFF_MULTI_QUEUE &&
tun 2753 drivers/net/tun.c (tun->numqueues + tun->numdisabled > 1)) {
tun 2761 drivers/net/tun.c tun->flags = (tun->flags & ~TUN_FEATURES) |
tun 2807 drivers/net/tun.c tun = netdev_priv(dev);
tun 2808 drivers/net/tun.c tun->dev = dev;
tun 2809 drivers/net/tun.c tun->flags = flags;
tun 2810 drivers/net/tun.c tun->txflt.count = 0;
tun 2811 drivers/net/tun.c tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
tun 2813 drivers/net/tun.c tun->align = NET_SKB_PAD;
tun 2814 drivers/net/tun.c tun->filter_attached = false;
tun 2815 drivers/net/tun.c tun->sndbuf = tfile->socket.sk->sk_sndbuf;
tun 2816 drivers/net/tun.c tun->rx_batched = 0;
tun 2817 drivers/net/tun.c RCU_INIT_POINTER(tun->steering_prog, NULL);
tun 2819 drivers/net/tun.c tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
tun 2820 drivers/net/tun.c if (!tun->pcpu_stats) {
tun 2825 drivers/net/tun.c spin_lock_init(&tun->lock);
tun 2827 drivers/net/tun.c err = security_tun_dev_alloc_security(&tun->security);
tun 2832 drivers/net/tun.c tun_flow_init(tun);
tun 2842 drivers/net/tun.c tun->flags = (tun->flags & ~TUN_FEATURES) |
tun 2845 drivers/net/tun.c INIT_LIST_HEAD(&tun->disabled);
tun 2846 drivers/net/tun.c err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
tun 2851 drivers/net/tun.c err = register_netdevice(tun->dev);
tun 2857 drivers/net/tun.c rcu_assign_pointer(tfile->tun, tun);
tun 2860 drivers/net/tun.c netif_carrier_on(tun->dev);
tun 2862 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_set_iff\n");
tun 2867 drivers/net/tun.c if (netif_running(tun->dev))
tun 2868 drivers/net/tun.c netif_tx_wake_all_queues(tun->dev);
tun 2870 drivers/net/tun.c strcpy(ifr->ifr_name, tun->dev->name);
tun 2879 drivers/net/tun.c tun_flow_uninit(tun);
tun 2880 drivers/net/tun.c security_tun_dev_free_security(tun->security);
tun 2882 drivers/net/tun.c free_percpu(tun->pcpu_stats);
tun 2888 drivers/net/tun.c static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
tun 2890 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_get_iff\n");
tun 2892 drivers/net/tun.c strcpy(ifr->ifr_name, tun->dev->name);
tun 2894 drivers/net/tun.c ifr->ifr_flags = tun_flags(tun);
tun 2900 drivers/net/tun.c static int set_offload(struct tun_struct *tun, unsigned long arg)
tun 2928 drivers/net/tun.c tun->set_features = features;
tun 2929 drivers/net/tun.c tun->dev->wanted_features &= ~TUN_USER_FEATURES;
tun 2930 drivers/net/tun.c tun->dev->wanted_features |= features;
tun 2931 drivers/net/tun.c netdev_update_features(tun->dev);
tun 2936 drivers/net/tun.c static void tun_detach_filter(struct tun_struct *tun, int n)
tun 2942 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 2948 drivers/net/tun.c tun->filter_attached = false;
tun 2951 drivers/net/tun.c static int tun_attach_filter(struct tun_struct *tun)
tun 2956 drivers/net/tun.c for (i = 0; i < tun->numqueues; i++) {
tun 2957 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 2959 drivers/net/tun.c ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
tun 2962 drivers/net/tun.c tun_detach_filter(tun, i);
tun 2967 drivers/net/tun.c tun->filter_attached = true;
tun 2971 drivers/net/tun.c static void tun_set_sndbuf(struct tun_struct *tun)
tun 2976 drivers/net/tun.c for (i = 0; i < tun->numqueues; i++) {
tun 2977 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 2978 drivers/net/tun.c tfile->socket.sk->sk_sndbuf = tun->sndbuf;
tun 2985 drivers/net/tun.c struct tun_struct *tun;
tun 2991 drivers/net/tun.c tun = tfile->detached;
tun 2992 drivers/net/tun.c if (!tun) {
tun 2996 drivers/net/tun.c ret = security_tun_dev_attach_queue(tun->security);
tun 2999 drivers/net/tun.c ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
tun 3000 drivers/net/tun.c tun->flags & IFF_NAPI_FRAGS, true);
tun 3002 drivers/net/tun.c tun = rtnl_dereference(tfile->tun);
tun 3003 drivers/net/tun.c if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
tun 3011 drivers/net/tun.c netdev_state_change(tun->dev);
tun 3018 drivers/net/tun.c static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
tun 3035 drivers/net/tun.c return __tun_set_ebpf(tun, prog_p, prog);
tun 3043 drivers/net/tun.c struct tun_struct *tun;
tun 3080 drivers/net/tun.c tun = tun_get(tfile);
tun 3083 drivers/net/tun.c if (tun)
tun 3099 drivers/net/tun.c if (tun)
tun 3112 drivers/net/tun.c if (!tun)
tun 3115 drivers/net/tun.c tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
tun 3117 drivers/net/tun.c net = dev_net(tun->dev);
tun 3121 drivers/net/tun.c tun_get_iff(tun, &ifr);
tun 3136 drivers/net/tun.c tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
tun 3144 drivers/net/tun.c if (arg && !(tun->flags & IFF_PERSIST)) {
tun 3145 drivers/net/tun.c tun->flags |= IFF_PERSIST;
tun 3149 drivers/net/tun.c if (!arg && (tun->flags & IFF_PERSIST)) {
tun 3150 drivers/net/tun.c tun->flags &= ~IFF_PERSIST;
tun 3155 drivers/net/tun.c tun_debug(KERN_INFO, tun, "persist %s\n",
tun 3166 drivers/net/tun.c tun->owner = owner;
tun 3168 drivers/net/tun.c tun_debug(KERN_INFO, tun, "owner set to %u\n",
tun 3169 drivers/net/tun.c from_kuid(&init_user_ns, tun->owner));
tun 3179 drivers/net/tun.c tun->group = group;
tun 3181 drivers/net/tun.c tun_debug(KERN_INFO, tun, "group set to %u\n",
tun 3182 drivers/net/tun.c from_kgid(&init_user_ns, tun->group));
tun 3187 drivers/net/tun.c if (tun->dev->flags & IFF_UP) {
tun 3188 drivers/net/tun.c tun_debug(KERN_INFO, tun,
tun 3192 drivers/net/tun.c tun->dev->type = (int) arg;
tun 3193 drivers/net/tun.c tun_debug(KERN_INFO, tun, "linktype set to %d\n",
tun 3194 drivers/net/tun.c tun->dev->type);
tun 3201 drivers/net/tun.c tun->debug = arg;
tun 3205 drivers/net/tun.c ret = set_offload(tun, arg);
tun 3211 drivers/net/tun.c if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
tun 3213 drivers/net/tun.c ret = update_filter(&tun->txflt, (void __user *)arg);
tun 3218 drivers/net/tun.c memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
tun 3219 drivers/net/tun.c ifr.ifr_hwaddr.sa_family = tun->dev->type;
tun 3226 drivers/net/tun.c tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
tun 3229 drivers/net/tun.c ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
tun 3248 drivers/net/tun.c tun->sndbuf = sndbuf;
tun 3249 drivers/net/tun.c tun_set_sndbuf(tun);
tun 3253 drivers/net/tun.c vnet_hdr_sz = tun->vnet_hdr_sz;
tun 3268 drivers/net/tun.c tun->vnet_hdr_sz = vnet_hdr_sz;
tun 3272 drivers/net/tun.c le = !!(tun->flags & TUN_VNET_LE);
tun 3283 drivers/net/tun.c tun->flags |= TUN_VNET_LE;
tun 3285 drivers/net/tun.c tun->flags &= ~TUN_VNET_LE;
tun 3289 drivers/net/tun.c ret = tun_get_vnet_be(tun, argp);
tun 3293 drivers/net/tun.c ret = tun_set_vnet_be(tun, argp);
tun 3299 drivers/net/tun.c if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
tun 3302 drivers/net/tun.c if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
tun 3305 drivers/net/tun.c ret = tun_attach_filter(tun);
tun 3311 drivers/net/tun.c if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
tun 3314 drivers/net/tun.c tun_detach_filter(tun, tun->numqueues);
tun 3319 drivers/net/tun.c if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
tun 3322 drivers/net/tun.c if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
tun 3328 drivers/net/tun.c ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
tun 3332 drivers/net/tun.c ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
tun 3340 drivers/net/tun.c ret = tun_net_change_carrier(tun->dev, (bool)carrier);
tun 3356 drivers/net/tun.c netdev_state_change(tun->dev);
tun 3360 drivers/net/tun.c if (tun)
tun 3361 drivers/net/tun.c tun_put(tun);
tun 3435 drivers/net/tun.c RCU_INIT_POINTER(tfile->tun, NULL);
tun 3470 drivers/net/tun.c struct tun_struct *tun;
tun 3476 drivers/net/tun.c tun = tun_get(tfile);
tun 3477 drivers/net/tun.c if (tun)
tun 3478 drivers/net/tun.c tun_get_iff(tun, &ifr);
tun 3481 drivers/net/tun.c if (tun)
tun 3482 drivers/net/tun.c tun_put(tun);
tun 3530 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3532 drivers/net/tun.c memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
tun 3539 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3541 drivers/net/tun.c memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
tun 3547 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3552 drivers/net/tun.c switch (tun->flags & TUN_TYPE_MASK) {
tun 3565 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3566 drivers/net/tun.c return tun->debug;
tun 3575 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3576 drivers/net/tun.c tun->debug = value;
tun 3583 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3585 drivers/net/tun.c ec->rx_max_coalesced_frames = tun->rx_batched;
tun 3593 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3596 drivers/net/tun.c tun->rx_batched = NAPI_POLL_WEIGHT;
tun 3598 drivers/net/tun.c tun->rx_batched = ec->rx_max_coalesced_frames;
tun 3615 drivers/net/tun.c static int tun_queue_resize(struct tun_struct *tun)
tun 3617 drivers/net/tun.c struct net_device *dev = tun->dev;
tun 3620 drivers/net/tun.c int n = tun->numqueues + tun->numdisabled;
tun 3627 drivers/net/tun.c for (i = 0; i < tun->numqueues; i++) {
tun 3628 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 3631 drivers/net/tun.c list_for_each_entry(tfile, &tun->disabled, next)
tun 3646 drivers/net/tun.c struct tun_struct *tun = netdev_priv(dev);
tun 3654 drivers/net/tun.c if (tun_queue_resize(tun))
tun 3658 drivers/net/tun.c for (i = 0; i < tun->numqueues; i++) {
tun 3661 drivers/net/tun.c tfile = rtnl_dereference(tun->tfiles[i]);
tun 67 include/net/tc_act/tc_tunnel_key.h struct ip_tunnel_info *tun = tcf_tunnel_info(a);
tun 69 include/net/tc_act/tc_tunnel_key.h if (tun) {
tun 70 include/net/tc_act/tc_tunnel_key.h size_t tun_size = sizeof(*tun) + tun->options_len;
tun 71 include/net/tc_act/tc_tunnel_key.h struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size,
tun 1037 net/openvswitch/actions.c struct ovs_tunnel_info *tun = nla_data(a);
tun 1040 net/openvswitch/actions.c dst_hold((struct dst_entry *)tun->tun_dst);
tun 1041 net/openvswitch/actions.c skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
tun 21 net/qrtr/tun.c struct qrtr_tun *tun = container_of(ep, struct qrtr_tun, ep);
tun 23 net/qrtr/tun.c skb_queue_tail(&tun->queue, skb);
tun 26 net/qrtr/tun.c wake_up_interruptible(&tun->readq);
tun 33 net/qrtr/tun.c struct qrtr_tun *tun;
tun 35 net/qrtr/tun.c tun = kzalloc(sizeof(*tun), GFP_KERNEL);
tun 36 net/qrtr/tun.c if (!tun)
tun 39 net/qrtr/tun.c skb_queue_head_init(&tun->queue);
tun 40 net/qrtr/tun.c init_waitqueue_head(&tun->readq);
tun 42 net/qrtr/tun.c tun->ep.xmit = qrtr_tun_send;
tun 44 net/qrtr/tun.c filp->private_data = tun;
tun 46 net/qrtr/tun.c return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
tun 52 net/qrtr/tun.c struct qrtr_tun *tun = filp->private_data;
tun 56 net/qrtr/tun.c while (!(skb = skb_dequeue(&tun->queue))) {
tun 61 net/qrtr/tun.c if (wait_event_interruptible(tun->readq,
tun 62 net/qrtr/tun.c !skb_queue_empty(&tun->queue)))
tun 78 net/qrtr/tun.c struct qrtr_tun *tun = filp->private_data;
tun 92 net/qrtr/tun.c ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
tun 100 net/qrtr/tun.c struct qrtr_tun *tun = filp->private_data;
tun 103 net/qrtr/tun.c poll_wait(filp, &tun->readq, wait);
tun 105 net/qrtr/tun.c if (!skb_queue_empty(&tun->queue))
tun 113 net/qrtr/tun.c struct qrtr_tun *tun = filp->private_data;
tun 116 net/qrtr/tun.c qrtr_endpoint_unregister(&tun->ep);
tun 119 net/qrtr/tun.c while (!skb_queue_empty(&tun->queue)) {
tun 120 net/qrtr/tun.c skb = skb_dequeue(&tun->queue);
tun 124 net/qrtr/tun.c kfree(tun);