nh 44 arch/arm/vfp/vfp.h static inline void add128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml) nh 50 arch/arm/vfp/vfp.h : "=r" (nl), "=r" (nh) nh 51 arch/arm/vfp/vfp.h : "0" (nl), "1" (nh), "r" (ml), "r" (mh) nh 53 arch/arm/vfp/vfp.h *resh = nh; nh 57 arch/arm/vfp/vfp.h static inline void sub128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml) nh 63 arch/arm/vfp/vfp.h : "=r" (nl), "=r" (nh) nh 64 arch/arm/vfp/vfp.h : "0" (nl), "1" (nh), "r" (ml), "r" (mh) nh 66 arch/arm/vfp/vfp.h *resh = nh; nh 72 arch/arm/vfp/vfp.h u32 nh, nl, mh, ml; nh 79 arch/arm/vfp/vfp.h nh = n >> 32; nh 80 arch/arm/vfp/vfp.h rma = (u64)nh * ml; nh 86 arch/arm/vfp/vfp.h rh = (u64)nh * mh; nh 110 arch/arm/vfp/vfp.h static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m) nh 114 arch/arm/vfp/vfp.h if (nh >= m) nh 117 arch/arm/vfp/vfp.h if (mh << 32 <= nh) { nh 120 arch/arm/vfp/vfp.h z = nh; nh 125 arch/arm/vfp/vfp.h sub128(&remh, &reml, nh, nl, termh, terml); nh 164 arch/sh/kernel/cpu/sh2a/fpu.c unsigned long long mh, ml, nh, nl; nh 176 arch/sh/kernel/cpu/sh2a/fpu.c nh = mh; nh 179 arch/sh/kernel/cpu/sh2a/fpu.c if (nh) { nh 180 arch/sh/kernel/cpu/sh2a/fpu.c while (nh) { nh >>= 1; w++;} nh 4626 drivers/block/drbd/drbd_nl.c struct drbd_notification_header nh = { nh 4630 drivers/block/drbd/drbd_nl.c return drbd_notification_header_to_skb(msg, &nh, true); nh 435 drivers/extcon/extcon.c raw_notifier_call_chain(&edev->nh[index], state, edev); nh 913 drivers/extcon/extcon.c ret = raw_notifier_chain_register(&edev->nh[idx], nb); nh 942 drivers/extcon/extcon.c ret = raw_notifier_chain_unregister(&edev->nh[idx], nb); nh 1240 drivers/extcon/extcon.c edev->nh = devm_kcalloc(&edev->dev, edev->max_supported, nh 1241 drivers/extcon/extcon.c sizeof(*edev->nh), GFP_KERNEL); nh 1242 drivers/extcon/extcon.c if (!edev->nh) { nh 1248 drivers/extcon/extcon.c RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]); nh 50 drivers/extcon/extcon.h struct raw_notifier_head *nh; nh 80 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX]; nh 218 drivers/net/ethernet/mellanox/mlx5/core/eq.c atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); nh 219 drivers/net/ethernet/mellanox/mlx5/core/eq.c atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe); nh 430 drivers/net/ethernet/mellanox/mlx5/core/eq.c ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); nh 941 drivers/net/ethernet/mellanox/mlx5/core/eq.c return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb); nh 949 drivers/net/ethernet/mellanox/mlx5/core/eq.c return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb); nh 59 drivers/net/ethernet/mellanox/mlx5/core/events.c struct atomic_notifier_head nh; nh 334 drivers/net/ethernet/mellanox/mlx5/core/events.c atomic_notifier_call_chain(&events->nh, event, data); nh 345 drivers/net/ethernet/mellanox/mlx5/core/events.c ATOMIC_INIT_NOTIFIER_HEAD(&events->nh); nh 390 drivers/net/ethernet/mellanox/mlx5/core/events.c return atomic_notifier_chain_register(&events->nh, nb); nh 398 drivers/net/ethernet/mellanox/mlx5/core/events.c return atomic_notifier_chain_unregister(&events->nh, nb); nh 404 drivers/net/ethernet/mellanox/mlx5/core/events.c return atomic_notifier_call_chain(&events->nh, event, data); nh 129 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c struct fib_nh *nh = fib_info_nh(fi, 0); nh 130 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c struct net_device *nh_dev = nh->fib_nh_dev; nh 268 
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c if (fi->nh) { nh 16 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c struct atomic_notifier_head nh; nh 64 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c return atomic_notifier_chain_register(&irq->nh, nb); nh 73 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c return atomic_notifier_chain_unregister(&irq->nh, nb); nh 76 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static irqreturn_t mlx5_irq_int_handler(int irq, void *nh) nh 78 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c atomic_notifier_call_chain(nh, 0, NULL); nh 105 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); nh 109 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c &irq->nh); nh 122 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c free_irq(irqn, &irq->nh); nh 258 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c &mlx5_irq_get(dev, i)->nh); nh 330 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c &mlx5_irq_get(dev, i)->nh); nh 910 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c struct mlxsw_sp_nexthop *nh; nh 913 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) nh 914 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c if (mlxsw_sp_nexthop_offload(nh) && nh 915 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c !mlxsw_sp_nexthop_group_has_ipip(nh)) nh 1064 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c struct mlxsw_sp_nexthop *nh, nh 1067 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh); nh 1068 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c unsigned char *ha = mlxsw_sp_nexthop_ha(nh); nh 1074 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, &adj_hash_index); nh 1077 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &entry->counter); nh 1088 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c struct mlxsw_sp_nexthop *nh; nh 1105 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { nh 1106 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c if (!mlxsw_sp_nexthop_offload(nh) || nh 1107 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_group_has_ipip(nh)) nh 1113 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_dpipe_table_adj_entry_fill(mlxsw_sp, nh, entry); nh 1181 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c struct mlxsw_sp_nexthop *nh; nh 1186 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { nh 1187 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c if (!mlxsw_sp_nexthop_offload(nh) || nh 1188 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_group_has_ipip(nh)) nh 1191 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, nh 1194 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); nh 1196 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); nh 1198 drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c adj_index + adj_hash_index, nh); nh 2725 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 2734 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index)) nh 2737 
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->counter_valid = true; nh 2741 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 2743 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->counter_valid) nh 2745 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index); nh 2746 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->counter_valid = false; nh 2750 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, u64 *p_counter) nh 2752 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->counter_valid) nh 2755 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index, nh 2760 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 2762 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh) { nh 2767 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c typeof(*nh), router_list_node); nh 2769 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (list_is_last(&nh->router_list_node, &router->nexthop_list)) nh 2771 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return list_next_entry(nh, router_list_node); nh 2774 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh) nh 2776 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return nh->offloaded; nh 2779 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) nh 2781 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->offloaded) nh 2783 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return nh->neigh_entry->ha; nh 2786 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, nh 2789 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; nh 2793 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->offloaded || !nh_grp->adj_index_valid) nh 2802 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh_iter == nh) nh 2812 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh) nh 2814 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return nh->rif; nh 2817 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) nh 2819 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; nh 2853 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c const struct mlxsw_sp_nexthop *nh; nh 2855 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 2856 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->ifindex == ifindex && nh->nh_weight == weight && nh 2857 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) nh 2916 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c const struct mlxsw_sp_nexthop *nh; nh 2928 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 2929 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed); nh 3033 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3036 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 
&nh->ht_node, mlxsw_sp_nexthop_ht_params); nh 3040 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3042 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node, nh 3093 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3095 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; nh 3102 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->counter_valid) nh 3103 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); nh 3111 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3115 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c for (i = 0; i < nh->num_adj_entries; i++) { nh 3118 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh); nh 3128 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3132 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt]; nh 3133 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); nh 3138 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3142 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c for (i = 0; i < nh->num_adj_entries; i++) { nh 3146 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh); nh 3160 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3165 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3167 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->should_offload) { nh 3168 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->offloaded = 0; nh 3172 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->update || reallocate) { nh 3173 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (nh->type) { nh 3176 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c (mlxsw_sp, adj_index, nh); nh 3180 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c (mlxsw_sp, adj_index, nh); nh 3185 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->update = 0; nh 3186 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->offloaded = 1; nh 3188 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c adj_index += nh->num_adj_entries; nh 3291 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3294 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3296 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->should_offload) nh 3299 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c g = gcd(nh->nh_weight, g); nh 3301 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c g = nh->nh_weight; nh 3305 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3307 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->should_offload) nh 3309 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->norm_nh_weight = nh->nh_weight / g; nh 3310 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c sum_norm_weight += nh->norm_nh_weight; nh 3324 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; nh 3327 
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->should_offload) nh 3329 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c weight += nh->norm_nh_weight; nh 3331 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->num_adj_entries = upper_bound - lower_bound; nh 3341 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3355 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3357 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->should_offload != nh->offloaded) { nh 3359 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->should_offload) nh 3360 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->update = 1; nh 3439 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3440 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->offloaded = 0; nh 3450 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, nh 3454 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->should_offload = 1; nh 3456 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->should_offload = 0; nh 3457 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->update = 1; nh 3465 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3470 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = list_first_entry(&neigh_entry->nexthop_list, nh 3473 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); nh 3475 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh 3476 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->rif->dev); nh 3494 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_for_each_entry(nh, &neigh_entry->nexthop_list, nh 3498 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); nh 3499 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); nh 3518 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3533 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_for_each_entry(nh, &neigh_entry->nexthop_list, nh 3535 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, removing); nh 3536 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); nh 3540 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh, nh 3543 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->rif) nh 3546 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->rif = rif; nh 3547 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_add(&nh->rif_list_node, &rif->nexthop_list); nh 3550 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh) nh 3552 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->rif) nh 3555 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_del(&nh->rif_list_node); nh 3556 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->rif = NULL; nh 3560 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3567 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->nh_grp->gateway || 
nh->neigh_entry) nh 3575 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); nh 3577 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh 3578 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->rif->dev); nh 3599 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->neigh_entry = neigh_entry; nh 3600 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); nh 3605 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead)); nh 3615 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3617 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; nh 3624 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, true); nh 3625 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_del(&nh->neigh_list_node); nh 3626 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->neigh_entry = NULL; nh 3648 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, nh 3653 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!nh->nh_grp->gateway || nh->ipip_entry) nh 3656 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->ipip_entry = ipip_entry; nh 3658 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, removing); nh 3659 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common); nh 3663 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3665 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry; nh 3670 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, true); nh 3671 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->ipip_entry = NULL; nh 3686 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3688 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (nh->type) { nh 3690 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); nh 3691 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_fini(nh); nh 3694 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_fini(nh); nh 3695 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); nh 3701 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, nh 3715 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; nh 3716 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); nh 3721 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; nh 3726 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_init(nh, rif); nh 3727 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); nh 3734 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_fini(nh); nh 3739 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3741 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); nh 3746 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, nh 3753 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->nh_grp = nh_grp; nh 3754 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->key.fib_nh = fib_nh; nh 3756 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->nh_weight = fib_nh->fib_nh_weight; nh 3758 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->nh_weight = 1; nh 3760 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4)); nh 3761 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); nh 3765 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); nh 3766 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); nh 3776 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); nh 3783 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_remove(mlxsw_sp, nh); nh 3788 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 3790 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); nh 3791 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_del(&nh->router_list_node); nh 3792 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); nh 3793 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_remove(mlxsw_sp, nh); nh 3800 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3806 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key); nh 3807 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (WARN_ON_ONCE(!nh)) nh 3812 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); nh 3815 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); nh 3819 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); nh 3825 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3828 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) { nh 3829 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c switch (nh->type) { nh 3841 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c __mlxsw_sp_nexthop_neigh_update(nh, removing); nh 3842 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); nh 3850 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3853 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node) nh 3854 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->rif = new_rif; nh 3861 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, *tmp; nh 3863 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) { nh 3864 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); nh 3865 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); nh 3872 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c const struct fib_nh *nh = fib_info_nh(fi, 0); nh 3874 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return nh->fib_nh_scope == RT_SCOPE_LINK || nh 3875 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); nh 3883 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3899 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3901 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); nh 3914 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3915 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); nh 3926 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 3931 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 3932 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); nh 4013 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; nh 4016 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev && nh 4017 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, nh 4019 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return nh; nh 4041 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; nh 4043 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh->offloaded) nh 4044 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD; nh 4046 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; nh 4060 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; nh 4062 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; nh 4085 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 4087 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); nh 4088 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (nh && nh->offloaded) nh 5050 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, nh 5064 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; nh 5065 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); nh 5070 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; nh 5074 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_init(nh, rif); nh 5076 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); nh 5083 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_rif_fini(nh); nh 5088 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 5090 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); nh 5095 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh, nh 5100 
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->nh_grp = nh_grp; nh 5101 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->nh_weight = rt->fib6_nh->fib_nh_weight; nh 5102 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr)); nh 5103 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); nh 5105 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); nh 5109 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh->ifindex = dev->ifindex; nh 5111 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt); nh 5115 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh) nh 5117 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh); nh 5118 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c list_del(&nh->router_list_node); nh 5119 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); nh 5135 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 5154 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 5155 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); nh 5171 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 5172 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); nh 5182 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_nexthop *nh; nh 5187 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nh = &nh_grp->nexthops[i]; nh 5188 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); nh 6271 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (fen_info->fi->nh) { nh 6281 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (fen6_info->rt->nh) { nh 82 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h struct mlxsw_sp_nexthop *nh); nh 83 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); nh 84 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh); nh 85 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, nh 87 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh); nh 88 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); nh 89 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h #define mlxsw_sp_nexthop_for_each(nh, router) \ nh 90 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \ nh 91 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h nh = mlxsw_sp_nexthop_next(router, nh)) nh 93 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h struct mlxsw_sp_nexthop *nh, u64 *p_counter); nh 95 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h struct mlxsw_sp_nexthop *nh); nh 97 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h struct mlxsw_sp_nexthop *nh); nh 99 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h struct mlxsw_sp_nexthop *nh); nh 2217 
drivers/net/ethernet/rocker/rocker_main.c if (fen_info->fi->nh) { nh 2271 drivers/net/ethernet/rocker/rocker_ofdpa.c const struct fib_nh *nh; nh 2286 drivers/net/ethernet/rocker/rocker_ofdpa.c nh = fib_info_nh(fi, 0); nh 2287 drivers/net/ethernet/rocker/rocker_ofdpa.c nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev); nh 2288 drivers/net/ethernet/rocker/rocker_ofdpa.c has_gw = !!nh->fib_nh_gw4; nh 2292 drivers/net/ethernet/rocker/rocker_ofdpa.c nh->fib_nh_gw4, &index); nh 2737 drivers/net/ethernet/rocker/rocker_ofdpa.c struct fib_nh *nh; nh 2742 drivers/net/ethernet/rocker/rocker_ofdpa.c nh = fib_info_nh(fen_info->fi, 0); nh 2743 drivers/net/ethernet/rocker/rocker_ofdpa.c ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker); nh 2751 drivers/net/ethernet/rocker/rocker_ofdpa.c nh->fib_nh_flags |= RTNH_F_OFFLOAD; nh 2760 drivers/net/ethernet/rocker/rocker_ofdpa.c struct fib_nh *nh; nh 2764 drivers/net/ethernet/rocker/rocker_ofdpa.c nh = fib_info_nh(fen_info->fi, 0); nh 2765 drivers/net/ethernet/rocker/rocker_ofdpa.c ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker); nh 2768 drivers/net/ethernet/rocker/rocker_ofdpa.c nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; nh 2788 drivers/net/ethernet/rocker/rocker_ofdpa.c struct fib_nh *nh; nh 2793 drivers/net/ethernet/rocker/rocker_ofdpa.c nh = fib_info_nh(flow_entry->fi, 0); nh 2794 drivers/net/ethernet/rocker/rocker_ofdpa.c ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker); nh 2797 drivers/net/ethernet/rocker/rocker_ofdpa.c nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; nh 329 drivers/of/unittest.c struct node_hash *nh; nh 337 drivers/of/unittest.c hash_for_each_possible(phandle_ht, nh, node, np->phandle) { nh 338 drivers/of/unittest.c if (nh->np->phandle == np->phandle) { nh 340 drivers/of/unittest.c np->phandle, nh->np, np); nh 346 drivers/of/unittest.c nh = kzalloc(sizeof(*nh), GFP_KERNEL); nh 347 drivers/of/unittest.c if (!nh) nh 350 drivers/of/unittest.c nh->np = np; nh 351 drivers/of/unittest.c hash_add(phandle_ht, &nh->node, np->phandle); nh 358 drivers/of/unittest.c hash_for_each_safe(phandle_ht, i, tmp, nh, node) { nh 359 drivers/of/unittest.c hash_del(&nh->node); nh 360 drivers/of/unittest.c kfree(nh); nh 130 drivers/staging/rtl8192e/rtllib_softmac.c int nh; nh 132 drivers/staging/rtl8192e/rtllib_softmac.c nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM; nh 140 drivers/staging/rtl8192e/rtllib_softmac.c ieee->mgmt_queue_head = nh; nh 141 drivers/staging/rtl8192e/rtllib_softmac.c ieee->mgmt_queue_ring[nh] = skb; nh 140 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c int nh; nh 142 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM; nh 151 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c ieee->mgmt_queue_head = nh; nh 152 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c ieee->mgmt_queue_ring[nh] = skb; nh 30 drivers/usb/typec/bus.c blocking_notifier_call_chain(&port_altmode->nh, state, NULL); nh 74 drivers/usb/typec/bus.c blocking_notifier_call_chain(is_port ? 
&altmode->nh : &partner->nh, nh 26 drivers/usb/typec/bus.h struct blocking_notifier_head nh; nh 251 drivers/usb/typec/class.c ret = blocking_notifier_chain_register(&altmode->nh, nb); nh 266 drivers/usb/typec/class.c blocking_notifier_chain_unregister(&altmode->nh, nb); nh 520 drivers/usb/typec/class.c BLOCKING_INIT_NOTIFIER_HEAD(&alt->nh); nh 93 include/linux/notifier.h extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); nh 144 include/linux/notifier.h extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, nh 146 include/linux/notifier.h extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, nh 148 include/linux/notifier.h extern int raw_notifier_chain_register(struct raw_notifier_head *nh, nh 150 include/linux/notifier.h extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, nh 154 include/linux/notifier.h struct blocking_notifier_head *nh, nh 157 include/linux/notifier.h extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, nh 159 include/linux/notifier.h extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, nh 161 include/linux/notifier.h extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, nh 163 include/linux/notifier.h extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, nh 166 include/linux/notifier.h extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, nh 168 include/linux/notifier.h extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, nh 170 include/linux/notifier.h extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, nh 172 include/linux/notifier.h extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, nh 174 include/linux/notifier.h extern int raw_notifier_call_chain(struct raw_notifier_head *nh, nh 176 include/linux/notifier.h extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, nh 178 include/linux/notifier.h extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, nh 180 include/linux/notifier.h extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, nh 108 include/net/dsa.h struct raw_notifier_head nh; nh 173 include/net/ip6_fib.h struct nexthop *nh; nh 196 include/net/ip6_fib.h struct fib6_nh *nh; nh 77 include/net/ip6_route.h return !(f6i->fib6_flags & RTF_ADDRCONF) && !f6i->nh && nh 300 include/net/ip6_route.h if (a->nh || b->nh) nh 301 include/net/ip6_route.h return nexthop_cmp(a->nh, b->nh); nh 154 include/net/ip_fib.h struct nexthop *nh; nh 447 include/net/ip_fib.h int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, nh 474 include/net/ip_fib.h struct fib_nh *nh; nh 476 include/net/ip_fib.h nh = container_of(nhc, struct fib_nh, nh_common); nh 477 include/net/ip_fib.h *itag = nh->nh_tclassid << 16; nh 524 include/net/ip_fib.h int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh, nh 526 include/net/ip_fib.h int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh, nh 64 include/net/nexthop.h struct nexthop *nh; nh 106 include/net/nexthop.h static inline bool nexthop_get(struct nexthop *nh) nh 108 include/net/nexthop.h return refcount_inc_not_zero(&nh->refcnt); nh 111 include/net/nexthop.h static inline void nexthop_put(struct nexthop *nh) nh 113 include/net/nexthop.h if (refcount_dec_and_test(&nh->refcnt)) nh 114 include/net/nexthop.h call_rcu(&nh->rcu, nexthop_free_rcu); nh 123 include/net/nexthop.h static inline bool 
nexthop_is_multipath(const struct nexthop *nh) nh 125 include/net/nexthop.h if (nh->is_group) { nh 128 include/net/nexthop.h nh_grp = rcu_dereference_rtnl(nh->nh_grp); nh 134 include/net/nexthop.h struct nexthop *nexthop_select_path(struct nexthop *nh, int hash); nh 136 include/net/nexthop.h static inline unsigned int nexthop_num_path(const struct nexthop *nh) nh 140 include/net/nexthop.h if (nh->is_group) { nh 143 include/net/nexthop.h nh_grp = rcu_dereference_rtnl(nh->nh_grp); nh 160 include/net/nexthop.h return nhg->nh_entries[nhsel].nh; nh 164 include/net/nexthop.h int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh, nh 167 include/net/nexthop.h struct nh_group *nhg = rtnl_dereference(nh->nh_grp); nh 171 include/net/nexthop.h struct nexthop *nhe = nhg->nh_entries[i].nh; nh 184 include/net/nexthop.h static inline bool nexthop_is_blackhole(const struct nexthop *nh) nh 188 include/net/nexthop.h if (nh->is_group) { nh 191 include/net/nexthop.h nh_grp = rcu_dereference_rtnl(nh->nh_grp); nh 195 include/net/nexthop.h nh = nh_grp->nh_entries[0].nh; nh 198 include/net/nexthop.h nhi = rcu_dereference_rtnl(nh->nh_info); nh 205 include/net/nexthop.h struct nexthop *nh; nh 207 include/net/nexthop.h nh = nexthop_select_path(res->fi->nh, hash); nh 208 include/net/nexthop.h nhi = rcu_dereference(nh->nh_info); nh 214 include/net/nexthop.h struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel) nh 221 include/net/nexthop.h if (nh->is_group) { nh 224 include/net/nexthop.h nh_grp = rcu_dereference_rtnl(nh->nh_grp); nh 226 include/net/nexthop.h nh = nexthop_mpath_select(nh_grp, nhsel); nh 227 include/net/nexthop.h if (!nh) nh 232 include/net/nexthop.h nhi = rcu_dereference_rtnl(nh->nh_info); nh 236 include/net/nexthop.h static inline bool nexthop_uses_dev(const struct nexthop *nh, nh 241 include/net/nexthop.h if (nh->is_group) { nh 242 include/net/nexthop.h struct nh_group *nhg = rcu_dereference(nh->nh_grp); nh 246 include/net/nexthop.h struct nexthop *nhe = nhg->nh_entries[i].nh; nh 253 include/net/nexthop.h nhi = rcu_dereference(nh->nh_info); nh 263 include/net/nexthop.h if (unlikely(fi->nh)) nh 264 include/net/nexthop.h return nexthop_num_path(fi->nh); nh 269 include/net/nexthop.h int fib_check_nexthop(struct nexthop *nh, u8 scope, nh 274 include/net/nexthop.h if (unlikely(fi->nh)) nh 275 include/net/nexthop.h return nexthop_fib_nhc(fi->nh, nhsel); nh 283 include/net/nexthop.h WARN_ON(fi->nh); nh 291 include/net/nexthop.h int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg, nh 294 include/net/nexthop.h static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) nh 298 include/net/nexthop.h if (nh->is_group) { nh 301 include/net/nexthop.h nh_grp = rcu_dereference_rtnl(nh->nh_grp); nh 302 include/net/nexthop.h nh = nexthop_mpath_select(nh_grp, 0); nh 303 include/net/nexthop.h if (!nh) nh 307 include/net/nexthop.h nhi = rcu_dereference_rtnl(nh->nh_info); nh 318 include/net/nexthop.h fib6_nh = f6i->nh ? 
nexthop_fib6_nh(f6i->nh) : f6i->fib6_nh; nh 324 include/net/nexthop.h struct nexthop *nh = res->f6i->nh; nh 327 include/net/nexthop.h nh = nexthop_select_path(nh, hash); nh 329 include/net/nexthop.h nhi = rcu_dereference_rtnl(nh->nh_info); nh 333 include/net/nexthop.h res->nh = nexthop_fib6_nh(nh); nh 335 include/net/nexthop.h res->nh = &nhi->fib6_nh; nh 339 include/net/nexthop.h int nexthop_for_each_fib6_nh(struct nexthop *nh, nh 340 include/net/nexthop.h int (*cb)(struct fib6_nh *nh, void *arg), nh 65 include/trace/events/fib6.h if (res->nh && res->nh->fib_nh_dev) { nh 66 include/trace/events/fib6.h __assign_str(name, res->nh->fib_nh_dev); nh 76 include/trace/events/fib6.h } else if (res->nh) { nh 78 include/trace/events/fib6.h *in6 = res->nh->fib_nh_gw6; nh 123 kernel/notifier.c int atomic_notifier_chain_register(struct atomic_notifier_head *nh, nh 129 kernel/notifier.c spin_lock_irqsave(&nh->lock, flags); nh 130 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); nh 131 kernel/notifier.c spin_unlock_irqrestore(&nh->lock, flags); nh 145 kernel/notifier.c int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, nh 151 kernel/notifier.c spin_lock_irqsave(&nh->lock, flags); nh 152 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); nh 153 kernel/notifier.c spin_unlock_irqrestore(&nh->lock, flags); nh 178 kernel/notifier.c int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, nh 185 kernel/notifier.c ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); nh 192 kernel/notifier.c int atomic_notifier_call_chain(struct atomic_notifier_head *nh, nh 195 kernel/notifier.c return __atomic_notifier_call_chain(nh, val, v, -1, NULL); nh 215 kernel/notifier.c int blocking_notifier_chain_register(struct blocking_notifier_head *nh, nh 226 kernel/notifier.c return notifier_chain_register(&nh->head, n); nh 228 kernel/notifier.c down_write(&nh->rwsem); nh 229 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); nh 230 kernel/notifier.c up_write(&nh->rwsem); nh 246 kernel/notifier.c int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh, nh 251 kernel/notifier.c down_write(&nh->rwsem); nh 252 kernel/notifier.c ret = notifier_chain_cond_register(&nh->head, n); nh 253 kernel/notifier.c up_write(&nh->rwsem); nh 268 kernel/notifier.c int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, nh 279 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); nh 281 kernel/notifier.c down_write(&nh->rwsem); nh 282 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); nh 283 kernel/notifier.c up_write(&nh->rwsem); nh 306 kernel/notifier.c int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, nh 317 kernel/notifier.c if (rcu_access_pointer(nh->head)) { nh 318 kernel/notifier.c down_read(&nh->rwsem); nh 319 kernel/notifier.c ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nh 321 kernel/notifier.c up_read(&nh->rwsem); nh 327 kernel/notifier.c int blocking_notifier_call_chain(struct blocking_notifier_head *nh, nh 330 kernel/notifier.c return __blocking_notifier_call_chain(nh, val, v, -1, NULL); nh 349 kernel/notifier.c int raw_notifier_chain_register(struct raw_notifier_head *nh, nh 352 kernel/notifier.c return notifier_chain_register(&nh->head, n); nh 366 kernel/notifier.c int raw_notifier_chain_unregister(struct raw_notifier_head *nh, nh 369 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); nh 392 kernel/notifier.c int __raw_notifier_call_chain(struct 
raw_notifier_head *nh, nh 396 kernel/notifier.c return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); nh 400 kernel/notifier.c int raw_notifier_call_chain(struct raw_notifier_head *nh, nh 403 kernel/notifier.c return __raw_notifier_call_chain(nh, val, v, -1, NULL); nh 423 kernel/notifier.c int srcu_notifier_chain_register(struct srcu_notifier_head *nh, nh 434 kernel/notifier.c return notifier_chain_register(&nh->head, n); nh 436 kernel/notifier.c mutex_lock(&nh->mutex); nh 437 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); nh 438 kernel/notifier.c mutex_unlock(&nh->mutex); nh 453 kernel/notifier.c int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, nh 464 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); nh 466 kernel/notifier.c mutex_lock(&nh->mutex); nh 467 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); nh 468 kernel/notifier.c mutex_unlock(&nh->mutex); nh 469 kernel/notifier.c synchronize_srcu(&nh->srcu); nh 492 kernel/notifier.c int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, nh 499 kernel/notifier.c idx = srcu_read_lock(&nh->srcu); nh 500 kernel/notifier.c ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); nh 501 kernel/notifier.c srcu_read_unlock(&nh->srcu, idx); nh 506 kernel/notifier.c int srcu_notifier_call_chain(struct srcu_notifier_head *nh, nh 509 kernel/notifier.c return __srcu_notifier_call_chain(nh, val, v, -1, NULL); nh 525 kernel/notifier.c void srcu_init_notifier_head(struct srcu_notifier_head *nh) nh 527 kernel/notifier.c mutex_init(&nh->mutex); nh 528 kernel/notifier.c if (init_srcu_struct(&nh->srcu) < 0) nh 530 kernel/notifier.c nh->head = NULL; nh 287 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \ nh 291 lib/mpi/longlong.h : "1" ((USItype)(nh)), \ nh 486 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \ nh 491 lib/mpi/longlong.h __nn.__i.__h = (nh); __nn.__i.__l = (nl); \ nh 827 lib/mpi/longlong.h #define sdiv_qrnnd(q, r, nh, nl, d) \ nh 830 lib/mpi/longlong.h : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d))) nh 1345 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \ nh 1348 lib/mpi/longlong.h (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \ nh 50 net/bridge/br_netfilter_ipv6.c const unsigned char *nh = skb_network_header(skb); nh 51 net/bridge/br_netfilter_ipv6.c int off = raw - nh; nh 61 net/bridge/br_netfilter_ipv6.c int optlen = nh[off + 1] + 2; nh 63 net/bridge/br_netfilter_ipv6.c switch (nh[off]) { nh 72 net/bridge/br_netfilter_ipv6.c if (nh[off + 1] != 4 || (off & 3) != 2) nh 74 net/bridge/br_netfilter_ipv6.c pkt_len = ntohl(*(__be32 *)(nh + off + 2)); nh 83 net/bridge/br_netfilter_ipv6.c nh = skb_network_header(skb); nh 4820 net/core/filter.c if (res.nh->fib_nh_lws) nh 4823 net/core/filter.c if (res.nh->fib_nh_gw_family) nh 4824 net/core/filter.c *dst = res.nh->fib_nh_gw6; nh 4826 net/core/filter.c dev = res.nh->fib_nh_dev; nh 53 net/decnet/dn_fib.c #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ nh 54 net/decnet/dn_fib.c for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) nh 56 net/decnet/dn_fib.c #define change_nexthops(fi) { int nhsel; struct dn_fib_nh *nh;\ nh 57 net/decnet/dn_fib.c for(nhsel = 0, nh = (struct dn_fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) nh 95 net/decnet/dn_fib.c if (nh->nh_dev) nh 96 net/decnet/dn_fib.c dev_put(nh->nh_dev); nh 97 net/decnet/dn_fib.c nh->nh_dev = NULL; nh 123 net/decnet/dn_fib.c if (nh->nh_oif != onh->nh_oif || nh 124 
net/decnet/dn_fib.c nh->nh_gw != onh->nh_gw || nh 125 net/decnet/dn_fib.c nh->nh_scope != onh->nh_scope || nh 126 net/decnet/dn_fib.c nh->nh_weight != onh->nh_weight || nh 127 net/decnet/dn_fib.c ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD)) nh 176 net/decnet/dn_fib.c nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; nh 177 net/decnet/dn_fib.c nh->nh_oif = nhp->rtnh_ifindex; nh 178 net/decnet/dn_fib.c nh->nh_weight = nhp->rtnh_hops + 1; nh 185 net/decnet/dn_fib.c nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0; nh 195 net/decnet/dn_fib.c static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct dn_fib_nh *nh) nh 199 net/decnet/dn_fib.c if (nh->nh_gw) { nh 203 net/decnet/dn_fib.c if (nh->nh_flags&RTNH_F_ONLINK) { nh 208 net/decnet/dn_fib.c if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST) nh 210 net/decnet/dn_fib.c if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL) nh 214 net/decnet/dn_fib.c nh->nh_dev = dev; nh 216 net/decnet/dn_fib.c nh->nh_scope = RT_SCOPE_LINK; nh 221 net/decnet/dn_fib.c fld.daddr = nh->nh_gw; nh 222 net/decnet/dn_fib.c fld.flowidn_oif = nh->nh_oif; nh 234 net/decnet/dn_fib.c nh->nh_scope = res.scope; nh 235 net/decnet/dn_fib.c nh->nh_oif = DN_FIB_RES_OIF(res); nh 236 net/decnet/dn_fib.c nh->nh_dev = DN_FIB_RES_DEV(res); nh 237 net/decnet/dn_fib.c if (nh->nh_dev == NULL) nh 239 net/decnet/dn_fib.c dev_hold(nh->nh_dev); nh 241 net/decnet/dn_fib.c if (!(nh->nh_dev->flags & IFF_UP)) nh 250 net/decnet/dn_fib.c if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK)) nh 253 net/decnet/dn_fib.c dev = __dev_get_by_index(&init_net, nh->nh_oif); nh 258 net/decnet/dn_fib.c nh->nh_dev = dev; nh 259 net/decnet/dn_fib.c dev_hold(nh->nh_dev); nh 260 net/decnet/dn_fib.c nh->nh_scope = RT_SCOPE_HOST; nh 329 net/decnet/dn_fib.c struct dn_fib_nh *nh = fi->fib_nh; nh 332 net/decnet/dn_fib.c nh->nh_oif = nla_get_u32(attrs[RTA_OIF]); nh 335 net/decnet/dn_fib.c nh->nh_gw = nla_get_le16(attrs[RTA_GATEWAY]); nh 337 net/decnet/dn_fib.c nh->nh_flags = r->rtm_flags; nh 338 net/decnet/dn_fib.c nh->nh_weight = 1; nh 360 net/decnet/dn_fib.c struct dn_fib_nh *nh = fi->fib_nh; nh 363 net/decnet/dn_fib.c if (nhs != 1 || nh->nh_gw) nh 365 net/decnet/dn_fib.c nh->nh_scope = RT_SCOPE_NOWHERE; nh 366 net/decnet/dn_fib.c nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif); nh 368 net/decnet/dn_fib.c if (nh->nh_dev == NULL) nh 372 net/decnet/dn_fib.c if ((err = dn_fib_check_nh(r, fi, nh)) != 0) nh 434 net/decnet/dn_fib.c if (nh->nh_flags & RTNH_F_DEAD) nh 437 net/decnet/dn_fib.c fld->flowidn_oif == nh->nh_oif) nh 467 net/decnet/dn_fib.c if (!(nh->nh_flags&RTNH_F_DEAD)) { nh 468 net/decnet/dn_fib.c power += nh->nh_weight; nh 469 net/decnet/dn_fib.c nh->nh_power = nh->nh_weight; nh 483 net/decnet/dn_fib.c if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { nh 484 net/decnet/dn_fib.c if ((w -= nh->nh_power) <= 0) { nh 485 net/decnet/dn_fib.c nh->nh_power--; nh 719 net/decnet/dn_fib.c if (nh->nh_flags&RTNH_F_DEAD) nh 721 net/decnet/dn_fib.c else if (nh->nh_dev == dev && nh 722 net/decnet/dn_fib.c nh->nh_scope != scope) { nh 724 net/decnet/dn_fib.c nh->nh_flags |= RTNH_F_DEAD; nh 725 net/decnet/dn_fib.c fi->fib_power -= nh->nh_power; nh 726 net/decnet/dn_fib.c nh->nh_power = 0; nh 752 net/decnet/dn_fib.c if (!(nh->nh_flags&RTNH_F_DEAD)) { nh 756 net/decnet/dn_fib.c if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) nh 758 net/decnet/dn_fib.c if (nh->nh_dev != dev || dev->dn_ptr == NULL) nh 762 net/decnet/dn_fib.c nh->nh_power = 0; nh 763 net/decnet/dn_fib.c nh->nh_flags &= 
~RTNH_F_DEAD; nh 64 net/decnet/dn_table.c #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ nh 65 net/decnet/dn_table.c for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) nh 257 net/decnet/dn_table.c if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif) nh 265 net/decnet/dn_table.c if (gw && gw != nh->nh_gw) nh 359 net/decnet/dn_table.c nhp->rtnh_flags = nh->nh_flags & 0xFF; nh 360 net/decnet/dn_table.c nhp->rtnh_hops = nh->nh_weight - 1; nh 361 net/decnet/dn_table.c nhp->rtnh_ifindex = nh->nh_oif; nh 363 net/decnet/dn_table.c if (nh->nh_gw && nh 364 net/decnet/dn_table.c nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0) nh 18 net/dsa/port.c struct raw_notifier_head *nh = &dp->ds->dst->nh; nh 21 net/dsa/port.c err = raw_notifier_call_chain(nh, e, v); nh 351 net/dsa/switch.c return raw_notifier_chain_register(&ds->dst->nh, &ds->nb); nh 358 net/dsa/switch.c err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb); nh 322 net/ipv4/fib_frontend.c if (unlikely(fi->nh)) { nh 323 net/ipv4/fib_frontend.c dev_match = nexthop_uses_dev(fi->nh, dev); nh 66 net/ipv4/fib_semantics.c int nhsel; const struct fib_nh *nh; \ nh 67 net/ipv4/fib_semantics.c for (nhsel = 0, nh = (fi)->fib_nh; \ nh 69 net/ipv4/fib_semantics.c nh++, nhsel++) nh 82 net/ipv4/fib_semantics.c int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \ nh 235 net/ipv4/fib_semantics.c if (fi->nh) { nh 236 net/ipv4/fib_semantics.c nexthop_put(fi->nh); nh 267 net/ipv4/fib_semantics.c if (fi->nh) { nh 286 net/ipv4/fib_semantics.c if (fi->nh || ofi->nh) nh 287 net/ipv4/fib_semantics.c return nexthop_cmp(fi->nh, ofi->nh) ? 0 : -1; nh 295 net/ipv4/fib_semantics.c if (nh->fib_nh_oif != onh->fib_nh_oif || nh 296 net/ipv4/fib_semantics.c nh->fib_nh_gw_family != onh->fib_nh_gw_family || nh 297 net/ipv4/fib_semantics.c nh->fib_nh_scope != onh->fib_nh_scope || nh 299 net/ipv4/fib_semantics.c nh->fib_nh_weight != onh->fib_nh_weight || nh 302 net/ipv4/fib_semantics.c nh->nh_tclassid != onh->nh_tclassid || nh 304 net/ipv4/fib_semantics.c lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) || nh 305 net/ipv4/fib_semantics.c ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK)) nh 308 net/ipv4/fib_semantics.c if (nh->fib_nh_gw_family == AF_INET && nh 309 net/ipv4/fib_semantics.c nh->fib_nh_gw4 != onh->fib_nh_gw4) nh 312 net/ipv4/fib_semantics.c if (nh->fib_nh_gw_family == AF_INET6 && nh 313 net/ipv4/fib_semantics.c ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6)) nh 355 net/ipv4/fib_semantics.c if (fi->nh) { nh 356 net/ipv4/fib_semantics.c val ^= fib_devindex_hashfn(fi->nh->id); nh 359 net/ipv4/fib_semantics.c val ^= fib_devindex_hashfn(nh->fib_nh_oif); nh 384 net/ipv4/fib_semantics.c if (!fi->nh || fi->nh->id != cfg->fc_nh_id) nh 434 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 441 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { nh 442 net/ipv4/fib_semantics.c if (nh->fib_nh_dev == dev && nh 443 net/ipv4/fib_semantics.c nh->fib_nh_gw4 == gw && nh 444 net/ipv4/fib_semantics.c !(nh->fib_nh_flags & RTNH_F_DEAD)) { nh 468 net/ipv4/fib_semantics.c if (fi->nh) nh 602 net/ipv4/fib_semantics.c int fib_nh_init(struct net *net, struct fib_nh *nh, nh 608 net/ipv4/fib_semantics.c nh->fib_nh_family = AF_INET; nh 610 net/ipv4/fib_semantics.c err = fib_nh_common_init(&nh->nh_common, cfg->fc_encap, nh 615 net/ipv4/fib_semantics.c nh->fib_nh_oif = cfg->fc_oif; nh 616 net/ipv4/fib_semantics.c nh->fib_nh_gw_family = cfg->fc_gw_family; nh 618 net/ipv4/fib_semantics.c nh->fib_nh_gw4 = cfg->fc_gw4; nh 620 
net/ipv4/fib_semantics.c nh->fib_nh_gw6 = cfg->fc_gw6; nh 622 net/ipv4/fib_semantics.c nh->fib_nh_flags = cfg->fc_flags; nh 625 net/ipv4/fib_semantics.c nh->nh_tclassid = cfg->fc_flow; nh 626 net/ipv4/fib_semantics.c if (nh->nh_tclassid) nh 630 net/ipv4/fib_semantics.c nh->fib_nh_weight = nh_weight; nh 664 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 727 net/ipv4/fib_semantics.c nh = fib_info_nh(fi, 0); nh 728 net/ipv4/fib_semantics.c if (cfg->fc_oif && nh->fib_nh_oif != cfg->fc_oif) { nh 734 net/ipv4/fib_semantics.c if (cfg->fc_gw_family != nh->fib_nh_gw_family || nh 736 net/ipv4/fib_semantics.c nh->fib_nh_gw4 != cfg->fc_gw4) || nh 738 net/ipv4/fib_semantics.c ipv6_addr_cmp(&nh->fib_nh_gw6, &cfg->fc_gw6))) { nh 745 net/ipv4/fib_semantics.c if (cfg->fc_flow && nh->nh_tclassid != cfg->fc_flow) { nh 767 net/ipv4/fib_semantics.c if (nh->fib_nh_flags & RTNH_F_DEAD) nh 770 net/ipv4/fib_semantics.c if (ip_ignore_linkdown(nh->fib_nh_dev) && nh 771 net/ipv4/fib_semantics.c nh->fib_nh_flags & RTNH_F_LINKDOWN) nh 774 net/ipv4/fib_semantics.c total += nh->fib_nh_weight; nh 812 net/ipv4/fib_semantics.c const struct fib_nh *nh, nh 825 net/ipv4/fib_semantics.c result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws); nh 844 net/ipv4/fib_semantics.c if (fi->nh && cfg->fc_nh_id == fi->nh->id) nh 850 net/ipv4/fib_semantics.c struct fib_nh *nh = fib_info_nh(fi, 0); nh 854 net/ipv4/fib_semantics.c nh, cfg, extack)) nh 859 net/ipv4/fib_semantics.c cfg->fc_flow != nh->nh_tclassid) nh 862 net/ipv4/fib_semantics.c if ((cfg->fc_oif && cfg->fc_oif != nh->fib_nh_oif) || nh 864 net/ipv4/fib_semantics.c cfg->fc_gw_family != nh->fib_nh_gw_family)) nh 868 net/ipv4/fib_semantics.c cfg->fc_gw4 != nh->fib_nh_gw4) nh 872 net/ipv4/fib_semantics.c ipv6_addr_cmp(&cfg->fc_gw6, &nh->fib_nh_gw6)) nh 891 net/ipv4/fib_semantics.c if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->fib_nh_oif) nh 907 net/ipv4/fib_semantics.c if (nh->fib_nh_gw_family != AF_INET || nh 908 net/ipv4/fib_semantics.c nla_get_in_addr(nla) != nh->fib_nh_gw4) nh 918 net/ipv4/fib_semantics.c switch (nh->fib_nh_gw_family) { nh 921 net/ipv4/fib_semantics.c cfg2.fc_gw4 != nh->fib_nh_gw4) nh 927 net/ipv4/fib_semantics.c &nh->fib_nh_gw6)) nh 935 net/ipv4/fib_semantics.c if (nla && nla_get_u32(nla) != nh->nh_tclassid) nh 986 net/ipv4/fib_semantics.c static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh, nh 991 net/ipv4/fib_semantics.c .fc_flags = nh->fib_nh_flags | RTF_GATEWAY, nh 992 net/ipv4/fib_semantics.c .fc_ifindex = nh->fib_nh_oif, nh 993 net/ipv4/fib_semantics.c .fc_gateway = nh->fib_nh_gw6, nh 1000 net/ipv4/fib_semantics.c nh->fib_nh_dev = fib6_nh.fib_nh_dev; nh 1001 net/ipv4/fib_semantics.c dev_hold(nh->fib_nh_dev); nh 1002 net/ipv4/fib_semantics.c nh->fib_nh_oif = nh->fib_nh_dev->ifindex; nh 1003 net/ipv4/fib_semantics.c nh->fib_nh_scope = RT_SCOPE_LINK; nh 1054 net/ipv4/fib_semantics.c static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, nh 1061 net/ipv4/fib_semantics.c if (nh->fib_nh_flags & RTNH_F_ONLINK) { nh 1068 net/ipv4/fib_semantics.c dev = __dev_get_by_index(net, nh->fib_nh_oif); nh 1077 net/ipv4/fib_semantics.c addr_type = inet_addr_type_dev_table(net, dev, nh->fib_nh_gw4); nh 1083 net/ipv4/fib_semantics.c nh->fib_nh_flags |= RTNH_F_LINKDOWN; nh 1084 net/ipv4/fib_semantics.c nh->fib_nh_dev = dev; nh 1086 net/ipv4/fib_semantics.c nh->fib_nh_scope = RT_SCOPE_LINK; nh 1093 net/ipv4/fib_semantics.c .daddr = nh->fib_nh_gw4, nh 1095 net/ipv4/fib_semantics.c .flowi4_oif = nh->fib_nh_oif, nh 1131 
net/ipv4/fib_semantics.c nh->fib_nh_scope = res.scope; nh 1132 net/ipv4/fib_semantics.c nh->fib_nh_oif = FIB_RES_OIF(res); nh 1133 net/ipv4/fib_semantics.c nh->fib_nh_dev = dev = FIB_RES_DEV(res); nh 1141 net/ipv4/fib_semantics.c nh->fib_nh_flags |= RTNH_F_LINKDOWN; nh 1148 net/ipv4/fib_semantics.c static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh, nh 1154 net/ipv4/fib_semantics.c if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) { nh 1163 net/ipv4/fib_semantics.c in_dev = inetdev_by_index(net, nh->fib_nh_oif); nh 1172 net/ipv4/fib_semantics.c nh->fib_nh_dev = in_dev->dev; nh 1173 net/ipv4/fib_semantics.c dev_hold(nh->fib_nh_dev); nh 1174 net/ipv4/fib_semantics.c nh->fib_nh_scope = RT_SCOPE_HOST; nh 1175 net/ipv4/fib_semantics.c if (!netif_carrier_ok(nh->fib_nh_dev)) nh 1176 net/ipv4/fib_semantics.c nh->fib_nh_flags |= RTNH_F_LINKDOWN; nh 1183 net/ipv4/fib_semantics.c int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, nh 1188 net/ipv4/fib_semantics.c if (nh->fib_nh_gw_family == AF_INET) nh 1189 net/ipv4/fib_semantics.c err = fib_check_nh_v4_gw(net, nh, table, scope, extack); nh 1190 net/ipv4/fib_semantics.c else if (nh->fib_nh_gw_family == AF_INET6) nh 1191 net/ipv4/fib_semantics.c err = fib_check_nh_v6_gw(net, nh, table, extack); nh 1193 net/ipv4/fib_semantics.c err = fib_check_nh_nongw(net, nh, extack); nh 1283 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 1288 net/ipv4/fib_semantics.c nh = container_of(nhc, struct fib_nh, nh_common); nh 1289 net/ipv4/fib_semantics.c nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope); nh 1290 net/ipv4/fib_semantics.c nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); nh 1292 net/ipv4/fib_semantics.c return nh->nh_saddr; nh 1303 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 1305 net/ipv4/fib_semantics.c nh = container_of(nhc, struct fib_nh, nh_common); nh 1306 net/ipv4/fib_semantics.c if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid)) nh 1307 net/ipv4/fib_semantics.c return nh->nh_saddr; nh 1342 net/ipv4/fib_semantics.c struct nexthop *nh = NULL; nh 1371 net/ipv4/fib_semantics.c nh = nexthop_find_by_id(net, cfg->fc_nh_id); nh 1372 net/ipv4/fib_semantics.c if (!nh) { nh 1431 net/ipv4/fib_semantics.c if (nh) { nh 1432 net/ipv4/fib_semantics.c if (!nexthop_get(nh)) { nh 1437 net/ipv4/fib_semantics.c fi->nh = nh; nh 1480 net/ipv4/fib_semantics.c if (fi->nh) { nh 1481 net/ipv4/fib_semantics.c err = fib_check_nexthop(fi->nh, cfg->fc_scope, extack); nh 1485 net/ipv4/fib_semantics.c struct fib_nh *nh = fi->fib_nh; nh 1493 net/ipv4/fib_semantics.c if (nh->fib_nh_gw_family) { nh 1498 net/ipv4/fib_semantics.c nh->fib_nh_scope = RT_SCOPE_NOWHERE; nh 1499 net/ipv4/fib_semantics.c nh->fib_nh_dev = dev_get_by_index(net, nh->fib_nh_oif); nh 1501 net/ipv4/fib_semantics.c if (!nh->fib_nh_dev) nh 1524 net/ipv4/fib_semantics.c if (!fi->nh) { nh 1555 net/ipv4/fib_semantics.c if (fi->nh) { nh 1556 net/ipv4/fib_semantics.c list_add(&fi->nh_list, &nh->fi_list); nh 1695 net/ipv4/fib_semantics.c if (unlikely(fi->nh)) { nh 1696 net/ipv4/fib_semantics.c if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0) nh 1702 net/ipv4/fib_semantics.c if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight, nh 1706 net/ipv4/fib_semantics.c if (nh->nh_tclassid && nh 1707 net/ipv4/fib_semantics.c nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) nh 1768 net/ipv4/fib_semantics.c if (fi->nh) { nh 1769 net/ipv4/fib_semantics.c if (nla_put_u32(skb, RTA_NH_ID, fi->nh->id)) nh 1771 net/ipv4/fib_semantics.c if 
(nexthop_is_blackhole(fi->nh)) nh 1785 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 1787 net/ipv4/fib_semantics.c nh = container_of(nhc, struct fib_nh, nh_common); nh 1788 net/ipv4/fib_semantics.c if (nh->nh_tclassid && nh 1789 net/ipv4/fib_semantics.c nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) nh 1836 net/ipv4/fib_semantics.c static int call_fib_nh_notifiers(struct fib_nh *nh, nh 1839 net/ipv4/fib_semantics.c bool ignore_link_down = ip_ignore_linkdown(nh->fib_nh_dev); nh 1841 net/ipv4/fib_semantics.c .fib_nh = nh, nh 1846 net/ipv4/fib_semantics.c if (nh->fib_nh_flags & RTNH_F_DEAD) nh 1848 net/ipv4/fib_semantics.c if (ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) nh 1850 net/ipv4/fib_semantics.c return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type, nh 1853 net/ipv4/fib_semantics.c if ((ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) || nh 1854 net/ipv4/fib_semantics.c (nh->fib_nh_flags & RTNH_F_DEAD)) nh 1855 net/ipv4/fib_semantics.c return call_fib4_notifiers(dev_net(nh->fib_nh_dev), nh 1906 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 1908 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { nh 1909 net/ipv4/fib_semantics.c if (nh->fib_nh_dev == dev) nh 1910 net/ipv4/fib_semantics.c fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu); nh 1929 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 1934 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { nh 1935 net/ipv4/fib_semantics.c struct fib_info *fi = nh->nh_parent; nh 1939 net/ipv4/fib_semantics.c if (nh->fib_nh_dev != dev || fi == prev_fi) nh 2076 net/ipv4/fib_semantics.c struct fib_nh *nh; nh 2094 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { nh 2095 net/ipv4/fib_semantics.c struct fib_info *fi = nh->nh_parent; nh 2099 net/ipv4/fib_semantics.c if (nh->fib_nh_dev != dev || fi == prev_fi) nh 2132 net/ipv4/fib_semantics.c static bool fib_good_nh(const struct fib_nh *nh) nh 2136 net/ipv4/fib_semantics.c if (nh->fib_nh_scope == RT_SCOPE_LINK) { nh 2141 net/ipv4/fib_semantics.c if (likely(nh->fib_nh_gw_family == AF_INET)) nh 2142 net/ipv4/fib_semantics.c n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, nh 2143 net/ipv4/fib_semantics.c (__force u32)nh->fib_nh_gw4); nh 2144 net/ipv4/fib_semantics.c else if (nh->fib_nh_gw_family == AF_INET6) nh 2145 net/ipv4/fib_semantics.c n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, nh 2146 net/ipv4/fib_semantics.c &nh->fib_nh_gw6); nh 2164 net/ipv4/fib_semantics.c if (unlikely(res->fi->nh)) { nh 1468 net/ipv4/fib_trie.c if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) { nh 1005 net/ipv4/icmp.c int nh; nh 1014 net/ipv4/icmp.c nh = skb_network_offset(skb); nh 1020 net/ipv4/icmp.c skb_set_network_header(skb, nh); nh 19 net/ipv4/nexthop.c static void remove_nexthop(struct net *net, struct nexthop *nh, nh 61 net/ipv4/nexthop.c static void nexthop_free_mpath(struct nexthop *nh) nh 66 net/ipv4/nexthop.c nhg = rcu_dereference_raw(nh->nh_grp); nh 71 net/ipv4/nexthop.c nexthop_put(nhge->nh); nh 80 net/ipv4/nexthop.c static void nexthop_free_single(struct nexthop *nh) nh 84 net/ipv4/nexthop.c nhi = rcu_dereference_raw(nh->nh_info); nh 87 net/ipv4/nexthop.c fib_nh_release(nh->net, &nhi->fib_nh); nh 98 net/ipv4/nexthop.c struct nexthop *nh = container_of(head, struct nexthop, rcu); nh 100 net/ipv4/nexthop.c if (nh->is_group) nh 101 net/ipv4/nexthop.c nexthop_free_mpath(nh); nh 103 net/ipv4/nexthop.c nexthop_free_single(nh); nh 105 net/ipv4/nexthop.c kfree(nh); nh 111 net/ipv4/nexthop.c struct nexthop *nh; nh 113 
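The nexthop.c lines above (nh 149-162) locate a nexthop by id by descending a red-black tree, going left on a smaller id and right on a larger one. The standalone sketch below uses a plain binary search tree instead of the kernel rbtree to show the same walk; nh_node, nh_insert and nh_find_by_id are hypothetical names.

/* Sketch of the "find nexthop by id" walk: an ordinary binary search
 * tree keyed by a 32-bit id stands in for the kernel's rbtree. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct nh_node {
	uint32_t id;
	struct nh_node *left, *right;
};

static struct nh_node *nh_insert(struct nh_node *root, uint32_t id)
{
	if (!root) {
		struct nh_node *n = calloc(1, sizeof(*n));

		n->id = id;
		return n;
	}
	if (id < root->id)
		root->left = nh_insert(root->left, id);
	else if (id > root->id)
		root->right = nh_insert(root->right, id);
	return root;            /* duplicate ids are ignored */
}

/* Same shape as the walk above: descend left on a smaller id, right on
 * a larger id, stop on an exact match. */
static struct nh_node *nh_find_by_id(struct nh_node *root, uint32_t id)
{
	while (root) {
		if (id < root->id)
			root = root->left;
		else if (id > root->id)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

int main(void)
{
	uint32_t ids[] = { 20, 5, 42, 17 };
	struct nh_node *root = NULL;

	for (unsigned int i = 0; i < 4; i++)
		root = nh_insert(root, ids[i]);

	printf("id 17: %s\n", nh_find_by_id(root, 17) ? "found" : "missing");
	printf("id 99: %s\n", nh_find_by_id(root, 99) ? "found" : "missing");
	return 0;
}
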
net/ipv4/nexthop.c nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL); nh 114 net/ipv4/nexthop.c if (nh) { nh 115 net/ipv4/nexthop.c INIT_LIST_HEAD(&nh->fi_list); nh 116 net/ipv4/nexthop.c INIT_LIST_HEAD(&nh->f6i_list); nh 117 net/ipv4/nexthop.c INIT_LIST_HEAD(&nh->grp_list); nh 119 net/ipv4/nexthop.c return nh; nh 149 net/ipv4/nexthop.c struct nexthop *nh; nh 156 net/ipv4/nexthop.c nh = rb_entry(parent, struct nexthop, rb_node); nh 157 net/ipv4/nexthop.c if (id < nh->id) nh 159 net/ipv4/nexthop.c else if (id > nh->id) nh 162 net/ipv4/nexthop.c return nh; nh 204 net/ipv4/nexthop.c p->id = nhg->nh_entries[i].nh->id; nh 215 net/ipv4/nexthop.c static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh, nh 230 net/ipv4/nexthop.c nhm->nh_flags = nh->nh_flags; nh 231 net/ipv4/nexthop.c nhm->nh_protocol = nh->protocol; nh 235 net/ipv4/nexthop.c if (nla_put_u32(skb, NHA_ID, nh->id)) nh 238 net/ipv4/nexthop.c if (nh->is_group) { nh 239 net/ipv4/nexthop.c struct nh_group *nhg = rtnl_dereference(nh->nh_grp); nh 246 net/ipv4/nexthop.c nhi = rtnl_dereference(nh->nh_info); nh 291 net/ipv4/nexthop.c static size_t nh_nlmsg_size_grp(struct nexthop *nh) nh 293 net/ipv4/nexthop.c struct nh_group *nhg = rtnl_dereference(nh->nh_grp); nh 300 net/ipv4/nexthop.c static size_t nh_nlmsg_size_single(struct nexthop *nh) nh 302 net/ipv4/nexthop.c struct nh_info *nhi = rtnl_dereference(nh->nh_info); nh 331 net/ipv4/nexthop.c static size_t nh_nlmsg_size(struct nexthop *nh) nh 337 net/ipv4/nexthop.c if (nh->is_group) nh 338 net/ipv4/nexthop.c sz += nh_nlmsg_size_grp(nh); nh 340 net/ipv4/nexthop.c sz += nh_nlmsg_size_single(nh); nh 345 net/ipv4/nexthop.c static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info) nh 352 net/ipv4/nexthop.c skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any()); nh 356 net/ipv4/nexthop.c err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags); nh 372 net/ipv4/nexthop.c static bool valid_group_nh(struct nexthop *nh, unsigned int npaths, nh 375 net/ipv4/nexthop.c if (nh->is_group) { nh 376 net/ipv4/nexthop.c struct nh_group *nhg = rtnl_dereference(nh->nh_grp); nh 387 net/ipv4/nexthop.c struct nh_info *nhi = rtnl_dereference(nh->nh_info); nh 435 net/ipv4/nexthop.c struct nexthop *nh; nh 437 net/ipv4/nexthop.c nh = nexthop_find_by_id(net, nhg[i].id); nh 438 net/ipv4/nexthop.c if (!nh) { nh 442 net/ipv4/nexthop.c if (!valid_group_nh(nh, len, extack)) nh 457 net/ipv4/nexthop.c static bool ipv6_good_nh(const struct fib6_nh *nh) nh 464 net/ipv4/nexthop.c n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); nh 473 net/ipv4/nexthop.c static bool ipv4_good_nh(const struct fib_nh *nh) nh 480 net/ipv4/nexthop.c n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, nh 481 net/ipv4/nexthop.c (__force u32)nh->fib_nh_gw4); nh 490 net/ipv4/nexthop.c struct nexthop *nexthop_select_path(struct nexthop *nh, int hash) nh 496 net/ipv4/nexthop.c if (!nh->is_group) nh 497 net/ipv4/nexthop.c return nh; nh 499 net/ipv4/nexthop.c nhg = rcu_dereference(nh->nh_grp); nh 510 net/ipv4/nexthop.c nhi = rcu_dereference(nhge->nh->nh_info); nh 514 net/ipv4/nexthop.c return nhge->nh; nh 518 net/ipv4/nexthop.c return nhge->nh; nh 523 net/ipv4/nexthop.c rc = nhge->nh; nh 530 net/ipv4/nexthop.c int nexthop_for_each_fib6_nh(struct nexthop *nh, nh 531 net/ipv4/nexthop.c int (*cb)(struct fib6_nh *nh, void *arg), nh 537 net/ipv4/nexthop.c if (nh->is_group) { nh 541 net/ipv4/nexthop.c nhg = rcu_dereference_rtnl(nh->nh_grp); nh 545 net/ipv4/nexthop.c nhi = rcu_dereference_rtnl(nhge->nh->nh_info); nh 551 
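nexthop_for_each_fib6_nh (nh 530-545 above) runs a caller-supplied callback over every member of a nexthop group and stops at the first nonzero return. The toy sketch below mirrors that early-exit callback convention; toy_group, toy_for_each_nh and find_linkdown are made up for the example.

/* Hedged sketch of the "run a callback per group member, stop on the
 * first nonzero return" convention; all types here are hypothetical. */
#include <stdio.h>

struct toy_nh {
	int ifindex;
	int linkdown;
};

struct toy_group {
	int num;
	struct toy_nh entries[4];
};

static int toy_for_each_nh(struct toy_group *grp,
			   int (*cb)(struct toy_nh *nh, void *arg), void *arg)
{
	int i, err;

	for (i = 0; i < grp->num; i++) {
		err = cb(&grp->entries[i], arg);
		if (err)        /* first nonzero return aborts the walk */
			return err;
	}
	return 0;
}

/* Example callback: report the first nexthop whose link is down. */
static int find_linkdown(struct toy_nh *nh, void *arg)
{
	if (nh->linkdown) {
		*(int *)arg = nh->ifindex;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct toy_group grp = {
		.num = 3,
		.entries = { { 2, 0 }, { 3, 1 }, { 4, 0 } },
	};
	int ifindex = -1;

	if (toy_for_each_nh(&grp, find_linkdown, &ifindex))
		printf("first link-down nexthop: ifindex %d\n", ifindex);
	else
		printf("all nexthops up\n");
	return 0;
}
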
net/ipv4/nexthop.c nhi = rcu_dereference_rtnl(nh->nh_info); nh 571 net/ipv4/nexthop.c int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg, nh 585 net/ipv4/nexthop.c if (nh->is_group) { nh 588 net/ipv4/nexthop.c nhg = rtnl_dereference(nh->nh_grp); nh 592 net/ipv4/nexthop.c nhi = rtnl_dereference(nh->nh_info); nh 623 net/ipv4/nexthop.c static int nexthop_check_scope(struct nexthop *nh, u8 scope, nh 628 net/ipv4/nexthop.c nhi = rtnl_dereference(nh->nh_info); nh 647 net/ipv4/nexthop.c int fib_check_nexthop(struct nexthop *nh, u8 scope, nh 652 net/ipv4/nexthop.c if (nh->is_group) { nh 661 net/ipv4/nexthop.c nhg = rtnl_dereference(nh->nh_grp); nh 663 net/ipv4/nexthop.c err = nexthop_check_scope(nhg->nh_entries[0].nh, scope, extack); nh 665 net/ipv4/nexthop.c err = nexthop_check_scope(nh, scope, extack); nh 710 net/ipv4/nexthop.c struct nexthop *nh = nhge->nh; nh 714 net/ipv4/nexthop.c WARN_ON(!nh); nh 734 net/ipv4/nexthop.c if (nhg->nh_entries[i].nh == nh) { nh 741 net/ipv4/nexthop.c new_nhges[j].nh = nhges[i].nh; nh 743 net/ipv4/nexthop.c list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); nh 751 net/ipv4/nexthop.c nexthop_put(nhge->nh); nh 757 net/ipv4/nexthop.c static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, nh 762 net/ipv4/nexthop.c list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) nh 769 net/ipv4/nexthop.c static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) nh 771 net/ipv4/nexthop.c struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); nh 777 net/ipv4/nexthop.c if (WARN_ON(!nhge->nh)) nh 785 net/ipv4/nexthop.c static void __remove_nexthop_fib(struct net *net, struct nexthop *nh) nh 791 net/ipv4/nexthop.c list_for_each_entry(fi, &nh->fi_list, nh_list) { nh 799 net/ipv4/nexthop.c list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) { nh 806 net/ipv4/nexthop.c static void __remove_nexthop(struct net *net, struct nexthop *nh, nh 809 net/ipv4/nexthop.c __remove_nexthop_fib(net, nh); nh 811 net/ipv4/nexthop.c if (nh->is_group) { nh 812 net/ipv4/nexthop.c remove_nexthop_group(nh, nlinfo); nh 816 net/ipv4/nexthop.c nhi = rtnl_dereference(nh->nh_info); nh 820 net/ipv4/nexthop.c remove_nexthop_from_groups(net, nh, nlinfo); nh 824 net/ipv4/nexthop.c static void remove_nexthop(struct net *net, struct nexthop *nh, nh 828 net/ipv4/nexthop.c rb_erase(&nh->rb_node, &net->nexthop.rb_root); nh 831 net/ipv4/nexthop.c nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo); nh 833 net/ipv4/nexthop.c __remove_nexthop(net, nh, nlinfo); nh 836 net/ipv4/nexthop.c nexthop_put(nh); nh 842 net/ipv4/nexthop.c static void nh_rt_cache_flush(struct net *net, struct nexthop *nh) nh 846 net/ipv4/nexthop.c if (!list_empty(&nh->fi_list)) nh 849 net/ipv4/nexthop.c list_for_each_entry(f6i, &nh->f6i_list, nh_list) nh 908 net/ipv4/nexthop.c static void __nexthop_replace_notify(struct net *net, struct nexthop *nh, nh 913 net/ipv4/nexthop.c if (!list_empty(&nh->fi_list)) { nh 920 net/ipv4/nexthop.c list_for_each_entry(fi, &nh->fi_list, nh_list) nh 925 net/ipv4/nexthop.c list_for_each_entry(fi, &nh->fi_list, nh_list) nh 929 net/ipv4/nexthop.c list_for_each_entry(f6i, &nh->f6i_list, nh_list) nh 937 net/ipv4/nexthop.c static void nexthop_replace_notify(struct net *net, struct nexthop *nh, nh 942 net/ipv4/nexthop.c __nexthop_replace_notify(net, nh, info); nh 944 net/ipv4/nexthop.c list_for_each_entry(nhge, &nh->grp_list, nh_list) nh 1020 net/ipv4/nexthop.c struct nexthop *nh; nh 1028 net/ipv4/nexthop.c nh = rb_entry(parent, struct nexthop, rb_node); nh 
1029 net/ipv4/nexthop.c if (new_id < nh->id) { nh 1031 net/ipv4/nexthop.c } else if (new_id > nh->id) { nh 1034 net/ipv4/nexthop.c rc = replace_nexthop(net, nh, new_nh, extack); nh 1036 net/ipv4/nexthop.c new_nh = nh; /* send notification with old nh */ nh 1089 net/ipv4/nexthop.c struct nexthop *nh; nh 1092 net/ipv4/nexthop.c nh = rb_entry(node, struct nexthop, rb_node); nh 1093 net/ipv4/nexthop.c remove_nexthop(net, nh, NULL); nh 1105 net/ipv4/nexthop.c struct nexthop *nh; nh 1108 net/ipv4/nexthop.c nh = nexthop_alloc(); nh 1109 net/ipv4/nexthop.c if (!nh) nh 1112 net/ipv4/nexthop.c nh->is_group = 1; nh 1116 net/ipv4/nexthop.c kfree(nh); nh 1124 net/ipv4/nexthop.c kfree(nh); nh 1141 net/ipv4/nexthop.c nhg->nh_entries[i].nh = nhe; nh 1144 net/ipv4/nexthop.c nhg->nh_entries[i].nh_parent = nh; nh 1152 net/ipv4/nexthop.c rcu_assign_pointer(nh->nh_grp, nhg); nh 1154 net/ipv4/nexthop.c return nh; nh 1158 net/ipv4/nexthop.c nexthop_put(nhg->nh_entries[i].nh); nh 1162 net/ipv4/nexthop.c kfree(nh); nh 1167 net/ipv4/nexthop.c static int nh_create_ipv4(struct net *net, struct nexthop *nh, nh 1192 net/ipv4/nexthop.c nh->nh_flags = fib_nh->fib_nh_flags; nh 1202 net/ipv4/nexthop.c static int nh_create_ipv6(struct net *net, struct nexthop *nh, nh 1226 net/ipv4/nexthop.c nh->nh_flags = fib6_nh->fib_nh_flags; nh 1235 net/ipv4/nexthop.c struct nexthop *nh; nh 1238 net/ipv4/nexthop.c nh = nexthop_alloc(); nh 1239 net/ipv4/nexthop.c if (!nh) nh 1244 net/ipv4/nexthop.c kfree(nh); nh 1248 net/ipv4/nexthop.c nh->nh_flags = cfg->nh_flags; nh 1249 net/ipv4/nexthop.c nh->net = net; nh 1251 net/ipv4/nexthop.c nhi->nh_parent = nh; nh 1262 net/ipv4/nexthop.c err = nh_create_ipv4(net, nh, nhi, cfg, extack); nh 1265 net/ipv4/nexthop.c err = nh_create_ipv6(net, nh, nhi, cfg, extack); nh 1271 net/ipv4/nexthop.c kfree(nh); nh 1278 net/ipv4/nexthop.c rcu_assign_pointer(nh->nh_info, nhi); nh 1280 net/ipv4/nexthop.c return nh; nh 1287 net/ipv4/nexthop.c struct nexthop *nh; nh 1304 net/ipv4/nexthop.c nh = nexthop_create_group(net, cfg); nh 1306 net/ipv4/nexthop.c nh = nexthop_create(net, cfg, extack); nh 1308 net/ipv4/nexthop.c if (IS_ERR(nh)) nh 1309 net/ipv4/nexthop.c return nh; nh 1311 net/ipv4/nexthop.c refcount_set(&nh->refcnt, 1); nh 1312 net/ipv4/nexthop.c nh->id = cfg->nh_id; nh 1313 net/ipv4/nexthop.c nh->protocol = cfg->nh_protocol; nh 1314 net/ipv4/nexthop.c nh->net = net; nh 1316 net/ipv4/nexthop.c err = insert_nexthop(net, nh, cfg, extack); nh 1318 net/ipv4/nexthop.c __remove_nexthop(net, nh, NULL); nh 1319 net/ipv4/nexthop.c nexthop_put(nh); nh 1320 net/ipv4/nexthop.c nh = ERR_PTR(err); nh 1323 net/ipv4/nexthop.c return nh; nh 1498 net/ipv4/nexthop.c struct nexthop *nh; nh 1503 net/ipv4/nexthop.c nh = nexthop_add(net, &cfg, extack); nh 1504 net/ipv4/nexthop.c if (IS_ERR(nh)) nh 1505 net/ipv4/nexthop.c err = PTR_ERR(nh); nh 1566 net/ipv4/nexthop.c struct nexthop *nh; nh 1574 net/ipv4/nexthop.c nh = nexthop_find_by_id(net, id); nh 1575 net/ipv4/nexthop.c if (!nh) nh 1578 net/ipv4/nexthop.c remove_nexthop(net, nh, &nlinfo); nh 1589 net/ipv4/nexthop.c struct nexthop *nh; nh 1603 net/ipv4/nexthop.c nh = nexthop_find_by_id(net, id); nh 1604 net/ipv4/nexthop.c if (!nh) nh 1607 net/ipv4/nexthop.c err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid, nh 1622 net/ipv4/nexthop.c static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx, nh 1628 net/ipv4/nexthop.c if (group_filter && !nh->is_group) nh 1634 net/ipv4/nexthop.c if (nh->is_group) nh 1637 net/ipv4/nexthop.c nhi = 
rtnl_dereference(nh->nh_info); nh 1732 net/ipv4/nexthop.c struct nexthop *nh; nh 1737 net/ipv4/nexthop.c nh = rb_entry(node, struct nexthop, rb_node); nh 1738 net/ipv4/nexthop.c if (nh_dump_filtered(nh, dev_filter_idx, master_idx, nh 1742 net/ipv4/nexthop.c err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, nh 1585 net/ipv4/route.c struct fib_nh *nh; nh 1587 net/ipv4/route.c nh = container_of(nhc, struct fib_nh, nh_common); nh 1588 net/ipv4/route.c rt->dst.tclassid = nh->nh_tclassid; nh 2424 net/ipv6/addrconf.c if (rt->nh) nh 6400 net/ipv6/addrconf.c struct fib6_nh *nh = ifa->rt->fib6_nh; nh 6405 net/ipv6/addrconf.c if (nh->rt6i_pcpu) { nh 6409 net/ipv6/addrconf.c rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu); nh 465 net/ipv6/datagram.c const unsigned char *nh = skb_network_header(skb); nh 470 net/ipv6/datagram.c const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset), nh 479 net/ipv6/datagram.c ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset), nh 603 net/ipv6/datagram.c unsigned char *nh = skb_network_header(skb); nh 616 net/ipv6/datagram.c __be32 flowinfo = ip6_flowinfo((struct ipv6hdr *)nh); nh 623 net/ipv6/datagram.c u8 *ptr = nh + sizeof(struct ipv6hdr); nh 643 net/ipv6/datagram.c u8 *ptr = nh + off; nh 685 net/ipv6/datagram.c u8 *ptr = nh + sizeof(struct ipv6hdr); nh 689 net/ipv6/datagram.c u8 *ptr = nh + opt->dst0; nh 693 net/ipv6/datagram.c struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(nh + opt->srcrt); nh 697 net/ipv6/datagram.c u8 *ptr = nh + opt->dst1; nh 118 net/ipv6/exthdrs.c const unsigned char *nh = skb_network_header(skb); nh 137 net/ipv6/exthdrs.c int optlen = nh[off + 1] + 2; nh 140 net/ipv6/exthdrs.c switch (nh[off]) { nh 162 net/ipv6/exthdrs.c if (nh[off + i] != 0) nh 176 net/ipv6/exthdrs.c if (curr->type == nh[off]) { nh 726 net/ipv6/exthdrs.c const unsigned char *nh = skb_network_header(skb); nh 728 net/ipv6/exthdrs.c if (nh[optoff + 1] == 2) { nh 730 net/ipv6/exthdrs.c memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra)); nh 734 net/ipv6/exthdrs.c nh[optoff + 1]); nh 743 net/ipv6/exthdrs.c const unsigned char *nh = skb_network_header(skb); nh 748 net/ipv6/exthdrs.c if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { nh 750 net/ipv6/exthdrs.c nh[optoff+1]); nh 755 net/ipv6/exthdrs.c pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); nh 787 net/ipv6/exthdrs.c const unsigned char *nh = skb_network_header(skb); nh 789 net/ipv6/exthdrs.c if (nh[optoff + 1] < 8) nh 792 net/ipv6/exthdrs.c if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1]) nh 795 net/ipv6/exthdrs.c if (!calipso_validate(skb, nh + optoff)) nh 118 net/ipv6/exthdrs_core.c const unsigned char *nh = skb_network_header(skb); nh 125 net/ipv6/exthdrs_core.c hdr = (struct ipv6_opt_hdr *)(nh + offset); nh 135 net/ipv6/exthdrs_core.c int opttype = nh[offset]; nh 146 net/ipv6/exthdrs_core.c optlen = nh[offset + 1] + 2; nh 184 net/ipv6/fib6_rules.c res->nh->fib_nh_dev); nh 830 net/ipv6/icmp.c int nh; nh 839 net/ipv6/icmp.c nh = skb_network_offset(skb); nh 845 net/ipv6/icmp.c skb_set_network_header(skb, nh); nh 171 net/ipv6/ip6_fib.c if (f6i->nh) nh 172 net/ipv6/ip6_fib.c nexthop_put(f6i->nh); nh 938 net/ipv6/ip6_fib.c static int fib6_nh_drop_pcpu_from(struct fib6_nh *nh, void *_arg) nh 942 net/ipv6/ip6_fib.c __fib6_drop_pcpu_from(nh, arg->from, arg->table); nh 955 net/ipv6/ip6_fib.c if (f6i->nh) { nh 961 net/ipv6/ip6_fib.c nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_drop_pcpu_from, nh 978 net/ipv6/ip6_fib.c if (rt->nh && !list_empty(&rt->nh_list)) nh 1409 net/ipv6/ip6_fib.c if (rt->nh) nh 1410 
net/ipv6/ip6_fib.c list_add(&rt->nh_list, &rt->nh->f6i_list); nh 2380 net/ipv6/ip6_fib.c if (rt->nh) nh 2381 net/ipv6/ip6_fib.c fib6_nh = nexthop_fib6_nh(rt->nh); nh 256 net/ipv6/mip6.c const unsigned char *nh = skb_network_header(skb); nh 292 net/ipv6/mip6.c exthdr = (struct ipv6_opt_hdr *)(nh + offset); nh 392 net/ipv6/mip6.c const unsigned char *nh = skb_network_header(skb); nh 407 net/ipv6/mip6.c rt = (struct ipv6_rt_hdr *)(nh + offset); nh 427 net/ipv6/mip6.c exthdr = (struct ipv6_opt_hdr *)(nh + offset); nh 197 net/ipv6/netfilter/nf_conntrack_reasm.c const unsigned char *nh = skb_network_header(skb); nh 199 net/ipv6/netfilter/nf_conntrack_reasm.c csum_partial(nh, (u8 *)(fhdr + 1) - nh, nh 134 net/ipv6/reassembly.c const unsigned char *nh = skb_network_header(skb); nh 136 net/ipv6/reassembly.c csum_partial(nh, (u8 *)(fhdr + 1) - nh, nh 102 net/ipv6/route.c static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, nh 434 net/ipv6/route.c if ((!match->fib6_nsiblings && !match->nh) || have_oif_match) nh 441 net/ipv6/route.c (!match->nh || nexthop_is_multipath(match->nh))) nh 444 net/ipv6/route.c if (unlikely(match->nh)) { nh 454 net/ipv6/route.c const struct fib6_nh *nh = sibling->fib6_nh; nh 457 net/ipv6/route.c nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound); nh 460 net/ipv6/route.c if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0) nh 468 net/ipv6/route.c res->nh = match->fib6_nh; nh 475 net/ipv6/route.c static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh, nh 480 net/ipv6/route.c if (nh->fib_nh_flags & RTNH_F_DEAD) nh 483 net/ipv6/route.c dev = nh->fib_nh_dev; nh 501 net/ipv6/route.c struct fib6_nh *nh; nh 504 net/ipv6/route.c static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg) nh 508 net/ipv6/route.c arg->nh = nh; nh 509 net/ipv6/route.c return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif, nh 514 net/ipv6/route.c static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh, nh 526 net/ipv6/route.c if (nexthop_is_blackhole(nh)) nh 529 net/ipv6/route.c if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg)) nh 530 net/ipv6/route.c return arg.nh; nh 540 net/ipv6/route.c struct fib6_nh *nh; nh 543 net/ipv6/route.c if (unlikely(f6i->nh)) { nh 544 net/ipv6/route.c nh = nexthop_fib6_nh(f6i->nh); nh 545 net/ipv6/route.c if (nexthop_is_blackhole(f6i->nh)) nh 548 net/ipv6/route.c nh = f6i->fib6_nh; nh 550 net/ipv6/route.c if (!(nh->fib_nh_flags & RTNH_F_DEAD)) nh 557 net/ipv6/route.c if (unlikely(spf6i->nh)) { nh 558 net/ipv6/route.c nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr, nh 560 net/ipv6/route.c if (nh) nh 563 net/ipv6/route.c nh = spf6i->fib6_nh; nh 564 net/ipv6/route.c if (__rt6_device_match(net, nh, saddr, oif, flags)) nh 575 net/ipv6/route.c nh = res->f6i->fib6_nh; nh 579 net/ipv6/route.c if (unlikely(f6i->nh)) { nh 580 net/ipv6/route.c nh = nexthop_fib6_nh(f6i->nh); nh 581 net/ipv6/route.c if (nexthop_is_blackhole(f6i->nh)) nh 584 net/ipv6/route.c nh = f6i->fib6_nh; nh 587 net/ipv6/route.c if (nh->fib_nh_flags & RTNH_F_DEAD) { nh 589 net/ipv6/route.c nh = res->f6i->fib6_nh; nh 592 net/ipv6/route.c res->nh = nh; nh 600 net/ipv6/route.c res->nh = nh; nh 717 net/ipv6/route.c static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, nh 722 net/ipv6/route.c if (!oif || nh->fib_nh_dev->ifindex == oif) nh 731 net/ipv6/route.c !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) { nh 732 net/ipv6/route.c int n = rt6_check_neigh(nh); nh 739 net/ipv6/route.c static bool 
find_match(struct fib6_nh *nh, u32 fib6_flags, nh 746 net/ipv6/route.c if (nh->fib_nh_flags & RTNH_F_DEAD) nh 749 net/ipv6/route.c if (ip6_ignore_linkdown(nh->fib_nh_dev) && nh 750 net/ipv6/route.c nh->fib_nh_flags & RTNH_F_LINKDOWN && nh 754 net/ipv6/route.c m = rt6_score_route(nh, fib6_flags, oif, strict); nh 763 net/ipv6/route.c rt6_probe(nh); nh 781 net/ipv6/route.c struct fib6_nh *nh; nh 784 net/ipv6/route.c static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg) nh 788 net/ipv6/route.c arg->nh = nh; nh 789 net/ipv6/route.c return find_match(nh, arg->flags, arg->oif, arg->strict, nh 804 net/ipv6/route.c struct fib6_nh *nh; nh 814 net/ipv6/route.c if (unlikely(f6i->nh)) { nh 823 net/ipv6/route.c if (nexthop_is_blackhole(f6i->nh)) { nh 827 net/ipv6/route.c res->nh = nexthop_fib6_nh(f6i->nh); nh 830 net/ipv6/route.c if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match, nh 833 net/ipv6/route.c nh = arg.nh; nh 836 net/ipv6/route.c nh = f6i->fib6_nh; nh 837 net/ipv6/route.c if (find_match(nh, f6i->fib6_flags, oif, strict, nh 843 net/ipv6/route.c res->nh = nh; nh 922 net/ipv6/route.c res->nh = res->f6i->fib6_nh; nh 931 net/ipv6/route.c res->nh->fib_nh_gw_family; nh 1017 net/ipv6/route.c struct net_device *dev = res->nh->fib_nh_dev; nh 1113 net/ipv6/route.c if (res->nh->fib_nh_lws) { nh 1114 net/ipv6/route.c rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws); nh 1132 net/ipv6/route.c const struct fib6_nh *nh = res->nh; nh 1133 net/ipv6/route.c const struct net_device *dev = nh->fib_nh_dev; nh 1141 net/ipv6/route.c if (nh->fib_nh_gw_family) { nh 1142 net/ipv6/route.c rt->rt6i_gateway = nh->fib_nh_gw6; nh 1188 net/ipv6/route.c struct net_device *dev = res->nh->fib_nh_dev; nh 1392 net/ipv6/route.c if (f6i->nh) nh 1408 net/ipv6/route.c pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); nh 1413 net/ipv6/route.c p = this_cpu_ptr(res->nh->rt6i_pcpu); nh 1435 net/ipv6/route.c p = this_cpu_ptr(res->nh->rt6i_pcpu); nh 1585 net/ipv6/route.c const struct fib6_nh *nh = res->nh; nh 1591 net/ipv6/route.c struct net_device *dev = nh->fib_nh_dev; nh 1602 net/ipv6/route.c return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu); nh 1613 net/ipv6/route.c struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh, nh 1619 net/ipv6/route.c bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, nh 1622 net/ipv6/route.c bucket = rcu_dereference(nh->rt6i_exception_bucket); nh 1643 net/ipv6/route.c static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh, nh 1649 net/ipv6/route.c bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, nh 1655 net/ipv6/route.c rcu_assign_pointer(nh->rt6i_exception_bucket, bucket); nh 1666 net/ipv6/route.c struct fib6_nh *nh = res->nh; nh 1671 net/ipv6/route.c bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, nh 1680 net/ipv6/route.c rcu_assign_pointer(nh->rt6i_exception_bucket, bucket); nh 1738 net/ipv6/route.c static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from) nh 1747 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); nh 1753 net/ipv6/route.c fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock); nh 1768 net/ipv6/route.c static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg) nh 1772 net/ipv6/route.c fib6_nh_flush_exceptions(nh, f6i); nh 1779 net/ipv6/route.c if (f6i->nh) nh 1780 net/ipv6/route.c nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, nh 1814 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(res->nh, NULL); nh 1832 net/ipv6/route.c static 
int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen, nh 1840 net/ipv6/route.c if (!rcu_access_pointer(nh->rt6i_exception_bucket)) nh 1844 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); nh 1875 net/ipv6/route.c static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg) nh 1880 net/ipv6/route.c err = fib6_nh_remove_exception(nh, arg->plen, arg->rt); nh 1895 net/ipv6/route.c if (from->nh) { nh 1903 net/ipv6/route.c rc = nexthop_for_each_fib6_nh(from->nh, nh 1916 net/ipv6/route.c static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen, nh 1923 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, NULL); nh 1946 net/ipv6/route.c static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg) nh 1950 net/ipv6/route.c if (arg->dev != nh->fib_nh_dev || nh 1951 net/ipv6/route.c (arg->gw && !nh->fib_nh_gw_family) || nh 1952 net/ipv6/route.c (!arg->gw && nh->fib_nh_gw_family) || nh 1953 net/ipv6/route.c (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6))) nh 1956 net/ipv6/route.c arg->match = nh; nh 1973 net/ipv6/route.c if (from->nh) { nh 1979 net/ipv6/route.c nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg); nh 2015 net/ipv6/route.c const struct fib6_nh *nh, int mtu) nh 2021 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); nh 2043 net/ipv6/route.c static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh, nh 2051 net/ipv6/route.c if (!rcu_access_pointer(nh->rt6i_exception_bucket)) nh 2055 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); nh 2120 net/ipv6/route.c static void fib6_nh_age_exceptions(const struct fib6_nh *nh, nh 2129 net/ipv6/route.c if (!rcu_access_pointer(nh->rt6i_exception_bucket)) nh 2134 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock); nh 2154 net/ipv6/route.c static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg) nh 2158 net/ipv6/route.c fib6_nh_age_exceptions(nh, arg->gc_args, arg->now); nh 2166 net/ipv6/route.c if (f6i->nh) { nh 2172 net/ipv6/route.c nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions, nh 2239 net/ipv6/route.c !res.nh->fib_nh_gw_family)) { nh 2771 net/ipv6/route.c if (res.f6i->nh) { nh 2777 net/ipv6/route.c nexthop_for_each_fib6_nh(res.f6i->nh, nh 2786 net/ipv6/route.c res.nh = arg.match; nh 2788 net/ipv6/route.c res.nh = res.f6i->fib6_nh; nh 2875 net/ipv6/route.c const struct fib6_nh *nh = res->nh; nh 2877 net/ipv6/route.c if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family || nh 2878 net/ipv6/route.c fl6->flowi6_oif != nh->fib_nh_dev->ifindex) nh 2886 net/ipv6/route.c if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) { nh 2907 net/ipv6/route.c static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg) nh 2911 net/ipv6/route.c arg->res->nh = nh; nh 2964 net/ipv6/route.c if (unlikely(rt->nh)) { nh 2965 net/ipv6/route.c if (nexthop_is_blackhole(rt->nh)) nh 2968 net/ipv6/route.c if (nexthop_for_each_fib6_nh(rt->nh, nh 2973 net/ipv6/route.c res.nh = rt->fib6_nh; nh 2994 net/ipv6/route.c res.nh = rt->fib6_nh; nh 3128 net/ipv6/route.c const struct fib6_nh *nh = res->nh; nh 3144 net/ipv6/route.c struct net_device *dev = nh->fib_nh_dev; nh 3154 net/ipv6/route.c return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu); nh 3264 net/ipv6/route.c (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) { nh 3291 net/ipv6/route.c res.nh->fib_nh_gw_family || nh 3292 net/ipv6/route.c (dev && dev != res.nh->fib_nh_dev)) nh 3304 net/ipv6/route.c res.nh->fib_nh_gw_family) nh 
3316 net/ipv6/route.c if (dev != res.nh->fib_nh_dev) nh 3319 net/ipv6/route.c *_dev = dev = res.nh->fib_nh_dev; nh 3580 net/ipv6/route.c struct nexthop *nh = NULL; nh 3619 net/ipv6/route.c nh = nexthop_find_by_id(net, cfg->fc_nh_id); nh 3620 net/ipv6/route.c if (!nh) { nh 3624 net/ipv6/route.c err = fib6_check_nexthop(nh, cfg, extack); nh 3645 net/ipv6/route.c rt = fib6_info_alloc(gfp_flags, !nh); nh 3685 net/ipv6/route.c if (nh) { nh 3686 net/ipv6/route.c if (!nexthop_get(nh)) { nh 3694 net/ipv6/route.c rt->nh = nh; nh 3695 net/ipv6/route.c fib6_nh = nexthop_fib6_nh(rt->nh); nh 3850 net/ipv6/route.c struct fib6_nh *nh) nh 3854 net/ipv6/route.c .nh = nh, nh 3870 net/ipv6/route.c static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg) nh 3875 net/ipv6/route.c rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh); nh 3886 net/ipv6/route.c return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg); nh 3912 net/ipv6/route.c struct fib6_nh *nh; nh 3914 net/ipv6/route.c if (rt->nh && cfg->fc_nh_id && nh 3915 net/ipv6/route.c rt->nh->id != cfg->fc_nh_id) nh 3921 net/ipv6/route.c if (rt->nh) { nh 3926 net/ipv6/route.c nh = rt->fib6_nh; nh 3927 net/ipv6/route.c rc = ip6_del_cached_rt(cfg, rt, nh); nh 3942 net/ipv6/route.c if (rt->nh) { nh 3952 net/ipv6/route.c nh = rt->fib6_nh; nh 3954 net/ipv6/route.c (!nh->fib_nh_dev || nh 3955 net/ipv6/route.c nh->fib_nh_dev->ifindex != cfg->fc_ifindex)) nh 3958 net/ipv6/route.c !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6)) nh 4070 net/ipv6/route.c if (res.f6i->nh) { nh 4076 net/ipv6/route.c nexthop_for_each_fib6_nh(res.f6i->nh, nh 4084 net/ipv6/route.c res.nh = arg.match; nh 4086 net/ipv6/route.c res.nh = res.f6i->fib6_nh; nh 4141 net/ipv6/route.c if (rt->nh) nh 4206 net/ipv6/route.c struct fib6_nh *nh; nh 4209 net/ipv6/route.c if (rt->nh) nh 4212 net/ipv6/route.c nh = rt->fib6_nh; nh 4213 net/ipv6/route.c if (dev == nh->fib_nh_dev && nh 4215 net/ipv6/route.c ipv6_addr_equal(&nh->fib_nh_gw6, addr)) nh 4467 net/ipv6/route.c if (!rt->nh && nh 4496 net/ipv6/route.c struct fib6_nh *nh; nh 4499 net/ipv6/route.c if (rt->nh) nh 4502 net/ipv6/route.c nh = rt->fib6_nh; nh 4504 net/ipv6/route.c nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6)) nh 4511 net/ipv6/route.c fib6_nh_exceptions_clean_tohost(nh, gateway); nh 4628 net/ipv6/route.c if (rt != net->ipv6.fib6_null_entry && !rt->nh && nh 4714 net/ipv6/route.c if (rt == net->ipv6.fib6_null_entry || rt->nh) nh 4780 net/ipv6/route.c static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg) nh 4790 net/ipv6/route.c if (nh->fib_nh_dev == arg->dev) { nh 4799 net/ipv6/route.c rt6_exceptions_update_pmtu(idev, nh, arg->mtu); nh 4825 net/ipv6/route.c if (f6i->nh) { nh 4827 net/ipv6/route.c return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change, nh 5018 net/ipv6/route.c struct rt6_nh *nh; nh 5021 net/ipv6/route.c list_for_each_entry(nh, rt6_nh_list, next) { nh 5023 net/ipv6/route.c if (rt6_duplicate_nexthop(nh->fib6_info, rt)) nh 5027 net/ipv6/route.c nh = kzalloc(sizeof(*nh), GFP_KERNEL); nh 5028 net/ipv6/route.c if (!nh) nh 5030 net/ipv6/route.c nh->fib6_info = rt; nh 5031 net/ipv6/route.c memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); nh 5032 net/ipv6/route.c list_add_tail(&nh->next, rt6_nh_list); nh 5068 net/ipv6/route.c struct rt6_nh *nh, *nh_safe; nh 5153 net/ipv6/route.c list_for_each_entry(nh, &rt6_nh_list, next) { nh 5154 net/ipv6/route.c err = __ip6_ins_rt(nh->fib6_info, info, extack); nh 5155 net/ipv6/route.c fib6_info_release(nh->fib6_info); nh 5159 net/ipv6/route.c rt_last = 
nh->fib6_info; nh 5163 net/ipv6/route.c rt_notif = nh->fib6_info; nh 5167 net/ipv6/route.c nh->fib6_info = NULL; nh 5172 net/ipv6/route.c err_nh = nh; nh 5211 net/ipv6/route.c list_for_each_entry(nh, &rt6_nh_list, next) { nh 5212 net/ipv6/route.c if (err_nh == nh) nh 5214 net/ipv6/route.c ip6_route_del(&nh->r_cfg, extack); nh 5218 net/ipv6/route.c list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { nh 5219 net/ipv6/route.c if (nh->fib6_info) nh 5220 net/ipv6/route.c fib6_info_release(nh->fib6_info); nh 5221 net/ipv6/route.c list_del(&nh->next); nh 5222 net/ipv6/route.c kfree(nh); nh 5310 net/ipv6/route.c static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg) nh 5318 net/ipv6/route.c if (nh->fib_nh_lws) { nh 5320 net/ipv6/route.c *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); nh 5332 net/ipv6/route.c if (f6i->nh) { nh 5334 net/ipv6/route.c nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size, nh 5337 net/ipv6/route.c struct fib6_nh *nh = f6i->fib6_nh; nh 5344 net/ipv6/route.c + lwtunnel_get_encap_size(nh->fib_nh_lws); nh 5348 net/ipv6/route.c nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); nh 5367 net/ipv6/route.c static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh, nh 5370 net/ipv6/route.c if (nexthop_is_multipath(nh)) { nh 5377 net/ipv6/route.c if (nexthop_mpath_fill_node(skb, nh, AF_INET6)) nh 5384 net/ipv6/route.c fib6_nh = nexthop_fib6_nh(nh); nh 5526 net/ipv6/route.c } else if (rt->nh) { nh 5527 net/ipv6/route.c if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id)) nh 5530 net/ipv6/route.c if (nexthop_is_blackhole(rt->nh)) nh 5533 net/ipv6/route.c if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0) nh 5565 net/ipv6/route.c static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg) nh 5569 net/ipv6/route.c if (nh->fib_nh_dev == dev) nh 5578 net/ipv6/route.c if (f6i->nh) { nh 5581 net/ipv6/route.c return !!nexthop_for_each_fib6_nh(f6i->nh, nh 5610 net/ipv6/route.c static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg) nh 5618 net/ipv6/route.c bucket = fib6_nh_get_excptn_bucket(nh, NULL); nh 5713 net/ipv6/route.c if (rt->nh) { nh 5714 net/ipv6/route.c err = nexthop_for_each_fib6_nh(rt->nh, nh 95 net/mpls/af_mpls.c static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh) nh 97 net/mpls/af_mpls.c return (u8 *)nh + rt->rt_via_offset; nh 101 net/mpls/af_mpls.c const struct mpls_nh *nh) nh 103 net/mpls/af_mpls.c return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh); nh 106 net/mpls/af_mpls.c static unsigned int mpls_nh_header_size(const struct mpls_nh *nh) nh 109 net/mpls/af_mpls.c return nh->nh_labels * sizeof(struct mpls_shim_hdr); nh 256 net/mpls/af_mpls.c unsigned int nh_flags = READ_ONCE(nh->nh_flags); nh 261 net/mpls/af_mpls.c return nh; nh 347 net/mpls/af_mpls.c struct mpls_nh *nh; nh 390 net/mpls/af_mpls.c nh = mpls_select_multipath(rt, skb); nh 391 net/mpls/af_mpls.c if (!nh) nh 411 net/mpls/af_mpls.c out_dev = rcu_dereference(nh->nh_dev); nh 416 net/mpls/af_mpls.c new_header_size = mpls_nh_header_size(nh); nh 444 net/mpls/af_mpls.c for (i = nh->nh_labels - 1; i >= 0; i--) { nh 445 net/mpls/af_mpls.c hdr[i] = mpls_entry_encode(nh->nh_label[i], nh 454 net/mpls/af_mpls.c if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC) nh 458 net/mpls/af_mpls.c err = neigh_xmit(nh->nh_via_table, out_dev, nh 459 net/mpls/af_mpls.c mpls_nh_via(rt, nh), skb); nh 646 net/mpls/af_mpls.c struct mpls_nh *nh, int oif) nh 651 net/mpls/af_mpls.c switch (nh->nh_via_table) { nh 653 net/mpls/af_mpls.c dev = inet_fib_lookup_dev(net, 
mpls_nh_via(rt, nh)); nh 656 net/mpls/af_mpls.c dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh)); nh 678 net/mpls/af_mpls.c struct mpls_nh *nh, int oif) nh 683 net/mpls/af_mpls.c dev = find_outdev(net, rt, nh, oif); nh 695 net/mpls/af_mpls.c if ((nh->nh_via_table == NEIGH_LINK_TABLE) && nh 696 net/mpls/af_mpls.c (dev->addr_len != nh->nh_via_alen)) nh 699 net/mpls/af_mpls.c RCU_INIT_POINTER(nh->nh_dev, dev); nh 702 net/mpls/af_mpls.c nh->nh_flags |= RTNH_F_DEAD; nh 708 net/mpls/af_mpls.c nh->nh_flags |= RTNH_F_LINKDOWN; nh 769 net/mpls/af_mpls.c struct mpls_nh *nh = rt->rt_nh; nh 773 net/mpls/af_mpls.c if (!nh) nh 776 net/mpls/af_mpls.c nh->nh_labels = cfg->rc_output_labels; nh 777 net/mpls/af_mpls.c for (i = 0; i < nh->nh_labels; i++) nh 778 net/mpls/af_mpls.c nh->nh_label[i] = cfg->rc_output_label[i]; nh 780 net/mpls/af_mpls.c nh->nh_via_table = cfg->rc_via_table; nh 781 net/mpls/af_mpls.c memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen); nh 782 net/mpls/af_mpls.c nh->nh_via_alen = cfg->rc_via_alen; nh 784 net/mpls/af_mpls.c err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex); nh 788 net/mpls/af_mpls.c if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) nh 798 net/mpls/af_mpls.c struct mpls_nh *nh, int oif, struct nlattr *via, nh 804 net/mpls/af_mpls.c if (!nh) nh 808 net/mpls/af_mpls.c err = nla_get_labels(newdst, max_labels, &nh->nh_labels, nh 809 net/mpls/af_mpls.c nh->nh_label, extack); nh 815 net/mpls/af_mpls.c err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, nh 816 net/mpls/af_mpls.c __mpls_nh_via(rt, nh), extack); nh 820 net/mpls/af_mpls.c nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC; nh 823 net/mpls/af_mpls.c err = mpls_nh_assign_dev(net, rt, nh, oif); nh 916 net/mpls/af_mpls.c err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, nh 922 net/mpls/af_mpls.c if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) nh 1511 net/mpls/af_mpls.c unsigned int nh_flags = nh->nh_flags; nh 1513 net/mpls/af_mpls.c if (rtnl_dereference(nh->nh_dev) != dev) nh 1526 net/mpls/af_mpls.c RCU_INIT_POINTER(nh->nh_dev, NULL); nh 1528 net/mpls/af_mpls.c if (nh->nh_flags != nh_flags) nh 1529 net/mpls/af_mpls.c WRITE_ONCE(nh->nh_flags, nh_flags); nh 1533 net/mpls/af_mpls.c if (!rtnl_dereference(nh->nh_dev)) nh 1561 net/mpls/af_mpls.c unsigned int nh_flags = nh->nh_flags; nh 1563 net/mpls/af_mpls.c rtnl_dereference(nh->nh_dev); nh 1573 net/mpls/af_mpls.c WRITE_ONCE(nh->nh_flags, nh_flags); nh 1997 net/mpls/af_mpls.c const struct mpls_nh *nh = rt->rt_nh; nh 1999 net/mpls/af_mpls.c if (nh->nh_labels && nh 2000 net/mpls/af_mpls.c nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, nh 2001 net/mpls/af_mpls.c nh->nh_label)) nh 2003 net/mpls/af_mpls.c if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && nh 2004 net/mpls/af_mpls.c nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), nh 2005 net/mpls/af_mpls.c nh->nh_via_alen)) nh 2007 net/mpls/af_mpls.c dev = rtnl_dereference(nh->nh_dev); nh 2010 net/mpls/af_mpls.c if (nh->nh_flags & RTNH_F_LINKDOWN) nh 2012 net/mpls/af_mpls.c if (nh->nh_flags & RTNH_F_DEAD) nh 2025 net/mpls/af_mpls.c dev = rtnl_dereference(nh->nh_dev); nh 2034 net/mpls/af_mpls.c if (nh->nh_flags & RTNH_F_LINKDOWN) { nh 2038 net/mpls/af_mpls.c if (nh->nh_flags & RTNH_F_DEAD) { nh 2043 net/mpls/af_mpls.c if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST, nh 2044 net/mpls/af_mpls.c nh->nh_labels, nh 2045 net/mpls/af_mpls.c nh->nh_label)) nh 2047 net/mpls/af_mpls.c if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && nh 2048 net/mpls/af_mpls.c nla_put_via(skb, nh->nh_via_table, nh 
2049 net/mpls/af_mpls.c mpls_nh_via(rt, nh), nh 2050 net/mpls/af_mpls.c nh->nh_via_alen)) nh 2139 net/mpls/af_mpls.c struct mpls_nh *nh = rt->rt_nh; nh 2141 net/mpls/af_mpls.c nh_dev = rtnl_dereference(nh->nh_dev); nh 2146 net/mpls/af_mpls.c nh_dev = rtnl_dereference(nh->nh_dev); nh 2222 net/mpls/af_mpls.c struct mpls_nh *nh = rt->rt_nh; nh 2224 net/mpls/af_mpls.c if (nh->nh_dev) nh 2226 net/mpls/af_mpls.c if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */ nh 2227 net/mpls/af_mpls.c payload += nla_total_size(2 + nh->nh_via_alen); nh 2228 net/mpls/af_mpls.c if (nh->nh_labels) /* RTA_NEWDST */ nh 2229 net/mpls/af_mpls.c payload += nla_total_size(nh->nh_labels * 4); nh 2235 net/mpls/af_mpls.c if (!rtnl_dereference(nh->nh_dev)) nh 2239 net/mpls/af_mpls.c if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) nh 2240 net/mpls/af_mpls.c nhsize += nla_total_size(2 + nh->nh_via_alen); nh 2241 net/mpls/af_mpls.c if (nh->nh_labels) nh 2242 net/mpls/af_mpls.c nhsize += nla_total_size(nh->nh_labels * 4); nh 2351 net/mpls/af_mpls.c struct mpls_nh *nh; nh 2441 net/mpls/af_mpls.c nh = mpls_select_multipath(rt, skb); nh 2442 net/mpls/af_mpls.c if (!nh) { nh 2472 net/mpls/af_mpls.c if (nh->nh_labels && nh 2473 net/mpls/af_mpls.c nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, nh 2474 net/mpls/af_mpls.c nh->nh_label)) nh 2477 net/mpls/af_mpls.c if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && nh 2478 net/mpls/af_mpls.c nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), nh 2479 net/mpls/af_mpls.c nh->nh_via_alen)) nh 2481 net/mpls/af_mpls.c dev = rtnl_dereference(nh->nh_dev); nh 161 net/mpls/internal.h int nhsel; struct mpls_nh *nh; u8 *__nh; \ nh 162 net/mpls/internal.h for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh); \ nh 164 net/mpls/internal.h __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++) nh 167 net/mpls/internal.h int nhsel; struct mpls_nh *nh; u8 *__nh; \ nh 168 net/mpls/internal.h for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh), \ nh 171 net/mpls/internal.h __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++) nh 25 net/netfilter/nf_nat_masquerade.c __be32 newsrc, nh; nh 41 net/netfilter/nf_nat_masquerade.c nh = rt_nexthop(rt, ip_hdr(skb)->daddr); nh 42 net/netfilter/nf_nat_masquerade.c newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); nh 16 net/nsh/nsh.c struct nshhdr *nh; nh 33 net/nsh/nsh.c nh = (struct nshhdr *)(skb->data); nh 34 net/nsh/nsh.c memcpy(nh, pushed_nh, length); nh 35 net/nsh/nsh.c nh->np = next_proto; nh 36 net/nsh/nsh.c skb_postpush_rcsum(skb, nh, length); nh 49 net/nsh/nsh.c struct nshhdr *nh; nh 55 net/nsh/nsh.c nh = (struct nshhdr *)(skb->data); nh 56 net/nsh/nsh.c length = nsh_hdr_len(nh); nh 59 net/nsh/nsh.c inner_proto = tun_p_to_eth_p(nh->np); nh 314 net/openvswitch/actions.c const struct nshhdr *nh) nh 318 net/openvswitch/actions.c err = nsh_push(skb, nh); nh 345 net/openvswitch/actions.c static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh, nh 350 net/openvswitch/actions.c if (nh->frag_off & htons(IP_OFFSET)) nh 353 net/openvswitch/actions.c if (nh->protocol == IPPROTO_TCP) { nh 357 net/openvswitch/actions.c } else if (nh->protocol == IPPROTO_UDP) { nh 371 net/openvswitch/actions.c static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, nh 374 net/openvswitch/actions.c update_ip_l4_checksum(skb, nh, *addr, new_addr); nh 375 net/openvswitch/actions.c csum_replace4(&nh->check, *addr, new_addr); nh 427 net/openvswitch/actions.c static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) nh 
430 net/openvswitch/actions.c OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); nh 431 net/openvswitch/actions.c OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); nh 432 net/openvswitch/actions.c OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); nh 435 net/openvswitch/actions.c static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, nh 438 net/openvswitch/actions.c new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask); nh 440 net/openvswitch/actions.c csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); nh 441 net/openvswitch/actions.c nh->ttl = new_ttl; nh 448 net/openvswitch/actions.c struct iphdr *nh; nh 457 net/openvswitch/actions.c nh = ip_hdr(skb); nh 464 net/openvswitch/actions.c new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src); nh 466 net/openvswitch/actions.c if (unlikely(new_addr != nh->saddr)) { nh 467 net/openvswitch/actions.c set_ip_addr(skb, nh, &nh->saddr, new_addr); nh 472 net/openvswitch/actions.c new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst); nh 474 net/openvswitch/actions.c if (unlikely(new_addr != nh->daddr)) { nh 475 net/openvswitch/actions.c set_ip_addr(skb, nh, &nh->daddr, new_addr); nh 480 net/openvswitch/actions.c ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos); nh 481 net/openvswitch/actions.c flow_key->ip.tos = nh->tos; nh 484 net/openvswitch/actions.c set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl); nh 485 net/openvswitch/actions.c flow_key->ip.ttl = nh->ttl; nh 500 net/openvswitch/actions.c struct ipv6hdr *nh; nh 508 net/openvswitch/actions.c nh = ipv6_hdr(skb); nh 515 net/openvswitch/actions.c __be32 *saddr = (__be32 *)&nh->saddr; nh 531 net/openvswitch/actions.c __be32 *daddr = (__be32 *)&nh->daddr; nh 537 net/openvswitch/actions.c if (ipv6_ext_hdr(nh->nexthdr)) nh 550 net/openvswitch/actions.c ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); nh 551 net/openvswitch/actions.c flow_key->ip.tos = ipv6_get_dsfield(nh); nh 554 net/openvswitch/actions.c set_ipv6_fl(nh, ntohl(key->ipv6_label), nh 557 net/openvswitch/actions.c *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); nh 560 net/openvswitch/actions.c OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, nh 562 net/openvswitch/actions.c flow_key->ip.ttl = nh->hop_limit; nh 570 net/openvswitch/actions.c struct nshhdr *nh; nh 588 net/openvswitch/actions.c nh = nsh_hdr(skb); nh 589 net/openvswitch/actions.c length = nsh_hdr_len(nh); nh 597 net/openvswitch/actions.c nh = nsh_hdr(skb); nh 598 net/openvswitch/actions.c skb_postpull_rcsum(skb, nh, length); nh 599 net/openvswitch/actions.c flags = nsh_get_flags(nh); nh 602 net/openvswitch/actions.c ttl = nsh_get_ttl(nh); nh 605 net/openvswitch/actions.c nsh_set_flags_and_ttl(nh, flags, ttl); nh 606 net/openvswitch/actions.c nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr, nh 608 net/openvswitch/actions.c flow_key->nsh.base.path_hdr = nh->path_hdr; nh 609 net/openvswitch/actions.c switch (nh->mdtype) { nh 612 net/openvswitch/actions.c nh->md1.context[i] = nh 613 net/openvswitch/actions.c OVS_MASKED(nh->md1.context[i], key.context[i], nh 616 net/openvswitch/actions.c memcpy(flow_key->nsh.context, nh->md1.context, nh 617 net/openvswitch/actions.c sizeof(nh->md1.context)); nh 626 net/openvswitch/actions.c skb_postpush_rcsum(skb, nh, length); nh 1310 net/openvswitch/actions.c struct nshhdr *nh = (struct nshhdr *)buffer; nh 1312 net/openvswitch/actions.c err = nsh_hdr_from_nlattr(nla_data(a), nh, nh 1316 net/openvswitch/actions.c err = push_nsh(skb, key, 
nh); nh 248 net/openvswitch/flow.c struct ipv6hdr *nh; nh 251 net/openvswitch/flow.c err = check_header(skb, nh_ofs + sizeof(*nh)); nh 255 net/openvswitch/flow.c nh = ipv6_hdr(skb); nh 258 net/openvswitch/flow.c key->ip.tos = ipv6_get_dsfield(nh); nh 259 net/openvswitch/flow.c key->ip.ttl = nh->hop_limit; nh 260 net/openvswitch/flow.c key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); nh 261 net/openvswitch/flow.c key->ipv6.addr.src = nh->saddr; nh 262 net/openvswitch/flow.c key->ipv6.addr.dst = nh->daddr; nh 481 net/openvswitch/flow.c struct nshhdr *nh; nh 490 net/openvswitch/flow.c nh = nsh_hdr(skb); nh 491 net/openvswitch/flow.c version = nsh_get_ver(nh); nh 492 net/openvswitch/flow.c length = nsh_hdr_len(nh); nh 501 net/openvswitch/flow.c nh = nsh_hdr(skb); nh 502 net/openvswitch/flow.c key->nsh.base.flags = nsh_get_flags(nh); nh 503 net/openvswitch/flow.c key->nsh.base.ttl = nsh_get_ttl(nh); nh 504 net/openvswitch/flow.c key->nsh.base.mdtype = nh->mdtype; nh 505 net/openvswitch/flow.c key->nsh.base.np = nh->np; nh 506 net/openvswitch/flow.c key->nsh.base.path_hdr = nh->path_hdr; nh 511 net/openvswitch/flow.c memcpy(key->nsh.context, nh->md1.context, nh 512 net/openvswitch/flow.c sizeof(nh->md1)); nh 516 net/openvswitch/flow.c sizeof(nh->md1)); nh 538 net/openvswitch/flow.c struct iphdr *nh; nh 552 net/openvswitch/flow.c nh = ip_hdr(skb); nh 553 net/openvswitch/flow.c key->ipv4.addr.src = nh->saddr; nh 554 net/openvswitch/flow.c key->ipv4.addr.dst = nh->daddr; nh 556 net/openvswitch/flow.c key->ip.proto = nh->protocol; nh 557 net/openvswitch/flow.c key->ip.tos = nh->tos; nh 558 net/openvswitch/flow.c key->ip.ttl = nh->ttl; nh 560 net/openvswitch/flow.c offset = nh->frag_off & htons(IP_OFFSET); nh 566 net/openvswitch/flow.c if (nh->frag_off & htons(IP_MF) || nh 1281 net/openvswitch/flow_netlink.c struct nshhdr *nh, size_t size) nh 1303 net/openvswitch/flow_netlink.c nh->np = base->np; nh 1304 net/openvswitch/flow_netlink.c nh->mdtype = base->mdtype; nh 1305 net/openvswitch/flow_netlink.c nh->path_hdr = base->path_hdr; nh 1312 net/openvswitch/flow_netlink.c memcpy(&nh->md1, nla_data(a), mdlen); nh 1319 net/openvswitch/flow_netlink.c memcpy(&nh->md2, nla_data(a), mdlen); nh 1328 net/openvswitch/flow_netlink.c nh->ver_flags_ttl_len = 0; nh 1329 net/openvswitch/flow_netlink.c nsh_set_flags_ttl_len(nh, flags, ttl, NSH_BASE_HDR_LEN + mdlen); nh 70 net/openvswitch/flow_netlink.h int nsh_hdr_from_nlattr(const struct nlattr *attr, struct nshhdr *nh, nh 3379 net/xfrm/xfrm_policy.c const unsigned char *nh = skb_network_header(skb); nh 3387 net/xfrm/xfrm_policy.c nexthdr = nh[nhoff]; nh 3399 net/xfrm/xfrm_policy.c while (nh + offset + sizeof(*exthdr) < skb->data || nh 3400 net/xfrm/xfrm_policy.c pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) { nh 3401 net/xfrm/xfrm_policy.c nh = skb_network_header(skb); nh 3402 net/xfrm/xfrm_policy.c exthdr = (struct ipv6_opt_hdr *)(nh + offset); nh 3413 net/xfrm/xfrm_policy.c exthdr = (struct ipv6_opt_hdr *)(nh + offset); nh 3420 net/xfrm/xfrm_policy.c if (!onlyproto && (nh + offset + 4 < skb->data || nh 3421 net/xfrm/xfrm_policy.c pskb_may_pull(skb, nh + offset + 4 - skb->data))) { nh 3424 net/xfrm/xfrm_policy.c nh = skb_network_header(skb); nh 3425 net/xfrm/xfrm_policy.c ports = (__be16 *)(nh + offset); nh 3432 net/xfrm/xfrm_policy.c if (!onlyproto && (nh + offset + 2 < skb->data || nh 3433 net/xfrm/xfrm_policy.c pskb_may_pull(skb, nh + offset + 2 - skb->data))) { nh 3436 net/xfrm/xfrm_policy.c nh = skb_network_header(skb); nh 3437 
net/xfrm/xfrm_policy.c icmp = (u8 *)(nh + offset); nh 3446 net/xfrm/xfrm_policy.c if (!onlyproto && (nh + offset + 3 < skb->data || nh 3447 net/xfrm/xfrm_policy.c pskb_may_pull(skb, nh + offset + 3 - skb->data))) { nh 3450 net/xfrm/xfrm_policy.c nh = skb_network_header(skb); nh 3451 net/xfrm/xfrm_policy.c mh = (struct ip6_mh *)(nh + offset); nh 94 samples/bpf/xdp_router_ipv4_user.c struct nlmsghdr *nh; nh 104 samples/bpf/xdp_router_ipv4_user.c nh = (struct nlmsghdr *)buf_ptr; nh 106 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == NLMSG_DONE) nh 122 samples/bpf/xdp_router_ipv4_user.c static void read_route(struct nlmsghdr *nh, int nll) nh 148 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == RTM_DELROUTE) nh 150 samples/bpf/xdp_router_ipv4_user.c else if (nh->nlmsg_type == RTM_GETROUTE) nh 152 samples/bpf/xdp_router_ipv4_user.c else if (nh->nlmsg_type == RTM_NEWROUTE) nh 155 samples/bpf/xdp_router_ipv4_user.c printf("%d\n", nh->nlmsg_type); nh 159 samples/bpf/xdp_router_ipv4_user.c for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) { nh 160 samples/bpf/xdp_router_ipv4_user.c rt_msg = (struct rtmsg *)NLMSG_DATA(nh); nh 166 samples/bpf/xdp_router_ipv4_user.c rtl = RTM_PAYLOAD(nh); nh 221 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == RTM_DELROUTE) { nh 259 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == RTM_DELROUTE) { nh 315 samples/bpf/xdp_router_ipv4_user.c struct nlmsghdr *nh; nh 367 samples/bpf/xdp_router_ipv4_user.c nh = (struct nlmsghdr *)buf; nh 368 samples/bpf/xdp_router_ipv4_user.c read_route(nh, nll); nh 377 samples/bpf/xdp_router_ipv4_user.c static void read_arp(struct nlmsghdr *nh, int nll) nh 394 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == RTM_GETNEIGH) nh 397 samples/bpf/xdp_router_ipv4_user.c for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) { nh 398 samples/bpf/xdp_router_ipv4_user.c rt_msg = (struct ndmsg *)NLMSG_DATA(nh); nh 401 samples/bpf/xdp_router_ipv4_user.c rtl = RTM_PAYLOAD(nh); nh 423 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == RTM_DELNEIGH) { nh 426 samples/bpf/xdp_router_ipv4_user.c } else if (nh->nlmsg_type == RTM_NEWNEIGH) { nh 436 samples/bpf/xdp_router_ipv4_user.c if (nh->nlmsg_type == RTM_DELNEIGH) { nh 439 samples/bpf/xdp_router_ipv4_user.c } else if (nh->nlmsg_type == RTM_NEWNEIGH) { nh 455 samples/bpf/xdp_router_ipv4_user.c struct nlmsghdr *nh; nh 505 samples/bpf/xdp_router_ipv4_user.c nh = (struct nlmsghdr *)buf; nh 506 samples/bpf/xdp_router_ipv4_user.c read_arp(nh, nll); nh 523 samples/bpf/xdp_router_ipv4_user.c struct nlmsghdr *nh; nh 593 samples/bpf/xdp_router_ipv4_user.c nh = (struct nlmsghdr *)buf; nh 595 samples/bpf/xdp_router_ipv4_user.c read_route(nh, nll); nh 606 samples/bpf/xdp_router_ipv4_user.c nh = (struct nlmsghdr *)buf; nh 607 samples/bpf/xdp_router_ipv4_user.c read_arp(nh, nll); nh 287 scripts/dtc/libfdt/fdt_ro.c const struct fdt_node_header *nh = fdt_offset_ptr_(fdt, nodeoffset); nh 295 scripts/dtc/libfdt/fdt_ro.c nameptr = nh->name; nh 321 scripts/dtc/libfdt/fdt_rw.c struct fdt_node_header *nh; nh 343 scripts/dtc/libfdt/fdt_rw.c nh = fdt_offset_ptr_w_(fdt, offset); nh 344 scripts/dtc/libfdt/fdt_rw.c nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE; nh 346 scripts/dtc/libfdt/fdt_rw.c err = fdt_splice_struct_(fdt, nh, 0, nodelen); nh 350 scripts/dtc/libfdt/fdt_rw.c nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); nh 351 scripts/dtc/libfdt/fdt_rw.c memset(nh->name, 0, FDT_TAGALIGN(namelen+1)); nh 352 scripts/dtc/libfdt/fdt_rw.c memcpy(nh->name, name, namelen); nh 353 
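The libfdt lines above write a node header by storing a big-endian FDT_BEGIN_NODE tag and copying the node name into a tag-aligned slot. The sketch below emits the same record layout into a plain byte buffer without depending on libfdt; emit_begin_node is a hypothetical helper, with constants taken from the flattened-device-tree format.

/* Self-contained sketch of emitting an FDT_BEGIN_NODE record the way
 * the libfdt lines above do: a big-endian tag word followed by the
 * NUL-terminated node name padded to a 4-byte boundary. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl() for the big-endian tag */

#define FDT_BEGIN_NODE 0x1u
#define FDT_TAGSIZE    4u
#define FDT_TAGALIGN(x) (((x) + (FDT_TAGSIZE - 1)) & ~(FDT_TAGSIZE - 1))

/* Append one begin-node record to buf at *off; returns 0, or -1 if the
 * buffer is too small. */
static int emit_begin_node(uint8_t *buf, size_t bufsize, size_t *off,
			   const char *name)
{
	size_t namelen = strlen(name);
	size_t reclen = FDT_TAGSIZE + FDT_TAGALIGN(namelen + 1);
	uint32_t tag = htonl(FDT_BEGIN_NODE);

	if (*off + reclen > bufsize)
		return -1;

	memcpy(buf + *off, &tag, FDT_TAGSIZE);
	memset(buf + *off + FDT_TAGSIZE, 0, FDT_TAGALIGN(namelen + 1));
	memcpy(buf + *off + FDT_TAGSIZE, name, namelen);
	*off += reclen;
	return 0;
}

int main(void)
{
	uint8_t buf[64];
	size_t off = 0;

	if (emit_begin_node(buf, sizeof(buf), &off, "cpus") == 0)
		printf("emitted %zu bytes for node \"cpus\"\n", off);

	for (size_t i = 0; i < off; i++)
		printf("%02x%s", buf[i], (i % 4 == 3) ? "\n" : " ");
	return 0;
}
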
scripts/dtc/libfdt/fdt_rw.c endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE); nh 213 scripts/dtc/libfdt/fdt_sw.c struct fdt_node_header *nh; nh 219 scripts/dtc/libfdt/fdt_sw.c nh = fdt_grab_space_(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen)); nh 220 scripts/dtc/libfdt/fdt_sw.c if (! nh) nh 223 scripts/dtc/libfdt/fdt_sw.c nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); nh 224 scripts/dtc/libfdt/fdt_sw.c memcpy(nh->name, name, namelen); nh 79 tools/lib/bpf/netlink.c struct nlmsghdr *nh; nh 94 tools/lib/bpf/netlink.c for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); nh 95 tools/lib/bpf/netlink.c nh = NLMSG_NEXT(nh, len)) { nh 96 tools/lib/bpf/netlink.c if (nh->nlmsg_pid != nl_pid) { nh 100 tools/lib/bpf/netlink.c if (nh->nlmsg_seq != seq) { nh 104 tools/lib/bpf/netlink.c if (nh->nlmsg_flags & NLM_F_MULTI) nh 106 tools/lib/bpf/netlink.c switch (nh->nlmsg_type) { nh 108 tools/lib/bpf/netlink.c err = (struct nlmsgerr *)NLMSG_DATA(nh); nh 112 tools/lib/bpf/netlink.c libbpf_nla_dump_errormsg(nh); nh 120 tools/lib/bpf/netlink.c ret = _fn(nh, fn, cookie); nh 136 tools/lib/bpf/netlink.c struct nlmsghdr nh; nh 147 tools/lib/bpf/netlink.c req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); nh 148 tools/lib/bpf/netlink.c req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; nh 149 tools/lib/bpf/netlink.c req.nh.nlmsg_type = RTM_SETLINK; nh 150 tools/lib/bpf/netlink.c req.nh.nlmsg_pid = 0; nh 151 tools/lib/bpf/netlink.c req.nh.nlmsg_seq = ++seq; nh 157 tools/lib/bpf/netlink.c + NLMSG_ALIGN(req.nh.nlmsg_len)); nh 177 tools/lib/bpf/netlink.c req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); nh 179 tools/lib/bpf/netlink.c if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { nh 491 tools/testing/selftests/net/msg_zerocopy.c } nh; nh 526 tools/testing/selftests/net/msg_zerocopy.c iov[1].iov_len = setup_iph(&nh.iph, cfg_payload_len); nh 528 tools/testing/selftests/net/msg_zerocopy.c iov[1].iov_len = setup_ip6h(&nh.ip6h, cfg_payload_len); nh 530 tools/testing/selftests/net/msg_zerocopy.c iov[1].iov_base = (void *) &nh; nh 361 tools/testing/selftests/net/udpgso.c struct nlmsghdr *nh; nh 364 tools/testing/selftests/net/udpgso.c char data[NLMSG_ALIGN(sizeof(*nh)) + nh 379 tools/testing/selftests/net/udpgso.c nh = (void *)data; nh 380 tools/testing/selftests/net/udpgso.c nh->nlmsg_type = RTM_NEWROUTE; nh 381 tools/testing/selftests/net/udpgso.c nh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE; nh 382 tools/testing/selftests/net/udpgso.c off += NLMSG_ALIGN(sizeof(*nh)); nh 420 tools/testing/selftests/net/udpgso.c nh->nlmsg_len = off;
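The samples/bpf and tools/lib/bpf entries above parse rtnetlink replies by iterating struct nlmsghdr with NLMSG_OK()/NLMSG_NEXT() and stopping on NLMSG_DONE or NLMSG_ERROR. A minimal Linux-only sketch of that pattern, sending an RTM_GETROUTE dump and counting RTM_NEWROUTE replies, follows; error handling is deliberately thin.

/* Hedged userspace sketch of the netlink dump pattern above: request an
 * IPv4 route dump and walk the replies message by message. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct rtmsg rtm;
	} req = {
		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg)),
		.nh.nlmsg_type  = RTM_GETROUTE,
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.nh.nlmsg_seq   = 1,
		.rtm.rtm_family = AF_INET,
	};
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	int done = 0, routes = 0;

	if (fd < 0 || send(fd, &req, req.nh.nlmsg_len, 0) < 0) {
		perror("netlink");
		return 1;
	}

	while (!done) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_type == NLMSG_DONE) {
				done = 1;
				break;
			}
			if (nh->nlmsg_type == NLMSG_ERROR) {
				fprintf(stderr, "netlink error\n");
				done = 1;
				break;
			}
			if (nh->nlmsg_type == RTM_NEWROUTE)
				routes++;
		}
	}
	printf("dumped %d IPv4 routes\n", routes);
	close(fd);
	return 0;
}

Route dumps typically need no elevated privileges, so this prints the number of IPv4 routes in the routing tables when run as an ordinary user on Linux.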