Lines Matching refs:dst (cross-reference hits for "dst" in the DECnet route cache code, net/decnet/dn_route.c)
114 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115 static unsigned int dn_dst_mtu(const struct dst_entry *dst);
120 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
122 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
124 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
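Taken together, the prototypes matched at 114-124 are the per-family dst_entry callbacks. They are normally gathered into a struct dst_ops table; the initializer below is a hedged sketch using the generic field names from <net/dst.h>. The actual dn_dst_ops definition is not among the matches, so the gc_thresh value and the pairing of handlers to fields are assumptions inferred from the other hits in this listing.

/* Sketch only: how the matched callbacks would plug into a dst_ops
 * table.  Field names are the generic struct dst_ops ones; gc_thresh
 * and the exact set of handlers are assumptions, not quoted code. */
static struct dst_ops dn_dst_ops_sketch = {
        .family          = PF_DECnet,
        .gc_thresh       = 128,                    /* assumed threshold */
        .gc              = dn_dst_gc,              /* matched at 244-250 */
        .check           = dn_dst_check,           /* matched at 307 */
        .default_advmss  = dn_dst_default_advmss,  /* matched at 114, 859 */
        .mtu             = dn_dst_mtu,             /* matched at 115, 864 */
        .destroy         = dn_dst_destroy,         /* matched at 154 */
        .ifdown          = dn_dst_ifdown,          /* matched at 163 */
        .negative_advice = dn_dst_negative_advice, /* matched at 312 */
        .update_pmtu     = dn_dst_update_pmtu,     /* matched at 120, 270 */
        .redirect        = dn_dst_redirect,        /* matched at 122, 299 */
        .neigh_lookup    = dn_dst_neigh_lookup,    /* matched at 124, 871 */
};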
154 static void dn_dst_destroy(struct dst_entry *dst) in dn_dst_destroy() argument
156 struct dn_route *rt = (struct dn_route *) dst; in dn_dst_destroy()
160 dst_destroy_metrics_generic(dst); in dn_dst_destroy()
163 static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how) in dn_dst_ifdown() argument
166 struct dn_route *rt = (struct dn_route *) dst; in dn_dst_ifdown()
177 static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) in dn_hash() argument
179 __u16 tmp = (__u16 __force)(src ^ dst); in dn_hash()
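The dn_hash() matches at 177-179 show a bucket hash built from the XOR of the 16-bit source and destination addresses. Below is a standalone model of that style of hash; only the XOR step comes from the matches, while the fold shifts and the mask parameter are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* Userspace model of a dn_hash()-style bucket hash.  The fold shifts
 * and mask are assumptions chosen to spread both address bytes over a
 * small bucket table; only src ^ dst is taken from the listing. */
static unsigned int model_dn_hash(uint16_t src, uint16_t dst,
                                  unsigned int hash_mask)
{
        uint16_t tmp = src ^ dst;       /* mix both endpoints */

        tmp ^= tmp >> 3;                /* fold high bits down */
        tmp ^= tmp >> 5;
        tmp ^= tmp >> 10;
        return hash_mask & (unsigned int)tmp;
}

int main(void)
{
        /* Two arbitrary 16-bit addresses and a 256-bucket mask. */
        printf("bucket %u\n", model_dn_hash(0x0401, 0x0a02, 255));
        return 0;
}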
188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); in dnrt_free()
193 dst_release(&rt->dst); in dnrt_drop()
194 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); in dnrt_drop()
211 if (atomic_read(&rt->dst.__refcnt) || in dn_dst_check_expire()
212 (now - rt->dst.lastuse) < expire) { in dn_dst_check_expire()
213 rtp = &rt->dst.dn_next; in dn_dst_check_expire()
216 *rtp = rt->dst.dn_next; in dn_dst_check_expire()
217 rt->dst.dn_next = NULL; in dn_dst_check_expire()
244 if (atomic_read(&rt->dst.__refcnt) || in dn_dst_gc()
245 (now - rt->dst.lastuse) < expire) { in dn_dst_gc()
246 rtp = &rt->dst.dn_next; in dn_dst_gc()
249 *rtp = rt->dst.dn_next; in dn_dst_gc()
250 rt->dst.dn_next = NULL; in dn_dst_gc()
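The matches at 211-217 and 244-250 are two instances of the same bucket walk: keep entries that are still referenced or were used within the expiry window, and unlink and free the rest. A self-contained model of that unlink pattern follows; the struct layout and the free step are simplified stand-ins (the kernel defers the free through RCU, as at 188).

#include <stdlib.h>

/* Userspace model of the expiry walk in dn_dst_check_expire()/dn_dst_gc().
 * The fields stand in for rt->dst.__refcnt, rt->dst.lastuse and
 * rt->dst.dn_next; locking and the RCU-deferred free are omitted. */
struct model_route {
        struct model_route *next;
        int refcnt;
        unsigned long lastuse;
};

static void expire_chain(struct model_route **chain,
                         unsigned long now, unsigned long expire)
{
        struct model_route **rtp = chain;
        struct model_route *rt;

        while ((rt = *rtp) != NULL) {
                if (rt->refcnt || (now - rt->lastuse) < expire) {
                        rtp = &rt->next;        /* still in use: keep it */
                        continue;
                }
                *rtp = rt->next;                /* unlink from the bucket */
                rt->next = NULL;
                free(rt);                       /* kernel: dnrt_drop()/dnrt_free() */
        }
}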
270 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, in dn_dst_update_pmtu() argument
273 struct dn_route *rt = (struct dn_route *) dst; in dn_dst_update_pmtu()
285 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { in dn_dst_update_pmtu()
286 if (!(dst_metric_locked(dst, RTAX_MTU))) { in dn_dst_update_pmtu()
287 dst_metric_set(dst, RTAX_MTU, mtu); in dn_dst_update_pmtu()
288 dst_set_expires(dst, dn_rt_mtu_expires); in dn_dst_update_pmtu()
290 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { in dn_dst_update_pmtu()
292 u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS); in dn_dst_update_pmtu()
294 dst_metric_set(dst, RTAX_ADVMSS, mss); in dn_dst_update_pmtu()
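Pieced together, the dn_dst_update_pmtu() matches at 285-294 clamp the cached MTU metric (with an expiry) and lower the advertised MSS to match. The sketch below shows how those fragments connect; the minimum-MTU value and the exact MSS computation are assumptions, since the lines between the matches are not shown.

/* Sketch only: glue between the matched lines is assumed, not quoted
 * from the file.  min_mtu in particular is a placeholder. */
static void sketch_update_pmtu(struct dst_entry *dst, u32 mtu)
{
        u32 min_mtu = 230;      /* placeholder floor, not from the matches */

        if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
                if (!dst_metric_locked(dst, RTAX_MTU)) {
                        dst_metric_set(dst, RTAX_MTU, mtu);
                        dst_set_expires(dst, dn_rt_mtu_expires);
                }
                if (!dst_metric_locked(dst, RTAX_ADVMSS)) {
                        u32 mss = dn_mss_from_pmtu(dst->dev, mtu);
                        u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);

                        if (!existing_mss || mss < existing_mss)
                                dst_metric_set(dst, RTAX_ADVMSS, mss);
                }
        }
}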
299 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, in dn_dst_redirect() argument
307 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie) in dn_dst_check() argument
312 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) in dn_dst_negative_advice() argument
314 dst_release(dst); in dn_dst_negative_advice()
345 *rthp = rth->dst.dn_next; in dn_insert_route()
346 rcu_assign_pointer(rth->dst.dn_next, in dn_insert_route()
350 dst_use(&rth->dst, now); in dn_insert_route()
357 rthp = &rth->dst.dn_next; in dn_insert_route()
360 rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain); in dn_insert_route()
363 dst_use(&rt->dst, now); in dn_insert_route()
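The dn_insert_route() matches at 345-363 show the cache insert: walk the bucket chain and, if an equivalent route is already cached, move it to the head and reuse it (the caller then drops the new entry); otherwise link the new route at the head. A simplified userspace model of that pattern is below; the single key field stands in for the flow comparison, and the bucket lock and rcu_assign_pointer() publication are omitted.

#include <stddef.h>

/* Userspace model of the dn_insert_route() insert-or-promote pattern. */
struct model_route {
        struct model_route *next;       /* rt->dst.dn_next */
        unsigned long lastuse;          /* what dst_use() refreshes */
        int key;
};

static struct model_route *insert_route(struct model_route **chain,
                                        struct model_route *rt,
                                        unsigned long now)
{
        struct model_route **rthp = chain;
        struct model_route *rth;

        while ((rth = *rthp) != NULL) {
                if (rth->key == rt->key) {
                        *rthp = rth->next;      /* unlink the existing entry */
                        rth->next = *chain;     /* re-link it at the head    */
                        *chain = rth;
                        rth->lastuse = now;
                        return rth;             /* caller frees rt           */
                }
                rthp = &rth->next;
        }
        rt->next = *chain;                      /* no duplicate: insert new  */
        *chain = rt;
        rt->lastuse = now;
        return rt;
}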
381 next = rcu_dereference_raw(rt->dst.dn_next); in dn_run_flush()
382 RCU_INIT_POINTER(rt->dst.dn_next, NULL); in dn_run_flush()
437 __le16 *dst; in dn_return_short() local
450 dst = (__le16 *)ptr; in dn_return_short()
456 swap(*src, *dst); in dn_return_short()
530 le16_to_cpu(cb->src), le16_to_cpu(cb->dst), in dn_route_rx_packet()
560 cb->dst = dn_eth2dn(ptr); in dn_route_rx_long()
598 cb->dst = *(__le16 *)ptr; in dn_route_rx_short()
749 struct dst_entry *dst = skb_dst(skb); in dn_output() local
750 struct dn_route *rt = (struct dn_route *)dst; in dn_output()
751 struct net_device *dev = dst->dev; in dn_output()
762 cb->dst = rt->rt_daddr; in dn_output()
788 struct dst_entry *dst = skb_dst(skb); in dn_forward() local
789 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr); in dn_forward()
802 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) in dn_forward()
811 skb->dev = rt->dst.dev; in dn_forward()
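The dn_forward() matches at 802-811 show the usual forwarding step: make sure the skb has headroom for the outgoing device's link-layer header, then point the skb at the route's output device. A hedged sketch of just that step (the hop-count and flag handling around it are not among the matches and are omitted):

/* Sketch of the headroom/retarget step visible in dn_forward(). */
static int sketch_forward_prep(struct sk_buff *skb, struct dn_route *rt,
                               int header_len)
{
        /* Grow headroom (copying if needed) for the new device's header. */
        if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev) + header_len))
                return -ENOMEM;

        skb->dev = rt->dst.dev;         /* hand off to the route's device */
        return 0;
}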
840 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); in dn_rt_bug_sk()
852 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); in dn_rt_bug()
859 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst) in dn_dst_default_advmss() argument
861 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst)); in dn_dst_default_advmss()
864 static unsigned int dn_dst_mtu(const struct dst_entry *dst) in dn_dst_mtu() argument
866 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); in dn_dst_mtu()
868 return mtu ? : dst->dev->mtu; in dn_dst_mtu()
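The `return mtu ? : dst->dev->mtu;` matched at 868 uses the GCC conditional-expression extension that omits the middle operand; in standard C it reads as below (the cached RTAX_MTU metric if set, otherwise the device MTU).

/* Standard-C equivalent of the GCC "x ? : y" shorthand at line 868. */
static unsigned int sketch_dst_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        return mtu ? mtu : dst->dev->mtu;
}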
871 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, in dn_dst_neigh_lookup() argument
875 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev); in dn_dst_neigh_lookup()
881 struct net_device *dev = rt->dst.dev; in dn_rt_set_next_hop()
889 dst_init_metrics(&rt->dst, fi->fib_metrics, true); in dn_rt_set_next_hop()
900 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) in dn_rt_set_next_hop()
901 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu); in dn_rt_set_next_hop()
902 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); in dn_rt_set_next_hop()
904 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); in dn_rt_set_next_hop()
906 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss); in dn_rt_set_next_hop()
1207 rt->dst.lastuse = jiffies; in dn_route_output_slow()
1208 rt->dst.output = dn_output; in dn_route_output_slow()
1209 rt->dst.input = dn_rt_bug; in dn_route_output_slow()
1212 rt->dst.input = dn_nsp_rx; in dn_route_output_slow()
1241 dst_free(&rt->dst); in dn_route_output_slow()
1257 rt = rcu_dereference_bh(rt->dst.dn_next)) { in __dn_route_output_key()
1263 dst_use(&rt->dst, jiffies); in __dn_route_output_key()
1265 *pprt = &rt->dst; in __dn_route_output_key()
1320 .daddr = cb->dst, in dn_route_input_slow()
1357 if (!dn_dev_islocal(in_dev, cb->dst)) in dn_route_input_slow()
1421 fld.saddr = cb->dst; in dn_route_input_slow()
1465 rt->fld.daddr = cb->dst; in dn_route_input_slow()
1471 rt->dst.lastuse = jiffies; in dn_route_input_slow()
1472 rt->dst.output = dn_rt_bug_sk; in dn_route_input_slow()
1475 rt->dst.input = dn_forward; in dn_route_input_slow()
1478 rt->dst.output = dn_output; in dn_route_input_slow()
1479 rt->dst.input = dn_nsp_rx; in dn_route_input_slow()
1480 rt->dst.dev = in_dev; in dn_route_input_slow()
1486 rt->dst.input = dst_discard; in dn_route_input_slow()
1496 skb_dst_set(skb, &rt->dst); in dn_route_input_slow()
1518 dst_free(&rt->dst); in dn_route_input_slow()
1526 unsigned int hash = dn_hash(cb->src, cb->dst); in dn_route_input()
1533 rt = rcu_dereference(rt->dst.dn_next)) { in dn_route_input()
1535 (rt->fld.daddr == cb->dst) && in dn_route_input()
1539 dst_use(&rt->dst, jiffies); in dn_route_input()
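The lookup loops matched in __dn_route_output_key() (1257-1265) and dn_route_input() (1526-1539) share one shape: hash the flow with dn_hash(), walk that bucket under the RCU(-bh) read lock, and on a key match refresh the entry with dst_use() and return it; otherwise fall through to the slow path. A simplified userspace model of the walk, with the key fields and locking reduced to the essentials:

#include <stddef.h>

/* Userspace model of the route-cache lookup; saddr/daddr stand in for
 * the rt->fld flow key checked in the matches, and lastuse/uses for the
 * counters dst_use() updates. */
struct cached_route {
        struct cached_route *next;      /* rt->dst.dn_next */
        unsigned short saddr, daddr;
        unsigned long lastuse;
        unsigned long uses;
};

static struct cached_route *cache_lookup(struct cached_route *chain,
                                         unsigned short saddr,
                                         unsigned short daddr,
                                         unsigned long now)
{
        struct cached_route *rt;

        for (rt = chain; rt != NULL; rt = rt->next) {
                if (rt->saddr == saddr && rt->daddr == daddr) {
                        rt->lastuse = now;      /* dst_use(&rt->dst, now) */
                        rt->uses++;
                        return rt;
                }
        }
        return NULL;                            /* miss: take the slow path */
}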
1585 if (rt->dst.dev && in dn_rt_fill_info()
1586 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0) in dn_rt_fill_info()
1601 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) in dn_rt_fill_info()
1604 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; in dn_rt_fill_info()
1605 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, in dn_rt_fill_info()
1606 rt->dst.error) < 0) in dn_rt_fill_info()
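The dn_rt_fill_info() matches at 1585-1606 emit the per-route netlink attributes for a cache dump or RTM_GETROUTE reply. A sketch of how those calls chain together; the rtmsg header setup and the address attributes around them are assumed, not quoted.

/* Sketch of the attribute emission seen in dn_rt_fill_info(). */
static int sketch_fill_route(struct sk_buff *skb, struct dn_route *rt)
{
        long expires;

        if (rt->dst.dev &&
            nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
                return -EMSGSIZE;

        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                return -EMSGSIZE;

        expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
                return -EMSGSIZE;

        return 0;
}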
1684 cb->dst = fld.daddr; in dn_cache_getroute()
1690 if (!err && -rt->dst.error) in dn_cache_getroute()
1691 err = rt->dst.error; in dn_cache_getroute()
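One readability note on the match at 1690: the unary minus inside the boolean test cannot change its truth value (a value and its negation are zero or non-zero together), so the two matched lines behave exactly like the plain form below, copying dst.error into err when no earlier error is pending.

/* Behavioural equivalent of the matched test at 1690-1691. */
static int sketch_propagate_dst_error(int err, const struct dn_route *rt)
{
        if (!err && rt->dst.error)
                err = rt->dst.error;
        return err;
}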
1702 skb_dst_set(skb, &rt->dst); in dn_cache_getroute()
1751 rt = rcu_dereference_bh(rt->dst.dn_next), idx++) { in dn_cache_dump()
1754 skb_dst_set(skb, dst_clone(&rt->dst)); in dn_cache_dump()
1797 rt = rcu_dereference_bh(rt->dst.dn_next); in dn_rt_cache_get_next()
1838 rt->dst.dev ? rt->dst.dev->name : "*", in dn_rt_cache_seq_show()
1841 atomic_read(&rt->dst.__refcnt), in dn_rt_cache_seq_show()
1842 rt->dst.__use, 0); in dn_rt_cache_seq_show()