Lines matching refs: dst
114 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115 static unsigned int dn_dst_mtu(const struct dst_entry *dst);
120 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
122 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
124 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
154 static void dn_dst_destroy(struct dst_entry *dst) in dn_dst_destroy() argument
156 struct dn_route *rt = (struct dn_route *) dst; in dn_dst_destroy()
160 dst_destroy_metrics_generic(dst); in dn_dst_destroy()
163 static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how) in dn_dst_ifdown() argument
166 struct dn_route *rt = (struct dn_route *) dst; in dn_dst_ifdown()
177 static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) in dn_hash() argument
179 __u16 tmp = (__u16 __force)(src ^ dst); in dn_hash()
188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); in dnrt_free()
193 dst_release(&rt->dst); in dnrt_drop()
194 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); in dnrt_drop()
211 if (atomic_read(&rt->dst.__refcnt) || in dn_dst_check_expire()
212 (now - rt->dst.lastuse) < expire) { in dn_dst_check_expire()
213 rtp = &rt->dst.dn_next; in dn_dst_check_expire()
216 *rtp = rt->dst.dn_next; in dn_dst_check_expire()
217 rt->dst.dn_next = NULL; in dn_dst_check_expire()
244 if (atomic_read(&rt->dst.__refcnt) || in dn_dst_gc()
245 (now - rt->dst.lastuse) < expire) { in dn_dst_gc()
246 rtp = &rt->dst.dn_next; in dn_dst_gc()
249 *rtp = rt->dst.dn_next; in dn_dst_gc()
250 rt->dst.dn_next = NULL; in dn_dst_gc()
270 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, in dn_dst_update_pmtu() argument
273 struct dn_route *rt = (struct dn_route *) dst; in dn_dst_update_pmtu()
285 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { in dn_dst_update_pmtu()
286 if (!(dst_metric_locked(dst, RTAX_MTU))) { in dn_dst_update_pmtu()
287 dst_metric_set(dst, RTAX_MTU, mtu); in dn_dst_update_pmtu()
288 dst_set_expires(dst, dn_rt_mtu_expires); in dn_dst_update_pmtu()
290 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { in dn_dst_update_pmtu()
292 u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS); in dn_dst_update_pmtu()
294 dst_metric_set(dst, RTAX_ADVMSS, mss); in dn_dst_update_pmtu()
299 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, in dn_dst_redirect() argument
307 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie) in dn_dst_check() argument
312 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) in dn_dst_negative_advice() argument
314 dst_release(dst); in dn_dst_negative_advice()
345 *rthp = rth->dst.dn_next; in dn_insert_route()
346 rcu_assign_pointer(rth->dst.dn_next, in dn_insert_route()
350 dst_use(&rth->dst, now); in dn_insert_route()
357 rthp = &rth->dst.dn_next; in dn_insert_route()
360 rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain); in dn_insert_route()
363 dst_use(&rt->dst, now); in dn_insert_route()
381 next = rcu_dereference_raw(rt->dst.dn_next); in dn_run_flush()
382 RCU_INIT_POINTER(rt->dst.dn_next, NULL); in dn_run_flush()
437 __le16 *dst; in dn_return_short() local
450 dst = (__le16 *)ptr; in dn_return_short()
456 swap(*src, *dst); in dn_return_short()
530 le16_to_cpu(cb->src), le16_to_cpu(cb->dst), in dn_route_rx_packet()
560 cb->dst = dn_eth2dn(ptr); in dn_route_rx_long()
598 cb->dst = *(__le16 *)ptr; in dn_route_rx_short()
749 struct dst_entry *dst = skb_dst(skb); in dn_output() local
750 struct dn_route *rt = (struct dn_route *)dst; in dn_output()
751 struct net_device *dev = dst->dev; in dn_output()
762 cb->dst = rt->rt_daddr; in dn_output()
788 struct dst_entry *dst = skb_dst(skb); in dn_forward() local
789 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr); in dn_forward()
800 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) in dn_forward()
809 skb->dev = rt->dst.dev; in dn_forward()
838 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); in dn_rt_bug_out()
850 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); in dn_rt_bug()
857 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst) in dn_dst_default_advmss() argument
859 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst)); in dn_dst_default_advmss()
862 static unsigned int dn_dst_mtu(const struct dst_entry *dst) in dn_dst_mtu() argument
864 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); in dn_dst_mtu()
866 return mtu ? : dst->dev->mtu; in dn_dst_mtu()
869 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, in dn_dst_neigh_lookup() argument
873 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev); in dn_dst_neigh_lookup()
879 struct net_device *dev = rt->dst.dev; in dn_rt_set_next_hop()
887 dst_init_metrics(&rt->dst, fi->fib_metrics, true); in dn_rt_set_next_hop()
898 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) in dn_rt_set_next_hop()
899 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu); in dn_rt_set_next_hop()
900 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); in dn_rt_set_next_hop()
902 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); in dn_rt_set_next_hop()
904 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss); in dn_rt_set_next_hop()
1212 rt->dst.lastuse = jiffies; in dn_route_output_slow()
1213 rt->dst.output = dn_output; in dn_route_output_slow()
1214 rt->dst.input = dn_rt_bug; in dn_route_output_slow()
1217 rt->dst.input = dn_nsp_rx; in dn_route_output_slow()
1246 dst_free(&rt->dst); in dn_route_output_slow()
1262 rt = rcu_dereference_bh(rt->dst.dn_next)) { in __dn_route_output_key()
1268 dst_use(&rt->dst, jiffies); in __dn_route_output_key()
1270 *pprt = &rt->dst; in __dn_route_output_key()
1325 .daddr = cb->dst, in dn_route_input_slow()
1362 if (!dn_dev_islocal(in_dev, cb->dst)) in dn_route_input_slow()
1426 fld.saddr = cb->dst; in dn_route_input_slow()
1470 rt->fld.daddr = cb->dst; in dn_route_input_slow()
1476 rt->dst.lastuse = jiffies; in dn_route_input_slow()
1477 rt->dst.output = dn_rt_bug_out; in dn_route_input_slow()
1480 rt->dst.input = dn_forward; in dn_route_input_slow()
1483 rt->dst.output = dn_output; in dn_route_input_slow()
1484 rt->dst.input = dn_nsp_rx; in dn_route_input_slow()
1485 rt->dst.dev = in_dev; in dn_route_input_slow()
1491 rt->dst.input = dst_discard; in dn_route_input_slow()
1501 skb_dst_set(skb, &rt->dst); in dn_route_input_slow()
1523 dst_free(&rt->dst); in dn_route_input_slow()
1531 unsigned int hash = dn_hash(cb->src, cb->dst); in dn_route_input()
1538 rt = rcu_dereference(rt->dst.dn_next)) { in dn_route_input()
1540 (rt->fld.daddr == cb->dst) && in dn_route_input()
1544 dst_use(&rt->dst, jiffies); in dn_route_input()
1590 if (rt->dst.dev && in dn_rt_fill_info()
1591 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0) in dn_rt_fill_info()
1606 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) in dn_rt_fill_info()
1609 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; in dn_rt_fill_info()
1610 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, in dn_rt_fill_info()
1611 rt->dst.error) < 0) in dn_rt_fill_info()
1689 cb->dst = fld.daddr; in dn_cache_getroute()
1695 if (!err && -rt->dst.error) in dn_cache_getroute()
1696 err = rt->dst.error; in dn_cache_getroute()
1707 skb_dst_set(skb, &rt->dst); in dn_cache_getroute()
1756 rt = rcu_dereference_bh(rt->dst.dn_next), idx++) { in dn_cache_dump()
1759 skb_dst_set(skb, dst_clone(&rt->dst)); in dn_cache_dump()
1802 rt = rcu_dereference_bh(rt->dst.dn_next); in dn_rt_cache_get_next()
1843 rt->dst.dev ? rt->dst.dev->name : "*", in dn_rt_cache_seq_show()
1846 atomic_read(&rt->dst.__refcnt), in dn_rt_cache_seq_show()
1847 rt->dst.__use, 0); in dn_rt_cache_seq_show()