/linux-4.1.27/net/sched/ |
D | cls_route.c |
      131  struct route4_head *head = rcu_dereference_bh(tp->root);  in route4_classify()
      168  b = rcu_dereference_bh(head->table[h]);  in route4_classify()
      170  for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);  in route4_classify()
      172       f = rcu_dereference_bh(f->next))  in route4_classify()
      176  for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);  in route4_classify()
      178       f = rcu_dereference_bh(f->next))  in route4_classify()
      182  for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);  in route4_classify()
      184       f = rcu_dereference_bh(f->next))  in route4_classify()
|
D | cls_u32.c |
      112  struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);  in u32_classify()
      124  n = rcu_dereference_bh(ht->ht[sel]);  in u32_classify()
      137  n = rcu_dereference_bh(n->next);  in u32_classify()
      155  n = rcu_dereference_bh(n->next);  in u32_classify()
      164  ht = rcu_dereference_bh(n->ht_down);  in u32_classify()
      172  n = rcu_dereference_bh(n->next);  in u32_classify()
      181  n = rcu_dereference_bh(n->next);  in u32_classify()
      187  n = rcu_dereference_bh(n->next);  in u32_classify()
      198  ht = rcu_dereference_bh(n->ht_down);  in u32_classify()
      240  ht = rcu_dereference_bh(n->ht_up);  in u32_classify()
|
D | cls_rsvp.h |
      134  struct rsvp_head *head = rcu_dereference_bh(tp->root);  in rsvp_classify()
      175  for (s = rcu_dereference_bh(head->ht[h1]); s;  in rsvp_classify()
      176       s = rcu_dereference_bh(s->next)) {  in rsvp_classify()
      188  for (f = rcu_dereference_bh(s->ht[h2]); f;  in rsvp_classify()
      189       f = rcu_dereference_bh(f->next)) {  in rsvp_classify()
      213  for (f = rcu_dereference_bh(s->ht[16]); f;  in rsvp_classify()
      214       f = rcu_dereference_bh(f->next)) {  in rsvp_classify()
|
D | cls_fw.c |
      62  struct fw_head *head = rcu_dereference_bh(tp->root);  in fw_classify()
      70  for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;  in fw_classify()
      71       f = rcu_dereference_bh(f->next)) {  in fw_classify()
|
D | sch_ingress.c | 63 struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); in ingress_enqueue()
|
D | cls_cgroup.c | 32 struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); in cls_cgroup_classify()
|
D | cls_basic.c | 44 struct basic_head *head = rcu_dereference_bh(tp->root); in basic_classify()
|
D | sch_prio.c | 44 fl = rcu_dereference_bh(q->filter_list); in prio_classify()
|
D | sch_multiq.c | 45 struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); in multiq_classify()
|
D | cls_bpf.c | 65 struct cls_bpf_head *head = rcu_dereference_bh(tp->root); in cls_bpf_classify()
|
D | sch_teql.c | 103 q = rcu_dereference_bh(dat_queue->qdisc); in teql_dequeue()
|
D | sch_dsmark.c | 232 struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); in dsmark_enqueue()
|
D | sch_htb.c |
      226  tcf = rcu_dereference_bh(cl->filter_list);  in htb_classify()
      228  tcf = rcu_dereference_bh(q->filter_list);  in htb_classify()
      254  tcf = rcu_dereference_bh(cl->filter_list);  in htb_classify()
|
D | sch_drr.c | 333 fl = rcu_dereference_bh(q->filter_list); in drr_classify()
|
D | sch_choke.c | 209 fl = rcu_dereference_bh(q->filter_list); in choke_classify()
|
D | sch_fq_codel.c | 97 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
|
D | cls_tcindex.c | 84 struct tcindex_data *p = rcu_dereference_bh(tp->root); in tcindex_classify()
|
D | sch_atm.c | 376 fl = rcu_dereference_bh(flow->filter_list); in atm_tc_enqueue()
|
D | cls_flow.c | 282 struct flow_head *head = rcu_dereference_bh(tp->root); in flow_classify()
|
D | sch_sfb.c | 310 fl = rcu_dereference_bh(q->filter_list); in sfb_enqueue()
|
D | sch_hfsc.c |
      1167  tcf = rcu_dereference_bh(q->root.filter_list);  in hfsc_classify()
      1191  tcf = rcu_dereference_bh(cl->filter_list);  in hfsc_classify()
|
D | sch_sfq.c | 198 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
|
D | sch_qfq.c | 721 fl = rcu_dereference_bh(q->filter_list); in qfq_classify()
|
D | sch_api.c | 1824 for (; tp; tp = rcu_dereference_bh(tp->next)) { in tc_classify_compat()
|
D | sch_cbq.c | 239 fl = rcu_dereference_bh(head->filter_list); in cbq_classify()
|
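Every net/sched hit above sits in a classify()/enqueue() path. Those paths run with bottom halves disabled (the RCU-bh read-side critical section is entered higher up the transmit path; see the __dev_queue_xmit() hit under net/core below), so the classifier state hung off tp->root and the per-class filter_list chains are fetched with rcu_dereference_bh(). A minimal sketch of that read-side pattern follows; demo_head, demo_filter, demo_root and demo_classify() are hypothetical stand-ins, not kernel code.

/* Read-side sketch of the tp->root / filter-chain pattern above.
 * The caller must already be in an RCU-bh read-side critical section
 * (rcu_read_lock_bh() held, or running with BH disabled), which is
 * exactly what rcu_dereference_bh() asserts via lockdep.
 */
#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_filter {
        struct demo_filter __rcu *next;
        u32 handle;
        int result;
};

struct demo_head {
        struct demo_filter __rcu *filters;
};

static struct demo_head __rcu *demo_root;   /* updated under a lock elsewhere */

static int demo_classify(u32 handle)
{
        struct demo_head *head = rcu_dereference_bh(demo_root);
        struct demo_filter *f;

        if (!head)
                return -1;

        for (f = rcu_dereference_bh(head->filters); f;
             f = rcu_dereference_bh(f->next)) {
                if (f->handle == handle)
                        return f->result;
        }
        return -1;
}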
/linux-4.1.27/include/linux/ |
D | rculist.h |
      515  for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
      518       pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
      539  for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
      542       pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
|
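The rculist.h hits are the expansions of the BH-flavoured iterators hlist_for_each_entry_rcu_bh() and hlist_for_each_entry_continue_rcu_bh(); callers of those macros get the rcu_dereference_bh() fetches, and the matching lockdep check, for free. A short usage sketch; demo_node, demo_table and demo_lookup() are hypothetical:

#include <linux/rculist.h>
#include <linux/types.h>

struct demo_node {
        struct hlist_node hnode;
        u32 key;
};

#define DEMO_HASH_BITS  4
#define DEMO_HASH_MASK  ((1U << DEMO_HASH_BITS) - 1)
static struct hlist_head demo_table[1U << DEMO_HASH_BITS];

/* Caller must hold rcu_read_lock_bh(); the iterator below expands to
 * the rcu_dereference_bh() calls listed at rculist.h:515/518.
 */
static struct demo_node *demo_lookup(u32 key)
{
        struct demo_node *n;

        hlist_for_each_entry_rcu_bh(n, &demo_table[key & DEMO_HASH_MASK], hnode) {
                if (n->key == key)
                        return n;
        }
        return NULL;
}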
D | rcupdate.h | 859 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) macro
|
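rcupdate.h:859 shows that rcu_dereference_bh(p) is simply rcu_dereference_bh_check(p, 0): the same pointer fetch as rcu_dereference(), but lockdep (under CONFIG_PROVE_RCU) asserts an RCU-bh read-side critical section rather than a plain rcu_read_lock() one. When a pointer is legitimately read both from BH context and under an update-side lock, the _check form takes the extra condition directly. A hedged sketch; demo_cfg, demo_lock and demo_get_limit() are hypothetical:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_cfg {
        int limit;
};

static DEFINE_SPINLOCK(demo_lock);
static struct demo_cfg __rcu *demo_cfg;

static int demo_get_limit(void)
{
        /* Legal either inside rcu_read_lock_bh()/rcu_read_unlock_bh()
         * or with demo_lock held; lockdep accepts both conditions.
         */
        struct demo_cfg *cfg = rcu_dereference_bh_check(demo_cfg,
                                        lockdep_is_held(&demo_lock));

        return cfg ? cfg->limit : 0;
}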
/linux-4.1.27/net/ipv6/ |
D | ip6_flowlabel.c |
      61  for (fl = rcu_dereference_bh(fl_ht[(hash)]); \
      63       fl = rcu_dereference_bh(fl->next))
      65  for (fl = rcu_dereference_bh(fl->next); \
      67       fl = rcu_dereference_bh(fl->next))
      70  for (sfl = rcu_dereference_bh(np->ipv6_fl_list); \
      72       sfl = rcu_dereference_bh(sfl->next))
|
D | ip6_fib.c |
      1949  node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));  in ipv6_route_seq_next_table()
      1956  node = rcu_dereference_bh(  in ipv6_route_seq_next_table()
|
/linux-4.1.27/net/netfilter/ipset/ |
D | ip_set_hash_gen.h |
      14   #ifndef rcu_dereference_bh
      15   #define rcu_dereference_bh(p) rcu_dereference(p)  macro
      480  t = rcu_dereference_bh(h->table);  in mtype_expire()
      637  t = rcu_dereference_bh(h->table);  in mtype_add()
      744  t = rcu_dereference_bh(h->table);  in mtype_del()
      811  struct htable *t = rcu_dereference_bh(h->table);  in mtype_test_cidrs()
      875  t = rcu_dereference_bh(h->table);  in mtype_test()
|
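The first two ip_set_hash_gen.h hits are not readers at all but a compatibility guard: if the target kernel does not provide the BH-flavoured accessor, ipset falls back to plain rcu_dereference(). The remaining hits are ordinary fetches of the hash-table pointer from the expire/add/del/test paths. The guard, reproduced for clarity (the #endif sits on a later line that the identifier search does not show):

/* Compatibility shim as at ip_set_hash_gen.h:14-15. */
#ifndef rcu_dereference_bh
#define rcu_dereference_bh(p)   rcu_dereference(p)
#endif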
/linux-4.1.27/include/net/ |
D | neighbour.h |
      278  struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);  in ___neigh_lookup_noref()
      283  for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);  in ___neigh_lookup_noref()
      285       n = rcu_dereference_bh(n->next)) {  in ___neigh_lookup_noref()
|
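___neigh_lookup_noref() above (like the neigh_lookup_nodev() and neigh_dump_table() hits under net/core below) shows a two-level pattern: the resizable hash table itself is an RCU-protected pointer, and each bucket is an RCU-protected chain, so both levels are fetched with rcu_dereference_bh() inside a single RCU-bh critical section. A self-contained sketch; demo_table, demo_entry, demo_tbl and demo_key_present() are hypothetical, not the kernel's neighbour structures:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_entry {
        struct demo_entry __rcu *next;
        u32 key;
};

struct demo_table {
        struct demo_entry __rcu **buckets;
        unsigned int hash_mask;
};

static struct demo_table __rcu *demo_tbl;       /* swapped wholesale on resize */

static bool demo_key_present(u32 key)
{
        struct demo_table *tbl;
        struct demo_entry *e;
        bool found = false;

        rcu_read_lock_bh();
        tbl = rcu_dereference_bh(demo_tbl);
        if (tbl) {
                for (e = rcu_dereference_bh(tbl->buckets[key & tbl->hash_mask]);
                     e; e = rcu_dereference_bh(e->next)) {
                        if (e->key == key) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock_bh();
        return found;
}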
D | sch_generic.h | 494 struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab); in qdisc_calculate_pkt_len()
|
/linux-4.1.27/net/core/ |
D | neighbour.c |
      427   nht = rcu_dereference_bh(tbl->nht);  in neigh_lookup_nodev()
      430   for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);  in neigh_lookup_nodev()
      432        n = rcu_dereference_bh(n->next)) {  in neigh_lookup_nodev()
      1820  nht = rcu_dereference_bh(tbl->nht);  in neightbl_fill_info()
      2240  nht = rcu_dereference_bh(tbl->nht);  in neigh_dump_table()
      2245  for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;  in neigh_dump_table()
      2247       n = rcu_dereference_bh(n->next)) {  in neigh_dump_table()
      2357  nht = rcu_dereference_bh(tbl->nht);  in neigh_for_each()
      2363  for (n = rcu_dereference_bh(nht->hash_buckets[chain]);  in neigh_for_each()
      2365       n = rcu_dereference_bh(n->next))  in neigh_for_each()
      [all …]
|
D | netpoll.c |
      181  struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);  in netpoll_poll_dev()
      328  npinfo = rcu_dereference_bh(np->dev->npinfo);  in netpoll_send_skb_on_dev()
|
D | dev.c |
      2870  struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);  in skb_update_prio()
      2959  q = rcu_dereference_bh(txq->qdisc);  in __dev_queue_xmit()
|
/linux-4.1.27/drivers/net/team/ |
D | team_mode_activebackup.c | 44 active_port = rcu_dereference_bh(ab_priv(team)->active_port); in ab_transmit()
|
D | team_mode_loadbalance.c |
      126  return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));  in lb_htpm_select_tx_port()
      181  fp = rcu_dereference_bh(lb_priv->fp);  in lb_get_skb_hash()
      215  select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);  in lb_transmit()
|
/linux-4.1.27/net/decnet/ |
D | dn_route.c |
      1256  for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;  in __dn_route_output_key()
      1257       rt = rcu_dereference_bh(rt->dst.dn_next)) {  in __dn_route_output_key()
      1749  for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;  in dn_cache_dump()
      1751      rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {  in dn_cache_dump()
      1785  rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);  in dn_rt_cache_get_first()
      1797  rt = rcu_dereference_bh(rt->dst.dn_next);  in dn_rt_cache_get_next()
      1803  rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);  in dn_rt_cache_get_next()
|
/linux-4.1.27/net/netfilter/ |
D | nfnetlink_log.c |
      952  return rcu_dereference_bh(hlist_first_rcu(head));  in get_first()
      960  h = rcu_dereference_bh(hlist_next_rcu(h));  in get_next()
      970  h = rcu_dereference_bh(hlist_first_rcu(head));  in get_next()
|
/linux-4.1.27/Documentation/RCU/ |
D | lockdep.txt | 28 rcu_dereference_bh(p):
|
D | whatisRCU.txt |
      330  call_rcu_bh() rcu_dereference_bh()
      849  rcu_dereference_bh synchronize_rcu_bh_expedited
|
D | checklist.txt | 299 order to keep lockdep happy, in this case, rcu_dereference_bh().
|
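whatisRCU.txt pairs rcu_dereference_bh() with the _bh update-side primitives: writers publish with rcu_assign_pointer() and then either wait with synchronize_rcu_bh() or defer the free with call_rcu_bh(), since in this kernel the BH flavour is tracked separately from the ordinary one. A hedged update-side sketch, reusing the hypothetical demo_cfg/demo_lock names from the rcupdate.h example above:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_cfg {
        int limit;
};

static DEFINE_SPINLOCK(demo_lock);
static struct demo_cfg __rcu *demo_cfg;

static int demo_set_limit(int limit)
{
        struct demo_cfg *new, *old;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        new->limit = limit;

        spin_lock(&demo_lock);
        old = rcu_dereference_protected(demo_cfg, lockdep_is_held(&demo_lock));
        rcu_assign_pointer(demo_cfg, new);
        spin_unlock(&demo_lock);

        synchronize_rcu_bh();   /* wait out all rcu_read_lock_bh() readers */
        kfree(old);
        return 0;
}

call_rcu_bh() would avoid blocking here, at the cost of deferring the kfree() to a callback.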
/linux-4.1.27/net/caif/ |
D | caif_dev.c | 189 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc)); in transmit()
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
D | ipoib_main.c |
      890  htbl = rcu_dereference_bh(ntbl->htbl);  in ipoib_neigh_get()
      896  for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);  in ipoib_neigh_get()
      898       neigh = rcu_dereference_bh(neigh->hnext)) {  in ipoib_neigh_get()
|
/linux-4.1.27/crypto/ |
D | pcrypt.c | 81 cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); in pcrypt_do_parallel()
|
/linux-4.1.27/kernel/ |
D | padata.c | 115 pd = rcu_dereference_bh(pinst->pd); in padata_do_parallel()
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c | 429 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); in fm10k_type_trans()
|
/linux-4.1.27/drivers/net/bonding/ |
D | bond_main.c | 981 ni = rcu_dereference_bh(slave->dev->npinfo); in bond_poll_controller()
|