sk_storage 70 net/core/bpf_sk_storage.c struct bpf_sk_storage __rcu *sk_storage;
sk_storage 147 net/core/bpf_sk_storage.c static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
sk_storage 156 net/core/bpf_sk_storage.c sk = sk_storage->sk;
sk_storage 165 net/core/bpf_sk_storage.c &sk_storage->list);
sk_storage 168 net/core/bpf_sk_storage.c sk_storage->sk = NULL;
sk_storage 187 net/core/bpf_sk_storage.c if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
sk_storage 189 net/core/bpf_sk_storage.c RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);
sk_storage 198 net/core/bpf_sk_storage.c struct bpf_sk_storage *sk_storage;
sk_storage 205 net/core/bpf_sk_storage.c sk_storage = rcu_dereference(selem->sk_storage);
sk_storage 206 net/core/bpf_sk_storage.c raw_spin_lock_bh(&sk_storage->lock);
sk_storage 208 net/core/bpf_sk_storage.c free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
sk_storage 209 net/core/bpf_sk_storage.c raw_spin_unlock_bh(&sk_storage->lock);
sk_storage 212 net/core/bpf_sk_storage.c kfree_rcu(sk_storage, rcu);
sk_storage 215 net/core/bpf_sk_storage.c static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
sk_storage 218 net/core/bpf_sk_storage.c RCU_INIT_POINTER(selem->sk_storage, sk_storage);
sk_storage 219 net/core/bpf_sk_storage.c hlist_add_head(&selem->snode, &sk_storage->list);
sk_storage 261 net/core/bpf_sk_storage.c __sk_storage_lookup(struct bpf_sk_storage *sk_storage,
sk_storage 269 net/core/bpf_sk_storage.c sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
sk_storage 274 net/core/bpf_sk_storage.c hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
sk_storage 288 net/core/bpf_sk_storage.c raw_spin_lock_bh(&sk_storage->lock);
sk_storage 290 net/core/bpf_sk_storage.c rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
sk_storage 292 net/core/bpf_sk_storage.c raw_spin_unlock_bh(&sk_storage->lock);
sk_storage 301 net/core/bpf_sk_storage.c struct bpf_sk_storage *sk_storage;
sk_storage 304 net/core/bpf_sk_storage.c sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk_storage 305 net/core/bpf_sk_storage.c if (!sk_storage)
sk_storage 309 net/core/bpf_sk_storage.c return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
sk_storage 330 net/core/bpf_sk_storage.c struct bpf_sk_storage *prev_sk_storage, *sk_storage;
sk_storage 333 net/core/bpf_sk_storage.c err = omem_charge(sk, sizeof(*sk_storage));
sk_storage 337 net/core/bpf_sk_storage.c sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);
sk_storage 338 net/core/bpf_sk_storage.c if (!sk_storage) {
sk_storage 342 net/core/bpf_sk_storage.c INIT_HLIST_HEAD(&sk_storage->list);
sk_storage 343 net/core/bpf_sk_storage.c raw_spin_lock_init(&sk_storage->lock);
sk_storage 344 net/core/bpf_sk_storage.c sk_storage->sk = sk;
sk_storage 346 net/core/bpf_sk_storage.c __selem_link_sk(sk_storage, first_selem);
sk_storage 358 net/core/bpf_sk_storage.c NULL, sk_storage);
sk_storage 377 net/core/bpf_sk_storage.c kfree(sk_storage);
sk_storage 378 net/core/bpf_sk_storage.c atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
sk_storage 394 net/core/bpf_sk_storage.c struct bpf_sk_storage *sk_storage;
sk_storage 405 net/core/bpf_sk_storage.c sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk_storage 406 net/core/bpf_sk_storage.c if (!sk_storage || hlist_empty(&sk_storage->list)) {
sk_storage 431 net/core/bpf_sk_storage.c old_sdata = __sk_storage_lookup(sk_storage, smap, false);
sk_storage 442 net/core/bpf_sk_storage.c raw_spin_lock_bh(&sk_storage->lock);
sk_storage 445 net/core/bpf_sk_storage.c if (unlikely(hlist_empty(&sk_storage->list))) {
sk_storage 455 net/core/bpf_sk_storage.c old_sdata = __sk_storage_lookup(sk_storage, smap, false);
sk_storage 484 net/core/bpf_sk_storage.c __selem_link_sk(sk_storage, selem);
sk_storage 489 net/core/bpf_sk_storage.c __selem_unlink_sk(sk_storage, SELEM(old_sdata), false);
sk_storage 493 net/core/bpf_sk_storage.c raw_spin_unlock_bh(&sk_storage->lock);
sk_storage 497 net/core/bpf_sk_storage.c raw_spin_unlock_bh(&sk_storage->lock);
sk_storage 518 net/core/bpf_sk_storage.c struct bpf_sk_storage *sk_storage;
sk_storage 523 net/core/bpf_sk_storage.c sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk_storage 524 net/core/bpf_sk_storage.c if (!sk_storage) {
sk_storage 538 net/core/bpf_sk_storage.c raw_spin_lock_bh(&sk_storage->lock);
sk_storage 539 net/core/bpf_sk_storage.c hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
sk_storage 544 net/core/bpf_sk_storage.c free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
sk_storage 546 net/core/bpf_sk_storage.c raw_spin_unlock_bh(&sk_storage->lock);
sk_storage 550 net/core/bpf_sk_storage.c kfree_rcu(sk_storage, rcu);
sk_storage 776 net/core/bpf_sk_storage.c struct bpf_sk_storage *sk_storage;
sk_storage 783 net/core/bpf_sk_storage.c sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk_storage 785 net/core/bpf_sk_storage.c if (!sk_storage || hlist_empty(&sk_storage->list))
sk_storage 788 net/core/bpf_sk_storage.c hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
sk_storage 826 net/core/bpf_sk_storage.c new_sk_storage = rcu_dereference(copy_selem->sk_storage);
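
Taken together, the hits above imply the shape of the per-socket container: an RCU-published cache of one slot per map, an hlist of storage elements, a back-pointer to the owning socket, a raw spinlock guarding list and cache updates, and an rcu_head for deferred freeing. A hedged reconstruction, assembled only from the field accesses indexed here (field order, comments, and the cache-size constant in the actual net/core/bpf_sk_storage.c may differ):

/* Hedged sketch of struct bpf_sk_storage, inferred from the accesses above:
 * ->cache[smap->cache_idx], ->list, ->sk, ->lock, kfree_rcu(sk_storage, rcu).
 * BPF_SK_STORAGE_CACHE_SIZE is an assumed name for the cache-array bound. */
struct bpf_sk_storage {
	struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
	struct hlist_head list;   /* list of bpf_sk_storage_elem via selem->snode */
	struct sock *sk;          /* owning socket; set to NULL on final unlink */
	struct rcu_head rcu;      /* freed with kfree_rcu() after the last elem */
	raw_spinlock_t lock;      /* protects list and cache-slot updates */
};

Lookups (lines 261-309) first try the lockless rcu_dereference() of the cache slot and fall back to walking the list; writers (lines 442-497) take the raw spinlock in BH context before linking or unlinking elements.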
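
For orientation, this file is the backend of BPF_MAP_TYPE_SK_STORAGE maps. A minimal BPF-side user, written in the style of the kernel selftests; the map name, counter struct, and cgroup_skb attach point are illustrative assumptions, not taken from this file:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct pkt_cnt {
	__u64 egress_pkts;                    /* hypothetical per-socket counter */
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* required for sk_storage maps */
	__type(key, int);                     /* keyed by socket fd from userspace */
	__type(value, struct pkt_cnt);
} sk_pkt_cnt SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct pkt_cnt *cnt;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);             /* sk_storage needs a full socket */
	if (!sk)
		return 1;

	/* Drives the lookup/alloc paths indexed above; F_CREATE allocates the
	 * per-socket bpf_sk_storage (lines 330-378) on first use. */
	cnt = bpf_sk_storage_get(&sk_pkt_cnt, sk, NULL,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (cnt)
		__sync_fetch_and_add(&cnt->egress_pkts, 1);

	return 1;                             /* let the packet through */
}

char _license[] SEC("license") = "GPL";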