Lines Matching refs:xfrm — one hit per source line. The leading number is the line in the source file, the trailing "in <function>()" names the enclosing function, and "argument"/"local" flags hits where xfrm is a function parameter or a local variable. (The functions are those of the legacy per-netns xfrm flow cache, net/core/flow.c.)
66 struct netns_xfrm *xfrm) in flow_entry_valid() argument
68 if (atomic_read(&xfrm->flow_cache_genid) != fle->genid) in flow_entry_valid()
76 struct netns_xfrm *xfrm) in flow_entry_kill() argument
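The two fragments above, flow_entry_valid() and flow_entry_kill(), are the invalidation side of the cache: an entry is trusted only while the genid it recorded still equals the per-namespace counter xfrm->flow_cache_genid. A minimal userspace sketch of that generation-count idiom; the struct layouts here are invented stand-ins, not the kernel's definitions:

#include <stdatomic.h>
#include <stdbool.h>

/* Invented stand-ins for struct netns_xfrm / the cached flow entry. */
struct netns_xfrm_sketch {
	atomic_int flow_cache_genid;	/* bumped whenever policy changes */
};

struct flow_entry_sketch {
	int genid;			/* counter value captured at insert time */
};

/* Mirrors the check in flow_entry_valid(): stale once the counter moves. */
static bool entry_valid(const struct flow_entry_sketch *fle,
			struct netns_xfrm_sketch *xfrm)
{
	return atomic_load(&xfrm->flow_cache_genid) == fle->genid;
}

Bumping flow_cache_genid is then an O(1) way to invalidate every cached entry at once, without walking the tables.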
87 struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm, in flow_cache_gc_task() local
91 spin_lock_bh(&xfrm->flow_cache_gc_lock); in flow_cache_gc_task()
92 list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list); in flow_cache_gc_task()
93 spin_unlock_bh(&xfrm->flow_cache_gc_lock); in flow_cache_gc_task()
96 flow_entry_kill(fce, xfrm); in flow_cache_gc_task()
101 struct netns_xfrm *xfrm) in flow_cache_queue_garbage() argument
105 spin_lock_bh(&xfrm->flow_cache_gc_lock); in flow_cache_queue_garbage()
106 list_splice_tail(gc_list, &xfrm->flow_cache_gc_list); in flow_cache_queue_garbage()
107 spin_unlock_bh(&xfrm->flow_cache_gc_lock); in flow_cache_queue_garbage()
108 schedule_work(&xfrm->flow_cache_gc_work); in flow_cache_queue_garbage()
120 struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm, in __flow_cache_shrink() local
129 flow_entry_valid(fle, xfrm)) { in __flow_cache_shrink()
139 flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm); in __flow_cache_shrink()
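flow_cache_gc_task() and flow_cache_queue_garbage() above form a producer/consumer pair around flow_cache_gc_list: producers (such as __flow_cache_shrink()) append dead entries under flow_cache_gc_lock and kick the work item, and the worker splices the whole list onto a private one under the same lock, then frees the entries with the lock dropped. A hedged sketch of that queue-then-drain shape, with a pthread mutex standing in for spin_lock_bh() and a hand-rolled singly linked list for list_head (ordering is not preserved here; the kernel's list_splice_tail_init() keeps it):

#include <pthread.h>
#include <stdlib.h>

struct gc_node { struct gc_node *next; };

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct gc_node *gc_pending;	/* shared, cf. flow_cache_gc_list */

/* Producer side, cf. flow_cache_queue_garbage(): publish under the
 * lock; the kernel code would then schedule_work() the consumer. */
static void queue_garbage(struct gc_node *n)
{
	pthread_mutex_lock(&gc_lock);
	n->next = gc_pending;
	gc_pending = n;
	pthread_mutex_unlock(&gc_lock);
}

/* Consumer side, cf. flow_cache_gc_task(): take the whole list in O(1)
 * under the lock, then free entries with the lock already dropped. */
static void gc_task(void)
{
	pthread_mutex_lock(&gc_lock);
	struct gc_node *local = gc_pending;
	gc_pending = NULL;
	pthread_mutex_unlock(&gc_lock);

	while (local) {
		struct gc_node *next = local->next;
		free(local);
		local = next;
	}
}

The point of the splice is that the lock is held for O(1) work on both sides, no matter how many entries are pending.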
195 struct flow_cache *fc = &net->xfrm.flow_cache_global; in flow_cache_lookup()
245 } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) { in flow_cache_lookup()
266 fle->genid = atomic_read(&net->xfrm.flow_cache_genid); in flow_cache_lookup()
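flow_cache_lookup() uses the same counter on the read side: a cached object is honoured only while fle->genid matches flow_cache_genid (the comparison on line 245), and a freshly resolved object is re-stamped with the current value (line 266). A simplified sketch of that hit/stale/restamp flow; resolve_object() is a hypothetical stand-in for the resolver callback the real function is passed:

#include <stdatomic.h>
#include <stddef.h>

struct netns_xfrm_sketch2 { atomic_int flow_cache_genid; };

struct flow_entry_sketch2 {
	void *object;	/* cached result, NULL until resolved */
	int genid;	/* stamp taken when object was cached */
};

extern void *resolve_object(void);	/* hypothetical slow path */

static void *lookup(struct flow_entry_sketch2 *fle,
		    struct netns_xfrm_sketch2 *xfrm)
{
	int cur = atomic_load(&xfrm->flow_cache_genid);

	if (fle->object && fle->genid == cur)
		return fle->object;		/* fresh hit, fast path */

	fle->object = resolve_object();		/* miss or stale: re-resolve */
	fle->genid = cur;			/* re-stamp with current counter */
	return fle->object;
}

Reading the counter once up front is safe here: if it is bumped concurrently, the entry merely carries a stale stamp and is re-resolved on the next lookup.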
290 struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm, in flow_cache_flush_tasklet() local
297 if (flow_entry_valid(fle, xfrm)) in flow_cache_flush_tasklet()
306 flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm); in flow_cache_flush_tasklet()
353 mutex_lock(&net->xfrm.flow_flush_sem); in flow_cache_flush()
354 info.cache = &net->xfrm.flow_cache_global; in flow_cache_flush()
374 mutex_unlock(&net->xfrm.flow_flush_sem); in flow_cache_flush()
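flow_cache_flush() serialises whole-cache flushes with net->xfrm.flow_flush_sem (a mutex despite the _sem suffix, as the mutex_init() in flow_cache_init() below confirms), while flow_cache_flush_tasklet() does the per-entry work: valid entries are kept, the rest are queued for the GC worker. A compressed, purely illustrative sketch of the two combined; the fixed-size table and non-atomic counter are simplifications:

#include <pthread.h>
#include <stdbool.h>

#define SLOTS 64

struct entry { bool used; int genid; };

static pthread_mutex_t flush_sem = PTHREAD_MUTEX_INITIALIZER;
static struct entry table[SLOTS];
static int cache_genid;			/* simplified, non-atomic here */

/* cf. flow_cache_flush(): at most one flush at a time per namespace. */
static void flush_cache(void)
{
	pthread_mutex_lock(&flush_sem);
	for (int i = 0; i < SLOTS; i++) {
		if (!table[i].used)
			continue;
		if (table[i].genid == cache_genid)
			continue;	/* still valid, keep (cf. flow_entry_valid) */
		table[i].used = false;	/* kernel queues these for the GC worker */
	}
	pthread_mutex_unlock(&flush_sem);
}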
381 struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm, in flow_cache_flush_task() local
383 struct net *net = container_of(xfrm, struct net, xfrm); in flow_cache_flush_task()
390 schedule_work(&net->xfrm.flow_cache_flush_work); in flow_cache_flush_deferred()
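The work handlers in this file never receive a context pointer, only the embedded work item, so lines 381-383 recover their context with two container_of() steps: from the work_struct back to netns_xfrm, then from the xfrm member back to the enclosing struct net. flow_cache_flush_deferred() is just the schedule_work() trigger. A self-contained userspace demonstration of that double walk, with invented layouts:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct netns_xfrm { struct work flow_cache_flush_work; };
struct net { int id; struct netns_xfrm xfrm; };

/* cf. flow_cache_flush_task(): walk back out to the namespace. */
static void flush_task(struct work *w)
{
	struct netns_xfrm *xfrm =
		container_of(w, struct netns_xfrm, flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	printf("flushing flow cache for netns %d\n", net->id);
}

int main(void)
{
	struct net n = { .id = 1 };
	flush_task(&n.xfrm.flow_cache_flush_work);
	return 0;
}

container_of() is plain pointer arithmetic via offsetof(), which is why the member must really be embedded in the outer struct, not pointed to.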
438 struct flow_cache *fc = &net->xfrm.flow_cache_global; in flow_cache_init()
444 spin_lock_init(&net->xfrm.flow_cache_gc_lock); in flow_cache_init()
445 INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list); in flow_cache_init()
446 INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task); in flow_cache_init()
447 INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task); in flow_cache_init()
448 mutex_init(&net->xfrm.flow_flush_sem); in flow_cache_init()
497 struct flow_cache *fc = &net->xfrm.flow_cache_global; in flow_cache_fini()
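flow_cache_init() is where all of the pieces seen above are wired per namespace: the GC spinlock and list, both work items, and the flush mutex; flow_cache_fini() tears the same state down. A sketch of that setup with pthread primitives standing in for the kernel ones and the work items reduced to bare function pointers; the layout follows the field names in the listing but is otherwise invented:

#include <pthread.h>

struct list_head { struct list_head *prev, *next; };
static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

struct netns_xfrm_sketch3 {
	pthread_mutex_t flow_cache_gc_lock;	/* spin_lock_bh() stand-in */
	struct list_head flow_cache_gc_list;
	void (*flow_cache_gc_work)(void);	/* INIT_WORK() stand-ins */
	void (*flow_cache_flush_work)(void);
	pthread_mutex_t flow_flush_sem;		/* serialises flushes */
};

static void gc_handler(void) { /* drain gc_list, cf. flow_cache_gc_task() */ }
static void flush_handler(void) { /* cf. flow_cache_flush_task() */ }

/* cf. flow_cache_init(): everything initialised before the cache goes live. */
static void flow_cache_init_sketch(struct netns_xfrm_sketch3 *x)
{
	pthread_mutex_init(&x->flow_cache_gc_lock, NULL);
	INIT_LIST_HEAD(&x->flow_cache_gc_list);
	x->flow_cache_gc_work = gc_handler;
	x->flow_cache_flush_work = flush_handler;
	pthread_mutex_init(&x->flow_flush_sem, NULL);
}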