Lines matching refs: xprt — SUNRPC server transport (svc_xprt) code. Each entry gives the source line number, the matching line, and the enclosing function, with the symbol's role (argument or local) where known.

22 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
26 static void svc_delete_xprt(struct svc_xprt *xprt);
128 struct svc_xprt *xprt = in svc_xprt_free() local
130 struct module *owner = xprt->xpt_class->xcl_owner; in svc_xprt_free()
131 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) in svc_xprt_free()
132 svcauth_unix_info_release(xprt); in svc_xprt_free()
133 put_net(xprt->xpt_net); in svc_xprt_free()
135 if (xprt->xpt_bc_xprt) in svc_xprt_free()
136 xprt_put(xprt->xpt_bc_xprt); in svc_xprt_free()
137 xprt->xpt_ops->xpo_free(xprt); in svc_xprt_free()
141 void svc_xprt_put(struct svc_xprt *xprt) in svc_xprt_put() argument
143 kref_put(&xprt->xpt_ref, svc_xprt_free); in svc_xprt_put()
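
The svc_xprt_put() entry completes the lifetime picture started in svc_xprt_free(): every holder pairs svc_xprt_get() with svc_xprt_put(), and the final put, via kref_put(), runs svc_xprt_free() to drop the auth cache, the network namespace and any backchannel before calling the class's xpo_free() method. A minimal user-space sketch of that last-put-frees idiom (struct obj, obj_get() and obj_put() are invented names, not the kernel API), assuming C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int ref;                /* stands in for the kref xpt_ref */
        void (*free_cb)(struct obj *); /* stands in for xpo_free */
};

static void obj_get(struct obj *o) { atomic_fetch_add(&o->ref, 1); }

static void obj_put(struct obj *o)
{
        /* the caller dropping the last reference runs the destructor */
        if (atomic_fetch_sub(&o->ref, 1) == 1)
                o->free_cb(o);
}

static void obj_free(struct obj *o)
{
        printf("last put, freeing\n");
        free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));
        atomic_init(&o->ref, 1);   /* creator holds the first reference */
        o->free_cb = obj_free;
        obj_get(o);                /* a second holder... */
        obj_put(o);                /* ...drops it: object stays alive */
        obj_put(o);                /* creator's put is the last one */
        return 0;
}

The kernel gets the same decrement-and-test from kref_put(&xprt->xpt_ref, svc_xprt_free), as the listing shows.
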
152 struct svc_xprt *xprt, struct svc_serv *serv) in svc_xprt_init() argument
154 memset(xprt, 0, sizeof(*xprt)); in svc_xprt_init()
155 xprt->xpt_class = xcl; in svc_xprt_init()
156 xprt->xpt_ops = xcl->xcl_ops; in svc_xprt_init()
157 kref_init(&xprt->xpt_ref); in svc_xprt_init()
158 xprt->xpt_server = serv; in svc_xprt_init()
159 INIT_LIST_HEAD(&xprt->xpt_list); in svc_xprt_init()
160 INIT_LIST_HEAD(&xprt->xpt_ready); in svc_xprt_init()
161 INIT_LIST_HEAD(&xprt->xpt_deferred); in svc_xprt_init()
162 INIT_LIST_HEAD(&xprt->xpt_users); in svc_xprt_init()
163 mutex_init(&xprt->xpt_mutex); in svc_xprt_init()
164 spin_lock_init(&xprt->xpt_lock); in svc_xprt_init()
165 set_bit(XPT_BUSY, &xprt->xpt_flags); in svc_xprt_init()
166 rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); in svc_xprt_init()
167 xprt->xpt_net = get_net(net); in svc_xprt_init()
219 static void svc_xprt_received(struct svc_xprt *xprt) in svc_xprt_received() argument
221 if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) { in svc_xprt_received()
222 WARN_ONCE(1, "xprt=0x%p already busy!", xprt); in svc_xprt_received()
229 svc_xprt_get(xprt); in svc_xprt_received()
231 clear_bit(XPT_BUSY, &xprt->xpt_flags); in svc_xprt_received()
232 xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt); in svc_xprt_received()
233 svc_xprt_put(xprt); in svc_xprt_received()
286 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt) in svc_xprt_copy_addrs() argument
288 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen); in svc_xprt_copy_addrs()
289 rqstp->rq_addrlen = xprt->xpt_remotelen; in svc_xprt_copy_addrs()
295 memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen); in svc_xprt_copy_addrs()
296 rqstp->rq_daddrlen = xprt->xpt_locallen; in svc_xprt_copy_addrs()
313 static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) in svc_xprt_has_something_to_do() argument
315 if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) in svc_xprt_has_something_to_do()
317 if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) in svc_xprt_has_something_to_do()
318 return xprt->xpt_ops->xpo_has_wspace(xprt); in svc_xprt_has_something_to_do()
322 void svc_xprt_do_enqueue(struct svc_xprt *xprt) in svc_xprt_do_enqueue() argument
329 if (!svc_xprt_has_something_to_do(xprt)) in svc_xprt_do_enqueue()
337 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) { in svc_xprt_do_enqueue()
339 dprintk("svc: transport %p busy, not enqueued\n", xprt); in svc_xprt_do_enqueue()
344 pool = svc_pool_for_cpu(xprt->xpt_server, cpu); in svc_xprt_do_enqueue()
371 rqstp->rq_xprt = xprt; in svc_xprt_do_enqueue()
372 svc_xprt_get(xprt); in svc_xprt_do_enqueue()
392 dprintk("svc: transport %p put into queue\n", xprt); in svc_xprt_do_enqueue()
394 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); in svc_xprt_do_enqueue()
402 trace_svc_xprt_do_enqueue(xprt, rqstp); in svc_xprt_do_enqueue()
411 void svc_xprt_enqueue(struct svc_xprt *xprt) in svc_xprt_enqueue() argument
413 if (test_bit(XPT_BUSY, &xprt->xpt_flags)) in svc_xprt_enqueue()
415 xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt); in svc_xprt_enqueue()
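
Taken together, svc_xprt_received(), svc_xprt_has_something_to_do(), svc_xprt_do_enqueue() and svc_xprt_enqueue() implement a single-owner handoff: a transport is queued only after test_and_set_bit(XPT_BUSY) succeeds, svc_xprt_init() creates transports with XPT_BUSY already set so they stay invisible until svc_xprt_received() clears the bit and re-runs the enqueue, and svc_xprt_enqueue() simply skips transports another thread already owns. A user-space sketch of that claim/release idiom (my_xprt and the MYX_* bits are invented names), assuming C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MYX_BUSY 0   /* stands in for XPT_BUSY */

struct my_xprt {
        atomic_ulong flags;
};

/* Claim exclusive ownership; false means another thread already has it. */
static bool my_xprt_try_claim(struct my_xprt *x)
{
        unsigned long old = atomic_fetch_or(&x->flags, 1UL << MYX_BUSY);
        return !(old & (1UL << MYX_BUSY));
}

/* Finished receiving: release ownership so the transport can be queued again. */
static void my_xprt_received(struct my_xprt *x)
{
        atomic_fetch_and(&x->flags, ~(1UL << MYX_BUSY));
        /* ...the kernel re-runs the enqueue decision at this point... */
}

int main(void)
{
        struct my_xprt x;

        atomic_init(&x.flags, 1UL << MYX_BUSY);  /* "born busy", like svc_xprt_init() */
        my_xprt_received(&x);                    /* setup done: make it claimable */
        printf("first claim:  %d\n", my_xprt_try_claim(&x));   /* 1 */
        printf("second claim: %d\n", my_xprt_try_claim(&x));   /* 0: already busy */
        return 0;
}
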
424 struct svc_xprt *xprt = NULL; in svc_xprt_dequeue() local
431 xprt = list_first_entry(&pool->sp_sockets, in svc_xprt_dequeue()
433 list_del_init(&xprt->xpt_ready); in svc_xprt_dequeue()
434 svc_xprt_get(xprt); in svc_xprt_dequeue()
437 xprt, atomic_read(&xprt->xpt_ref.refcount)); in svc_xprt_dequeue()
441 trace_svc_xprt_dequeue(xprt); in svc_xprt_dequeue()
442 return xprt; in svc_xprt_dequeue()
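
svc_xprt_dequeue() shows the pool side of that handoff: under the pool's lock, take the first transport off sp_sockets, list_del_init() it so it is no longer queued, and grab a reference for the caller before returning it. A stripped-down user-space sketch of pop-first-and-take-a-reference (struct pool, struct qnode and pool_dequeue() are invented names), assuming POSIX threads:

#include <pthread.h>
#include <stddef.h>

struct qnode {
        struct qnode *next;
        int refcount;           /* simplified: protected by the pool lock here */
};

struct pool {
        pthread_mutex_t lock;   /* stands in for the pool's lock */
        struct qnode *head;     /* stands in for sp_sockets */
};

/* Remove and return the first ready transport, taking a reference for the caller. */
static struct qnode *pool_dequeue(struct pool *p)
{
        struct qnode *n = NULL;

        pthread_mutex_lock(&p->lock);
        if (p->head) {
                n = p->head;
                p->head = n->next;
                n->next = NULL;   /* like list_del_init(): no longer queued */
                n->refcount++;    /* like svc_xprt_get(): caller owns a reference */
        }
        pthread_mutex_unlock(&p->lock);
        return n;
}

int main(void)
{
        struct qnode a = { NULL, 0 };
        struct pool p = { PTHREAD_MUTEX_INITIALIZER, &a };
        return pool_dequeue(&p) == &a ? 0 : 1;
}
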
460 struct svc_xprt *xprt = rqstp->rq_xprt; in svc_reserve() local
461 atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); in svc_reserve()
464 if (xprt->xpt_ops->xpo_adjust_wspace) in svc_reserve()
465 xprt->xpt_ops->xpo_adjust_wspace(xprt); in svc_reserve()
466 svc_xprt_enqueue(xprt); in svc_reserve()
473 struct svc_xprt *xprt = rqstp->rq_xprt; in svc_xprt_release() local
498 svc_xprt_put(xprt); in svc_xprt_release()
573 struct svc_xprt *xprt = NULL; in svc_check_conn_limits() local
585 xprt = list_entry(serv->sv_tempsocks.prev, in svc_check_conn_limits()
588 set_bit(XPT_CLOSE, &xprt->xpt_flags); in svc_check_conn_limits()
589 svc_xprt_get(xprt); in svc_check_conn_limits()
593 if (xprt) { in svc_check_conn_limits()
594 svc_xprt_enqueue(xprt); in svc_check_conn_limits()
595 svc_xprt_put(xprt); in svc_check_conn_limits()
668 struct svc_xprt *xprt; in svc_get_next_xprt() local
680 xprt = svc_xprt_dequeue(pool); in svc_get_next_xprt()
681 if (xprt) { in svc_get_next_xprt()
682 rqstp->rq_xprt = xprt; in svc_get_next_xprt()
690 return xprt; in svc_get_next_xprt()
712 xprt = rqstp->rq_xprt; in svc_get_next_xprt()
713 if (xprt != NULL) in svc_get_next_xprt()
714 return xprt; in svc_get_next_xprt()
741 static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) in svc_handle_xprt() argument
746 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { in svc_handle_xprt()
748 svc_delete_xprt(xprt); in svc_handle_xprt()
752 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { in svc_handle_xprt()
758 __module_get(xprt->xpt_class->xcl_owner); in svc_handle_xprt()
759 svc_check_conn_limits(xprt->xpt_server); in svc_handle_xprt()
760 newxpt = xprt->xpt_ops->xpo_accept(xprt); in svc_handle_xprt()
764 module_put(xprt->xpt_class->xcl_owner); in svc_handle_xprt()
768 rqstp, rqstp->rq_pool->sp_id, xprt, in svc_handle_xprt()
769 atomic_read(&xprt->xpt_ref.refcount)); in svc_handle_xprt()
770 rqstp->rq_deferred = svc_deferred_dequeue(xprt); in svc_handle_xprt()
774 len = xprt->xpt_ops->xpo_recvfrom(rqstp); in svc_handle_xprt()
777 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); in svc_handle_xprt()
780 svc_xprt_received(xprt); in svc_handle_xprt()
782 trace_svc_handle_xprt(xprt, len); in svc_handle_xprt()
793 struct svc_xprt *xprt = NULL; in svc_recv() local
815 xprt = svc_get_next_xprt(rqstp, timeout); in svc_recv()
816 if (IS_ERR(xprt)) { in svc_recv()
817 err = PTR_ERR(xprt); in svc_recv()
821 len = svc_handle_xprt(rqstp, xprt); in svc_recv()
828 clear_bit(XPT_OLD, &xprt->xpt_flags); in svc_recv()
830 if (xprt->xpt_ops->xpo_secure_port(rqstp)) in svc_recv()
865 struct svc_xprt *xprt; in svc_send() local
869 xprt = rqstp->rq_xprt; in svc_send()
870 if (!xprt) in svc_send()
883 mutex_lock(&xprt->xpt_mutex); in svc_send()
884 if (test_bit(XPT_DEAD, &xprt->xpt_flags) in svc_send()
885 || test_bit(XPT_CLOSE, &xprt->xpt_flags)) in svc_send()
888 len = xprt->xpt_ops->xpo_sendto(rqstp); in svc_send()
889 mutex_unlock(&xprt->xpt_mutex); in svc_send()
890 rpc_wake_up(&xprt->xpt_bc_pending); in svc_send()
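
svc_send() serializes replies on the transport's xpt_mutex and, while holding it, refuses to transmit on a transport already marked XPT_DEAD or XPT_CLOSE; only then does it call the class's xpo_sendto() and wake any backchannel waiters. A user-space sketch of that check-under-the-send-lock pattern (struct conn and conn_send() are invented names), assuming POSIX threads:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        pthread_mutex_t send_lock;  /* stands in for xpt_mutex */
        bool dead;                  /* stands in for XPT_DEAD / XPT_CLOSE */
};

/* Send one reply; one sender at a time, and never on a dying connection. */
static int conn_send(struct conn *c, const char *reply)
{
        int len = -1;   /* the kernel returns -ENOTCONN in this case */

        pthread_mutex_lock(&c->send_lock);
        if (!c->dead)
                len = printf("%s\n", reply);  /* stands in for xpo_sendto() */
        pthread_mutex_unlock(&c->send_lock);
        return len;
}

int main(void)
{
        struct conn c = { PTHREAD_MUTEX_INITIALIZER, false };
        return conn_send(&c, "reply") > 0 ? 0 : 1;
}
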
907 struct svc_xprt *xprt; in svc_age_temp_xprts() local
920 xprt = list_entry(le, struct svc_xprt, xpt_list); in svc_age_temp_xprts()
924 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) in svc_age_temp_xprts()
926 if (atomic_read(&xprt->xpt_ref.refcount) > 1 || in svc_age_temp_xprts()
927 test_bit(XPT_BUSY, &xprt->xpt_flags)) in svc_age_temp_xprts()
930 set_bit(XPT_CLOSE, &xprt->xpt_flags); in svc_age_temp_xprts()
931 dprintk("queuing xprt %p for closing\n", xprt); in svc_age_temp_xprts()
934 svc_xprt_enqueue(xprt); in svc_age_temp_xprts()
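
svc_age_temp_xprts() ages temporary connections in two strikes: the first sweep only sets XPT_OLD; a connection still marked old on the next sweep, and neither referenced by anyone else nor busy, gets XPT_CLOSE set and is enqueued so a server thread deletes it. Activity clears XPT_OLD again (see the clear_bit() in svc_recv() above), so only connections idle across two sweeps are closed. A simplified sketch of the two-strike rule (struct conn and age_sweep() are invented names; the kernel uses atomic bit operations rather than plain booleans):

#include <stdbool.h>
#include <stdio.h>

struct conn {
        bool old;    /* stands in for XPT_OLD   */
        bool close;  /* stands in for XPT_CLOSE */
        int  refs;   /* >1 means someone is actively using it */
        bool busy;   /* stands in for XPT_BUSY  */
};

/*
 * One sweep of the aging timer: a connection must be seen idle on two
 * consecutive sweeps before it is queued for closing.
 */
static void age_sweep(struct conn *c, int n)
{
        for (int i = 0; i < n; i++) {
                if (!c[i].old) {          /* first strike: just mark it */
                        c[i].old = true;
                        continue;
                }
                if (c[i].refs > 1 || c[i].busy)
                        continue;         /* still in use: leave it alone */
                c[i].close = true;        /* second strike: close it */
                printf("closing idle conn %d\n", i);
        }
}

int main(void)
{
        struct conn conns[2] = { { false, false, 1, false },
                                 { true,  false, 1, false } };
        age_sweep(conns, 2);  /* conn 1 was already old and idle: gets closed */
        return 0;
}
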
941 static void call_xpt_users(struct svc_xprt *xprt) in call_xpt_users() argument
945 spin_lock(&xprt->xpt_lock); in call_xpt_users()
946 while (!list_empty(&xprt->xpt_users)) { in call_xpt_users()
947 u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list); in call_xpt_users()
951 spin_unlock(&xprt->xpt_lock); in call_xpt_users()
957 static void svc_delete_xprt(struct svc_xprt *xprt) in svc_delete_xprt() argument
959 struct svc_serv *serv = xprt->xpt_server; in svc_delete_xprt()
963 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) in svc_delete_xprt()
966 dprintk("svc: svc_delete_xprt(%p)\n", xprt); in svc_delete_xprt()
967 xprt->xpt_ops->xpo_detach(xprt); in svc_delete_xprt()
970 list_del_init(&xprt->xpt_list); in svc_delete_xprt()
971 WARN_ON_ONCE(!list_empty(&xprt->xpt_ready)); in svc_delete_xprt()
972 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) in svc_delete_xprt()
976 while ((dr = svc_deferred_dequeue(xprt)) != NULL) in svc_delete_xprt()
979 call_xpt_users(xprt); in svc_delete_xprt()
980 svc_xprt_put(xprt); in svc_delete_xprt()
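
svc_delete_xprt() is the single teardown path: test_and_set_bit(XPT_DEAD) guarantees the body runs at most once even if several callers (svc_close_xprt(), svc_clean_up_xprts(), the XPT_CLOSE handling in svc_handle_xprt()) race to it, and only the winner detaches the transport, unlinks it, drains its deferred requests, notifies the xpt_users callbacks and drops the list's reference. A user-space sketch of that run-once guard (struct conn and conn_delete() are invented names), assuming C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        atomic_bool dead;   /* stands in for XPT_DEAD */
};

/* Tear a connection down at most once, no matter how many paths race here. */
static void conn_delete(struct conn *c)
{
        if (atomic_exchange(&c->dead, true))
                return;     /* someone else already ran the teardown */

        /* detach, unlink, drain deferred work, notify users, drop the last ref */
        printf("tearing down\n");
}

int main(void)
{
        struct conn c;

        atomic_init(&c.dead, false);
        conn_delete(&c);    /* runs the teardown */
        conn_delete(&c);    /* second call is a no-op */
        return 0;
}
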
983 void svc_close_xprt(struct svc_xprt *xprt) in svc_close_xprt() argument
985 set_bit(XPT_CLOSE, &xprt->xpt_flags); in svc_close_xprt()
986 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) in svc_close_xprt()
995 svc_delete_xprt(xprt); in svc_close_xprt()
1001 struct svc_xprt *xprt; in svc_close_list() local
1005 list_for_each_entry(xprt, xprt_list, xpt_list) { in svc_close_list()
1006 if (xprt->xpt_net != net) in svc_close_list()
1009 set_bit(XPT_CLOSE, &xprt->xpt_flags); in svc_close_list()
1010 svc_xprt_enqueue(xprt); in svc_close_list()
1019 struct svc_xprt *xprt; in svc_dequeue_net() local
1027 list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) { in svc_dequeue_net()
1028 if (xprt->xpt_net != net) in svc_dequeue_net()
1030 list_del_init(&xprt->xpt_ready); in svc_dequeue_net()
1032 return xprt; in svc_dequeue_net()
1041 struct svc_xprt *xprt; in svc_clean_up_xprts() local
1043 while ((xprt = svc_dequeue_net(serv, net))) { in svc_clean_up_xprts()
1044 set_bit(XPT_CLOSE, &xprt->xpt_flags); in svc_clean_up_xprts()
1045 svc_delete_xprt(xprt); in svc_clean_up_xprts()
1081 struct svc_xprt *xprt = dr->xprt; in svc_revisit() local
1083 spin_lock(&xprt->xpt_lock); in svc_revisit()
1084 set_bit(XPT_DEFERRED, &xprt->xpt_flags); in svc_revisit()
1085 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) { in svc_revisit()
1086 spin_unlock(&xprt->xpt_lock); in svc_revisit()
1088 svc_xprt_put(xprt); in svc_revisit()
1093 dr->xprt = NULL; in svc_revisit()
1094 list_add(&dr->handle.recent, &xprt->xpt_deferred); in svc_revisit()
1095 spin_unlock(&xprt->xpt_lock); in svc_revisit()
1096 svc_xprt_enqueue(xprt); in svc_revisit()
1097 svc_xprt_put(xprt); in svc_revisit()
1142 dr->xprt = rqstp->rq_xprt; in svc_defer()
1174 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) in svc_deferred_dequeue() argument
1178 if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) in svc_deferred_dequeue()
1180 spin_lock(&xprt->xpt_lock); in svc_deferred_dequeue()
1181 if (!list_empty(&xprt->xpt_deferred)) { in svc_deferred_dequeue()
1182 dr = list_entry(xprt->xpt_deferred.next, in svc_deferred_dequeue()
1187 clear_bit(XPT_DEFERRED, &xprt->xpt_flags); in svc_deferred_dequeue()
1188 spin_unlock(&xprt->xpt_lock); in svc_deferred_dequeue()
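
svc_revisit() and svc_deferred_dequeue() form the deferral loop: a request that cannot be processed yet is parked on the transport's xpt_deferred list under xpt_lock with XPT_DEFERRED set and the transport re-enqueued, and svc_handle_xprt() later pops a deferred request before reading new data, clearing the flag once the list is found empty. A user-space sketch of that park-and-revisit queue (struct conn, struct deferred, conn_defer() and conn_deferred_dequeue() are invented names), assuming POSIX threads:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct deferred {
        struct deferred *next;
};

struct conn {
        pthread_mutex_t lock;      /* stands in for xpt_lock */
        struct deferred *pending;  /* stands in for xpt_deferred */
        bool has_deferred;         /* stands in for the XPT_DEFERRED bit */
};

/* Park a request that could not be answered yet (svc_revisit()'s job). */
static void conn_defer(struct conn *c, struct deferred *dr)
{
        pthread_mutex_lock(&c->lock);
        dr->next = c->pending;
        c->pending = dr;
        c->has_deferred = true;    /* tell the poller there is queued work */
        pthread_mutex_unlock(&c->lock);
        /* ...then re-enqueue the connection so a thread picks it up... */
}

/* Pop one deferred request before reading fresh data from the socket. */
static struct deferred *conn_deferred_dequeue(struct conn *c)
{
        struct deferred *dr = NULL;

        if (!c->has_deferred)      /* cheap check before taking the lock */
                return NULL;
        pthread_mutex_lock(&c->lock);
        if (c->pending) {
                dr = c->pending;
                c->pending = dr->next;
        } else {
                c->has_deferred = false;   /* list drained: drop the hint */
        }
        pthread_mutex_unlock(&c->lock);
        return dr;
}

int main(void)
{
        struct conn c = { PTHREAD_MUTEX_INITIALIZER, NULL, false };
        struct deferred dr = { NULL };

        conn_defer(&c, &dr);
        return conn_deferred_dequeue(&c) == &dr ? 0 : 1;
}
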
1212 struct svc_xprt *xprt; in svc_find_xprt() local
1220 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { in svc_find_xprt()
1221 if (xprt->xpt_net != net) in svc_find_xprt()
1223 if (strcmp(xprt->xpt_class->xcl_name, xcl_name)) in svc_find_xprt()
1225 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family) in svc_find_xprt()
1227 if (port != 0 && port != svc_xprt_local_port(xprt)) in svc_find_xprt()
1229 found = xprt; in svc_find_xprt()
1230 svc_xprt_get(xprt); in svc_find_xprt()
1238 static int svc_one_xprt_name(const struct svc_xprt *xprt, in svc_one_xprt_name() argument
1244 xprt->xpt_class->xcl_name, in svc_one_xprt_name()
1245 svc_xprt_local_port(xprt)); in svc_one_xprt_name()
1265 struct svc_xprt *xprt; in svc_xprt_names() local
1277 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { in svc_xprt_names()
1278 len = svc_one_xprt_name(xprt, pos, buflen - totlen); in svc_xprt_names()