Lines matching refs: xprt (identifier cross-reference; the matched code is the Linux SUNRPC client transport, net/sunrpc/xprt.c)

67 static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
71 static void xprt_destroy(struct rpc_xprt *xprt);
181 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt() argument
186 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { in xprt_reserve_xprt()
187 if (task == xprt->snd_task) in xprt_reserve_xprt()
191 xprt->snd_task = task; in xprt_reserve_xprt()
199 task->tk_pid, xprt); in xprt_reserve_xprt()
208 rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); in xprt_reserve_xprt()
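
The xprt_reserve_xprt() lines above (181-208) show the core transport write lock: a single XPRT_LOCKED bit in xprt->state is claimed with test_and_set_bit(), the winner records itself in xprt->snd_task so the owner can re-enter, and losers are put to sleep on the xprt->sending priority queue. Below is a minimal userspace analogue of the claim/re-entry check using C11 atomics; struct fake_xprt and reserve_xprt_like() are illustrative names, not kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct fake_xprt {
        atomic_flag locked;   /* stands in for the XPRT_LOCKED bit */
        void *snd_task;       /* stands in for xprt->snd_task */
    };

    /* Returns true if 'task' holds the send lock on exit. */
    static bool reserve_xprt_like(struct fake_xprt *x, void *task)
    {
        if (atomic_flag_test_and_set(&x->locked))
            return x->snd_task == task;   /* re-entry by the owner only */
        x->snd_task = task;               /* we won the bit */
        return true;
    }

In the kernel a losing task does not fail outright: it is parked on &xprt->sending with rpc_sleep_on_priority() (line 208) and re-driven when the holder releases the lock.
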
213 static void xprt_clear_locked(struct rpc_xprt *xprt) in xprt_clear_locked() argument
215 xprt->snd_task = NULL; in xprt_clear_locked()
216 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { in xprt_clear_locked()
218 clear_bit(XPRT_LOCKED, &xprt->state); in xprt_clear_locked()
221 queue_work(rpciod_workqueue, &xprt->task_cleanup); in xprt_clear_locked()
232 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt_cong() argument
237 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { in xprt_reserve_xprt_cong()
238 if (task == xprt->snd_task) in xprt_reserve_xprt_cong()
243 xprt->snd_task = task; in xprt_reserve_xprt_cong()
246 if (__xprt_get_cong(xprt, task)) { in xprt_reserve_xprt_cong()
247 xprt->snd_task = task; in xprt_reserve_xprt_cong()
251 xprt_clear_locked(xprt); in xprt_reserve_xprt_cong()
253 dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); in xprt_reserve_xprt_cong()
262 rpc_sleep_on_priority(&xprt->sending, task, NULL, priority); in xprt_reserve_xprt_cong()
267 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_lock_write() argument
271 spin_lock_bh(&xprt->transport_lock); in xprt_lock_write()
272 retval = xprt->ops->reserve_xprt(xprt, task); in xprt_lock_write()
273 spin_unlock_bh(&xprt->transport_lock); in xprt_lock_write()
279 struct rpc_xprt *xprt = data; in __xprt_lock_write_func() local
283 xprt->snd_task = task; in __xprt_lock_write_func()
289 static void __xprt_lock_write_next(struct rpc_xprt *xprt) in __xprt_lock_write_next() argument
291 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in __xprt_lock_write_next()
294 if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt)) in __xprt_lock_write_next()
296 xprt_clear_locked(xprt); in __xprt_lock_write_next()
301 struct rpc_xprt *xprt = data; in __xprt_lock_write_cong_func() local
306 xprt->snd_task = task; in __xprt_lock_write_cong_func()
309 if (__xprt_get_cong(xprt, task)) { in __xprt_lock_write_cong_func()
310 xprt->snd_task = task; in __xprt_lock_write_cong_func()
317 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) in __xprt_lock_write_next_cong() argument
319 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in __xprt_lock_write_next_cong()
321 if (RPCXPRT_CONGESTED(xprt)) in __xprt_lock_write_next_cong()
323 if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt)) in __xprt_lock_write_next_cong()
326 xprt_clear_locked(xprt); in __xprt_lock_write_next_cong()
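
__xprt_lock_write_next() and __xprt_lock_write_next_cong() (lines 289-326) implement lock handoff: the release path re-takes XPRT_LOCKED, asks rpc_wake_up_first() to pick the first sleeper on xprt->sending and install it as the new snd_task, and drops the bit again via xprt_clear_locked() if nobody was waiting (or, in the _cong variant, if the transport is congested). A simplified single-threaded sketch of that shape; wake_first() and the field names are stand-ins, not the kernel's:

    #include <stdbool.h>

    struct waiter { struct waiter *next; };
    struct sendq  { struct waiter *head; };

    /* Stand-in for rpc_wake_up_first(): pop one queued sender. */
    static struct waiter *wake_first(struct sendq *q)
    {
        struct waiter *w = q->head;
        if (w)
            q->head = w->next;
        return w;
    }

    /* Keep the lock only if a waiter was actually handed ownership. */
    static void lock_write_next_like(struct sendq *q, bool *locked,
                                     struct waiter **owner, bool congested)
    {
        if (*locked)                  /* test_and_set_bit() analogue */
            return;
        *locked = true;
        if (!congested) {
            struct waiter *w = wake_first(q);
            if (w) {
                *owner = w;           /* woken task becomes snd_task */
                return;
            }
        }
        *locked = false;              /* xprt_clear_locked() analogue */
    }
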
345 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_xprt() argument
347 if (xprt->snd_task == task) { in xprt_release_xprt()
349 xprt_clear_locked(xprt); in xprt_release_xprt()
350 __xprt_lock_write_next(xprt); in xprt_release_xprt()
363 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_xprt_cong() argument
365 if (xprt->snd_task == task) { in xprt_release_xprt_cong()
367 xprt_clear_locked(xprt); in xprt_release_xprt_cong()
368 __xprt_lock_write_next_cong(xprt); in xprt_release_xprt_cong()
373 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_write() argument
375 spin_lock_bh(&xprt->transport_lock); in xprt_release_write()
376 xprt->ops->release_xprt(xprt, task); in xprt_release_write()
377 spin_unlock_bh(&xprt->transport_lock); in xprt_release_write()
385 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task) in __xprt_get_cong() argument
392 task->tk_pid, xprt->cong, xprt->cwnd); in __xprt_get_cong()
393 if (RPCXPRT_CONGESTED(xprt)) in __xprt_get_cong()
396 xprt->cong += RPC_CWNDSCALE; in __xprt_get_cong()
405 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in __xprt_put_cong() argument
410 xprt->cong -= RPC_CWNDSCALE; in __xprt_put_cong()
411 __xprt_lock_write_next_cong(xprt); in __xprt_put_cong()
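
__xprt_get_cong() and __xprt_put_cong() (lines 385-411) meter in-flight requests on congestion-controlled transports: each request in flight holds one RPC_CWNDSCALE unit of xprt->cong, a new request is admitted only while cong stays below the window xprt->cwnd (the RPCXPRT_CONGESTED() test, line 393), and releasing a unit immediately tries to drive the next queued sender. A hedged sketch of the accounting; CWNDSCALE mirrors the kernel's fixed-point convention (RPC_CWNDSCALE is 1 << 8 in current sources, but treat the constant as illustrative):

    #define CWNDSCALE 256UL   /* fixed-point unit, like RPC_CWNDSCALE */

    struct cong_state {
        unsigned long cong;   /* scaled count of requests in flight */
        unsigned long cwnd;   /* scaled congestion window */
    };

    /* Admit a request only while there is window left. */
    static int get_cong(struct cong_state *c)
    {
        if (c->cong >= c->cwnd)   /* RPCXPRT_CONGESTED() analogue */
            return 0;
        c->cong += CWNDSCALE;
        return 1;
    }

    static void put_cong(struct cong_state *c)
    {
        c->cong -= CWNDSCALE;
        /* the kernel calls __xprt_lock_write_next_cong() here so the
         * freed window goes straight to the next queued sender */
    }
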
444 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result) in xprt_adjust_cwnd() argument
447 unsigned long cwnd = xprt->cwnd; in xprt_adjust_cwnd()
449 if (result >= 0 && cwnd <= xprt->cong) { in xprt_adjust_cwnd()
453 if (cwnd > RPC_MAXCWND(xprt)) in xprt_adjust_cwnd()
454 cwnd = RPC_MAXCWND(xprt); in xprt_adjust_cwnd()
455 __xprt_lock_write_next_cong(xprt); in xprt_adjust_cwnd()
462 xprt->cong, xprt->cwnd, cwnd); in xprt_adjust_cwnd()
463 xprt->cwnd = cwnd; in xprt_adjust_cwnd()
464 __xprt_put_cong(xprt, req); in xprt_adjust_cwnd()
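
xprt_adjust_cwnd() (lines 444-464) is a Van Jacobson style additive-increase/multiplicative-decrease controller: while the window is saturated (cwnd <= cong), each good reply grows cwnd by roughly one scaled unit per window's worth of replies, capped at RPC_MAXCWND() (lines 453-454), and a timeout, not visible among the matched lines, halves it. A compact sketch of the update rule under those assumptions; the cap value and the "any negative result is a timeout" simplification are mine:

    #define CWNDSCALE 256UL
    #define MAXCWND   (16UL * CWNDSCALE)   /* illustrative cap */

    /* cwnd never starts below CWNDSCALE, so the division is safe. */
    static unsigned long adjust_cwnd(unsigned long cwnd,
                                     unsigned long cong, int result)
    {
        if (result >= 0 && cwnd <= cong) {
            /* additive increase; (cwnd >> 1) rounds to nearest */
            cwnd += (CWNDSCALE * CWNDSCALE + (cwnd >> 1)) / cwnd;
            if (cwnd > MAXCWND)
                cwnd = MAXCWND;
        } else if (result < 0) {
            cwnd >>= 1;                    /* multiplicative decrease */
            if (cwnd < CWNDSCALE)
                cwnd = CWNDSCALE;          /* floor: one request */
        }
        return cwnd;
    }
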
474 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) in xprt_wake_pending_tasks() argument
477 rpc_wake_up_status(&xprt->pending, status); in xprt_wake_pending_tasks()
479 rpc_wake_up(&xprt->pending); in xprt_wake_pending_tasks()
495 struct rpc_xprt *xprt = req->rq_xprt; in xprt_wait_for_buffer_space() local
498 rpc_sleep_on(&xprt->pending, task, action); in xprt_wait_for_buffer_space()
508 void xprt_write_space(struct rpc_xprt *xprt) in xprt_write_space() argument
510 spin_lock_bh(&xprt->transport_lock); in xprt_write_space()
511 if (xprt->snd_task) { in xprt_write_space()
513 "xprt %p\n", xprt); in xprt_write_space()
514 rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task); in xprt_write_space()
516 spin_unlock_bh(&xprt->transport_lock); in xprt_write_space()
576 struct rpc_xprt *xprt = req->rq_xprt; in xprt_adjust_timeout() local
593 spin_lock_bh(&xprt->transport_lock); in xprt_adjust_timeout()
595 spin_unlock_bh(&xprt->transport_lock); in xprt_adjust_timeout()
608 struct rpc_xprt *xprt = in xprt_autoclose() local
611 xprt->ops->close(xprt); in xprt_autoclose()
612 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xprt_autoclose()
613 xprt_release_write(xprt, NULL); in xprt_autoclose()
614 wake_up_bit(&xprt->state, XPRT_LOCKED); in xprt_autoclose()
622 void xprt_disconnect_done(struct rpc_xprt *xprt) in xprt_disconnect_done() argument
624 dprintk("RPC: disconnected transport %p\n", xprt); in xprt_disconnect_done()
625 spin_lock_bh(&xprt->transport_lock); in xprt_disconnect_done()
626 xprt_clear_connected(xprt); in xprt_disconnect_done()
627 xprt_wake_pending_tasks(xprt, -EAGAIN); in xprt_disconnect_done()
628 spin_unlock_bh(&xprt->transport_lock); in xprt_disconnect_done()
637 void xprt_force_disconnect(struct rpc_xprt *xprt) in xprt_force_disconnect() argument
640 spin_lock_bh(&xprt->transport_lock); in xprt_force_disconnect()
641 set_bit(XPRT_CLOSE_WAIT, &xprt->state); in xprt_force_disconnect()
643 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) in xprt_force_disconnect()
644 queue_work(rpciod_workqueue, &xprt->task_cleanup); in xprt_force_disconnect()
645 xprt_wake_pending_tasks(xprt, -EAGAIN); in xprt_force_disconnect()
646 spin_unlock_bh(&xprt->transport_lock); in xprt_force_disconnect()
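
xprt_force_disconnect() (lines 637-646) and the autoclose worker it feeds (lines 608-614) reuse the send lock to serialize teardown: the disconnect path sets XPRT_CLOSE_WAIT, and only if it also wins XPRT_LOCKED does it queue xprt->task_cleanup; otherwise the current lock holder notices XPRT_CLOSE_WAIT in xprt_clear_locked() (lines 216-221) and queues the close itself. Pending tasks are woken with -EAGAIN so they retry on a fresh connection. A sketch of the ordering with stubbed-out helpers (queue_cleanup() and wake_pending() are placeholders, not kernel functions):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct closer {
        atomic_flag locked;        /* XPRT_LOCKED analogue */
        atomic_bool close_wait;    /* XPRT_CLOSE_WAIT analogue */
    };

    static void queue_cleanup(struct closer *c) { (void)c; /* stub */ }
    static void wake_pending(struct closer *c, int err) { (void)c; (void)err; }

    static void force_disconnect_like(struct closer *c)
    {
        atomic_store(&c->close_wait, true);     /* mark for close first */
        if (!atomic_flag_test_and_set(&c->locked))
            queue_cleanup(c);    /* lock was free: safe to close now */
        /* else: the holder sees close_wait when it unlocks */
        wake_pending(c, -11);    /* -EAGAIN: tasks retry and reconnect */
    }
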
660 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) in xprt_conditional_disconnect() argument
663 spin_lock_bh(&xprt->transport_lock); in xprt_conditional_disconnect()
664 if (cookie != xprt->connect_cookie) in xprt_conditional_disconnect()
666 if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt)) in xprt_conditional_disconnect()
668 set_bit(XPRT_CLOSE_WAIT, &xprt->state); in xprt_conditional_disconnect()
670 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) in xprt_conditional_disconnect()
671 queue_work(rpciod_workqueue, &xprt->task_cleanup); in xprt_conditional_disconnect()
672 xprt_wake_pending_tasks(xprt, -EAGAIN); in xprt_conditional_disconnect()
674 spin_unlock_bh(&xprt->transport_lock); in xprt_conditional_disconnect()
680 struct rpc_xprt *xprt = (struct rpc_xprt *)data; in xprt_init_autodisconnect() local
682 spin_lock(&xprt->transport_lock); in xprt_init_autodisconnect()
683 if (!list_empty(&xprt->recv)) in xprt_init_autodisconnect()
685 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in xprt_init_autodisconnect()
687 spin_unlock(&xprt->transport_lock); in xprt_init_autodisconnect()
688 queue_work(rpciod_workqueue, &xprt->task_cleanup); in xprt_init_autodisconnect()
691 spin_unlock(&xprt->transport_lock); in xprt_init_autodisconnect()
694 bool xprt_lock_connect(struct rpc_xprt *xprt, in xprt_lock_connect() argument
700 spin_lock_bh(&xprt->transport_lock); in xprt_lock_connect()
701 if (!test_bit(XPRT_LOCKED, &xprt->state)) in xprt_lock_connect()
703 if (xprt->snd_task != task) in xprt_lock_connect()
706 xprt->snd_task = cookie; in xprt_lock_connect()
709 spin_unlock_bh(&xprt->transport_lock); in xprt_lock_connect()
713 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) in xprt_unlock_connect() argument
715 spin_lock_bh(&xprt->transport_lock); in xprt_unlock_connect()
716 if (xprt->snd_task != cookie) in xprt_unlock_connect()
718 if (!test_bit(XPRT_LOCKED, &xprt->state)) in xprt_unlock_connect()
720 xprt->snd_task = NULL; in xprt_unlock_connect()
721 xprt->ops->release_xprt(xprt, NULL); in xprt_unlock_connect()
723 spin_unlock_bh(&xprt->transport_lock); in xprt_unlock_connect()
724 wake_up_bit(&xprt->state, XPRT_LOCKED); in xprt_unlock_connect()
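
xprt_lock_connect() and xprt_unlock_connect() (lines 694-724) show a small ownership trick: while a connect is in flight, xprt->snd_task is overwritten with an opaque cookie rather than a task pointer, so the write lock stays held across the asynchronous connect worker and can only be dropped by a caller presenting the same cookie. A sketch of the cookie handoff (types and names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct lockable {
        bool  locked;
        void *owner;   /* a task pointer or, during connect, a cookie */
    };

    static bool lock_connect_like(struct lockable *l, void *task, void *cookie)
    {
        if (!l->locked || l->owner != task)
            return false;        /* caller must already own the lock */
        l->owner = cookie;       /* park the lock on the cookie */
        return true;
    }

    static void unlock_connect_like(struct lockable *l, void *cookie)
    {
        if (!l->locked || l->owner != cookie)
            return;              /* ownership moved on; nothing to do */
        l->owner = NULL;
        l->locked = false;
    }
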
734 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; in xprt_connect() local
737 xprt, (xprt_connected(xprt) ? "is" : "is not")); in xprt_connect()
739 if (!xprt_bound(xprt)) { in xprt_connect()
743 if (!xprt_lock_write(xprt, task)) in xprt_connect()
746 if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) in xprt_connect()
747 xprt->ops->close(xprt); in xprt_connect()
749 if (!xprt_connected(xprt)) { in xprt_connect()
752 rpc_sleep_on(&xprt->pending, task, xprt_connect_status); in xprt_connect()
754 if (test_bit(XPRT_CLOSING, &xprt->state)) in xprt_connect()
756 if (xprt_test_and_set_connecting(xprt)) in xprt_connect()
758 xprt->stat.connect_start = jiffies; in xprt_connect()
759 xprt->ops->connect(xprt, task); in xprt_connect()
761 xprt_release_write(xprt, task); in xprt_connect()
766 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; in xprt_connect_status() local
769 xprt->stat.connect_count++; in xprt_connect_status()
770 xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; in xprt_connect_status()
793 xprt->servername); in xprt_connect_status()
804 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) in xprt_lookup_rqst() argument
808 list_for_each_entry(entry, &xprt->recv, rq_list) in xprt_lookup_rqst()
810 trace_xprt_lookup_rqst(xprt, xid, 0); in xprt_lookup_rqst()
816 trace_xprt_lookup_rqst(xprt, xid, -ENOENT); in xprt_lookup_rqst()
817 xprt->stat.bad_xids++; in xprt_lookup_rqst()
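
xprt_lookup_rqst() (lines 804-817) matches an incoming reply to its request by XID: every request awaiting a reply sits on the xprt->recv list, the receive path walks it comparing rq_xid, and a miss bumps the bad_xids counter (line 817). A minimal sketch of the same linear lookup:

    #include <stddef.h>
    #include <stdint.h>

    struct rqst {
        uint32_t     xid;   /* rq_xid analogue (__be32 in the kernel) */
        struct rqst *next;  /* xprt->recv list analogue */
    };

    static struct rqst *lookup_rqst_like(struct rqst *recv, uint32_t xid,
                                         unsigned long *bad_xids)
    {
        for (struct rqst *r = recv; r != NULL; r = r->next)
            if (r->xid == xid)
                return r;
        (*bad_xids)++;      /* reply for an unknown or retired request */
        return NULL;
    }
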
846 struct rpc_xprt *xprt = req->rq_xprt; in xprt_complete_rqst() local
850 trace_xprt_complete_rqst(xprt, req->rq_xid, copied); in xprt_complete_rqst()
852 xprt->stat.recvs++; in xprt_complete_rqst()
854 if (xprt->ops->timer != NULL) in xprt_complete_rqst()
863 rpc_wake_up_queued_task(&xprt->pending, task); in xprt_complete_rqst()
870 struct rpc_xprt *xprt = req->rq_xprt; in xprt_timer() local
876 spin_lock_bh(&xprt->transport_lock); in xprt_timer()
878 if (xprt->ops->timer) in xprt_timer()
879 xprt->ops->timer(xprt, task); in xprt_timer()
882 spin_unlock_bh(&xprt->transport_lock); in xprt_timer()
885 static inline int xprt_has_timer(struct rpc_xprt *xprt) in xprt_has_timer() argument
887 return xprt->idle_timeout != 0; in xprt_has_timer()
898 struct rpc_xprt *xprt = req->rq_xprt; in xprt_prepare_transmit() local
903 spin_lock_bh(&xprt->transport_lock); in xprt_prepare_transmit()
910 && xprt_connected(xprt) in xprt_prepare_transmit()
911 && req->rq_connect_cookie == xprt->connect_cookie) { in xprt_prepare_transmit()
912 xprt->ops->set_retrans_timeout(task); in xprt_prepare_transmit()
913 rpc_sleep_on(&xprt->pending, task, xprt_timer); in xprt_prepare_transmit()
917 if (!xprt->ops->reserve_xprt(xprt, task)) { in xprt_prepare_transmit()
923 spin_unlock_bh(&xprt->transport_lock); in xprt_prepare_transmit()
941 struct rpc_xprt *xprt = req->rq_xprt; in xprt_transmit() local
951 spin_lock_bh(&xprt->transport_lock); in xprt_transmit()
956 list_add_tail(&req->rq_list, &xprt->recv); in xprt_transmit()
957 spin_unlock_bh(&xprt->transport_lock); in xprt_transmit()
960 del_singleshot_timer_sync(&xprt->timer); in xprt_transmit()
966 status = xprt->ops->send_request(task); in xprt_transmit()
967 trace_xprt_transmit(xprt, req->rq_xid, status); in xprt_transmit()
975 spin_lock_bh(&xprt->transport_lock); in xprt_transmit()
977 xprt->ops->set_retrans_timeout(task); in xprt_transmit()
979 numreqs = atomic_read(&xprt->num_reqs); in xprt_transmit()
980 if (numreqs > xprt->stat.max_slots) in xprt_transmit()
981 xprt->stat.max_slots = numreqs; in xprt_transmit()
982 xprt->stat.sends++; in xprt_transmit()
983 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; in xprt_transmit()
984 xprt->stat.bklog_u += xprt->backlog.qlen; in xprt_transmit()
985 xprt->stat.sending_u += xprt->sending.qlen; in xprt_transmit()
986 xprt->stat.pending_u += xprt->pending.qlen; in xprt_transmit()
989 if (!xprt_connected(xprt)) in xprt_transmit()
997 rpc_sleep_on(&xprt->pending, task, xprt_timer); in xprt_transmit()
998 req->rq_connect_cookie = xprt->connect_cookie; in xprt_transmit()
1000 spin_unlock_bh(&xprt->transport_lock); in xprt_transmit()
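
xprt_transmit() (lines 941-1000) is careful about ordering: the request is added to xprt->recv (line 956) before ->send_request() runs (line 966), so a reply racing back immediately can still be matched by xprt_lookup_rqst(); only after a successful send are the retransmit timer armed and the queue-depth statistics (max_slots, sends, req_u, bklog_u, sending_u, pending_u) sampled under transport_lock. The "register before send" rule in sketch form (the kernel appends to the list tail; this push-to-head version is just shorter):

    struct pending { struct pending *next; };

    /* Make the request findable by the receive path BEFORE any bytes
     * can leave, or a fast reply may find no matching request. */
    static int transmit_like(struct pending **recv_list, struct pending *req,
                             int (*send_request)(struct pending *))
    {
        req->next = *recv_list;   /* kernel: list_add_tail on xprt->recv */
        *recv_list = req;
        return send_request(req); /* kernel: xprt->ops->send_request(task) */
    }
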
1003 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_add_backlog() argument
1005 set_bit(XPRT_CONGESTED, &xprt->state); in xprt_add_backlog()
1006 rpc_sleep_on(&xprt->backlog, task, NULL); in xprt_add_backlog()
1009 static void xprt_wake_up_backlog(struct rpc_xprt *xprt) in xprt_wake_up_backlog() argument
1011 if (rpc_wake_up_next(&xprt->backlog) == NULL) in xprt_wake_up_backlog()
1012 clear_bit(XPRT_CONGESTED, &xprt->state); in xprt_wake_up_backlog()
1015 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_throttle_congested() argument
1019 if (!test_bit(XPRT_CONGESTED, &xprt->state)) in xprt_throttle_congested()
1021 spin_lock(&xprt->reserve_lock); in xprt_throttle_congested()
1022 if (test_bit(XPRT_CONGESTED, &xprt->state)) { in xprt_throttle_congested()
1023 rpc_sleep_on(&xprt->backlog, task, NULL); in xprt_throttle_congested()
1026 spin_unlock(&xprt->reserve_lock); in xprt_throttle_congested()
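
xprt_throttle_congested() (lines 1015-1026) uses a classic check/lock/re-check pattern: XPRT_CONGESTED is tested once without the lock as a fast path, then re-tested under xprt->reserve_lock before the task is queued on the backlog, avoiding the lock in the common uncongested case without losing a wakeup in the congested one. The same shape with a pthread mutex (sleep_on_backlog() is a stub):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool congested;
    static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;

    static void sleep_on_backlog(void) { /* stub: park the caller */ }

    /* Returns true if the caller was throttled onto the backlog. */
    static bool throttle_congested_like(void)
    {
        bool queued = false;
        if (!atomic_load(&congested))      /* unlocked fast path */
            return false;
        pthread_mutex_lock(&reserve_lock);
        if (atomic_load(&congested)) {     /* re-check under the lock */
            sleep_on_backlog();
            queued = true;
        }
        pthread_mutex_unlock(&reserve_lock);
        return queued;
    }
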
1031 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags) in xprt_dynamic_alloc_slot() argument
1035 if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs)) in xprt_dynamic_alloc_slot()
1040 atomic_dec(&xprt->num_reqs); in xprt_dynamic_alloc_slot()
1046 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_dynamic_free_slot() argument
1048 if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) { in xprt_dynamic_free_slot()
1055 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_alloc_slot() argument
1059 spin_lock(&xprt->reserve_lock); in xprt_alloc_slot()
1060 if (!list_empty(&xprt->free)) { in xprt_alloc_slot()
1061 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); in xprt_alloc_slot()
1065 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN); in xprt_alloc_slot()
1075 xprt_add_backlog(xprt, task); in xprt_alloc_slot()
1080 spin_unlock(&xprt->reserve_lock); in xprt_alloc_slot()
1085 xprt_request_init(task, xprt); in xprt_alloc_slot()
1086 spin_unlock(&xprt->reserve_lock); in xprt_alloc_slot()
1090 void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_lock_and_alloc_slot() argument
1097 if (xprt_lock_write(xprt, task)) { in xprt_lock_and_alloc_slot()
1098 xprt_alloc_slot(xprt, task); in xprt_lock_and_alloc_slot()
1099 xprt_release_write(xprt, task); in xprt_lock_and_alloc_slot()
1104 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_free_slot() argument
1106 spin_lock(&xprt->reserve_lock); in xprt_free_slot()
1107 if (!xprt_dynamic_free_slot(xprt, req)) { in xprt_free_slot()
1109 list_add(&req->rq_list, &xprt->free); in xprt_free_slot()
1111 xprt_wake_up_backlog(xprt); in xprt_free_slot()
1112 spin_unlock(&xprt->reserve_lock); in xprt_free_slot()
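
The slot machinery (lines 1031-1112) combines a free list with bounded dynamic growth: xprt_alloc_slot() first reuses an rpc_rqst from xprt->free, and only when that list is empty does xprt_dynamic_alloc_slot() grow the pool, using atomic_add_unless() so num_reqs can never exceed max_reqs; freeing is symmetric and never shrinks below min_reqs, preserving the preallocated floor. C11 has no atomic_add_unless(), so a userspace sketch needs a compare-exchange loop:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Add 'delta' to *v unless it already equals 'limit'; mirrors the
     * kernel's atomic_add_unless() semantics. */
    static bool add_unless(atomic_int *v, int delta, int limit)
    {
        int old = atomic_load(v);
        do {
            if (old == limit)
                return false;                 /* at the bound: refuse */
        } while (!atomic_compare_exchange_weak(v, &old, old + delta));
        return true;
    }

    /* Usage, as in lines 1035 and 1048:
     *   grow:   add_unless(&num_reqs,  1, max_reqs)
     *   shrink: add_unless(&num_reqs, -1, min_reqs)
     */
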
1115 static void xprt_free_all_slots(struct rpc_xprt *xprt) in xprt_free_all_slots() argument
1118 while (!list_empty(&xprt->free)) { in xprt_free_all_slots()
1119 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); in xprt_free_all_slots()
1129 struct rpc_xprt *xprt; in xprt_alloc() local
1133 xprt = kzalloc(size, GFP_KERNEL); in xprt_alloc()
1134 if (xprt == NULL) in xprt_alloc()
1137 xprt_init(xprt, net); in xprt_alloc()
1143 list_add(&req->rq_list, &xprt->free); in xprt_alloc()
1146 xprt->max_reqs = max_alloc; in xprt_alloc()
1148 xprt->max_reqs = num_prealloc; in xprt_alloc()
1149 xprt->min_reqs = num_prealloc; in xprt_alloc()
1150 atomic_set(&xprt->num_reqs, num_prealloc); in xprt_alloc()
1152 return xprt; in xprt_alloc()
1155 xprt_free(xprt); in xprt_alloc()
1161 void xprt_free(struct rpc_xprt *xprt) in xprt_free() argument
1163 put_net(xprt->xprt_net); in xprt_free()
1164 xprt_free_all_slots(xprt); in xprt_free()
1165 kfree(xprt); in xprt_free()
1179 struct rpc_xprt *xprt; in xprt_reserve() local
1188 xprt = rcu_dereference(task->tk_client->cl_xprt); in xprt_reserve()
1189 if (!xprt_throttle_congested(xprt, task)) in xprt_reserve()
1190 xprt->ops->alloc_slot(xprt, task); in xprt_reserve()
1205 struct rpc_xprt *xprt; in xprt_retry_reserve() local
1214 xprt = rcu_dereference(task->tk_client->cl_xprt); in xprt_retry_reserve()
1215 xprt->ops->alloc_slot(xprt, task); in xprt_retry_reserve()
1219 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) in xprt_alloc_xid() argument
1221 return (__force __be32)xprt->xid++; in xprt_alloc_xid()
1224 static inline void xprt_init_xid(struct rpc_xprt *xprt) in xprt_init_xid() argument
1226 xprt->xid = prandom_u32(); in xprt_init_xid()
1229 static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) in xprt_request_init() argument
1236 req->rq_xprt = xprt; in xprt_request_init()
1238 req->rq_xid = xprt_alloc_xid(xprt); in xprt_request_init()
1239 req->rq_connect_cookie = xprt->connect_cookie - 1; in xprt_request_init()
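
xprt_request_init() (lines 1229-1239) stamps each request with an XID from a per-transport counter that xprt_init_xid() seeds with prandom_u32(): the random start makes stale replies from an earlier incarnation unlikely to match, and a plain increment keeps XIDs unique per transport thereafter. Note also rq_connect_cookie = connect_cookie - 1 (line 1239), which marks the request as not yet sent on the current connection. The XID scheme in miniature (rand() is only an illustrative seed source):

    #include <stdint.h>
    #include <stdlib.h>

    struct xid_gen { uint32_t next; };

    static void xid_init(struct xid_gen *g)
    {
        g->next = (uint32_t)rand();   /* kernel: prandom_u32() */
    }

    static uint32_t xid_alloc(struct xid_gen *g)
    {
        return g->next++;             /* unique per transport */
    }
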
1258 struct rpc_xprt *xprt; in xprt_release() local
1264 xprt = rcu_dereference(task->tk_client->cl_xprt); in xprt_release()
1265 if (xprt->snd_task == task) in xprt_release()
1266 xprt_release_write(xprt, task); in xprt_release()
1272 xprt = req->rq_xprt; in xprt_release()
1277 spin_lock_bh(&xprt->transport_lock); in xprt_release()
1278 xprt->ops->release_xprt(xprt, task); in xprt_release()
1279 if (xprt->ops->release_request) in xprt_release()
1280 xprt->ops->release_request(task); in xprt_release()
1283 xprt->last_used = jiffies; in xprt_release()
1284 if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) in xprt_release()
1285 mod_timer(&xprt->timer, in xprt_release()
1286 xprt->last_used + xprt->idle_timeout); in xprt_release()
1287 spin_unlock_bh(&xprt->transport_lock); in xprt_release()
1289 xprt->ops->buf_free(req->rq_buffer); in xprt_release()
1298 xprt_free_slot(xprt, req); in xprt_release()
1303 static void xprt_init(struct rpc_xprt *xprt, struct net *net) in xprt_init() argument
1305 atomic_set(&xprt->count, 1); in xprt_init()
1307 spin_lock_init(&xprt->transport_lock); in xprt_init()
1308 spin_lock_init(&xprt->reserve_lock); in xprt_init()
1310 INIT_LIST_HEAD(&xprt->free); in xprt_init()
1311 INIT_LIST_HEAD(&xprt->recv); in xprt_init()
1313 spin_lock_init(&xprt->bc_pa_lock); in xprt_init()
1314 INIT_LIST_HEAD(&xprt->bc_pa_list); in xprt_init()
1317 xprt->last_used = jiffies; in xprt_init()
1318 xprt->cwnd = RPC_INITCWND; in xprt_init()
1319 xprt->bind_index = 0; in xprt_init()
1321 rpc_init_wait_queue(&xprt->binding, "xprt_binding"); in xprt_init()
1322 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); in xprt_init()
1323 rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending"); in xprt_init()
1324 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); in xprt_init()
1326 xprt_init_xid(xprt); in xprt_init()
1328 xprt->xprt_net = get_net(net); in xprt_init()
1338 struct rpc_xprt *xprt; in xprt_create_transport() local
1353 xprt = t->setup(args); in xprt_create_transport()
1354 if (IS_ERR(xprt)) { in xprt_create_transport()
1356 -PTR_ERR(xprt)); in xprt_create_transport()
1360 xprt->idle_timeout = 0; in xprt_create_transport()
1361 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); in xprt_create_transport()
1362 if (xprt_has_timer(xprt)) in xprt_create_transport()
1363 setup_timer(&xprt->timer, xprt_init_autodisconnect, in xprt_create_transport()
1364 (unsigned long)xprt); in xprt_create_transport()
1366 init_timer(&xprt->timer); in xprt_create_transport()
1369 xprt_destroy(xprt); in xprt_create_transport()
1372 xprt->servername = kstrdup(args->servername, GFP_KERNEL); in xprt_create_transport()
1373 if (xprt->servername == NULL) { in xprt_create_transport()
1374 xprt_destroy(xprt); in xprt_create_transport()
1378 rpc_xprt_debugfs_register(xprt); in xprt_create_transport()
1380 dprintk("RPC: created transport %p with %u slots\n", xprt, in xprt_create_transport()
1381 xprt->max_reqs); in xprt_create_transport()
1383 return xprt; in xprt_create_transport()
1391 static void xprt_destroy(struct rpc_xprt *xprt) in xprt_destroy() argument
1393 dprintk("RPC: destroying transport %p\n", xprt); in xprt_destroy()
1396 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); in xprt_destroy()
1398 del_timer_sync(&xprt->timer); in xprt_destroy()
1400 rpc_xprt_debugfs_unregister(xprt); in xprt_destroy()
1401 rpc_destroy_wait_queue(&xprt->binding); in xprt_destroy()
1402 rpc_destroy_wait_queue(&xprt->pending); in xprt_destroy()
1403 rpc_destroy_wait_queue(&xprt->sending); in xprt_destroy()
1404 rpc_destroy_wait_queue(&xprt->backlog); in xprt_destroy()
1405 cancel_work_sync(&xprt->task_cleanup); in xprt_destroy()
1406 kfree(xprt->servername); in xprt_destroy()
1410 xprt->ops->destroy(xprt); in xprt_destroy()
1418 void xprt_put(struct rpc_xprt *xprt) in xprt_put() argument
1420 if (atomic_dec_and_test(&xprt->count)) in xprt_put()
1421 xprt_destroy(xprt); in xprt_put()
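
Finally, xprt_put() (lines 1418-1421) pairs with the atomic_set(&xprt->count, 1) in xprt_init() (line 1305): the transport lives while count is non-zero, and whichever caller performs the 1-to-0 transition via atomic_dec_and_test() becomes the sole owner entitled to run xprt_destroy(), which then waits on XPRT_LOCKED (line 1396) before tearing down the queues. The same refcounting idiom in C11:

    #include <stdatomic.h>

    struct refcounted { atomic_int count; };

    static void destroy(struct refcounted *r) { (void)r; /* free it */ }

    static void get_ref(struct refcounted *r)
    {
        atomic_fetch_add(&r->count, 1);
    }

    static void put_ref(struct refcounted *r)
    {
        /* fetch_sub returns the prior value: 1 means we hit zero */
        if (atomic_fetch_sub(&r->count, 1) == 1)
            destroy(r);
    }
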