Lines matching references to "call" in the kernel's AF_RXRPC call management code (net/rxrpc). Each entry shows the source line number, the matching line, and the function containing it; "argument" and "local" tag the lines where the identifier is declared as a parameter or a local variable.

105 static void rxrpc_call_hash_add(struct rxrpc_call *call)  in rxrpc_call_hash_add()  argument
111 switch (call->proto) { in rxrpc_call_hash_add()
113 addr_size = sizeof(call->peer_ip.ipv4_addr); in rxrpc_call_hash_add()
116 addr_size = sizeof(call->peer_ip.ipv6_addr); in rxrpc_call_hash_add()
121 key = rxrpc_call_hashfunc(call->in_clientflag, call->cid, in rxrpc_call_hash_add()
122 call->call_id, call->epoch, in rxrpc_call_hash_add()
123 call->service_id, call->proto, in rxrpc_call_hash_add()
124 call->conn->trans->local, addr_size, in rxrpc_call_hash_add()
125 call->peer_ip.ipv6_addr); in rxrpc_call_hash_add()
127 call->hash_key = key; in rxrpc_call_hash_add()
129 hash_add_rcu(rxrpc_call_hash, &call->hash_node, key); in rxrpc_call_hash_add()
137 static void rxrpc_call_hash_del(struct rxrpc_call *call) in rxrpc_call_hash_del() argument
141 hash_del_rcu(&call->hash_node); in rxrpc_call_hash_del()
162 struct rxrpc_call *call = NULL; in rxrpc_find_call_hash() local
168 addr_size = sizeof(call->peer_ip.ipv4_addr); in rxrpc_find_call_hash()
171 addr_size = sizeof(call->peer_ip.ipv6_addr); in rxrpc_find_call_hash()
180 hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) { in rxrpc_find_call_hash()
181 if (call->hash_key == key && in rxrpc_find_call_hash()
182 call->call_id == call_id && in rxrpc_find_call_hash()
183 call->cid == cid && in rxrpc_find_call_hash()
184 call->in_clientflag == clientflag && in rxrpc_find_call_hash()
185 call->service_id == service_id && in rxrpc_find_call_hash()
186 call->proto == proto && in rxrpc_find_call_hash()
187 call->local == localptr && in rxrpc_find_call_hash()
188 memcmp(call->peer_ip.ipv6_addr, peer_addr, in rxrpc_find_call_hash()
190 call->epoch == epoch) { in rxrpc_find_call_hash()
191 ret = call; in rxrpc_find_call_hash()
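
The three hash helpers above follow the standard <linux/hashtable.h> RCU pattern: compute a key from the call's addressing tuple (rxrpc_call_hashfunc in the real code), publish the node with hash_add_rcu(), and on lookup walk the bucket with hash_for_each_possible_rcu(), re-checking every field of the tuple because distinct tuples can collide on the same key. A minimal sketch of that pattern, using hypothetical demo_* types and jhash_2words() over two fields as a stand-in for the real hash function:

#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/spinlock.h>

/* Simplified stand-in for struct rxrpc_call (illustrative only). */
struct demo_call {
        struct hlist_node       hash_node;
        u32                     call_id;
        u32                     cid;
        unsigned long           hash_key;
};

static DEFINE_HASHTABLE(demo_call_hash, 10);    /* 2^10 buckets */
static DEFINE_SPINLOCK(demo_call_hash_lock);    /* serialises writers */

static void demo_call_hash_add(struct demo_call *call)
{
        call->hash_key = jhash_2words(call->call_id, call->cid, 0);
        spin_lock(&demo_call_hash_lock);
        hash_add_rcu(demo_call_hash, &call->hash_node, call->hash_key);
        spin_unlock(&demo_call_hash_lock);
}

static void demo_call_hash_del(struct demo_call *call)
{
        spin_lock(&demo_call_hash_lock);
        hash_del_rcu(&call->hash_node);
        spin_unlock(&demo_call_hash_lock);
}

/* Caller must hold rcu_read_lock(). */
static struct demo_call *demo_call_hash_find(u32 call_id, u32 cid)
{
        unsigned long key = jhash_2words(call_id, cid, 0);
        struct demo_call *call;

        hash_for_each_possible_rcu(demo_call_hash, call, hash_node, key) {
                /* keys can collide, so compare every field of the tuple */
                if (call->hash_key == key &&
                    call->call_id == call_id &&
                    call->cid == cid)
                        return call;
        }
        return NULL;
}
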
204 struct rxrpc_call *call; in rxrpc_alloc_call() local
206 call = kmem_cache_zalloc(rxrpc_call_jar, gfp); in rxrpc_alloc_call()
207 if (!call) in rxrpc_alloc_call()
210 call->acks_winsz = 16; in rxrpc_alloc_call()
211 call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long), in rxrpc_alloc_call()
213 if (!call->acks_window) { in rxrpc_alloc_call()
214 kmem_cache_free(rxrpc_call_jar, call); in rxrpc_alloc_call()
218 setup_timer(&call->lifetimer, &rxrpc_call_life_expired, in rxrpc_alloc_call()
219 (unsigned long) call); in rxrpc_alloc_call()
220 setup_timer(&call->deadspan, &rxrpc_dead_call_expired, in rxrpc_alloc_call()
221 (unsigned long) call); in rxrpc_alloc_call()
222 setup_timer(&call->ack_timer, &rxrpc_ack_time_expired, in rxrpc_alloc_call()
223 (unsigned long) call); in rxrpc_alloc_call()
224 setup_timer(&call->resend_timer, &rxrpc_resend_time_expired, in rxrpc_alloc_call()
225 (unsigned long) call); in rxrpc_alloc_call()
226 INIT_WORK(&call->destroyer, &rxrpc_destroy_call); in rxrpc_alloc_call()
227 INIT_WORK(&call->processor, &rxrpc_process_call); in rxrpc_alloc_call()
228 INIT_LIST_HEAD(&call->accept_link); in rxrpc_alloc_call()
229 skb_queue_head_init(&call->rx_queue); in rxrpc_alloc_call()
230 skb_queue_head_init(&call->rx_oos_queue); in rxrpc_alloc_call()
231 init_waitqueue_head(&call->tx_waitq); in rxrpc_alloc_call()
232 spin_lock_init(&call->lock); in rxrpc_alloc_call()
233 rwlock_init(&call->state_lock); in rxrpc_alloc_call()
234 atomic_set(&call->usage, 1); in rxrpc_alloc_call()
235 call->debug_id = atomic_inc_return(&rxrpc_debug_id); in rxrpc_alloc_call()
236 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; in rxrpc_alloc_call()
238 memset(&call->sock_node, 0xed, sizeof(call->sock_node)); in rxrpc_alloc_call()
240 call->rx_data_expect = 1; in rxrpc_alloc_call()
241 call->rx_data_eaten = 0; in rxrpc_alloc_call()
242 call->rx_first_oos = 0; in rxrpc_alloc_call()
243 call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size; in rxrpc_alloc_call()
244 call->creation_jif = jiffies; in rxrpc_alloc_call()
245 return call; in rxrpc_alloc_call()
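
rxrpc_alloc_call() is a typical slab-backed constructor: zero the object from a dedicated kmem_cache, then initialise every embedded facility (timers, work items, skb queues, locks, the refcount) before the call can become visible to anyone else. A hedged sketch of that shape, with cut-down hypothetical demo_* types and the older setup_timer() API that the listing itself uses:

#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_call {
        struct timer_list       lifetimer;
        struct work_struct      processor;
        struct sk_buff_head     rx_queue;
        spinlock_t              lock;
        atomic_t                usage;
        unsigned long           *acks_window;
        int                     acks_winsz;
};

/* created at module init, e.g. demo_call_jar = KMEM_CACHE(demo_call, 0); */
static struct kmem_cache *demo_call_jar;

static void demo_call_life_expired(unsigned long data)
{
        struct demo_call *call = (struct demo_call *)data;

        schedule_work(&call->processor);        /* defer work out of the timer */
}

static void demo_process_call(struct work_struct *work)
{
        /* the real processor dispatches the call's pending events here */
}

static struct demo_call *demo_alloc_call(gfp_t gfp)
{
        struct demo_call *call;

        call = kmem_cache_zalloc(demo_call_jar, gfp);
        if (!call)
                return NULL;

        /* the ACK window must be a power of two for the CIRC_* macros */
        call->acks_winsz = 16;
        call->acks_window = kmalloc_array(call->acks_winsz,
                                          sizeof(unsigned long), gfp);
        if (!call->acks_window) {
                kmem_cache_free(demo_call_jar, call);
                return NULL;
        }

        setup_timer(&call->lifetimer, demo_call_life_expired,
                    (unsigned long)call);
        INIT_WORK(&call->processor, demo_process_call);
        skb_queue_head_init(&call->rx_queue);
        spin_lock_init(&call->lock);
        atomic_set(&call->usage, 1);            /* caller owns the first ref */
        return call;
}
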
257 struct rxrpc_call *call; in rxrpc_alloc_client_call() local
266 call = rxrpc_alloc_call(gfp); in rxrpc_alloc_client_call()
267 if (!call) in rxrpc_alloc_client_call()
271 call->socket = rx; in rxrpc_alloc_client_call()
272 call->rx_data_post = 1; in rxrpc_alloc_client_call()
274 ret = rxrpc_connect_call(rx, trans, bundle, call, gfp); in rxrpc_alloc_client_call()
276 kmem_cache_free(rxrpc_call_jar, call); in rxrpc_alloc_client_call()
281 call->proto = rx->proto; in rxrpc_alloc_client_call()
282 call->local = trans->local; in rxrpc_alloc_client_call()
283 switch (call->proto) { in rxrpc_alloc_client_call()
285 call->peer_ip.ipv4_addr = in rxrpc_alloc_client_call()
289 memcpy(call->peer_ip.ipv6_addr, in rxrpc_alloc_client_call()
291 sizeof(call->peer_ip.ipv6_addr)); in rxrpc_alloc_client_call()
294 call->epoch = call->conn->epoch; in rxrpc_alloc_client_call()
295 call->service_id = call->conn->service_id; in rxrpc_alloc_client_call()
296 call->in_clientflag = call->conn->in_clientflag; in rxrpc_alloc_client_call()
298 rxrpc_call_hash_add(call); in rxrpc_alloc_client_call()
300 spin_lock(&call->conn->trans->peer->lock); in rxrpc_alloc_client_call()
301 list_add(&call->error_link, &call->conn->trans->peer->error_targets); in rxrpc_alloc_client_call()
302 spin_unlock(&call->conn->trans->peer->lock); in rxrpc_alloc_client_call()
304 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime; in rxrpc_alloc_client_call()
305 add_timer(&call->lifetimer); in rxrpc_alloc_client_call()
307 _leave(" = %p", call); in rxrpc_alloc_client_call()
308 return call; in rxrpc_alloc_client_call()
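
After the generic allocation, rxrpc_alloc_client_call() copies the addressing tuple out of the connection, hashes the call, links it onto the peer's error_targets list under the peer lock (so network errors reported against the peer can be fanned out to every call using it), and arms the lifetime timer. The peer-list step is plain locked list insertion; an illustrative sketch with hypothetical demo_* types and an assumed demo_max_call_lifetime tunable:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_peer {
        spinlock_t              lock;
        struct list_head        error_targets;  /* calls to notify on error */
};

struct demo_call {
        struct list_head        error_link;
        struct timer_list       lifetimer;      /* initialised at alloc time */
};

/* assumed tunable, analogous to rxrpc_max_call_lifetime */
static unsigned long demo_max_call_lifetime = 60 * HZ;

static void demo_attach_call_to_peer(struct demo_call *call,
                                     struct demo_peer *peer)
{
        spin_lock(&peer->lock);
        list_add(&call->error_link, &peer->error_targets);
        spin_unlock(&peer->lock);

        /* give the call a hard lifetime from the moment it is created */
        call->lifetimer.expires = jiffies + demo_max_call_lifetime;
        add_timer(&call->lifetimer);
}
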
322 struct rxrpc_call *call, *candidate; in rxrpc_get_client_call() local
335 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_get_client_call()
337 if (user_call_ID < call->user_call_ID) in rxrpc_get_client_call()
339 else if (user_call_ID > call->user_call_ID) in rxrpc_get_client_call()
367 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_get_client_call()
369 if (user_call_ID < call->user_call_ID) in rxrpc_get_client_call()
371 else if (user_call_ID > call->user_call_ID) in rxrpc_get_client_call()
378 call = candidate; in rxrpc_get_client_call()
380 rxrpc_get_call(call); in rxrpc_get_client_call()
382 rb_link_node(&call->sock_node, parent, pp); in rxrpc_get_client_call()
383 rb_insert_color(&call->sock_node, &rx->calls); in rxrpc_get_client_call()
387 list_add_tail(&call->link, &rxrpc_calls); in rxrpc_get_client_call()
390 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); in rxrpc_get_client_call()
392 _leave(" = %p [new]", call); in rxrpc_get_client_call()
393 return call; in rxrpc_get_client_call()
397 rxrpc_get_call(call); in rxrpc_get_client_call()
399 _leave(" = %p [extant %d]", call, atomic_read(&call->usage)); in rxrpc_get_client_call()
400 return call; in rxrpc_get_client_call()
404 rxrpc_get_call(call); in rxrpc_get_client_call()
407 _leave(" = %p [second %d]", call, atomic_read(&call->usage)); in rxrpc_get_client_call()
408 return call; in rxrpc_get_client_call()
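
rxrpc_get_client_call() indexes a socket's calls by user_call_ID in an rbtree: a first descent looks for an existing call, and if the ID is new a second descent (under the write lock, with a preallocated candidate) finds the link point for rb_link_node()/rb_insert_color(). The canonical <linux/rbtree.h> shape, reduced to a hypothetical demo_call:

#include <linux/rbtree.h>

struct demo_call {
        struct rb_node  sock_node;
        unsigned long   user_call_ID;
};

static struct demo_call *demo_find_user_call(struct rb_root *calls,
                                             unsigned long user_call_ID)
{
        struct rb_node *p = calls->rb_node;

        while (p) {
                struct demo_call *call =
                        rb_entry(p, struct demo_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        return call;
        }
        return NULL;
}

static void demo_insert_user_call(struct rb_root *calls,
                                  struct demo_call *new)
{
        struct rb_node **pp = &calls->rb_node, *parent = NULL;

        while (*pp) {
                struct demo_call *call;

                parent = *pp;
                call = rb_entry(parent, struct demo_call, sock_node);
                if (new->user_call_ID < call->user_call_ID)
                        pp = &(*pp)->rb_left;
                else
                        pp = &(*pp)->rb_right;  /* duplicates resolved earlier */
        }
        rb_link_node(&new->sock_node, parent, pp);
        rb_insert_color(&new->sock_node, calls);
}
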
420 struct rxrpc_call *call, *candidate; in rxrpc_incoming_call() local
445 call = conn->channels[candidate->channel]; in rxrpc_incoming_call()
446 _debug("channel[%u] is %p", candidate->channel, call); in rxrpc_incoming_call()
447 if (call && call->call_id == hdr->callNumber) { in rxrpc_incoming_call()
449 _debug("extant call [%d]", call->state); in rxrpc_incoming_call()
450 ASSERTCMP(call->conn, ==, conn); in rxrpc_incoming_call()
452 read_lock(&call->state_lock); in rxrpc_incoming_call()
453 switch (call->state) { in rxrpc_incoming_call()
455 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) in rxrpc_incoming_call()
456 rxrpc_queue_call(call); in rxrpc_incoming_call()
458 read_unlock(&call->state_lock); in rxrpc_incoming_call()
461 rxrpc_get_call(call); in rxrpc_incoming_call()
462 read_unlock(&call->state_lock); in rxrpc_incoming_call()
467 if (call) { in rxrpc_incoming_call()
471 call->debug_id, rxrpc_call_states[call->state]); in rxrpc_incoming_call()
473 if (call->state >= RXRPC_CALL_COMPLETE) { in rxrpc_incoming_call()
474 conn->channels[call->channel] = NULL; in rxrpc_incoming_call()
490 call = rb_entry(parent, struct rxrpc_call, conn_node); in rxrpc_incoming_call()
495 if ((__force u32)call_id < (__force u32)call->call_id) in rxrpc_incoming_call()
497 else if ((__force u32)call_id > (__force u32)call->call_id) in rxrpc_incoming_call()
505 call = candidate; in rxrpc_incoming_call()
507 rb_link_node(&call->conn_node, parent, p); in rxrpc_incoming_call()
508 rb_insert_color(&call->conn_node, &conn->calls); in rxrpc_incoming_call()
509 conn->channels[call->channel] = call; in rxrpc_incoming_call()
515 list_add(&call->error_link, &conn->trans->peer->error_targets); in rxrpc_incoming_call()
519 list_add_tail(&call->link, &rxrpc_calls); in rxrpc_incoming_call()
523 call->proto = rx->proto; in rxrpc_incoming_call()
524 call->local = conn->trans->local; in rxrpc_incoming_call()
525 switch (call->proto) { in rxrpc_incoming_call()
527 call->peer_ip.ipv4_addr = in rxrpc_incoming_call()
531 memcpy(call->peer_ip.ipv6_addr, in rxrpc_incoming_call()
533 sizeof(call->peer_ip.ipv6_addr)); in rxrpc_incoming_call()
538 call->epoch = conn->epoch; in rxrpc_incoming_call()
539 call->service_id = conn->service_id; in rxrpc_incoming_call()
540 call->in_clientflag = conn->in_clientflag; in rxrpc_incoming_call()
542 rxrpc_call_hash_add(call); in rxrpc_incoming_call()
544 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); in rxrpc_incoming_call()
546 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime; in rxrpc_incoming_call()
547 add_timer(&call->lifetimer); in rxrpc_incoming_call()
548 _leave(" = %p {%d} [new]", call, call->debug_id); in rxrpc_incoming_call()
549 return call; in rxrpc_incoming_call()
554 _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1); in rxrpc_incoming_call()
555 return call; in rxrpc_incoming_call()
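
For an incoming packet, rxrpc_incoming_call() first consults the connection's fixed per-channel slot (an rx connection multiplexes up to four calls, one per channel) and reuses the extant call if the channel already carries this callNumber; only otherwise does it fall back to inserting a candidate into the per-connection rbtree keyed by call_id. The fast path is just an array index plus a call-ID compare; roughly, with hypothetical demo_* types:

#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_MAXCALLS   4       /* rx multiplexes four calls per connection */

struct demo_call {
        u32     call_id;
        u8      channel;
};

struct demo_conn {
        rwlock_t                lock;
        struct demo_call        *channels[DEMO_MAXCALLS];
};

/* Return the live call already bound to this channel, if any. */
static struct demo_call *demo_find_channel_call(struct demo_conn *conn,
                                                u8 channel, u32 call_id)
{
        struct demo_call *call;

        read_lock(&conn->lock);
        call = conn->channels[channel & (DEMO_MAXCALLS - 1)];
        if (call && call->call_id == call_id) {
                /* real code also takes a reference while the lock is held */
                read_unlock(&conn->lock);
                return call;
        }
        read_unlock(&conn->lock);
        return NULL;            /* caller falls back to the rbtree insert */
}
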
577 struct rxrpc_call *call; in rxrpc_find_server_call() local
588 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_find_server_call()
590 if (user_call_ID < call->user_call_ID) in rxrpc_find_server_call()
592 else if (user_call_ID > call->user_call_ID) in rxrpc_find_server_call()
604 rxrpc_get_call(call); in rxrpc_find_server_call()
606 _leave(" = %p [%d]", call, atomic_read(&call->usage)); in rxrpc_find_server_call()
607 return call; in rxrpc_find_server_call()
613 void rxrpc_release_call(struct rxrpc_call *call) in rxrpc_release_call() argument
615 struct rxrpc_connection *conn = call->conn; in rxrpc_release_call()
616 struct rxrpc_sock *rx = call->socket; in rxrpc_release_call()
619 call->debug_id, atomic_read(&call->usage), in rxrpc_release_call()
620 atomic_read(&call->ackr_not_idle), in rxrpc_release_call()
621 call->rx_first_oos); in rxrpc_release_call()
623 spin_lock_bh(&call->lock); in rxrpc_release_call()
624 if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)) in rxrpc_release_call()
626 spin_unlock_bh(&call->lock); in rxrpc_release_call()
631 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); in rxrpc_release_call()
634 if (!list_empty(&call->accept_link)) { in rxrpc_release_call()
636 call, call->events, call->flags); in rxrpc_release_call()
637 ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); in rxrpc_release_call()
638 list_del_init(&call->accept_link); in rxrpc_release_call()
640 } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { in rxrpc_release_call()
641 rb_erase(&call->sock_node, &rx->calls); in rxrpc_release_call()
642 memset(&call->sock_node, 0xdd, sizeof(call->sock_node)); in rxrpc_release_call()
643 clear_bit(RXRPC_CALL_HAS_USERID, &call->flags); in rxrpc_release_call()
650 write_lock(&call->state_lock); in rxrpc_release_call()
652 if (conn->channels[call->channel] == call) in rxrpc_release_call()
653 conn->channels[call->channel] = NULL; in rxrpc_release_call()
684 if (call->state < RXRPC_CALL_COMPLETE && in rxrpc_release_call()
685 call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { in rxrpc_release_call()
686 _debug("+++ ABORTING STATE %d +++\n", call->state); in rxrpc_release_call()
687 call->state = RXRPC_CALL_LOCALLY_ABORTED; in rxrpc_release_call()
688 call->abort_code = RX_CALL_DEAD; in rxrpc_release_call()
689 set_bit(RXRPC_CALL_ABORT, &call->events); in rxrpc_release_call()
690 rxrpc_queue_call(call); in rxrpc_release_call()
692 write_unlock(&call->state_lock); in rxrpc_release_call()
696 if (!skb_queue_empty(&call->rx_queue) || in rxrpc_release_call()
697 !skb_queue_empty(&call->rx_oos_queue)) { in rxrpc_release_call()
703 spin_lock_bh(&call->lock); in rxrpc_release_call()
704 while ((skb = skb_dequeue(&call->rx_queue)) || in rxrpc_release_call()
705 (skb = skb_dequeue(&call->rx_oos_queue))) { in rxrpc_release_call()
707 if (sp->call) { in rxrpc_release_call()
708 ASSERTCMP(sp->call, ==, call); in rxrpc_release_call()
709 rxrpc_put_call(call); in rxrpc_release_call()
710 sp->call = NULL; in rxrpc_release_call()
713 spin_unlock_bh(&call->lock); in rxrpc_release_call()
720 spin_lock_bh(&call->lock); in rxrpc_release_call()
722 spin_unlock_bh(&call->lock); in rxrpc_release_call()
724 ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE); in rxrpc_release_call()
727 del_timer_sync(&call->resend_timer); in rxrpc_release_call()
728 del_timer_sync(&call->ack_timer); in rxrpc_release_call()
729 del_timer_sync(&call->lifetimer); in rxrpc_release_call()
730 call->deadspan.expires = jiffies + rxrpc_dead_call_expiry; in rxrpc_release_call()
731 add_timer(&call->deadspan); in rxrpc_release_call()
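
The tail of rxrpc_release_call() drains both receive queues under the call lock: each queued skb pins the call through its control block, so that reference is dropped as the skb is discarded, and only then is the deadspan timer armed to delay final destruction. The drain loop, sketched with a hypothetical demo_skb_priv control block in the style of rxrpc_skb():

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct demo_call {
        spinlock_t              lock;
        atomic_t                usage;
        struct sk_buff_head     rx_queue;
        struct sk_buff_head     rx_oos_queue;
};

/* hypothetical per-skb control block */
struct demo_skb_priv {
        struct demo_call        *call;
};

#define demo_skb(skb) ((struct demo_skb_priv *)&(skb)->cb)

static void demo_put_call(struct demo_call *call)
{
        atomic_dec(&call->usage);       /* the real put frees at zero */
}

static void demo_drain_rx_queues(struct demo_call *call)
{
        struct sk_buff *skb;

        spin_lock_bh(&call->lock);
        while ((skb = skb_dequeue(&call->rx_queue)) ||
               (skb = skb_dequeue(&call->rx_oos_queue))) {
                struct demo_skb_priv *sp = demo_skb(skb);

                if (sp->call) {
                        demo_put_call(sp->call);  /* drop the skb's call ref */
                        sp->call = NULL;
                }
                kfree_skb(skb);
        }
        spin_unlock_bh(&call->lock);
}
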
741 struct rxrpc_call *call = (struct rxrpc_call *) _call; in rxrpc_dead_call_expired() local
743 _enter("{%d}", call->debug_id); in rxrpc_dead_call_expired()
745 write_lock_bh(&call->state_lock); in rxrpc_dead_call_expired()
746 call->state = RXRPC_CALL_DEAD; in rxrpc_dead_call_expired()
747 write_unlock_bh(&call->state_lock); in rxrpc_dead_call_expired()
748 rxrpc_put_call(call); in rxrpc_dead_call_expired()
755 static void rxrpc_mark_call_released(struct rxrpc_call *call) in rxrpc_mark_call_released() argument
759 write_lock(&call->state_lock); in rxrpc_mark_call_released()
760 if (call->state < RXRPC_CALL_DEAD) { in rxrpc_mark_call_released()
762 if (call->state < RXRPC_CALL_COMPLETE) { in rxrpc_mark_call_released()
763 _debug("abort call %p", call); in rxrpc_mark_call_released()
764 call->state = RXRPC_CALL_LOCALLY_ABORTED; in rxrpc_mark_call_released()
765 call->abort_code = RX_CALL_DEAD; in rxrpc_mark_call_released()
766 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) in rxrpc_mark_call_released()
769 if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) in rxrpc_mark_call_released()
772 rxrpc_queue_call(call); in rxrpc_mark_call_released()
774 write_unlock(&call->state_lock); in rxrpc_mark_call_released()
782 struct rxrpc_call *call; in rxrpc_release_calls_on_socket() local
791 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_release_calls_on_socket()
792 rxrpc_mark_call_released(call); in rxrpc_release_calls_on_socket()
796 list_for_each_entry(call, &rx->secureq, accept_link) { in rxrpc_release_calls_on_socket()
797 rxrpc_mark_call_released(call); in rxrpc_release_calls_on_socket()
800 list_for_each_entry(call, &rx->acceptq, accept_link) { in rxrpc_release_calls_on_socket()
801 rxrpc_mark_call_released(call); in rxrpc_release_calls_on_socket()
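
rxrpc_release_calls_on_socket() has to reach every call the socket still knows about: it walks the user-ID rbtree and then the secure/accept queues, marking each call released so its processor will abort and tear it down. A sketch of that traversal over hypothetical demo_* types (only one accept queue shown):

#include <linux/rbtree.h>
#include <linux/list.h>

struct demo_call {
        struct rb_node          sock_node;
        struct list_head        accept_link;
};

struct demo_sock {
        struct rb_root          calls;          /* keyed by user_call_ID */
        struct list_head        acceptq;        /* calls awaiting accept() */
};

static void demo_mark_call_released(struct demo_call *call)
{
        /* the real helper aborts the call and queues its processor */
}

static void demo_release_calls_on_socket(struct demo_sock *rx)
{
        struct rb_node *p;
        struct demo_call *call;

        /* every call with a user ID lives in the socket's rbtree */
        for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
                call = rb_entry(p, struct demo_call, sock_node);
                demo_mark_call_released(call);
        }

        /* calls not yet accepted sit only on the accept queue */
        list_for_each_entry(call, &rx->acceptq, accept_link)
                demo_mark_call_released(call);
}
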
811 void __rxrpc_put_call(struct rxrpc_call *call) in __rxrpc_put_call() argument
813 ASSERT(call != NULL); in __rxrpc_put_call()
815 _enter("%p{u=%d}", call, atomic_read(&call->usage)); in __rxrpc_put_call()
817 ASSERTCMP(atomic_read(&call->usage), >, 0); in __rxrpc_put_call()
819 if (atomic_dec_and_test(&call->usage)) { in __rxrpc_put_call()
820 _debug("call %d dead", call->debug_id); in __rxrpc_put_call()
821 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); in __rxrpc_put_call()
822 rxrpc_queue_work(&call->destroyer); in __rxrpc_put_call()
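
__rxrpc_put_call() is the classic refcount-drop shape: sanity-check the count, atomic_dec_and_test(), and on the final put defer the actual teardown to a work item so it runs in process context rather than wherever the last reference happened to be dropped. Schematically, with hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/bug.h>

struct demo_call {
        atomic_t                usage;
        struct work_struct      destroyer;      /* runs the real cleanup */
};

static void demo_put_call(struct demo_call *call)
{
        BUG_ON(atomic_read(&call->usage) <= 0);

        if (atomic_dec_and_test(&call->usage)) {
                /* last reference gone: destroy from process context */
                schedule_work(&call->destroyer);
        }
}
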
830 static void rxrpc_cleanup_call(struct rxrpc_call *call) in rxrpc_cleanup_call() argument
832 _net("DESTROY CALL %d", call->debug_id); in rxrpc_cleanup_call()
834 ASSERT(call->socket); in rxrpc_cleanup_call()
836 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); in rxrpc_cleanup_call()
838 del_timer_sync(&call->lifetimer); in rxrpc_cleanup_call()
839 del_timer_sync(&call->deadspan); in rxrpc_cleanup_call()
840 del_timer_sync(&call->ack_timer); in rxrpc_cleanup_call()
841 del_timer_sync(&call->resend_timer); in rxrpc_cleanup_call()
843 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); in rxrpc_cleanup_call()
844 ASSERTCMP(call->events, ==, 0); in rxrpc_cleanup_call()
845 if (work_pending(&call->processor)) { in rxrpc_cleanup_call()
847 rxrpc_queue_work(&call->destroyer); in rxrpc_cleanup_call()
851 if (call->conn) { in rxrpc_cleanup_call()
852 spin_lock(&call->conn->trans->peer->lock); in rxrpc_cleanup_call()
853 list_del(&call->error_link); in rxrpc_cleanup_call()
854 spin_unlock(&call->conn->trans->peer->lock); in rxrpc_cleanup_call()
856 write_lock_bh(&call->conn->lock); in rxrpc_cleanup_call()
857 rb_erase(&call->conn_node, &call->conn->calls); in rxrpc_cleanup_call()
858 write_unlock_bh(&call->conn->lock); in rxrpc_cleanup_call()
859 rxrpc_put_connection(call->conn); in rxrpc_cleanup_call()
863 rxrpc_call_hash_del(call); in rxrpc_cleanup_call()
865 if (call->acks_window) { in rxrpc_cleanup_call()
867 CIRC_CNT(call->acks_head, call->acks_tail, in rxrpc_cleanup_call()
868 call->acks_winsz)); in rxrpc_cleanup_call()
870 while (CIRC_CNT(call->acks_head, call->acks_tail, in rxrpc_cleanup_call()
871 call->acks_winsz) > 0) { in rxrpc_cleanup_call()
875 _skb = call->acks_window[call->acks_tail] & ~1; in rxrpc_cleanup_call()
879 call->acks_tail = in rxrpc_cleanup_call()
880 (call->acks_tail + 1) & (call->acks_winsz - 1); in rxrpc_cleanup_call()
883 kfree(call->acks_window); in rxrpc_cleanup_call()
886 rxrpc_free_skb(call->tx_pending); in rxrpc_cleanup_call()
888 rxrpc_purge_queue(&call->rx_queue); in rxrpc_cleanup_call()
889 ASSERT(skb_queue_empty(&call->rx_oos_queue)); in rxrpc_cleanup_call()
890 sock_put(&call->socket->sk); in rxrpc_cleanup_call()
891 kmem_cache_free(rxrpc_call_jar, call); in rxrpc_cleanup_call()
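
rxrpc_cleanup_call() also empties the call's acks_window ring, a power-of-two buffer of encoded sk_buff pointers indexed with CIRC_CNT() from <linux/circ_buf.h>; the bottom bit of each slot is reserved as a flag, hence the "& ~1" mask in the listing. A cut-down sketch of that drain over a hypothetical demo_call:

#include <linux/circ_buf.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct demo_call {
        unsigned long   *acks_window;   /* ring of encoded skb pointers */
        int             acks_head;
        int             acks_tail;
        int             acks_winsz;     /* power of two */
};

static void demo_drain_acks_window(struct demo_call *call)
{
        if (!call->acks_window)
                return;

        while (CIRC_CNT(call->acks_head, call->acks_tail,
                        call->acks_winsz) > 0) {
                /* mask off the flag bit to recover the skb pointer */
                unsigned long _skb =
                        call->acks_window[call->acks_tail] & ~1UL;

                kfree_skb((struct sk_buff *)_skb);
                call->acks_tail =
                        (call->acks_tail + 1) & (call->acks_winsz - 1);
        }

        kfree(call->acks_window);
        call->acks_window = NULL;
}
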
899 struct rxrpc_call *call = in rxrpc_destroy_call() local
903 call, atomic_read(&call->usage), call->channel, call->conn); in rxrpc_destroy_call()
905 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); in rxrpc_destroy_call()
908 list_del_init(&call->link); in rxrpc_destroy_call()
911 rxrpc_cleanup_call(call); in rxrpc_destroy_call()
921 struct rxrpc_call *call; in rxrpc_destroy_all_calls() local
927 call = list_entry(rxrpc_calls.next, struct rxrpc_call, link); in rxrpc_destroy_all_calls()
928 _debug("Zapping call %p", call); in rxrpc_destroy_all_calls()
930 list_del_init(&call->link); in rxrpc_destroy_all_calls()
932 switch (atomic_read(&call->usage)) { in rxrpc_destroy_all_calls()
934 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); in rxrpc_destroy_all_calls()
937 if (del_timer_sync(&call->deadspan) != 0 && in rxrpc_destroy_all_calls()
938 call->state != RXRPC_CALL_DEAD) in rxrpc_destroy_all_calls()
939 rxrpc_dead_call_expired((unsigned long) call); in rxrpc_destroy_all_calls()
940 if (call->state != RXRPC_CALL_DEAD) in rxrpc_destroy_all_calls()
945 call, atomic_read(&call->usage), in rxrpc_destroy_all_calls()
946 atomic_read(&call->ackr_not_idle), in rxrpc_destroy_all_calls()
947 rxrpc_call_states[call->state], in rxrpc_destroy_all_calls()
948 call->flags, call->events); in rxrpc_destroy_all_calls()
949 if (!skb_queue_empty(&call->rx_queue)) in rxrpc_destroy_all_calls()
951 if (!skb_queue_empty(&call->rx_oos_queue)) in rxrpc_destroy_all_calls()
970 struct rxrpc_call *call = (struct rxrpc_call *) _call; in rxrpc_call_life_expired() local
972 if (call->state >= RXRPC_CALL_COMPLETE) in rxrpc_call_life_expired()
975 _enter("{%d}", call->debug_id); in rxrpc_call_life_expired()
976 read_lock_bh(&call->state_lock); in rxrpc_call_life_expired()
977 if (call->state < RXRPC_CALL_COMPLETE) { in rxrpc_call_life_expired()
978 set_bit(RXRPC_CALL_LIFE_TIMER, &call->events); in rxrpc_call_life_expired()
979 rxrpc_queue_call(call); in rxrpc_call_life_expired()
981 read_unlock_bh(&call->state_lock); in rxrpc_call_life_expired()
990 struct rxrpc_call *call = (struct rxrpc_call *) _call; in rxrpc_resend_time_expired() local
992 _enter("{%d}", call->debug_id); in rxrpc_resend_time_expired()
994 if (call->state >= RXRPC_CALL_COMPLETE) in rxrpc_resend_time_expired()
997 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); in rxrpc_resend_time_expired()
998 if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) in rxrpc_resend_time_expired()
999 rxrpc_queue_call(call); in rxrpc_resend_time_expired()
1007 struct rxrpc_call *call = (struct rxrpc_call *) _call; in rxrpc_ack_time_expired() local
1009 _enter("{%d}", call->debug_id); in rxrpc_ack_time_expired()
1011 if (call->state >= RXRPC_CALL_COMPLETE) in rxrpc_ack_time_expired()
1014 read_lock_bh(&call->state_lock); in rxrpc_ack_time_expired()
1015 if (call->state < RXRPC_CALL_COMPLETE && in rxrpc_ack_time_expired()
1016 !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) in rxrpc_ack_time_expired()
1017 rxrpc_queue_call(call); in rxrpc_ack_time_expired()
1018 read_unlock_bh(&call->state_lock); in rxrpc_ack_time_expired()
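
The three expiry handlers above share one pattern: a timer callback never does the work itself; it checks that the call is still live, sets the relevant event bit under the state lock, and queues the call's processor. Under the old unsigned-long timer API this looks roughly like the following, with hypothetical demo_* names and an illustrative event/state numbering:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define DEMO_CALL_EV_ACK        0       /* event bit: an ACK is due */
#define DEMO_CALL_COMPLETE      10      /* state threshold (illustrative) */

struct demo_call {
        rwlock_t                state_lock;
        unsigned int            state;
        unsigned long           events;
        struct work_struct      processor;
};

static void demo_ack_time_expired(unsigned long data)
{
        struct demo_call *call = (struct demo_call *)data;

        if (call->state >= DEMO_CALL_COMPLETE)
                return;                 /* nothing to do for a finished call */

        read_lock_bh(&call->state_lock);
        if (call->state < DEMO_CALL_COMPLETE &&
            !test_and_set_bit(DEMO_CALL_EV_ACK, &call->events))
                schedule_work(&call->processor);        /* defer the real work */
        read_unlock_bh(&call->state_lock);
}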