net/rxrpc/call_object.c

DEFINITIONS

This source file includes the following definitions.
  1. rxrpc_call_timer_expired
  2. rxrpc_find_call_by_user_ID
  3. rxrpc_alloc_call
  4. rxrpc_alloc_client_call
  5. rxrpc_start_call_timer
  6. rxrpc_new_client_call
  7. rxrpc_incoming_call
  8. rxrpc_queue_call
  9. __rxrpc_queue_call
  10. rxrpc_see_call
  11. rxrpc_get_call
  12. rxrpc_cleanup_ring
  13. rxrpc_release_call
  14. rxrpc_release_calls_on_socket
  15. rxrpc_put_call
  16. rxrpc_destroy_call
  17. rxrpc_rcu_destroy_call
  18. rxrpc_cleanup_call
  19. rxrpc_destroy_all_calls

// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
        struct rxrpc_call *call = from_timer(call, t, timer);

        _enter("%d", call->debug_id);

        if (call->state < RXRPC_CALL_COMPLETE) {
                trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
                rxrpc_queue_call(call);
        }
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

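The lookup returns with a reference taken on the call (rxrpc_call_got), so every successful caller must balance it with rxrpc_put_call(). A minimal sketch of the pattern, loosely modelled on the sendmsg path (the error value and the surrounding logic here are illustrative, not a quote of that code):

        call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
        if (!call)
                return -EBADSLT;        /* no call with that user ID on this socket */
        /* ... operate on the call; the ref pins it against release ... */
        rxrpc_put_call(call, rxrpc_call_put);
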
/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
                                    unsigned int debug_id)
{
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                    sizeof(struct sk_buff *),
                                    gfp);
        if (!call->rxtx_buffer)
                goto nomem;

        call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
        if (!call->rxtx_annotations)
                goto nomem_2;

        mutex_init(&call->user_mutex);

        /* Prevent lockdep reporting a deadlock false positive between the afs
         * filesystem and sys_sendmsg() via the mmap sem.
         */
        if (rx->sk.sk_kern_sock)
                lockdep_set_class(&call->user_mutex,
                                  &rxrpc_call_user_mutex_lock_class_key);

        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        spin_lock_init(&call->notify_lock);
        spin_lock_init(&call->input_lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
        call->next_rx_timo = 20 * HZ;
        call->next_req_timo = 1 * HZ;

        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        /* Leave space in the ring to handle a maxed-out jumbo packet */
        call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;
        call->rx_expect_next = 1;

        call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

        call->rxnet = rxnet;
        atomic_inc(&rxnet->nr_calls);
        return call;

nomem_2:
        kfree(call->rxtx_buffer);
nomem:
        kmem_cache_free(rxrpc_call_jar, call);
        return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp,
                                                  unsigned int debug_id)
{
        struct rxrpc_call *call;
        ktime_t now;

        _enter("");

        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->service_id = srx->srx_service;
        call->tx_phase = true;
        now = ktime_get_real();
        call->acks_latest_ts = now;
        call->cong_tstamp = now;

        _leave(" = %p", call);
        return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;

        call->ack_at = j;
        call->ack_lost_at = j;
        call->resend_at = j;
        call->ping_at = j;
        call->expect_rx_by = j;
        call->expect_req_by = j;
        call->expect_term_by = j;
        call->timer.expires = now;
}

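All of the timeouts start out effectively at infinity (now + MAX_JIFFY_OFFSET) and call->timer.expires is merely primed; individual events are then pulled forward as traffic flows. A sketch of how a timeout is brought nearer, modelled on the ACK-proposing code (rxrpc_reduce_call_timer() is the helper in ar-internal.h wrapping timer_reduce(), which only ever moves an expiry earlier; "expiry" here is an illustrative delay, not a real variable):

        unsigned long now = jiffies;
        unsigned long ack_at = now + expiry;    /* "expiry": illustrative delay */

        if (time_before(ack_at, call->ack_at)) {
                WRITE_ONCE(call->ack_at, ack_at);
                rxrpc_reduce_call_timer(call, ack_at, now,
                                        rxrpc_timer_set_for_ack);
        }
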
/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         struct rxrpc_call_params *p,
                                         gfp_t gfp,
                                         unsigned int debug_id)
        __releases(&rx->sk.sk_lock.slock)
        __acquires(&call->user_mutex)
{
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet;
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, p->user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        call->interruptibility = p->interruptibility;
        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
                         atomic_read(&call->usage),
                         here, (const void *)p->user_call_ID);

        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
         */
        mutex_lock(&call->user_mutex);

        /* Publish the call, even though it is incompletely set up as yet */
        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }

        rcu_assign_pointer(call->socket, rx);
        call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        rxnet = call->rxnet;
        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
                         atomic_read(&call->usage), here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        ret = -EEXIST;

error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        trace_rxrpc_call(call->debug_id, rxrpc_call_error,
                         atomic_read(&call->usage), here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

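Note the locking handover: the function is entered with the socket lock held and returns either with call->user_mutex held (success) or with everything dropped (error). A usage sketch modelled on rxrpc_kernel_begin_call() in af_rxrpc.c, with cp, p, srx and debug_id assumed to have been prepared as in that caller:

        lock_sock(&rx->sk);
        call = rxrpc_new_client_call(rx, &cp, srx, &p, GFP_KERNEL, debug_id);
        /* The socket has been unlocked for us by this point. */
        if (!IS_ERR(call))
                mutex_unlock(&call->user_mutex);
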
/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
                         struct rxrpc_call *call,
                         struct sk_buff *skb)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        u32 chan;

        _enter(",%d", call->conn->debug_id);

        rcu_assign_pointer(call->socket, rx);
        call->call_id           = sp->hdr.callNumber;
        call->service_id        = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
        call->state             = RXRPC_CALL_SERVER_ACCEPTING;
        if (sp->hdr.securityIndex > 0)
                call->state     = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;

        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
         * interested in call_counter and can cope with a disagreement with the
         * call pointer).
         */
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        rxrpc_start_call_timer(call);
        _leave("");
}

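The low bits of the wire connection ID select one of the connection's RXRPC_MAXCALLS channels; with RXRPC_CHANNELMASK == RXRPC_MAXCALLS - 1 == 3 (see net/rxrpc/protocol.h), the split works out as in this illustrative example:

        u32 cid     = 0x1234567b;                  /* example wire cid */
        u32 chan    = cid & RXRPC_CHANNELMASK;     /* == 3: channel index */
        u32 conn_id = cid & RXRPC_CIDMASK;         /* == 0x12345678: connection ID proper */
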
/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
                                 here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
                                 here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

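The two variants differ only in who owns the reference that travels with the work item: rxrpc_queue_call() obtains its own ref (and reports failure if the call's usage count has already hit zero), while __rxrpc_queue_call() donates the ref the caller is known to hold. A sketch of the choice, where holding_ref is a stand-in for the caller's knowledge rather than a real flag:

        if (holding_ref)
                __rxrpc_queue_call(call);       /* caller's ref passes to the work item */
        else if (!rxrpc_queue_call(call))
                return;                         /* call was already dead */
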
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        if (call) {
                int n = atomic_read(&call->usage);

                trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
                                 here, NULL);
        }
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}

/*
 * Clean up the RxTx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
        int i;

        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
                call->rxtx_buffer[i] = NULL;
        }
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call->debug_id, rxrpc_call_release,
                         atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        del_timer_sync(&call->timer);

        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);

        if (!list_empty(&call->recvmsg_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                list_del(&call->recvmsg_link);
                put = true;
        }

        /* list_empty() must return false in rxrpc_notify_socket() */
        call->recvmsg_link.next = NULL;
        call->recvmsg_link.prev = NULL;

        write_unlock_bh(&rx->recvmsg_lock);
        if (put)
                rxrpc_put_call(call, rxrpc_call_put);

        write_lock(&rx->call_lock);

        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }

        list_del(&call->sock_link);
        write_unlock(&rx->call_lock);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
                rxrpc_disconnect_call(call);
        if (call->security)
                call->security->free_call_crypto(call);

        rxrpc_cleanup_ring(call);
        _leave("");
}

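The NULL-poisoning of recvmsg_link above is deliberate: list_empty() reports true only when a node points at itself, and rxrpc_notify_socket() only queues a call whose recvmsg_link is empty. Roughly, per include/linux/list.h:

        static inline int list_empty(const struct list_head *head)
        {
                return READ_ONCE(head->next) == head;
        }

After release, call->recvmsg_link.next is NULL rather than &call->recvmsg_link, so list_empty() stays false and the notify path treats the call as already queued, never re-adding it.
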
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;

        _enter("%p", rx);

        while (!list_empty(&rx->to_be_accepted)) {
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
                rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        while (!list_empty(&rx->sock_calls)) {
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
                rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        _leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
        unsigned int debug_id = call->debug_id;
        int n;

        ASSERT(call != NULL);

        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(debug_id, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

                if (!list_empty(&call->link)) {
                        write_lock(&rxnet->call_lock);
                        list_del_init(&call->link);
                        write_unlock(&rxnet->call_lock);
                }

                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
        struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
        struct rxrpc_net *rxnet = call->rxnet;

        rxrpc_put_connection(call->conn);
        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
        kmem_cache_free(rxrpc_call_jar, call);
        if (atomic_dec_and_test(&rxnet->nr_calls))
                wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

        if (in_softirq()) {
                INIT_WORK(&call->processor, rxrpc_destroy_call);
                if (!rxrpc_queue_work(&call->processor))
                        BUG();
        } else {
                rxrpc_destroy_call(&call->processor);
        }
}

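RCU callbacks ordinarily run in softirq context, so when that is the case the destructor bounces itself to a workqueue to get the process context that the final teardown wants, reusing call->processor (safe here, since with no refs left nothing can queue the call again). The general shape of the pattern, as a sketch:

        /*
         * call_rcu(&obj->rcu, cb)    - process context, starts a grace period
         *   -> cb() runs in softirq  - after the grace period ends
         *     -> queue_work()        - punt to a workqueue
         *       -> worker frees obj  - back in process context
         */
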
/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->timer);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

        rxrpc_cleanup_ring(call);
        rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);

        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
        struct rxrpc_call *call;

        _enter("");

        if (!list_empty(&rxnet->calls)) {
                write_lock(&rxnet->call_lock);

                while (!list_empty(&rxnet->calls)) {
                        call = list_entry(rxnet->calls.next,
                                          struct rxrpc_call, link);
                        _debug("Zapping call %p", call);

                        rxrpc_see_call(call);
                        list_del_init(&call->link);

                        pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                               call, atomic_read(&call->usage),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);

                        write_unlock(&rxnet->call_lock);
                        cond_resched();
                        write_lock(&rxnet->call_lock);
                }

                write_unlock(&rxnet->call_lock);
        }

        atomic_dec(&rxnet->nr_calls);
        wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}
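
The seemingly unpaired atomic_dec() at the end balances rxrpc_init_net() seeding rxnet->nr_calls to 1; that bias stops the counter from transiently reaching zero while calls are still outstanding. A sketch of the counter's lifecycle, assuming the seeding done in net_ns.c:

        atomic_set(&rxnet->nr_calls, 1);        /* namespace init: bias ref */
        atomic_inc(&rxnet->nr_calls);           /* rxrpc_alloc_call() */
        if (atomic_dec_and_test(&rxnet->nr_calls))
                wake_up_var(&rxnet->nr_calls);  /* rxrpc_destroy_call() */
        atomic_dec(&rxnet->nr_calls);           /* here: drop the bias... */
        wait_var_event(&rxnet->nr_calls,
                       !atomic_read(&rxnet->nr_calls)); /* ...and wait for zero */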
