root/fs/afs/rxrpc.c


DEFINITIONS

This source file includes the following definitions:
  1. afs_open_socket
  2. afs_close_socket
  3. afs_alloc_call
  4. afs_put_call
  5. afs_get_call
  6. afs_queue_call_work
  7. afs_alloc_flat_call
  8. afs_flat_call_destructor
  9. afs_load_bvec
  10. afs_notify_end_request_tx
  11. afs_send_pages
  12. afs_make_call
  13. afs_deliver_to_call
  14. afs_wait_for_call_to_complete
  15. afs_wake_up_call_waiter
  16. afs_wake_up_async_call
  17. afs_process_async_call
  18. afs_rx_attach
  19. afs_charge_preallocation
  20. afs_rx_discard_new_call
  21. afs_rx_new_call
  22. afs_deliver_cm_op_id
  23. afs_notify_end_reply_tx
  24. afs_send_empty_reply
  25. afs_send_simple_reply
  26. afs_extract_data
  27. afs_protocol_error

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /* Maintain an RxRPC server socket to do AFS communications through
   3  *
   4  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
   5  * Written by David Howells (dhowells@redhat.com)
   6  */
   7 
   8 #include <linux/slab.h>
   9 #include <linux/sched/signal.h>
  10 
  11 #include <net/sock.h>
  12 #include <net/af_rxrpc.h>
  13 #include "internal.h"
  14 #include "afs_cm.h"
  15 #include "protocol_yfs.h"
  16 
  17 struct workqueue_struct *afs_async_calls;
  18 
  19 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
  20 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
  21 static void afs_process_async_call(struct work_struct *);
  22 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
  23 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
  24 static int afs_deliver_cm_op_id(struct afs_call *);
  25 
   26 /* initial processing for an asynchronous incoming call */
  27 static const struct afs_call_type afs_RXCMxxxx = {
  28         .name           = "CB.xxxx",
  29         .deliver        = afs_deliver_cm_op_id,
  30 };
  31 
  32 /*
  33  * open an RxRPC socket and bind it to be a server for callback notifications
  34  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
  35  */
  36 int afs_open_socket(struct afs_net *net)
  37 {
  38         struct sockaddr_rxrpc srx;
  39         struct socket *socket;
  40         unsigned int min_level;
  41         int ret;
  42 
  43         _enter("");
  44 
  45         ret = sock_create_kern(net->net, AF_RXRPC, SOCK_DGRAM, PF_INET6, &socket);
  46         if (ret < 0)
  47                 goto error_1;
  48 
  49         socket->sk->sk_allocation = GFP_NOFS;
  50 
  51         /* bind the callback manager's address to make this a server socket */
  52         memset(&srx, 0, sizeof(srx));
  53         srx.srx_family                  = AF_RXRPC;
  54         srx.srx_service                 = CM_SERVICE;
  55         srx.transport_type              = SOCK_DGRAM;
  56         srx.transport_len               = sizeof(srx.transport.sin6);
  57         srx.transport.sin6.sin6_family  = AF_INET6;
  58         srx.transport.sin6.sin6_port    = htons(AFS_CM_PORT);
  59 
  60         min_level = RXRPC_SECURITY_ENCRYPT;
  61         ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
  62                                 (void *)&min_level, sizeof(min_level));
  63         if (ret < 0)
  64                 goto error_2;
  65 
  66         ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
  67         if (ret == -EADDRINUSE) {
  68                 srx.transport.sin6.sin6_port = 0;
  69                 ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
  70         }
  71         if (ret < 0)
  72                 goto error_2;
  73 
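              /* Also bind the YFS cache manager service ID on the same socket
               * so that calls addressed to the YFS CM service are accepted
               * here too.
               */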
  74         srx.srx_service = YFS_CM_SERVICE;
  75         ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
  76         if (ret < 0)
  77                 goto error_2;
  78 
  79         /* Ideally, we'd turn on service upgrade here, but we can't because
  80          * OpenAFS is buggy and leaks the userStatus field from packet to
  81          * packet and between FS packets and CB packets - so if we try to do an
  82          * upgrade on an FS packet, OpenAFS will leak that into the CB packet
  83          * it sends back to us.
  84          */
  85 
  86         rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
  87                                            afs_rx_discard_new_call);
  88 
  89         ret = kernel_listen(socket, INT_MAX);
  90         if (ret < 0)
  91                 goto error_2;
  92 
  93         net->socket = socket;
  94         afs_charge_preallocation(&net->charge_preallocation_work);
  95         _leave(" = 0");
  96         return 0;
  97 
  98 error_2:
  99         sock_release(socket);
 100 error_1:
 101         _leave(" = %d", ret);
 102         return ret;
 103 }
 104 
 105 /*
 106  * close the RxRPC socket AFS was using
 107  */
 108 void afs_close_socket(struct afs_net *net)
 109 {
 110         _enter("");
 111 
 112         kernel_listen(net->socket, 0);
 113         flush_workqueue(afs_async_calls);
 114 
 115         if (net->spare_incoming_call) {
 116                 afs_put_call(net->spare_incoming_call);
 117                 net->spare_incoming_call = NULL;
 118         }
 119 
 120         _debug("outstanding %u", atomic_read(&net->nr_outstanding_calls));
 121         wait_var_event(&net->nr_outstanding_calls,
 122                        !atomic_read(&net->nr_outstanding_calls));
 123         _debug("no outstanding calls");
 124 
 125         kernel_sock_shutdown(net->socket, SHUT_RDWR);
 126         flush_workqueue(afs_async_calls);
 127         sock_release(net->socket);
 128 
 129         _debug("dework");
 130         _leave("");
 131 }
 132 
 133 /*
 134  * Allocate a call.
 135  */
 136 static struct afs_call *afs_alloc_call(struct afs_net *net,
 137                                        const struct afs_call_type *type,
 138                                        gfp_t gfp)
 139 {
 140         struct afs_call *call;
 141         int o;
 142 
 143         call = kzalloc(sizeof(*call), gfp);
 144         if (!call)
 145                 return NULL;
 146 
 147         call->type = type;
 148         call->net = net;
 149         call->debug_id = atomic_inc_return(&rxrpc_debug_id);
 150         atomic_set(&call->usage, 1);
 151         INIT_WORK(&call->async_work, afs_process_async_call);
 152         init_waitqueue_head(&call->waitq);
 153         spin_lock_init(&call->state_lock);
 154         call->_iter = &call->iter;
 155 
 156         o = atomic_inc_return(&net->nr_outstanding_calls);
 157         trace_afs_call(call, afs_call_trace_alloc, 1, o,
 158                        __builtin_return_address(0));
 159         return call;
 160 }
 161 
 162 /*
 163  * Dispose of a reference on a call.
 164  */
 165 void afs_put_call(struct afs_call *call)
 166 {
 167         struct afs_net *net = call->net;
 168         int n = atomic_dec_return(&call->usage);
 169         int o = atomic_read(&net->nr_outstanding_calls);
 170 
 171         trace_afs_call(call, afs_call_trace_put, n, o,
 172                        __builtin_return_address(0));
 173 
 174         ASSERTCMP(n, >=, 0);
 175         if (n == 0) {
 176                 ASSERT(!work_pending(&call->async_work));
 177                 ASSERT(call->type->name != NULL);
 178 
 179                 if (call->rxcall) {
 180                         rxrpc_kernel_end_call(net->socket, call->rxcall);
 181                         call->rxcall = NULL;
 182                 }
 183                 if (call->type->destructor)
 184                         call->type->destructor(call);
 185 
 186                 afs_put_server(call->net, call->server, afs_server_trace_put_call);
 187                 afs_put_cb_interest(call->net, call->cbi);
 188                 afs_put_addrlist(call->alist);
 189                 kfree(call->request);
 190 
 191                 trace_afs_call(call, afs_call_trace_free, 0, o,
 192                                __builtin_return_address(0));
 193                 kfree(call);
 194 
 195                 o = atomic_dec_return(&net->nr_outstanding_calls);
 196                 if (o == 0)
 197                         wake_up_var(&net->nr_outstanding_calls);
 198         }
 199 }
 200 
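      /*
       * Get an additional reference on a call, noting in the tracepoint log
       * why it was taken.
       */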
 201 static struct afs_call *afs_get_call(struct afs_call *call,
 202                                      enum afs_call_trace why)
 203 {
 204         int u = atomic_inc_return(&call->usage);
 205 
 206         trace_afs_call(call, why, u,
 207                        atomic_read(&call->net->nr_outstanding_calls),
 208                        __builtin_return_address(0));
 209         return call;
 210 }
 211 
 212 /*
 213  * Queue the call for actual work.
 214  */
 215 static void afs_queue_call_work(struct afs_call *call)
 216 {
 217         if (call->type->work) {
 218                 INIT_WORK(&call->work, call->type->work);
 219 
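                      /* The queued work item owns a ref on the call; if it
                       * can't be queued, that ref is dropped again at once.
                       */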
 220                 afs_get_call(call, afs_call_trace_work);
 221                 if (!queue_work(afs_wq, &call->work))
 222                         afs_put_call(call);
 223         }
 224 }
 225 
 226 /*
 227  * allocate a call with flat request and reply buffers
 228  */
 229 struct afs_call *afs_alloc_flat_call(struct afs_net *net,
 230                                      const struct afs_call_type *type,
 231                                      size_t request_size, size_t reply_max)
 232 {
 233         struct afs_call *call;
 234 
 235         call = afs_alloc_call(net, type, GFP_NOFS);
 236         if (!call)
 237                 goto nomem_call;
 238 
 239         if (request_size) {
 240                 call->request_size = request_size;
 241                 call->request = kmalloc(request_size, GFP_NOFS);
 242                 if (!call->request)
 243                         goto nomem_free;
 244         }
 245 
 246         if (reply_max) {
 247                 call->reply_max = reply_max;
 248                 call->buffer = kmalloc(reply_max, GFP_NOFS);
 249                 if (!call->buffer)
 250                         goto nomem_free;
 251         }
 252 
 253         afs_extract_to_buf(call, call->reply_max);
 254         call->operation_ID = type->op;
 255         init_waitqueue_head(&call->waitq);
 256         return call;
 257 
 258 nomem_free:
 259         afs_put_call(call);
 260 nomem_call:
 261         return NULL;
 262 }
 263 
 264 /*
  265  * clean up a call with a flat buffer
 266  */
 267 void afs_flat_call_destructor(struct afs_call *call)
 268 {
 269         _enter("");
 270 
 271         kfree(call->request);
 272         call->request = NULL;
 273         kfree(call->buffer);
 274         call->buffer = NULL;
 275 }
 276 
 277 #define AFS_BVEC_MAX 8
 278 
 279 /*
 280  * Load the given bvec with the next few pages.
 281  */
 282 static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
 283                           struct bio_vec *bv, pgoff_t first, pgoff_t last,
 284                           unsigned offset)
 285 {
 286         struct page *pages[AFS_BVEC_MAX];
 287         unsigned int nr, n, i, to, bytes = 0;
 288 
 289         nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
 290         n = find_get_pages_contig(call->mapping, first, nr, pages);
 291         ASSERTCMP(n, ==, nr);
 292 
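              /* MSG_MORE stays set unless this batch includes the final page,
               * at which point it is cleared to tell rxrpc that the request
               * data is complete.
               */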
 293         msg->msg_flags |= MSG_MORE;
 294         for (i = 0; i < nr; i++) {
 295                 to = PAGE_SIZE;
 296                 if (first + i >= last) {
 297                         to = call->last_to;
 298                         msg->msg_flags &= ~MSG_MORE;
 299                 }
 300                 bv[i].bv_page = pages[i];
 301                 bv[i].bv_len = to - offset;
 302                 bv[i].bv_offset = offset;
 303                 bytes += to - offset;
 304                 offset = 0;
 305         }
 306 
 307         iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes);
 308 }
 309 
 310 /*
 311  * Advance the AFS call state when the RxRPC call ends the transmit phase.
 312  */
 313 static void afs_notify_end_request_tx(struct sock *sock,
 314                                       struct rxrpc_call *rxcall,
 315                                       unsigned long call_user_ID)
 316 {
 317         struct afs_call *call = (struct afs_call *)call_user_ID;
 318 
 319         afs_set_call_state(call, AFS_CALL_CL_REQUESTING, AFS_CALL_CL_AWAIT_REPLY);
 320 }
 321 
 322 /*
 323  * attach the data from a bunch of pages on an inode to a call
 324  */
 325 static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
 326 {
 327         struct bio_vec bv[AFS_BVEC_MAX];
 328         unsigned int bytes, nr, loop, offset;
 329         pgoff_t first = call->first, last = call->last;
 330         int ret;
 331 
 332         offset = call->first_offset;
 333         call->first_offset = 0;
 334 
 335         do {
 336                 afs_load_bvec(call, msg, bv, first, last, offset);
 337                 trace_afs_send_pages(call, msg, first, last, offset);
 338 
 339                 offset = 0;
 340                 bytes = msg->msg_iter.count;
 341                 nr = msg->msg_iter.nr_segs;
 342 
 343                 ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg,
 344                                              bytes, afs_notify_end_request_tx);
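                      /* Drop the page refs taken by afs_load_bvec(), whether
                       * or not the send succeeded.
                       */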
 345                 for (loop = 0; loop < nr; loop++)
 346                         put_page(bv[loop].bv_page);
 347                 if (ret < 0)
 348                         break;
 349 
 350                 first += nr;
 351         } while (first <= last);
 352 
 353         trace_afs_sent_pages(call, call->first, last, first, ret);
 354         return ret;
 355 }
 356 
 357 /*
 358  * Initiate a call and synchronously queue up the parameters for dispatch.  Any
 359  * error is stored into the call struct, which the caller must check for.
 360  */
 361 void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
 362 {
 363         struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index];
 364         struct rxrpc_call *rxcall;
 365         struct msghdr msg;
 366         struct kvec iov[1];
 367         s64 tx_total_len;
 368         int ret;
 369 
 370         _enter(",{%pISp},", &srx->transport);
 371 
 372         ASSERT(call->type != NULL);
 373         ASSERT(call->type->name != NULL);
 374 
 375         _debug("____MAKE %p{%s,%x} [%d]____",
 376                call, call->type->name, key_serial(call->key),
 377                atomic_read(&call->net->nr_outstanding_calls));
 378 
 379         call->addr_ix = ac->index;
 380         call->alist = afs_get_addrlist(ac->alist);
 381 
 382         /* Work out the length we're going to transmit.  This is awkward for
 383          * calls such as FS.StoreData where there's an extra injection of data
 384          * after the initial fixed part.
 385          */
 386         tx_total_len = call->request_size;
 387         if (call->send_pages) {
 388                 if (call->last == call->first) {
 389                         tx_total_len += call->last_to - call->first_offset;
 390                 } else {
 391                         /* It looks mathematically like you should be able to
 392                          * combine the following lines with the ones above, but
 393                          * unsigned arithmetic is fun when it wraps...
 394                          */
 395                         tx_total_len += PAGE_SIZE - call->first_offset;
 396                         tx_total_len += call->last_to;
 397                         tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
 398                 }
 399         }
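              /* As a worked example with purely illustrative numbers: for
               * first = 2, last = 5, first_offset = 100, last_to = 200 and
               * PAGE_SIZE = 4096, the data runs from offset 100 in page 2 to
               * offset 200 in page 5, giving (4096 - 100) + 200 + 2 * 4096 =
               * 12388 bytes in total.
               */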
 400 
 401         /* If the call is going to be asynchronous, we need an extra ref for
 402          * the call to hold itself so the caller need not hang on to its ref.
 403          */
 404         if (call->async) {
 405                 afs_get_call(call, afs_call_trace_get);
 406                 call->drop_ref = true;
 407         }
 408 
 409         /* create a call */
 410         rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
 411                                          (unsigned long)call,
 412                                          tx_total_len, gfp,
 413                                          (call->async ?
 414                                           afs_wake_up_async_call :
 415                                           afs_wake_up_call_waiter),
 416                                          call->upgrade,
 417                                          (call->intr ? RXRPC_PREINTERRUPTIBLE :
 418                                           RXRPC_UNINTERRUPTIBLE),
 419                                          call->debug_id);
 420         if (IS_ERR(rxcall)) {
 421                 ret = PTR_ERR(rxcall);
 422                 call->error = ret;
 423                 goto error_kill_call;
 424         }
 425 
 426         call->rxcall = rxcall;
 427 
 428         if (call->max_lifespan)
 429                 rxrpc_kernel_set_max_life(call->net->socket, rxcall,
 430                                           call->max_lifespan);
 431 
 432         /* send the request */
 433         iov[0].iov_base = call->request;
 434         iov[0].iov_len  = call->request_size;
 435 
 436         msg.msg_name            = NULL;
 437         msg.msg_namelen         = 0;
 438         iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
 439         msg.msg_control         = NULL;
 440         msg.msg_controllen      = 0;
 441         msg.msg_flags           = MSG_WAITALL | (call->send_pages ? MSG_MORE : 0);
 442 
 443         ret = rxrpc_kernel_send_data(call->net->socket, rxcall,
 444                                      &msg, call->request_size,
 445                                      afs_notify_end_request_tx);
 446         if (ret < 0)
 447                 goto error_do_abort;
 448 
 449         if (call->send_pages) {
 450                 ret = afs_send_pages(call, &msg);
 451                 if (ret < 0)
 452                         goto error_do_abort;
 453         }
 454 
 455         /* Note that at this point, we may have received the reply or an abort
 456          * - and an asynchronous call may already have completed.
 457          *
 458          * afs_wait_for_call_to_complete(call, ac)
 459          * must be called to synchronously clean up.
 460          */
 461         return;
 462 
 463 error_do_abort:
 464         if (ret != -ECONNABORTED) {
 465                 rxrpc_kernel_abort_call(call->net->socket, rxcall,
 466                                         RX_USER_ABORT, ret, "KSD");
 467         } else {
 468                 iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0);
 469                 rxrpc_kernel_recv_data(call->net->socket, rxcall,
 470                                        &msg.msg_iter, false,
 471                                        &call->abort_code, &call->service_id);
 472                 ac->abort_code = call->abort_code;
 473                 ac->responded = true;
 474         }
 475         call->error = ret;
 476         trace_afs_call_done(call);
 477 error_kill_call:
 478         if (call->type->done)
 479                 call->type->done(call);
 480 
 481         /* We need to dispose of the extra ref we grabbed for an async call.
 482          * The call, however, might be queued on afs_async_calls and we need to
 483          * make sure we don't get any more notifications that might requeue it.
 484          */
 485         if (call->rxcall) {
 486                 rxrpc_kernel_end_call(call->net->socket, call->rxcall);
 487                 call->rxcall = NULL;
 488         }
 489         if (call->async) {
 490                 if (cancel_work_sync(&call->async_work))
 491                         afs_put_call(call);
 492                 afs_put_call(call);
 493         }
 494 
 495         ac->error = ret;
 496         call->state = AFS_CALL_COMPLETE;
 497         _leave(" = %d", ret);
 498 }
 499 
 500 /*
 501  * deliver messages to a call
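       * - keep pulling data off the socket and feeding it to the call type's
       *   deliver routine until the call reaches a terminal state or there is
       *   no more data to process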
 502  */
 503 static void afs_deliver_to_call(struct afs_call *call)
 504 {
 505         enum afs_call_state state;
 506         u32 abort_code, remote_abort = 0;
 507         int ret;
 508 
 509         _enter("%s", call->type->name);
 510 
 511         while (state = READ_ONCE(call->state),
 512                state == AFS_CALL_CL_AWAIT_REPLY ||
 513                state == AFS_CALL_SV_AWAIT_OP_ID ||
 514                state == AFS_CALL_SV_AWAIT_REQUEST ||
 515                state == AFS_CALL_SV_AWAIT_ACK
 516                ) {
 517                 if (state == AFS_CALL_SV_AWAIT_ACK) {
 518                         iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
 519                         ret = rxrpc_kernel_recv_data(call->net->socket,
 520                                                      call->rxcall, &call->iter,
 521                                                      false, &remote_abort,
 522                                                      &call->service_id);
 523                         trace_afs_receive_data(call, &call->iter, false, ret);
 524 
 525                         if (ret == -EINPROGRESS || ret == -EAGAIN)
 526                                 return;
 527                         if (ret < 0 || ret == 1) {
 528                                 if (ret == 1)
 529                                         ret = 0;
 530                                 goto call_complete;
 531                         }
 532                         return;
 533                 }
 534 
 535                 if (!call->have_reply_time &&
 536                     rxrpc_kernel_get_reply_time(call->net->socket,
 537                                                 call->rxcall,
 538                                                 &call->reply_time))
 539                         call->have_reply_time = true;
 540 
 541                 ret = call->type->deliver(call);
 542                 state = READ_ONCE(call->state);
 543                 switch (ret) {
 544                 case 0:
 545                         afs_queue_call_work(call);
 546                         if (state == AFS_CALL_CL_PROC_REPLY) {
 547                                 if (call->cbi)
 548                                         set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
 549                                                 &call->cbi->server->flags);
 550                                 goto call_complete;
 551                         }
 552                         ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY);
 553                         goto done;
 554                 case -EINPROGRESS:
 555                 case -EAGAIN:
 556                         goto out;
 557                 case -ECONNABORTED:
 558                         ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
 559                         goto done;
 560                 case -ENOTSUPP:
 561                         abort_code = RXGEN_OPCODE;
 562                         rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
 563                                                 abort_code, ret, "KIV");
 564                         goto local_abort;
 565                 case -EIO:
 566                         pr_err("kAFS: Call %u in bad state %u\n",
 567                                call->debug_id, state);
 568                         /* Fall through */
 569                 case -ENODATA:
 570                 case -EBADMSG:
 571                 case -EMSGSIZE:
 572                         abort_code = RXGEN_CC_UNMARSHAL;
 573                         if (state != AFS_CALL_CL_AWAIT_REPLY)
 574                                 abort_code = RXGEN_SS_UNMARSHAL;
 575                         rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
 576                                                 abort_code, ret, "KUM");
 577                         goto local_abort;
 578                 default:
 579                         abort_code = RX_USER_ABORT;
 580                         rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
 581                                                 abort_code, ret, "KER");
 582                         goto local_abort;
 583                 }
 584         }
 585 
 586 done:
 587         if (call->type->done)
 588                 call->type->done(call);
 589 out:
 590         _leave("");
 591         return;
 592 
 593 local_abort:
 594         abort_code = 0;
 595 call_complete:
 596         afs_set_call_complete(call, ret, remote_abort);
 597         state = AFS_CALL_COMPLETE;
 598         goto done;
 599 }
 600 
 601 /*
 602  * Wait synchronously for a call to complete and clean up the call struct.
 603  */
 604 long afs_wait_for_call_to_complete(struct afs_call *call,
 605                                    struct afs_addr_cursor *ac)
 606 {
 607         long ret;
 608         bool rxrpc_complete = false;
 609 
 610         DECLARE_WAITQUEUE(myself, current);
 611 
 612         _enter("");
 613 
 614         ret = call->error;
 615         if (ret < 0)
 616                 goto out;
 617 
 618         add_wait_queue(&call->waitq, &myself);
 619         for (;;) {
 620                 set_current_state(TASK_UNINTERRUPTIBLE);
 621 
 622                 /* deliver any messages that are in the queue */
 623                 if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
 624                     call->need_attention) {
 625                         call->need_attention = false;
 626                         __set_current_state(TASK_RUNNING);
 627                         afs_deliver_to_call(call);
 628                         continue;
 629                 }
 630 
 631                 if (afs_check_call_state(call, AFS_CALL_COMPLETE))
 632                         break;
 633 
 634                 if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
 635                         /* rxrpc terminated the call. */
 636                         rxrpc_complete = true;
 637                         break;
 638                 }
 639 
 640                 schedule();
 641         }
 642 
 643         remove_wait_queue(&call->waitq, &myself);
 644         __set_current_state(TASK_RUNNING);
 645 
 646         if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
 647                 if (rxrpc_complete) {
 648                         afs_set_call_complete(call, call->error, call->abort_code);
 649                 } else {
 650                         /* Kill off the call if it's still live. */
 651                         _debug("call interrupted");
 652                         if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
 653                                                     RX_USER_ABORT, -EINTR, "KWI"))
 654                                 afs_set_call_complete(call, -EINTR, 0);
 655                 }
 656         }
 657 
 658         spin_lock_bh(&call->state_lock);
 659         ac->abort_code = call->abort_code;
 660         ac->error = call->error;
 661         spin_unlock_bh(&call->state_lock);
 662 
 663         ret = ac->error;
 664         switch (ret) {
 665         case 0:
 666                 ret = call->ret0;
 667                 call->ret0 = 0;
 668 
 669                 /* Fall through */
 670         case -ECONNABORTED:
 671                 ac->responded = true;
 672                 break;
 673         }
 674 
 675 out:
 676         _debug("call complete");
 677         afs_put_call(call);
 678         _leave(" = %p", (void *)ret);
 679         return ret;
 680 }
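      /*
       * As a purely illustrative sketch (the call type, opcode and variable
       * names below are hypothetical, not real kAFS symbols), a client-side
       * operation strings the helpers above together roughly like this:
       *
       *        static const struct afs_call_type afs_RXFSExample = {
       *                .name           = "FS.Example",
       *                .deliver        = afs_deliver_fs_example,
       *                .destructor     = afs_flat_call_destructor,
       *        };
       *
       *        __be32 *bp;
       *
       *        call = afs_alloc_flat_call(net, &afs_RXFSExample, 8, 256);
       *        if (!call)
       *                return -ENOMEM;
       *        call->key = key;
       *        bp = call->request;
       *        *bp++ = htonl(FSEXAMPLEOP);
       *        *bp++ = htonl(arg);
       *        afs_make_call(ac, call, GFP_NOFS);
       *        return afs_wait_for_call_to_complete(call, ac);
       */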
 681 
 682 /*
 683  * wake up a waiting call
 684  */
 685 static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
 686                                     unsigned long call_user_ID)
 687 {
 688         struct afs_call *call = (struct afs_call *)call_user_ID;
 689 
 690         call->need_attention = true;
 691         wake_up(&call->waitq);
 692 }
 693 
 694 /*
 695  * wake up an asynchronous call
 696  */
 697 static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 698                                    unsigned long call_user_ID)
 699 {
 700         struct afs_call *call = (struct afs_call *)call_user_ID;
 701         int u;
 702 
 703         trace_afs_notify_call(rxcall, call);
 704         call->need_attention = true;
 705 
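              /* Only queue the async work if we can still get a ref on the
               * call; if the usage count has already reached zero, the call
               * is being torn down and must not be requeued.
               */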
 706         u = atomic_fetch_add_unless(&call->usage, 1, 0);
 707         if (u != 0) {
 708                 trace_afs_call(call, afs_call_trace_wake, u + 1,
 709                                atomic_read(&call->net->nr_outstanding_calls),
 710                                __builtin_return_address(0));
 711 
 712                 if (!queue_work(afs_async_calls, &call->async_work))
 713                         afs_put_call(call);
 714         }
 715 }
 716 
 717 /*
 718  * Perform I/O processing on an asynchronous call.  The work item carries a ref
 719  * to the call struct that we either need to release or to pass on.
 720  */
 721 static void afs_process_async_call(struct work_struct *work)
 722 {
 723         struct afs_call *call = container_of(work, struct afs_call, async_work);
 724 
 725         _enter("");
 726 
 727         if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
 728                 call->need_attention = false;
 729                 afs_deliver_to_call(call);
 730         }
 731 
 732         afs_put_call(call);
 733         _leave("");
 734 }
 735 
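      /*
       * Attach an incoming rxrpc call to the afs_call that was preallocated
       * for it.
       */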
 736 static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
 737 {
 738         struct afs_call *call = (struct afs_call *)user_call_ID;
 739 
 740         call->rxcall = rxcall;
 741 }
 742 
 743 /*
 744  * Charge the incoming call preallocation.
 745  */
 746 void afs_charge_preallocation(struct work_struct *work)
 747 {
 748         struct afs_net *net =
 749                 container_of(work, struct afs_net, charge_preallocation_work);
 750         struct afs_call *call = net->spare_incoming_call;
 751 
 752         for (;;) {
 753                 if (!call) {
 754                         call = afs_alloc_call(net, &afs_RXCMxxxx, GFP_KERNEL);
 755                         if (!call)
 756                                 break;
 757 
 758                         call->drop_ref = true;
 759                         call->async = true;
 760                         call->state = AFS_CALL_SV_AWAIT_OP_ID;
 761                         init_waitqueue_head(&call->waitq);
 762                         afs_extract_to_tmp(call);
 763                 }
 764 
 765                 if (rxrpc_kernel_charge_accept(net->socket,
 766                                                afs_wake_up_async_call,
 767                                                afs_rx_attach,
 768                                                (unsigned long)call,
 769                                                GFP_KERNEL,
 770                                                call->debug_id) < 0)
 771                         break;
 772                 call = NULL;
 773         }
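              /* Stash whatever is left over - a call that rxrpc didn't accept,
               * or NULL if allocation failed - as the spare for next time.
               */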
 774         net->spare_incoming_call = call;
 775 }
 776 
 777 /*
 778  * Discard a preallocated call when a socket is shut down.
 779  */
 780 static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
 781                                     unsigned long user_call_ID)
 782 {
 783         struct afs_call *call = (struct afs_call *)user_call_ID;
 784 
 785         call->rxcall = NULL;
 786         afs_put_call(call);
 787 }
 788 
 789 /*
 790  * Notification of an incoming call.
 791  */
 792 static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
 793                             unsigned long user_call_ID)
 794 {
 795         struct afs_net *net = afs_sock2net(sk);
 796 
 797         queue_work(afs_wq, &net->charge_preallocation_work);
 798 }
 799 
 800 /*
 801  * Grab the operation ID from an incoming cache manager call.  The socket
 802  * buffer is discarded on error or if we don't yet have sufficient data.
 803  */
 804 static int afs_deliver_cm_op_id(struct afs_call *call)
 805 {
 806         int ret;
 807 
 808         _enter("{%zu}", iov_iter_count(call->_iter));
 809 
 810         /* the operation ID forms the first four bytes of the request data */
 811         ret = afs_extract_data(call, true);
 812         if (ret < 0)
 813                 return ret;
 814 
 815         call->operation_ID = ntohl(call->tmp);
 816         afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST);
 817 
 818         /* ask the cache manager to route the call (it'll change the call type
 819          * if successful) */
 820         if (!afs_cm_incoming_call(call))
 821                 return -ENOTSUPP;
 822 
 823         trace_afs_cb_call(call);
 824 
  825         /* pass responsibility for the remainder of this message off to the
 826          * cache manager op */
 827         return call->type->deliver(call);
 828 }
 829 
 830 /*
 831  * Advance the AFS call state when an RxRPC service call ends the transmit
 832  * phase.
 833  */
 834 static void afs_notify_end_reply_tx(struct sock *sock,
 835                                     struct rxrpc_call *rxcall,
 836                                     unsigned long call_user_ID)
 837 {
 838         struct afs_call *call = (struct afs_call *)call_user_ID;
 839 
 840         afs_set_call_state(call, AFS_CALL_SV_REPLYING, AFS_CALL_SV_AWAIT_ACK);
 841 }
 842 
 843 /*
 844  * send an empty reply
 845  */
 846 void afs_send_empty_reply(struct afs_call *call)
 847 {
 848         struct afs_net *net = call->net;
 849         struct msghdr msg;
 850 
 851         _enter("");
 852 
 853         rxrpc_kernel_set_tx_length(net->socket, call->rxcall, 0);
 854 
 855         msg.msg_name            = NULL;
 856         msg.msg_namelen         = 0;
 857         iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0);
 858         msg.msg_control         = NULL;
 859         msg.msg_controllen      = 0;
 860         msg.msg_flags           = 0;
 861 
 862         switch (rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, 0,
 863                                        afs_notify_end_reply_tx)) {
 864         case 0:
 865                 _leave(" [replied]");
 866                 return;
 867 
 868         case -ENOMEM:
 869                 _debug("oom");
 870                 rxrpc_kernel_abort_call(net->socket, call->rxcall,
 871                                         RX_USER_ABORT, -ENOMEM, "KOO");
 872                 /* Fall through */
 873         default:
 874                 _leave(" [error]");
 875                 return;
 876         }
 877 }
 878 
 879 /*
 880  * send a simple reply
 881  */
 882 void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 883 {
 884         struct afs_net *net = call->net;
 885         struct msghdr msg;
 886         struct kvec iov[1];
 887         int n;
 888 
 889         _enter("");
 890 
 891         rxrpc_kernel_set_tx_length(net->socket, call->rxcall, len);
 892 
 893         iov[0].iov_base         = (void *) buf;
 894         iov[0].iov_len          = len;
 895         msg.msg_name            = NULL;
 896         msg.msg_namelen         = 0;
 897         iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
 898         msg.msg_control         = NULL;
 899         msg.msg_controllen      = 0;
 900         msg.msg_flags           = 0;
 901 
 902         n = rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, len,
 903                                    afs_notify_end_reply_tx);
 904         if (n >= 0) {
 905                 /* Success */
 906                 _leave(" [replied]");
 907                 return;
 908         }
 909 
 910         if (n == -ENOMEM) {
 911                 _debug("oom");
 912                 rxrpc_kernel_abort_call(net->socket, call->rxcall,
 913                                         RX_USER_ABORT, -ENOMEM, "KOO");
 914         }
 915         _leave(" [error]");
 916 }
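      /*
       * On the server side, the cache manager operations queued via
       * afs_queue_call_work() reply with the helpers above.  A hypothetical
       * sketch of that pattern (the operation name and body are illustrative
       * only):
       *
       *        static void SRXAFSCB_Example(struct work_struct *work)
       *        {
       *                struct afs_call *call =
       *                        container_of(work, struct afs_call, work);
       *
       *                // ... perform the operation ...
       *                afs_send_empty_reply(call);
       *                afs_put_call(call);
       *        }
       */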
 917 
 918 /*
 919  * Extract a piece of data from the received data socket buffers.
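       *
       * Returns 0 if the requested data was extracted, -EAGAIN if it hasn't
       * all arrived yet, or a negative error.  If rxrpc indicates the final
       * piece of data has been received, the call state is advanced here; on
       * any other error the call is marked complete.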
 920  */
 921 int afs_extract_data(struct afs_call *call, bool want_more)
 922 {
 923         struct afs_net *net = call->net;
 924         struct iov_iter *iter = call->_iter;
 925         enum afs_call_state state;
 926         u32 remote_abort = 0;
 927         int ret;
 928 
 929         _enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more);
 930 
 931         ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
 932                                      want_more, &remote_abort,
 933                                      &call->service_id);
 934         if (ret == 0 || ret == -EAGAIN)
 935                 return ret;
 936 
 937         state = READ_ONCE(call->state);
 938         if (ret == 1) {
 939                 switch (state) {
 940                 case AFS_CALL_CL_AWAIT_REPLY:
 941                         afs_set_call_state(call, state, AFS_CALL_CL_PROC_REPLY);
 942                         break;
 943                 case AFS_CALL_SV_AWAIT_REQUEST:
 944                         afs_set_call_state(call, state, AFS_CALL_SV_REPLYING);
 945                         break;
 946                 case AFS_CALL_COMPLETE:
 947                         kdebug("prem complete %d", call->error);
 948                         return afs_io_error(call, afs_io_error_extract);
 949                 default:
 950                         break;
 951                 }
 952                 return 0;
 953         }
 954 
 955         afs_set_call_complete(call, ret, remote_abort);
 956         return ret;
 957 }
 958 
 959 /*
  960  * Log the production of a protocol error and return the error code.
 961  */
 962 noinline int afs_protocol_error(struct afs_call *call, int error,
 963                                 enum afs_eproto_cause cause)
 964 {
 965         trace_afs_protocol_error(call, error, cause);
 966         return error;
 967 }
