root/drivers/misc/mic/scif/scif_epd.c

DEFINITIONS

This source file includes the following definitions.
  1. scif_cleanup_ep_qp
  2. scif_teardown_ep
  3. scif_add_epd_to_zombie_list
  4. scif_find_listen_ep
  5. scif_cleanup_zombie_epd
  6. scif_cnctreq
  7. scif_cnctgnt
  8. scif_cnctgnt_ack
  9. scif_cnctgnt_nack
  10. scif_cnctrej
  11. scif_discnct
  12. scif_discnt_ack
  13. scif_clientsend
  14. scif_clientrcvd

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_main.h"
#include "scif_map.h"

void scif_cleanup_ep_qp(struct scif_endpt *ep)
{
        struct scif_qp *qp = ep->qp_info.qp;

        if (qp->outbound_q.rb_base) {
                scif_iounmap((void *)qp->outbound_q.rb_base,
                             qp->outbound_q.size, ep->remote_dev);
                qp->outbound_q.rb_base = NULL;
        }
        if (qp->remote_qp) {
                scif_iounmap((void *)qp->remote_qp,
                             sizeof(struct scif_qp), ep->remote_dev);
                qp->remote_qp = NULL;
        }
        if (qp->local_qp) {
                scif_unmap_single(qp->local_qp, ep->remote_dev,
                                  sizeof(struct scif_qp));
                qp->local_qp = 0x0;
        }
        if (qp->local_buf) {
                scif_unmap_single(qp->local_buf, ep->remote_dev,
                                  SCIF_ENDPT_QP_SIZE);
                qp->local_buf = 0;
        }
}

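/*
 * Tear down an endpoint's queue pair: scif_cleanup_ep_qp() above undoes
 * the I/O mappings of the peer's queue state and the DMA mappings of the
 * local queue state under the endpoint lock; the inbound ring buffer and
 * the qp structure itself are ordinary local allocations and are simply
 * freed here once the lock is dropped.
 */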
void scif_teardown_ep(void *endpt)
{
        struct scif_endpt *ep = endpt;
        struct scif_qp *qp = ep->qp_info.qp;

        if (qp) {
                spin_lock(&ep->lock);
                scif_cleanup_ep_qp(ep);
                spin_unlock(&ep->lock);
                kfree(qp->inbound_q.rb_base);
                kfree(qp);
        }
}

/*
 * Enqueue the endpoint to the zombie list for cleanup.
 * The endpoint should not be accessed once this API returns.
 */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
        if (!eplock_held)
                mutex_lock(&scif_info.eplock);
        spin_lock(&ep->lock);
        ep->state = SCIFEP_ZOMBIE;
        spin_unlock(&ep->lock);
        list_add_tail(&ep->list, &scif_info.zombie);
        scif_info.nr_zombies++;
        if (!eplock_held)
                mutex_unlock(&scif_info.eplock);
        schedule_work(&scif_info.misc_work);
}

static struct scif_endpt *scif_find_listen_ep(u16 port)
{
        struct scif_endpt *ep = NULL;
        struct list_head *pos, *tmpq;

        mutex_lock(&scif_info.eplock);
        list_for_each_safe(pos, tmpq, &scif_info.listen) {
                ep = list_entry(pos, struct scif_endpt, list);
                if (ep->port.port == port) {
                        mutex_unlock(&scif_info.eplock);
                        return ep;
                }
        }
        mutex_unlock(&scif_info.eplock);
        return NULL;
}

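/*
 * Zombie endpoints queued by scif_add_epd_to_zombie_list() above are
 * reaped here, presumably driven by the misc_work scheduled there; an
 * endpoint is only freed once its RMA state allows it
 * (scif_rma_ep_can_uninit()).
 */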
void scif_cleanup_zombie_epd(void)
{
        struct list_head *pos, *tmpq;
        struct scif_endpt *ep;

        mutex_lock(&scif_info.eplock);
        list_for_each_safe(pos, tmpq, &scif_info.zombie) {
                ep = list_entry(pos, struct scif_endpt, list);
                if (scif_rma_ep_can_uninit(ep)) {
                        list_del(pos);
                        scif_info.nr_zombies--;
                        put_iova_domain(&ep->rma_info.iovad);
                        kfree(ep);
                }
        }
        mutex_unlock(&scif_info.eplock);
}

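/*
 * Connection handshake handled by the message handlers below, as
 * summarized from their kernel-doc (roughly):
 *
 *   connecting node                            accepting node
 *   connect()        --- SCIF_CNCT_REQ ---->   request queued on listening
 *                                              ep, pending accept() woken
 *   ep back to BOUND <--- SCIF_CNCT_REJ ----   no listener, full backlog, etc.
 *   ep -> MAPPING    <--- SCIF_CNCT_GNT ----   accept() granted the request
 *   mapping done     --- SCIF_CNCT_GNTACK ->   ep CONNECTED, accept() completes
 *   mapping failed   --- SCIF_CNCT_GNTNACK ->  ep moved to CLOSING
 */
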
/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @msg:        Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node.  This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end point's queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = NULL;
        struct scif_conreq *conreq;

        conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
        if (!conreq)
                /* Lack of resources so reject the request. */
                goto conreq_sendrej;

        ep = scif_find_listen_ep(msg->dst.port);
        if (!ep)
                /* Send reject due to no listening ports */
                goto conreq_sendrej_free;
        else
                spin_lock(&ep->lock);

        if (ep->backlog <= ep->conreqcnt) {
                /* Send reject due to too many pending requests */
                spin_unlock(&ep->lock);
                goto conreq_sendrej_free;
        }

        conreq->msg = *msg;
        list_add_tail(&conreq->list, &ep->conlist);
        ep->conreqcnt++;
        wake_up_interruptible(&ep->conwq);
        spin_unlock(&ep->lock);
        return;

conreq_sendrej_free:
        kfree(conreq);
conreq_sendrej:
        msg->uop = SCIF_CNCT_REJ;
        scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

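/*
 * For context, a minimal kernel-mode listener that this handler feeds
 * might look roughly like the sketch below. This is illustrative only:
 * it assumes the public SCIF endpoint API from <linux/scif.h>
 * (scif_open(), scif_bind(), scif_listen(), scif_accept()), uses an
 * arbitrary example port, and does no error unwinding:
 *
 *      scif_epd_t lep, cep;
 *      struct scif_port_id peer;
 *
 *      lep = scif_open();
 *      scif_bind(lep, 2000);           // example local port
 *      scif_listen(lep, 16);           // backlog, checked against conreqcnt
 *      scif_accept(lep, &peer, &cep, SCIF_ACCEPT_SYNC);
 *
 * scif_cnctreq() above queues the SCIF_CNCT_REQ on the listening endpoint
 * and wakes conwq, which is what unblocks the pending scif_accept().
 */
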
/**
 * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
 * @msg:        Interrupt message
 *
 * An accept() on the remote node has occurred and sent this message
 * to indicate success.  Place the end point in the MAPPING state and
 * save the remote node's memory information.  Then wake up the connect
 * request so it can finish.
 */
void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        spin_lock(&ep->lock);
        if (SCIFEP_CONNECTING == ep->state) {
                ep->peer.node = msg->src.node;
                ep->peer.port = msg->src.port;
                ep->qp_info.gnt_pld = msg->payload[1];
                ep->remote_ep = msg->payload[2];
                ep->state = SCIFEP_MAPPING;

                wake_up(&ep->conwq);
        }
        spin_unlock(&ep->lock);
}

/**
 * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
 * @msg:        Interrupt message
 *
 * The remote connection request has finished mapping the local memory.
 * Place the connection in the connected state and wake up the pending
 * accept() call.
 */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        mutex_lock(&scif_info.connlock);
        spin_lock(&ep->lock);
        /* New ep is now connected with all resources set. */
        ep->state = SCIFEP_CONNECTED;
        list_add_tail(&ep->list, &scif_info.connected);
        wake_up(&ep->conwq);
        spin_unlock(&ep->lock);
        mutex_unlock(&scif_info.connlock);
}

/**
 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
 * @msg:        Interrupt message
 *
 * The remote connection request failed to map the local memory it was sent.
 * Place the end point in the CLOSING state to indicate the failure and
 * wake up the pending accept().
 */
void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        spin_lock(&ep->lock);
        ep->state = SCIFEP_CLOSING;
        wake_up(&ep->conwq);
        spin_unlock(&ep->lock);
}

/**
 * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
 * @msg:        Interrupt message
 *
 * The remote end has rejected the connection request.  Set the end
 * point back to the bound state and wake up the pending connect().
 */
void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        spin_lock(&ep->lock);
        if (SCIFEP_CONNECTING == ep->state) {
                ep->state = SCIFEP_BOUND;
                wake_up(&ep->conwq);
        }
        spin_unlock(&ep->lock);
}

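/*
 * Disconnect flow as handled below (summarized from the kernel-doc;
 * roughly): the side that calls close() sends SCIF_DISCNCT; the peer
 * moves its endpoint off the connected list, wakes any blocked senders
 * and receivers, and replies with SCIF_DISCNT_ACK, which completes
 * ep->discon on the closing side so its close routine can finish.
 */
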
/**
 * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
 * @msg:        Interrupt message
 *
 * The remote node has indicated close() has been called on its end
 * point.  Remove the local end point from the connected list, set its
 * state to disconnected and ensure accesses to the remote node are
 * shut down.
 *
 * When all accesses to the remote end have completed then send a
 * DISCNT_ACK to indicate it can remove its resources and complete
 * the close routine.
 */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = NULL;
        struct scif_endpt *tmpep;
        struct list_head *pos, *tmpq;

        mutex_lock(&scif_info.connlock);
        list_for_each_safe(pos, tmpq, &scif_info.connected) {
                tmpep = list_entry(pos, struct scif_endpt, list);
                /*
                 * The local ep may have sent a disconnect and been closed
                 * due to a message response timeout. It may have been
                 * allocated again and formed a new connection so we want
                 * to check if the remote ep matches.
                 */
                if (((u64)tmpep == msg->payload[1]) &&
                    ((u64)tmpep->remote_ep == msg->payload[0])) {
                        list_del(pos);
                        ep = tmpep;
                        spin_lock(&ep->lock);
                        break;
                }
        }

        /*
         * If the terminated end is not found then this side started closing
         * before the other side sent the disconnect.  If so the ep will no
         * longer be on the connected list.  Regardless, the other side
         * needs to be acked to let it know close is complete.
         */
        if (!ep) {
                mutex_unlock(&scif_info.connlock);
                goto discnct_ack;
        }

        ep->state = SCIFEP_DISCONNECTED;
        list_add_tail(&ep->list, &scif_info.disconnected);

        wake_up_interruptible(&ep->sendwq);
        wake_up_interruptible(&ep->recvwq);
        spin_unlock(&ep->lock);
        mutex_unlock(&scif_info.connlock);

discnct_ack:
        msg->uop = SCIF_DISCNT_ACK;
        scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}

/**
 * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
 * @msg:        Interrupt message
 *
 * The remote side has indicated it has no more references to local resources.
 */
void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        spin_lock(&ep->lock);
        ep->state = SCIFEP_DISCONNECTED;
        spin_unlock(&ep->lock);
        complete(&ep->discon);
}

/**
 * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
 * @msg:        Interrupt message
 *
 * The remote side has written data to the connection, so wake up any
 * process blocked waiting to receive on this end point.
 */
void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        spin_lock(&ep->lock);
        if (SCIFEP_CONNECTED == ep->state)
                wake_up_interruptible(&ep->recvwq);
        spin_unlock(&ep->lock);
}

/**
 * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
 * @msg:        Interrupt message
 *
 * The remote side has read data from the connection, so wake up any
 * process blocked waiting to send on this end point.
 */
void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

        spin_lock(&ep->lock);
        if (SCIFEP_CONNECTED == ep->state)
                wake_up_interruptible(&ep->sendwq);
        spin_unlock(&ep->lock);
}
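
/*
 * SCIF_CLIENT_SEND and SCIF_CLIENT_RCVD pair up with the byte-stream
 * send/receive paths: judging from the handlers above, one side's send
 * notifies the peer that data is available (waking recvwq) and one side's
 * receive notifies the peer that ring-buffer space was freed (waking
 * sendwq). A kernel client driving this might look roughly like the
 * sketch below (illustrative only; it assumes the public scif_send()/
 * scif_recv() API from <linux/scif.h> with the blocking flags):
 *
 *      char buf[64];
 *
 *      scif_send(epd, buf, sizeof(buf), SCIF_SEND_BLOCK);
 *      scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
 */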
