root/drivers/misc/mic/scif/scif_fence.c


DEFINITIONS

This source file includes the following definitions.
  1. scif_recv_mark
  2. scif_recv_mark_resp
  3. scif_recv_wait
  4. scif_recv_wait_resp
  5. scif_recv_sig_local
  6. scif_recv_sig_remote
  7. scif_recv_sig_resp
  8. scif_get_local_va
  9. scif_prog_signal_cb
  10. _scif_prog_signal
  11. scif_prog_signal
  12. _scif_fence_wait
  13. scif_rma_handle_remote_fences
  14. _scif_send_fence
  15. scif_send_fence_mark
  16. scif_send_fence_wait
  17. _scif_send_fence_signal_wait
  18. scif_send_fence_signal
  19. scif_fence_mark_cb
  20. _scif_fence_mark
  21. scif_fence_mark
  22. scif_fence_wait
  23. scif_fence_signal

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Intel SCIF driver.
 */

#include "scif_main.h"

/**
 * scif_recv_mark: Handle SCIF_MARK request
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has requested a mark.
 */
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        int mark = 0;
        int err;

        err = _scif_fence_mark(ep, &mark);
        if (err)
                msg->uop = SCIF_MARK_NACK;
        else
                msg->uop = SCIF_MARK_ACK;
        msg->payload[0] = ep->remote_ep;
        msg->payload[2] = mark;
        scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has responded to a SCIF_MARK message.
 */
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_fence_info *fence_req =
                (struct scif_fence_info *)msg->payload[1];

        mutex_lock(&ep->rma_info.rma_lock);
        if (msg->uop == SCIF_MARK_ACK) {
                fence_req->state = OP_COMPLETED;
                fence_req->dma_mark = (int)msg->payload[2];
        } else {
                fence_req->state = OP_FAILED;
        }
        mutex_unlock(&ep->rma_info.rma_lock);
        complete(&fence_req->comp);
}

/**
 * scif_recv_wait: Handle SCIF_WAIT request
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has requested waiting on a fence.
 */
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_remote_fence_info *fence;

        /*
         * Allocate structure for remote fence information and
         * send a NACK if the allocation failed. The peer will
         * return ENOMEM upon receiving a NACK.
         */
        fence = kmalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence) {
                msg->payload[0] = ep->remote_ep;
                msg->uop = SCIF_WAIT_NACK;
                scif_nodeqp_send(ep->remote_dev, msg);
                return;
        }

        /* Prepare the fence request */
        memcpy(&fence->msg, msg, sizeof(struct scifmsg));
        INIT_LIST_HEAD(&fence->list);

        /* Insert to the global remote fence request list */
        mutex_lock(&scif_info.fencelock);
        atomic_inc(&ep->rma_info.fence_refcount);
        list_add_tail(&fence->list, &scif_info.fence);
        mutex_unlock(&scif_info.fencelock);

        schedule_work(&scif_info.misc_work);
}

/**
 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has responded to a SCIF_WAIT message.
 */
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_fence_info *fence_req =
                (struct scif_fence_info *)msg->payload[1];

        mutex_lock(&ep->rma_info.rma_lock);
        if (msg->uop == SCIF_WAIT_ACK)
                fence_req->state = OP_COMPLETED;
        else
                fence_req->state = OP_FAILED;
        mutex_unlock(&ep->rma_info.rma_lock);
        complete(&fence_req->comp);
}

/**
 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has requested a signal on a local offset.
 */
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        int err;

        err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
                               SCIF_WINDOW_SELF);
        if (err)
                msg->uop = SCIF_SIG_NACK;
        else
                msg->uop = SCIF_SIG_ACK;
        msg->payload[0] = ep->remote_ep;
        scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has requested a signal on a remote offset.
 */
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        int err;

        err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
                               SCIF_WINDOW_PEER);
        if (err)
                msg->uop = SCIF_SIG_NACK;
        else
                msg->uop = SCIF_SIG_ACK;
        msg->payload[0] = ep->remote_ep;
        scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
 * @scifdev:    SCIF device
 * @msg:        Interrupt message
 *
 * The peer has responded to a signal request.
 */
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_fence_info *fence_req =
                (struct scif_fence_info *)msg->payload[3];

        mutex_lock(&ep->rma_info.rma_lock);
        if (msg->uop == SCIF_SIG_ACK)
                fence_req->state = OP_COMPLETED;
        else
                fence_req->state = OP_FAILED;
        mutex_unlock(&ep->rma_info.rma_lock);
        complete(&fence_req->comp);
}

static inline void *scif_get_local_va(off_t off, struct scif_window *window)
{
        struct page **pages = window->pinned_pages->pages;
        int page_nr = (off - window->offset) >> PAGE_SHIFT;
        off_t page_off = off & ~PAGE_MASK;

        return page_address(pages[page_nr]) + page_off;
}

static void scif_prog_signal_cb(void *arg)
{
        struct scif_cb_arg *cb_arg = arg;

        dma_pool_free(cb_arg->ep->remote_dev->signal_pool, cb_arg->status,
                      cb_arg->src_dma_addr);
        kfree(cb_arg);
}

static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct dma_chan *chan = ep->rma_info.dma_chan;
        struct dma_device *ddev = chan->device;
        bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
        struct dma_async_tx_descriptor *tx;
        struct scif_status *status = NULL;
        struct scif_cb_arg *cb_arg = NULL;
        dma_addr_t src;
        dma_cookie_t cookie;
        int err;

        tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto alloc_fail;
        }
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = (int)cookie;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto alloc_fail;
        }
        dma_async_issue_pending(chan);
        if (x100) {
                /*
                 * For X100 use the status descriptor to write the value to
                 * the destination.
                 */
                tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
        } else {
                status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
                                        &src);
                if (!status) {
                        err = -ENOMEM;
                        dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                                __func__, __LINE__, err);
                        goto alloc_fail;
                }
                status->val = val;
                status->src_dma_addr = src;
                status->ep = ep;
                src += offsetof(struct scif_status, val);
                tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
                                                  DMA_PREP_INTERRUPT);
        }
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto dma_fail;
        }
        if (!x100) {
                cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
                if (!cb_arg) {
                        err = -ENOMEM;
                        goto dma_fail;
                }
                cb_arg->src_dma_addr = src;
                cb_arg->status = status;
                cb_arg->ep = ep;
                tx->callback = scif_prog_signal_cb;
                tx->callback_param = cb_arg;
        }
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = -EIO;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto dma_fail;
        }
        dma_async_issue_pending(chan);
        return 0;
dma_fail:
        if (!x100) {
                dma_pool_free(ep->remote_dev->signal_pool, status,
                              src - offsetof(struct scif_status, val));
                kfree(cb_arg);
        }
alloc_fail:
        return err;
}
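
/*
 * Editorial note on the scheme above: the zero-byte memcpy submitted
 * with DMA_PREP_FENCE acts purely as an ordering barrier, so the signal
 * write that follows it is not executed until the DMA already queued on
 * this channel has drained. The write itself is done either with an
 * immediate-data descriptor (X100, whose DMA engine cannot do
 * byte-aligned copies) or with an 8-byte copy out of a signal_pool
 * entry that scif_prog_signal_cb() frees once the copy completes.
 */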

/**
 * scif_prog_signal:
 * @epd: endpoint descriptor
 * @offset: registered address to write @val to
 * @val: value to be written at @offset
 * @type: type of the window
 *
 * Arrange to write a value to the registered offset after ensuring that the
 * offset provided is indeed valid.
 */
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
                     enum scif_window_type type)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct scif_window *window = NULL;
        struct scif_rma_req req;
        dma_addr_t dst_dma_addr;
        int err;

        mutex_lock(&ep->rma_info.rma_lock);
        req.out_window = &window;
        req.offset = offset;
        req.nr_bytes = sizeof(u64);
        req.prot = SCIF_PROT_WRITE;
        req.type = SCIF_WINDOW_SINGLE;
        if (type == SCIF_WINDOW_SELF)
                req.head = &ep->rma_info.reg_list;
        else
                req.head = &ep->rma_info.remote_reg_list;
        /* Does a valid window exist? */
        err = scif_query_window(&req);
        if (err) {
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
                goto unlock_ret;
        }

        if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
                u64 *dst_virt;

                if (type == SCIF_WINDOW_SELF)
                        dst_virt = scif_get_local_va(offset, window);
                else
                        dst_virt =
                        scif_get_local_va(offset, (struct scif_window *)
                                          window->peer_window);
                *dst_virt = val;
        } else {
                dst_dma_addr = __scif_off_to_dma_addr(window, offset);
                err = _scif_prog_signal(epd, dst_dma_addr, val);
        }
unlock_ret:
        mutex_unlock(&ep->rma_info.rma_lock);
        return err;
}

static int _scif_fence_wait(scif_epd_t epd, int mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
        int err;

        /* Wait for DMA callback in scif_fence_mark_cb(..) */
        err = wait_event_interruptible_timeout(ep->rma_info.markwq,
                                               dma_async_is_tx_complete(
                                               ep->rma_info.dma_chan,
                                               cookie, NULL, NULL) ==
                                               DMA_COMPLETE,
                                               SCIF_NODE_ALIVE_TIMEOUT);
        if (!err)
                err = -ETIMEDOUT;
        else if (err > 0)
                err = 0;
        return err;
}

/**
 * scif_rma_handle_remote_fences:
 *
 * This routine services remote fence requests queued by scif_recv_wait().
 * It runs from the SCIF misc workqueue, performs the local fence wait for
 * each request on the global scif_info.fence list and (N)ACKs the peer.
 */
void scif_rma_handle_remote_fences(void)
{
        struct list_head *item, *tmp;
        struct scif_remote_fence_info *fence;
        struct scif_endpt *ep;
        int mark, err;

        might_sleep();
        mutex_lock(&scif_info.fencelock);
        list_for_each_safe(item, tmp, &scif_info.fence) {
                fence = list_entry(item, struct scif_remote_fence_info,
                                   list);
                /* Remove fence from global list */
                list_del(&fence->list);

                /* Initiate the fence operation */
                ep = (struct scif_endpt *)fence->msg.payload[0];
                mark = fence->msg.payload[2];
                err = _scif_fence_wait(ep, mark);
                if (err)
                        fence->msg.uop = SCIF_WAIT_NACK;
                else
                        fence->msg.uop = SCIF_WAIT_ACK;
                fence->msg.payload[0] = ep->remote_ep;
                scif_nodeqp_send(ep->remote_dev, &fence->msg);
                kfree(fence);
                if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
                        schedule_work(&scif_info.misc_work);
        }
        mutex_unlock(&scif_info.fencelock);
}

static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
{
        int err;
        struct scifmsg msg;
        struct scif_fence_info *fence_req;
        struct scif_endpt *ep = (struct scif_endpt *)epd;

        fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
        if (!fence_req) {
                err = -ENOMEM;
                goto error;
        }

        fence_req->state = OP_IN_PROGRESS;
        init_completion(&fence_req->comp);

        msg.src = ep->port;
        msg.uop = uop;
        msg.payload[0] = ep->remote_ep;
        msg.payload[1] = (u64)fence_req;
        if (uop == SCIF_WAIT)
                msg.payload[2] = mark;
        spin_lock(&ep->lock);
        if (ep->state == SCIFEP_CONNECTED)
                err = scif_nodeqp_send(ep->remote_dev, &msg);
        else
                err = -ENOTCONN;
        spin_unlock(&ep->lock);
        if (err)
                goto error_free;
retry:
        /* Wait for a SCIF_MARK_(N)ACK or SCIF_WAIT_(N)ACK message */
        err = wait_for_completion_timeout(&fence_req->comp,
                                          SCIF_NODE_ALIVE_TIMEOUT);
        if (!err && scifdev_alive(ep))
                goto retry;
        if (!err)
                err = -ENODEV;
        if (err > 0)
                err = 0;
        mutex_lock(&ep->rma_info.rma_lock);
        if (err < 0) {
                if (fence_req->state == OP_IN_PROGRESS)
                        fence_req->state = OP_FAILED;
        }
        if (fence_req->state == OP_FAILED && !err)
                err = -ENOMEM;
        if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
                *out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
        mutex_unlock(&ep->rma_info.rma_lock);
error_free:
        kfree(fence_req);
error:
        return err;
}
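
/*
 * Editorial summary of the round trips initiated by _scif_send_fence():
 *
 *   SCIF_MARK: the peer runs _scif_fence_mark() and returns the DMA mark
 *   in SCIF_MARK_(N)ACK (see scif_recv_mark() and scif_recv_mark_resp()).
 *   The returned mark is tagged with SCIF_REMOTE_FENCE so that a later
 *   scif_fence_wait() on it is routed back to the peer.
 *
 *   SCIF_WAIT: the peer queues the request for its misc workqueue and
 *   replies with SCIF_WAIT_(N)ACK once the local fence wait completes
 *   (see scif_recv_wait() and scif_rma_handle_remote_fences()).
 */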

/**
 * scif_send_fence_mark:
 * @epd: end point descriptor.
 * @out_mark: Output DMA mark reported by peer.
 *
 * Send a remote fence mark request.
 */
static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
{
        return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
}

/**
 * scif_send_fence_wait:
 * @epd: end point descriptor.
 * @mark: DMA mark to wait for.
 *
 * Send a remote fence wait request.
 */
static int scif_send_fence_wait(scif_epd_t epd, int mark)
{
        return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
}

static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
                                        struct scif_fence_info *fence_req)
{
        int err;

retry:
        /* Wait for a SCIF_SIG_(N)ACK message */
        err = wait_for_completion_timeout(&fence_req->comp,
                                          SCIF_NODE_ALIVE_TIMEOUT);
        if (!err && scifdev_alive(ep))
                goto retry;
        if (!err)
                err = -ENODEV;
        if (err > 0)
                err = 0;
        if (err < 0) {
                mutex_lock(&ep->rma_info.rma_lock);
                if (fence_req->state == OP_IN_PROGRESS)
                        fence_req->state = OP_FAILED;
                mutex_unlock(&ep->rma_info.rma_lock);
        }
        if (fence_req->state == OP_FAILED && !err)
                err = -ENXIO;
        return err;
}

/**
 * scif_send_fence_signal:
 * @epd: endpoint descriptor
 * @roff: remote offset
 * @rval: remote value to write at @roff
 * @loff: local offset
 * @lval: local value to write at @loff
 * @flags: flags
 *
 * Sends a remote fence signal request.
 */
static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
                                  off_t loff, u64 lval, int flags)
{
        int err = 0;
        struct scifmsg msg;
        struct scif_fence_info *fence_req;
        struct scif_endpt *ep = (struct scif_endpt *)epd;

        fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
        if (!fence_req) {
                err = -ENOMEM;
                goto error;
        }

        fence_req->state = OP_IN_PROGRESS;
        init_completion(&fence_req->comp);
        msg.src = ep->port;
        if (flags & SCIF_SIGNAL_LOCAL) {
                msg.uop = SCIF_SIG_LOCAL;
                msg.payload[0] = ep->remote_ep;
                msg.payload[1] = roff;
                msg.payload[2] = rval;
                msg.payload[3] = (u64)fence_req;
                spin_lock(&ep->lock);
                if (ep->state == SCIFEP_CONNECTED)
                        err = scif_nodeqp_send(ep->remote_dev, &msg);
                else
                        err = -ENOTCONN;
                spin_unlock(&ep->lock);
                if (err)
                        goto error_free;
                err = _scif_send_fence_signal_wait(ep, fence_req);
                if (err)
                        goto error_free;
        }
        fence_req->state = OP_IN_PROGRESS;

        if (flags & SCIF_SIGNAL_REMOTE) {
                msg.uop = SCIF_SIG_REMOTE;
                msg.payload[0] = ep->remote_ep;
                msg.payload[1] = loff;
                msg.payload[2] = lval;
                msg.payload[3] = (u64)fence_req;
                spin_lock(&ep->lock);
                if (ep->state == SCIFEP_CONNECTED)
                        err = scif_nodeqp_send(ep->remote_dev, &msg);
                else
                        err = -ENOTCONN;
                spin_unlock(&ep->lock);
                if (err)
                        goto error_free;
                err = _scif_send_fence_signal_wait(ep, fence_req);
        }
error_free:
        kfree(fence_req);
error:
        return err;
}

static void scif_fence_mark_cb(void *arg)
{
        struct scif_endpt *ep = (struct scif_endpt *)arg;

        wake_up_interruptible(&ep->rma_info.markwq);
        atomic_dec(&ep->rma_info.fence_refcount);
}

/**
 * _scif_fence_mark:
 * @epd: endpoint descriptor
 * @mark: output; the DMA mark set up for this endpoint
 *
 * Set up a mark for this endpoint and return the value of the mark.
 */
int _scif_fence_mark(scif_epd_t epd, int *mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct dma_chan *chan = ep->rma_info.dma_chan;
        struct dma_device *ddev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        int err;

        tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = (int)cookie;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        dma_async_issue_pending(chan);
        tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        tx->callback = scif_fence_mark_cb;
        tx->callback_param = ep;
        *mark = cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = (int)cookie;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        atomic_inc(&ep->rma_info.fence_refcount);
        dma_async_issue_pending(chan);
        return 0;
}
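
/*
 * Editorial note: the mark produced above is the dmaengine cookie of the
 * interrupt descriptor. _scif_fence_wait() polls that cookie with
 * dma_async_is_tx_complete(), and scif_fence_mark_cb() wakes markwq and
 * drops fence_refcount once the descriptor finally executes.
 */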

#define SCIF_LOOPB_MAGIC_MARK 0xdead

int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err = 0;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
                ep, flags, *mark);
        err = scif_verify_epd(ep);
        if (err)
                return err;

        /* Invalid flags? */
        if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
                return -EINVAL;

        /* At least one of init self or peer RMA should be set */
        if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
                return -EINVAL;

        /* Exactly one of init self or peer RMA should be set but not both */
        if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
                return -EINVAL;

        /*
         * Management node loopback does not need to use DMA.
         * Return a valid mark to be symmetric.
         */
        if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
                *mark = SCIF_LOOPB_MAGIC_MARK;
                return 0;
        }

        if (flags & SCIF_FENCE_INIT_SELF)
                err = _scif_fence_mark(epd, mark);
        else
                err = scif_send_fence_mark(ep, mark);

        if (err)
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
                ep, flags, *mark, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_fence_mark);

int scif_fence_wait(scif_epd_t epd, int mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err = 0;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_wait: ep %p mark 0x%x\n",
                ep, mark);
        err = scif_verify_epd(ep);
        if (err)
                return err;
        /*
         * Management node loopback does not need to use DMA.
         * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK,
         * so simply return success if the mark is valid.
         */
        if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
                if (mark == SCIF_LOOPB_MAGIC_MARK)
                        return 0;
                else
                        return -EINVAL;
        }
        if (mark & SCIF_REMOTE_FENCE)
                err = scif_send_fence_wait(epd, mark);
        else
                err = _scif_fence_wait(epd, mark);
        if (err < 0)
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_fence_wait);
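
/*
 * Illustrative sketch (editorial, not part of the driver): how a
 * kernel-mode SCIF client might pair the two exported calls above,
 * using the public API declared in <linux/scif.h>. The function name
 * and the connected endpoint "epd" are hypothetical.
 */
#if 0	/* example only */
static int scif_fence_example(scif_epd_t epd)
{
        int mark;
        int err;

        /* Snapshot the RMAs initiated so far by the local endpoint... */
        err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
        if (err)
                return err;
        /* ...and block until every RMA in that snapshot has completed. */
        return scif_fence_wait(epd, mark);
}
#endif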

int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
                      off_t roff, u64 rval, int flags)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err = 0;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
                ep, loff, lval, roff, rval, flags);
        err = scif_verify_epd(ep);
        if (err)
                return err;

        /* Invalid flags? */
        if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
                        SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
                return -EINVAL;

        /* At least one of init self or peer RMA should be set */
        if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
                return -EINVAL;

        /* Exactly one of init self or peer RMA should be set but not both */
        if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
                return -EINVAL;

        /* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
        if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
                return -EINVAL;

        /* Only dword-aligned local offsets allowed */
        if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
                return -EINVAL;

        /* Only dword-aligned remote offsets allowed */
        if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
                return -EINVAL;

        if (flags & SCIF_FENCE_INIT_PEER) {
                err = scif_send_fence_signal(epd, roff, rval, loff,
                                             lval, flags);
        } else {
                /* Local Signal in Local RAS */
                if (flags & SCIF_SIGNAL_LOCAL) {
                        err = scif_prog_signal(epd, loff, lval,
                                               SCIF_WINDOW_SELF);
                        if (err)
                                goto error_ret;
                }

                /* Signal in Remote RAS */
                if (flags & SCIF_SIGNAL_REMOTE)
                        err = scif_prog_signal(epd, roff,
                                               rval, SCIF_WINDOW_PEER);
        }
error_ret:
        if (err)
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_fence_signal);
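
/*
 * Illustrative sketch (editorial, not part of the driver): a combined
 * fence-and-signal call via <linux/scif.h>. The function name and the
 * offsets are hypothetical; both offsets must lie in registered windows
 * and be dword aligned, per the checks in scif_fence_signal().
 */
#if 0	/* example only */
static int scif_signal_example(scif_epd_t epd, off_t loff, off_t roff)
{
        /*
         * Once the RMAs initiated by the local endpoint have drained,
         * write 1 into the local window at loff and into the peer
         * window at roff so that both sides can poll for completion.
         */
        return scif_fence_signal(epd, loff, 1, roff, 1,
                                 SCIF_FENCE_INIT_SELF |
                                 SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE);
}
#endif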
