drivers/firmware/tegra/ivc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx.channel->tx.state to indicate its synchronization
 * state.
 */
enum tegra_ivc_state {
        /*
         * This value is zero for backwards compatibility with services that
         * assume channels to be initially zeroed. Such channels are in an
         * initially valid state, but cannot be asynchronously reset, and must
         * maintain a valid state at all times.
         *
         * The transmitting end can enter the established state from the sync
         * or ack state when it observes the receiving endpoint in the ack or
         * established state, indicating that it has cleared the counters in
         * our rx.channel.
         */
        TEGRA_IVC_STATE_ESTABLISHED = 0,

        /*
         * If an endpoint is observed in the sync state, the remote endpoint is
         * allowed to clear the counters it owns asynchronously with respect to
         * the current endpoint. Therefore, the current endpoint is no longer
         * allowed to communicate.
         */
        TEGRA_IVC_STATE_SYNC,

        /*
         * When the transmitting end observes the receiving end in the sync
         * state, it can clear its tx.count and rx.count and transition to the
         * ack state. If the remote endpoint observes us in the ack state, it
         * can return to the established state once it has cleared its
         * counters.
         */
        TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
        union {
                struct {
                        /* fields owned by the transmitting end */
                        u32 count;
                        u32 state;
                };

                u8 pad[TEGRA_IVC_ALIGN];
        } tx;

        union {
                /* fields owned by the receiving end */
                u32 count;
                u8 pad[TEGRA_IVC_ALIGN];
        } rx;
};
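
/*
 * Layout sketch of one queue in shared memory (illustrative only; derived
 * from tegra_ivc_frame_virt() below, which places the frames immediately
 * after the header):
 *
 *      +---------------------+  <- queue base, TEGRA_IVC_ALIGN aligned
 *      | tx: count, state    |  64-byte half, written by the sender
 *      +---------------------+
 *      | rx: count           |  64-byte half, written by the receiver
 *      +---------------------+
 *      | frame 0             |  num_frames frames of frame_size bytes each
 *      | ...                 |
 *      +---------------------+
 */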

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
        if (!ivc->peer)
                return;

        dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
                                DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
        if (!ivc->peer)
                return;

        dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
                                   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
                                   struct tegra_ivc_header *header)
{
        /*
         * This function performs multiple checks on the same values with
         * security implications, so create snapshots with READ_ONCE() to
         * ensure that these checks use the same values.
         */
        u32 tx = READ_ONCE(header->tx.count);
        u32 rx = READ_ONCE(header->rx.count);

        /*
         * Perform an over-full check to prevent denial of service attacks
         * where a server could be easily fooled into believing that there's
         * an extremely large number of frames ready, since receivers are not
         * expected to check for full or over-full conditions.
         *
         * Although the channel isn't empty, this is an invalid case caused by
         * a potentially malicious peer, so returning empty is safer, because
         * it gives the impression that the channel has gone silent.
         */
        if (tx - rx > ivc->num_frames)
                return true;

        return tx == rx;
}

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
                                  struct tegra_ivc_header *header)
{
        u32 tx = READ_ONCE(header->tx.count);
        u32 rx = READ_ONCE(header->rx.count);

        /*
         * Invalid cases where the counters indicate that the queue is over
         * capacity also appear full.
         */
        return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
                                      struct tegra_ivc_header *header)
{
        u32 tx = READ_ONCE(header->tx.count);
        u32 rx = READ_ONCE(header->rx.count);

        /*
         * This function isn't expected to be used in scenarios where an
         * over-full situation can lead to denial of service attacks. See the
         * comment in tegra_ivc_empty() for an explanation about special
         * over-full considerations.
         */
        return tx - rx;
}
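
/*
 * Note on the arithmetic above: tx.count and rx.count are free-running u32
 * counters that are only ever incremented, so they eventually wrap. Unsigned
 * subtraction still yields the number of frames in flight across the wrap,
 * e.g. tx == 0x00000001 and rx == 0xffffffff gives tx - rx == 2.
 */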

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
        WRITE_ONCE(ivc->tx.channel->tx.count,
                   READ_ONCE(ivc->tx.channel->tx.count) + 1);

        if (ivc->tx.position == ivc->num_frames - 1)
                ivc->tx.position = 0;
        else
                ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
        WRITE_ONCE(ivc->rx.channel->rx.count,
                   READ_ONCE(ivc->rx.channel->rx.count) + 1);

        if (ivc->rx.position == ivc->num_frames - 1)
                ivc->rx.position = 0;
        else
                ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

        /*
         * tx.channel->state is set locally, so it is not synchronized with
         * state from the remote peer. The remote peer cannot reset its
         * transmit counters until we've acknowledged its synchronization
         * request, so no additional synchronization is required because an
         * asynchronous transition of rx.channel->state to
         * TEGRA_IVC_STATE_ACK is not allowed.
         */
        if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
                return -ECONNRESET;

        /*
         * Avoid unnecessary invalidations when performing repeated accesses
         * to an IVC channel by checking the old queue pointers first.
         *
         * Synchronization is only necessary when these pointers indicate
         * empty or full.
         */
        if (!tegra_ivc_empty(ivc, ivc->rx.channel))
                return 0;

        tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

        if (tegra_ivc_empty(ivc, ivc->rx.channel))
                return -ENOSPC;

        return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

        if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
                return -ECONNRESET;

        if (!tegra_ivc_full(ivc, ivc->tx.channel))
                return 0;

        tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

        if (tegra_ivc_full(ivc, ivc->tx.channel))
                return -ENOSPC;

        return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
                                  struct tegra_ivc_header *header,
                                  unsigned int frame)
{
        if (WARN_ON(frame >= ivc->num_frames))
                return ERR_PTR(-EINVAL);

        return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
                                              dma_addr_t phys,
                                              unsigned int frame)
{
        unsigned long offset;

        offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

        return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
                                              dma_addr_t phys,
                                              unsigned int frame,
                                              unsigned int offset,
                                              size_t size)
{
        if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
                return;

        phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

        dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
                                         dma_addr_t phys,
                                         unsigned int frame,
                                         unsigned int offset,
                                         size_t size)
{
        if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
                return;

        phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

        dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
        int err;

        if (WARN_ON(ivc == NULL))
                return ERR_PTR(-EINVAL);

        err = tegra_ivc_check_read(ivc);
        if (err < 0)
                return ERR_PTR(err);

        /*
         * Order observation of ivc->rx.position potentially indicating new
         * data before data read.
         */
        smp_rmb();

        tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
                                   ivc->frame_size);

        return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
        unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
        unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
        int err;

        /*
         * No read barriers or synchronization here: the caller is expected to
         * have already observed the channel non-empty. This check is just to
         * catch programming errors.
         */
        err = tegra_ivc_check_read(ivc);
        if (err < 0)
                return err;

        tegra_ivc_advance_rx(ivc);

        tegra_ivc_flush(ivc, ivc->rx.phys + rx);

        /*
         * Ensure our write to ivc->rx.position occurs before our read from
         * ivc->tx.position.
         */
        smp_mb();

        /*
         * Notify only upon transition from full to non-full. The available
         * count can only asynchronously increase, so the worst possible
         * side-effect will be a spurious notification.
         */
        tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

        if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
                ivc->notify(ivc, ivc->notify_data);

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
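
/*
 * Illustrative receive loop (a sketch, not part of this driver): peek at the
 * next frame, consume its contents and only then advance the read position.
 * The process_frame() consumer below is hypothetical.
 *
 *      void *frame = tegra_ivc_read_get_next_frame(ivc);
 *
 *      if (!IS_ERR(frame)) {
 *              process_frame(frame, ivc->frame_size);
 *              tegra_ivc_read_advance(ivc);
 *      }
 */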

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
        int err;

        err = tegra_ivc_check_write(ivc);
        if (err < 0)
                return ERR_PTR(err);

        return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
        unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
        unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
        int err;

        err = tegra_ivc_check_write(ivc);
        if (err < 0)
                return err;

        tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
                              ivc->frame_size);

        /*
         * Order any possible stores to the frame before update of
         * ivc->tx.position.
         */
        smp_wmb();

        tegra_ivc_advance_tx(ivc);
        tegra_ivc_flush(ivc, ivc->tx.phys + tx);

        /*
         * Ensure our write to ivc->tx.position occurs before our read from
         * ivc->rx.position.
         */
        smp_mb();

        /*
         * Notify only upon transition from empty to non-empty. The available
         * count can only asynchronously decrease, so the worst possible
         * side-effect will be a spurious notification.
         */
        tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

        if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
                ivc->notify(ivc, ivc->notify_data);

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
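
/*
 * Illustrative transmit path (a sketch, not part of this driver): reserve the
 * next frame, fill it, then publish it by advancing the write position. The
 * message and length values below are hypothetical, with length assumed to be
 * at most ivc->frame_size.
 *
 *      void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *      if (!IS_ERR(frame)) {
 *              memcpy(frame, message, length);
 *              tegra_ivc_write_advance(ivc);
 *      }
 */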

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

        ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
        tegra_ivc_flush(ivc, ivc->tx.phys + offset);
        ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * ===============================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * ===============================================================
 *
 *      local   remote  action
 *      -----   ------  -----------------------------------
 *      SYNC    EST     <none>
 *      SYNC    ACK     reset counters; move to EST; notify
 *      SYNC    SYNC    reset counters; move to ACK; notify
 *      ACK     EST     move to EST; notify
 *      ACK     ACK     move to EST; notify
 *      ACK     SYNC    reset counters; move to ACK; notify
 *      EST     EST     <none>
 *      EST     ACK     <none>
 *      EST     SYNC    reset counters; move to ACK; notify
 *
 * ===============================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
        unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
        enum tegra_ivc_state state;

        /* Copy the receiver's state out of shared memory. */
        tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
        state = READ_ONCE(ivc->rx.channel->tx.state);

        if (state == TEGRA_IVC_STATE_SYNC) {
                offset = offsetof(struct tegra_ivc_header, tx.count);

                /*
                 * Order observation of TEGRA_IVC_STATE_SYNC before stores
                 * clearing tx.channel.
                 */
                smp_rmb();

                /*
                 * Reset tx.channel counters. The remote end is in the SYNC
                 * state and won't make progress until we change our state,
                 * so the counters are not in use at this time.
                 */
                ivc->tx.channel->tx.count = 0;
                ivc->rx.channel->rx.count = 0;

                ivc->tx.position = 0;
                ivc->rx.position = 0;

                /*
                 * Ensure that counters appear cleared before new state can be
                 * observed.
                 */
                smp_wmb();

                /*
                 * Move to ACK state. We have just cleared our counters, so it
                 * is now safe for the remote end to start using these values.
                 */
                ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
                tegra_ivc_flush(ivc, ivc->tx.phys + offset);

                /*
                 * Notify remote end to observe state transition.
                 */
                ivc->notify(ivc, ivc->notify_data);

        } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
                   state == TEGRA_IVC_STATE_ACK) {
                offset = offsetof(struct tegra_ivc_header, tx.count);

                /*
                 * Order observation of TEGRA_IVC_STATE_ACK before stores
                 * clearing tx.channel.
                 */
                smp_rmb();

                /*
                 * Reset tx.channel counters. The remote end is in the ACK
                 * state and won't make progress until we change our state,
                 * so the counters are not in use at this time.
                 */
                ivc->tx.channel->tx.count = 0;
                ivc->rx.channel->rx.count = 0;

                ivc->tx.position = 0;
                ivc->rx.position = 0;

                /*
                 * Ensure that counters appear cleared before new state can be
                 * observed.
                 */
                smp_wmb();

                /*
                 * Move to ESTABLISHED state. We know that the remote end has
                 * already cleared its counters, so it is safe to start
                 * writing/reading on this channel.
                 */
                ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
                tegra_ivc_flush(ivc, ivc->tx.phys + offset);

                /*
                 * Notify remote end to observe state transition.
                 */
                ivc->notify(ivc, ivc->notify_data);

        } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
                offset = offsetof(struct tegra_ivc_header, tx.count);

                /*
                 * At this point, we have observed the peer to be in either
                 * the ACK or ESTABLISHED state. Next, order observation of
                 * peer state before storing to tx.channel.
                 */
                smp_rmb();

                /*
                 * Move to ESTABLISHED state. We know that we have previously
                 * cleared our counters, and we know that the remote end has
                 * cleared its counters, so it is safe to start writing/reading
                 * on this channel.
                 */
                ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
                tegra_ivc_flush(ivc, ivc->tx.phys + offset);

                /*
                 * Notify remote end to observe state transition.
                 */
                ivc->notify(ivc, ivc->notify_data);

        } else {
                /*
                 * There is no need to take any further action here: either
                 * the channel is already fully established, or we are waiting
                 * for the remote end to catch up with our current state. Refer
                 * to the "IVC State Transition Table" above.
                 */
        }

        if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
                return -EAGAIN;

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
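
/*
 * Illustrative handshake (a sketch, not part of this driver): after
 * tegra_ivc_init(), reset the channel and then service remote notifications
 * until tegra_ivc_notified() reports the channel as established.
 *
 *      tegra_ivc_reset(ivc);
 *
 *      // in the notification handler, e.g. a mailbox callback:
 *      if (tegra_ivc_notified(ivc) == 0)
 *              start_io(ivc);  // hypothetical: channel is now usable
 */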

size_t tegra_ivc_align(size_t size)
{
        return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
        if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
                pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
                       __func__, queue_size, TEGRA_IVC_ALIGN);
                return 0;
        }

        return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
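
/*
 * Worked example: sixteen 128-byte frames need a 16 * 128 = 2048-byte queue,
 * and with the 128-byte tegra_ivc_header (two 64-byte cache-aligned halves)
 * tegra_ivc_total_queue_size(2048) returns 2176 bytes per direction.
 */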

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
                                  unsigned int num_frames, size_t frame_size)
{
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
                                 TEGRA_IVC_ALIGN));
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
                                 TEGRA_IVC_ALIGN));
        BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
                                 TEGRA_IVC_ALIGN));

        if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
                pr_err("num_frames * frame_size overflows\n");
                return -EINVAL;
        }

        if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
                pr_err("frame size not adequately aligned: %zu\n", frame_size);
                return -EINVAL;
        }

        /*
         * The headers must at least be aligned enough for counters
         * to be accessed atomically.
         */
        if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
                pr_err("IVC channel start not aligned: %#lx\n", rx);
                return -EINVAL;
        }

        if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
                pr_err("IVC channel start not aligned: %#lx\n", tx);
                return -EINVAL;
        }

        if (rx < tx) {
                if (rx + frame_size * num_frames > tx) {
                        pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
                               rx, frame_size * num_frames, tx);
                        return -EINVAL;
                }
        } else {
                if (tx + frame_size * num_frames > rx) {
                        pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
                               tx, frame_size * num_frames, rx);
                        return -EINVAL;
                }
        }

        return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
                   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
                   unsigned int num_frames, size_t frame_size,
                   void (*notify)(struct tegra_ivc *ivc, void *data),
                   void *data)
{
        size_t queue_size;
        int err;

        if (WARN_ON(!ivc || !notify))
                return -EINVAL;

        /*
         * All sizes that can be returned by communication functions should
         * fit in an int.
         */
        if (frame_size > INT_MAX)
                return -E2BIG;

        err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
                                     num_frames, frame_size);
        if (err < 0)
                return err;

        queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

        if (peer) {
                ivc->rx.phys = dma_map_single(peer, rx, queue_size,
                                              DMA_BIDIRECTIONAL);
                if (dma_mapping_error(peer, ivc->rx.phys))
                        return -ENOMEM;

                ivc->tx.phys = dma_map_single(peer, tx, queue_size,
                                              DMA_BIDIRECTIONAL);
                if (dma_mapping_error(peer, ivc->tx.phys)) {
                        dma_unmap_single(peer, ivc->rx.phys, queue_size,
                                         DMA_BIDIRECTIONAL);
                        return -ENOMEM;
                }
        } else {
                ivc->rx.phys = rx_phys;
                ivc->tx.phys = tx_phys;
        }

        ivc->rx.channel = rx;
        ivc->tx.channel = tx;
        ivc->peer = peer;
        ivc->notify = notify;
        ivc->notify_data = data;
        ivc->frame_size = frame_size;
        ivc->num_frames = num_frames;

        /*
         * These values aren't necessarily correct until the channel has been
         * reset.
         */
        ivc->tx.position = 0;
        ivc->rx.position = 0;

        return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
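
/*
 * Illustrative setup (a sketch, not part of this driver): carve the rx and tx
 * queues out of a shared-memory region and hook up a doorbell. The shmem,
 * shmem_phys, MSG_SIZE, NUM_FRAMES and ring_doorbell names are hypothetical.
 *
 *      size_t frame_size = tegra_ivc_align(MSG_SIZE);
 *      size_t queue_size = tegra_ivc_total_queue_size(NUM_FRAMES * frame_size);
 *      int err;
 *
 *      err = tegra_ivc_init(&ivc, NULL, shmem, shmem_phys,
 *                           shmem + queue_size, shmem_phys + queue_size,
 *                           NUM_FRAMES, frame_size, ring_doorbell, NULL);
 */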

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
        if (ivc->peer) {
                size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
                                                         ivc->frame_size);

                dma_unmap_single(ivc->peer, ivc->rx.phys, size,
                                 DMA_BIDIRECTIONAL);
                dma_unmap_single(ivc->peer, ivc->tx.phys, size,
                                 DMA_BIDIRECTIONAL);
        }
}
EXPORT_SYMBOL(tegra_ivc_cleanup);
