drivers/net/ethernet/altera/altera_sgdma.c


DEFINITIONS

This source file includes the following definitions.
  1. sgdma_initialize
  2. sgdma_uninitialize
  3. sgdma_reset
  4. sgdma_enable_rxirq
  5. sgdma_enable_txirq
  6. sgdma_disable_rxirq
  7. sgdma_disable_txirq
  8. sgdma_clear_rxirq
  9. sgdma_clear_txirq
  10. sgdma_tx_buffer
  11. sgdma_tx_completions
  12. sgdma_start_rxdma
  13. sgdma_add_rx_desc
  14. sgdma_rx_status
  15. sgdma_setup_descrip
  16. sgdma_async_read
  17. sgdma_async_write
  18. sgdma_txphysaddr
  19. sgdma_rxphysaddr
  20. queue_tx
  21. queue_rx
  22. dequeue_tx
  23. dequeue_rx
  24. queue_rx_peekhead
  25. sgdma_rxbusy
  26. sgdma_txbusy

// SPDX-License-Identifier: GPL-2.0-only
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                                struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                u16 length,
                                int generate_eop,
                                int rfixed,
                                int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
                              struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

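/* Set up the control register values used to start each channel, DMA-map
 * the tx and rx descriptor memory, zero the descriptors and initialize
 * the pending tx/rx buffer lists. Returns 0 on success or -EINVAL if a
 * descriptor mapping fails.
 */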
int sgdma_initialize(struct altera_tse_private *priv)
{
        priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
                      SGDMA_CTRLREG_INTEN;

        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
                      SGDMA_CTRLREG_INTEN |
                      SGDMA_CTRLREG_ILASTD;

        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);

        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;

        priv->rxdescphys = dma_map_single(priv->device,
                                          (void __force *)priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping rx descriptor memory\n");
                return -EINVAL;
        }

        priv->txdescphys = dma_map_single(priv->device,
                                          (void __force *)priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);

        if (dma_mapping_error(priv->device, priv->txdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping tx descriptor memory\n");
                return -EINVAL;
        }

        /* Initialize descriptor memory to all 0's, sync memory to cache */
        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);

        dma_sync_single_for_device(priv->device, priv->rxdescphys,
                                   priv->rxdescmem, DMA_TO_DEVICE);

        return 0;
}

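/* Unmap the tx and rx descriptor memory that was mapped in
 * sgdma_initialize().
 */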
void sgdma_uninitialize(struct altera_tse_private *priv)
{
        if (priv->rxdescphys)
                dma_unmap_single(priv->device, priv->rxdescphys,
                                 priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (priv->txdescphys)
                dma_unmap_single(priv->device, priv->txdescphys,
                                 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
        /* Initialize descriptor memory to 0 */
        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

        csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

        csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so no need to provide implementations for abstract enable
 * and disable
 */

void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
        tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
                    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
        tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
                    SGDMA_CTRLREG_CLRINT);
}

/* Transmit a buffer through the SGDMA. Returns the number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller.
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        struct sgdma_descrip __iomem *descbase =
                (struct sgdma_descrip __iomem *)priv->tx_dma_desc;

        struct sgdma_descrip __iomem *cdesc = &descbase[0];
        struct sgdma_descrip __iomem *ndesc = &descbase[1];

        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
                return 0;

        sgdma_setup_descrip(cdesc,                      /* current descriptor */
                            ndesc,                      /* next descriptor */
                            sgdma_txphysaddr(priv, ndesc),
                            buffer->dma_addr,           /* address of packet to xmit */
                            0,                          /* write addr 0 for tx dma */
                            buffer->len,                /* length of packet */
                            SGDMA_CONTROL_EOP,          /* generate EOP */
                            0,                          /* read fixed */
                            SGDMA_CONTROL_WR_FIXED);    /* write fixed */

        sgdma_async_write(priv, cdesc);

        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);

        return 1;
}


/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
        u32 ready = 0;

        if (!sgdma_txbusy(priv) &&
            ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
             & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }

        return ready;
}

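/* Kick the rx sgdma so it starts filling the buffer at the head of the
 * receive queue.
 */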
void sgdma_start_rxdma(struct altera_tse_private *priv)
{
        sgdma_async_read(priv);
}

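/* Queue a receive buffer for the rx sgdma to fill. */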
void sgdma_add_rx_desc(struct altera_tse_private *priv,
                       struct tse_buffer *rxbuffer)
{
        queue_rx(priv, rxbuffer);
}

/* status is returned in the upper 16 bits,
 * length is returned in the lower 16 bits
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
        struct sgdma_descrip __iomem *base =
                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
        struct sgdma_descrip __iomem *desc = NULL;
        struct tse_buffer *rxbuffer = NULL;
        unsigned int rxstatus = 0;

        u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
                unsigned int pktlength = 0;
                unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        SGDMA_DESC_LEN,
                                        DMA_FROM_DEVICE);

                pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
                pktstatus = csrrd8(desc, sgdma_descroffs(status));
                rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);

                if (rxstatus) {
                        csrwr8(0, desc, sgdma_descroffs(status));

                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
                                netdev_info(priv->dev,
                                            "sgdma rx and rx queue empty!\n");

                        /* Clear control */
                        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                        /* clear status */
                        csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

                        /* kick the rx sgdma after reaping this descriptor */
                        sgdma_async_read(priv);

                } else {
                        /* If the SGDMA indicated an end of packet on receive,
                         * then the rxstatus from the descriptor is expected
                         * to be non-zero - meaning a valid packet with a
                         * nonzero length, or an error has been indicated.
                         * If not, all we can do is signal an error and
                         * return no packet received. Most likely there is a
                         * system design error, or an error in the underlying
                         * kernel (cache or cache management problem).
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
                                   sts, csrrd8(desc, sgdma_descroffs(status)),
                                   rxstatus);
                }
        } else if (sts == 0) {
                sgdma_async_read(priv);
        }

        return rxstatus;
}


/* Private functions */
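/* Program a single sgdma descriptor: clear the hardware-owned bit in the
 * next descriptor, then fill in the current descriptor's read/write
 * addresses, next-descriptor pointer, transfer length and control bits,
 * marking it as owned by the hardware.
 */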
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
                                struct sgdma_descrip __iomem *ndesc,
                                dma_addr_t ndesc_phys,
                                dma_addr_t raddr,
                                dma_addr_t waddr,
                                u16 length,
                                int generate_eop,
                                int rfixed,
                                int wfixed)
{
        /* Clear the next descriptor as not owned by hardware */

        u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
        csrwr8(ctrl, ndesc, sgdma_descroffs(control));

        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;

        /* Channel is implicitly zero, initialized to 0 by default */
        csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
        csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

        csrwr32(0, desc, sgdma_descroffs(pad1));
        csrwr32(0, desc, sgdma_descroffs(pad2));
        csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

        csrwr8(ctrl, desc, sgdma_descroffs(control));
        csrwr8(0, desc, sgdma_descroffs(status));
        csrwr8(0, desc, sgdma_descroffs(wburst));
        csrwr8(0, desc, sgdma_descroffs(rburst));
        csrwr16(length, desc, sgdma_descroffs(bytes));
        csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}

/* If hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning initial state - restart the async
 * read, probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and a completion status is set,
 * restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
        struct sgdma_descrip __iomem *descbase =
                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;

        struct sgdma_descrip __iomem *cdesc = &descbase[0];
        struct sgdma_descrip __iomem *ndesc = &descbase[1];
        struct tse_buffer *rxbuffer = NULL;

        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
                if (rxbuffer == NULL) {
                        netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
                }

                sgdma_setup_descrip(cdesc,              /* current descriptor */
                                    ndesc,              /* next descriptor */
                                    sgdma_rxphysaddr(priv, ndesc),
                                    0,                  /* read addr 0 for rx dma */
                                    rxbuffer->dma_addr, /* write addr for rx dma */
                                    0,                  /* read 'til EOP */
                                    0,                  /* EOP: NA for rx dma */
                                    0,                  /* read fixed: NA for rx dma */
                                    0);                 /* write fixed: NA for rx dma */

                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
                                           SGDMA_DESC_LEN,
                                           DMA_TO_DEVICE);

                csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
                        priv->rx_dma_csr,
                        sgdma_csroffs(next_descrip));

                csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
                        priv->rx_dma_csr,
                        sgdma_csroffs(control));

                return 1;
        }

        return 0;
}

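/* Start an asynchronous tx sgdma transfer for the given descriptor.
 * Clears the channel control and status, syncs the descriptor memory to
 * the device, points the controller at the descriptor and starts it.
 * Returns 1 if the transfer was started, 0 if the tx sgdma is busy.
 */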
static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip __iomem *desc)
{
        if (sgdma_txbusy(priv))
                return 0;

        /* clear control and status */
        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
        csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   SGDMA_DESC_LEN, DMA_TO_DEVICE);

        csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                priv->tx_dma_csr,
                sgdma_csroffs(next_descrip));

        csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
                priv->tx_dma_csr,
                sgdma_csroffs(control));

        return 1;
}

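/* Translate a tx descriptor's ioremapped address into the bus address
 * seen by the sgdma controller, using the descriptor's offset from the
 * start of the tx descriptor memory.
 */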
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

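/* As above, but for a descriptor in the rx descriptor memory. */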
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip __iomem *desc)
{
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

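/* Remove the first entry from the given list and place it in 'entry',
 * or set 'entry' to NULL if the list is empty.
 */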
#define list_remove_head(list, entry, type, member)                     \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                        list_del_init(&entry->member);                  \
                }                                                       \
        } while (0)

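/* Set 'entry' to the first entry of the given list without removing it,
 * or to NULL if the list is empty.
 */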
#define list_peek_head(list, entry, type, member)                       \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                }                                                       \
        } while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* adds a tse_buffer to the tail of a rx buffer list
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, or returns NULL
 * if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, or returns NULL
 * if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
        return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
                       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
        int delay = 0;

        /* if DMA is busy, wait for the current transaction to finish */
        while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
                & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);

        if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
            & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
        return 0;
}
