root/drivers/dma/st_fdma.c


DEFINITIONS

This source file includes the following definitions:
  1. to_st_fdma_chan
  2. to_st_fdma_desc
  3. st_fdma_dreq_get
  4. st_fdma_dreq_put
  5. st_fdma_xfer_desc
  6. st_fdma_ch_sta_update
  7. st_fdma_irq_handler
  8. st_fdma_of_xlate
  9. st_fdma_free_desc
  10. st_fdma_alloc_desc
  11. st_fdma_alloc_chan_res
  12. st_fdma_free_chan_res
  13. st_fdma_prep_dma_memcpy
  14. config_reqctrl
  15. fill_hw_node
  16. st_fdma_prep_common
  17. st_fdma_prep_dma_cyclic
  18. st_fdma_prep_slave_sg
  19. st_fdma_desc_residue
  20. st_fdma_tx_status
  21. st_fdma_issue_pending
  22. st_fdma_pause
  23. st_fdma_resume
  24. st_fdma_terminate_all
  25. st_fdma_slave_config
  26. st_fdma_parse_dt
  27. st_fdma_free
  28. st_fdma_probe
  29. st_fdma_remove

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for STMicroelectronics STi FDMA controller
 *
 * Copyright (C) 2014 STMicroelectronics
 *
 * Author: Ludovic Barre <Ludovic.barre@st.com>
 *         Peter Griffin <peter.griffin@linaro.org>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>

#include "st_fdma.h"

static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
        return container_of(c, struct st_fdma_chan, vchan.chan);
}

static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct st_fdma_desc, vdesc);
}

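/*
 * Reserve a DMA request (dreq) line for @fchan. The line number comes from
 * the channel's DT configuration; -EINVAL is returned if the line is out of
 * range or already claimed by another channel.
 */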
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
        struct st_fdma_dev *fdev = fchan->fdev;
        u32 req_line_cfg = fchan->cfg.req_line;
        u32 dreq_line;
        int try = 0;

        /*
         * dreq_mask is shared between the n channels of the FDMA, so all
         * accesses must be atomic. If dreq_mask changes between the check
         * and test_and_set_bit(), we retry.
         */
        do {
                if (fdev->dreq_mask == ~0L) {
                        dev_err(fdev->dev, "No req lines available\n");
                        return -EINVAL;
                }

                if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
                        dev_err(fdev->dev, "Invalid or used req line\n");
                        return -EINVAL;
                } else {
                        dreq_line = req_line_cfg;
                }

                try++;
        } while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

        dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
                dreq_line, fdev->dreq_mask);

        return dreq_line;
}

static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
        struct st_fdma_dev *fdev = fchan->fdev;

        dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
        clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}

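/*
 * Take the next descriptor off the virt-dma queue and kick the channel:
 * program the first node's byte count and physical node address, then set
 * the per-channel start bit in the FDMA command register.
 */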
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
        struct virt_dma_desc *vdesc;
        unsigned long nbytes, ch_cmd, cmd;

        vdesc = vchan_next_desc(&fchan->vchan);
        if (!vdesc)
                return;

        fchan->fdesc = to_st_fdma_desc(vdesc);
        nbytes = fchan->fdesc->node[0].desc->nbytes;
        cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
        ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

        /* start the channel for the descriptor */
        fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
        fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
        writel(cmd,
                fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

        dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}

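/*
 * Read the channel command/status word and update the software channel
 * state: DMA_ERROR on an error interrupt, otherwise DMA_PAUSED or
 * DMA_IN_PROGRESS depending on the hardware status bits.
 */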
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
                                  unsigned long int_sta)
{
        unsigned long ch_sta, ch_err;
        int ch_id = fchan->vchan.chan.chan_id;
        struct st_fdma_dev *fdev = fchan->fdev;

        ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
        ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
        ch_sta &= FDMA_CH_CMD_STA_MASK;

        if (int_sta & FDMA_INT_STA_ERR) {
                dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
                fchan->status = DMA_ERROR;
                return;
        }

        switch (ch_sta) {
        case FDMA_CH_CMD_STA_PAUSED:
                fchan->status = DMA_PAUSED;
                break;

        case FDMA_CH_CMD_STA_RUNNING:
                fchan->status = DMA_IN_PROGRESS;
                break;
        }
}

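/*
 * The FDMA raises a single interrupt for all channels. INT_STA carries two
 * bits per channel (completion and error), hence the shift by two per
 * iteration while walking the channel array. Completed non-cyclic
 * descriptors are retired and the next queued descriptor is started.
 */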
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
        struct st_fdma_dev *fdev = dev_id;
        irqreturn_t ret = IRQ_NONE;
        struct st_fdma_chan *fchan = &fdev->chans[0];
        unsigned long int_sta, clr;

        int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
        clr = int_sta;

        for (; int_sta != 0; int_sta >>= 2, fchan++) {
                if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
                        continue;

                spin_lock(&fchan->vchan.lock);
                st_fdma_ch_sta_update(fchan, int_sta);

                if (fchan->fdesc) {
                        if (!fchan->fdesc->iscyclic) {
                                list_del(&fchan->fdesc->vdesc.node);
                                vchan_cookie_complete(&fchan->fdesc->vdesc);
                                fchan->fdesc = NULL;
                                fchan->status = DMA_COMPLETE;
                        } else {
                                vchan_cyclic_callback(&fchan->fdesc->vdesc);
                        }

                        /* Start the next descriptor (if available) */
                        if (!fchan->fdesc)
                                st_fdma_xfer_desc(fchan);
                }

                spin_unlock(&fchan->vchan.lock);
                ret = IRQ_HANDLED;
        }

        fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

        return ret;
}

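/*
 * #dma-cells translation: args[0] is the request line, optional args[1]
 * carries the request-control bits and optional args[2] the channel type.
 * The SLIM core is booted here and shut down again when the channel
 * resources are freed.
 */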
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        struct st_fdma_dev *fdev = ofdma->of_dma_data;
        struct dma_chan *chan;
        struct st_fdma_chan *fchan;
        int ret;

        if (dma_spec->args_count < 1)
                return ERR_PTR(-EINVAL);

        if (fdev->dma_device.dev->of_node != dma_spec->np)
                return ERR_PTR(-EINVAL);

        ret = rproc_boot(fdev->slim_rproc->rproc);
        if (ret == -ENOENT)
                return ERR_PTR(-EPROBE_DEFER);
        else if (ret)
                return ERR_PTR(ret);

        chan = dma_get_any_slave_channel(&fdev->dma_device);
        if (!chan)
                goto err_chan;

        fchan = to_st_fdma_chan(chan);

        fchan->cfg.of_node = dma_spec->np;
        fchan->cfg.req_line = dma_spec->args[0];
        fchan->cfg.req_ctrl = 0;
        fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

        if (dma_spec->args_count > 1)
                fchan->cfg.req_ctrl = dma_spec->args[1]
                        & FDMA_REQ_CTRL_CFG_MASK;

        if (dma_spec->args_count > 2)
                fchan->cfg.type = dma_spec->args[2];

        if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
                fchan->dreq_line = 0;
        } else {
                fchan->dreq_line = st_fdma_dreq_get(fchan);
                if (IS_ERR_VALUE(fchan->dreq_line)) {
                        chan = ERR_PTR(fchan->dreq_line);
                        goto err_chan;
                }
        }

        dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
                fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

        return chan;

err_chan:
        rproc_shutdown(fdev->slim_rproc->rproc);
        return chan;
}

static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct st_fdma_desc *fdesc;
        int i;

        fdesc = to_st_fdma_desc(vdesc);
        for (i = 0; i < fdesc->n_nodes; i++)
                dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
                              fdesc->node[i].pdesc);
        kfree(fdesc);
}

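/*
 * Allocate a descriptor with @sg_len hardware nodes from the channel's DMA
 * pool. On failure, every node allocated so far is returned to the pool.
 */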
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
                                               int sg_len)
{
        struct st_fdma_desc *fdesc;
        int i;

        fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
        if (!fdesc)
                return NULL;

        fdesc->fchan = fchan;
        fdesc->n_nodes = sg_len;
        for (i = 0; i < sg_len; i++) {
                fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
                                GFP_NOWAIT, &fdesc->node[i].pdesc);
                if (!fdesc->node[i].desc)
                        goto err;
        }
        return fdesc;

err:
        while (--i >= 0)
                dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
                              fdesc->node[i].pdesc);
        kfree(fdesc);
        return NULL;
}

static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

        /* Create the dma pool for descriptor allocation */
        fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
                                           fchan->fdev->dev,
                                           sizeof(struct st_fdma_hw_node),
                                           __alignof__(struct st_fdma_hw_node),
                                           0);

        if (!fchan->node_pool) {
                dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
                return -ENOMEM;
        }

        dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
                fchan->vchan.chan.chan_id, fchan->cfg.type);

        return 0;
}

static void st_fdma_free_chan_res(struct dma_chan *chan)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
        unsigned long flags;

        dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
                __func__, fchan->vchan.chan.chan_id);

        if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
                st_fdma_dreq_put(fchan);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        fchan->fdesc = NULL;
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        dma_pool_destroy(fchan->node_pool);
        fchan->node_pool = NULL;
        memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

        rproc_shutdown(rproc);
}

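/*
 * Prepare a single-node memcpy descriptor: free-running (no request line),
 * incrementing source and destination, interrupt on end of node.
 */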
static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct st_fdma_chan *fchan;
        struct st_fdma_desc *fdesc;
        struct st_fdma_hw_node *hw_node;

        if (!len)
                return NULL;

        fchan = to_st_fdma_chan(chan);

        /* We only require a single descriptor */
        fdesc = st_fdma_alloc_desc(fchan, 1);
        if (!fdesc) {
                dev_err(fchan->fdev->dev, "no memory for desc\n");
                return NULL;
        }

        hw_node = fdesc->node[0].desc;
        hw_node->next = 0;
        hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
        hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
        hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
        hw_node->control |= FDMA_NODE_CTRL_INT_EON;
        hw_node->nbytes = len;
        hw_node->saddr = src;
        hw_node->daddr = dst;
        hw_node->generic.length = len;
        hw_node->generic.sstride = 0;
        hw_node->generic.dstride = 0;

        return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

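/*
 * Program the request-control register for a slave transfer: direction
 * (read/write), load/store opcode matching the slave bus width, and the
 * number of operations per request derived from maxburst.
 */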
static int config_reqctrl(struct st_fdma_chan *fchan,
                          enum dma_transfer_direction direction)
{
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width;
        int ch_id = fchan->vchan.chan.chan_id;
        struct st_fdma_dev *fdev = fchan->fdev;

        switch (direction) {
        case DMA_DEV_TO_MEM:
                fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
                maxburst = fchan->scfg.src_maxburst;
                width = fchan->scfg.src_addr_width;
                addr = fchan->scfg.src_addr;
                break;

        case DMA_MEM_TO_DEV:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
                maxburst = fchan->scfg.dst_maxburst;
                width = fchan->scfg.dst_addr_width;
                addr = fchan->scfg.dst_addr;
                break;

        default:
                return -EINVAL;
        }

        fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
                break;

        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
                break;

        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
                break;

        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
                break;

        default:
                return -EINVAL;
        }

        fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
        fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
        dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

        fchan->cfg.dev_addr = addr;
        fchan->cfg.dir = direction;

        dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
                ch_id, addr, fchan->cfg.req_ctrl);

        return 0;
}

static void fill_hw_node(struct st_fdma_hw_node *hw_node,
                        struct st_fdma_chan *fchan,
                        enum dma_transfer_direction direction)
{
        if (direction == DMA_MEM_TO_DEV) {
                hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
                hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
                hw_node->daddr = fchan->cfg.dev_addr;
        } else {
                hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
                hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
                hw_node->saddr = fchan->cfg.dev_addr;
        }

        hw_node->generic.sstride = 0;
        hw_node->generic.dstride = 0;
}

static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
                size_t len, enum dma_transfer_direction direction)
{
        struct st_fdma_chan *fchan;

        if (!chan || !len)
                return NULL;

        fchan = to_st_fdma_chan(chan);

        if (!is_slave_direction(direction)) {
                dev_err(fchan->fdev->dev, "bad direction?\n");
                return NULL;
        }

        return fchan;
}

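/*
 * Prepare a cyclic transfer: one hardware node per period, linked in a
 * ring (the last node points back to the first), each raising an interrupt
 * at end of node. @len must be a multiple of @period_len.
 */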
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct st_fdma_chan *fchan;
        struct st_fdma_desc *fdesc;
        int sg_len, i;

        fchan = st_fdma_prep_common(chan, len, direction);
        if (!fchan)
                return NULL;

        if (!period_len)
                return NULL;

        if (config_reqctrl(fchan, direction)) {
                dev_err(fchan->fdev->dev, "bad width or direction\n");
                return NULL;
        }

        /* the buffer length must be a multiple of period_len */
        if (len % period_len != 0) {
                dev_err(fchan->fdev->dev, "len is not multiple of period\n");
                return NULL;
        }

        sg_len = len / period_len;
        fdesc = st_fdma_alloc_desc(fchan, sg_len);
        if (!fdesc) {
                dev_err(fchan->fdev->dev, "no memory for desc\n");
                return NULL;
        }

        fdesc->iscyclic = true;

        for (i = 0; i < sg_len; i++) {
                struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

                hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

                hw_node->control =
                        FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
                hw_node->control |= FDMA_NODE_CTRL_INT_EON;

                fill_hw_node(hw_node, fchan, direction);

                if (direction == DMA_MEM_TO_DEV)
                        hw_node->saddr = buf_addr + (i * period_len);
                else
                        hw_node->daddr = buf_addr + (i * period_len);

                hw_node->nbytes = period_len;
                hw_node->generic.length = period_len;
        }

        return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

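/*
 * Prepare a slave scatter-gather transfer: one hardware node per sg entry,
 * each mapped to the channel's request line; only the last node is flagged
 * to raise an end-of-node interrupt.
 */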
static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct st_fdma_chan *fchan;
        struct st_fdma_desc *fdesc;
        struct st_fdma_hw_node *hw_node;
        struct scatterlist *sg;
        int i;

        fchan = st_fdma_prep_common(chan, sg_len, direction);
        if (!fchan)
                return NULL;

        if (!sgl)
                return NULL;

        fdesc = st_fdma_alloc_desc(fchan, sg_len);
        if (!fdesc) {
                dev_err(fchan->fdev->dev, "no memory for desc\n");
                return NULL;
        }

        fdesc->iscyclic = false;

        for_each_sg(sgl, sg, sg_len, i) {
                hw_node = fdesc->node[i].desc;

                hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
                hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

                fill_hw_node(hw_node, fchan, direction);

                if (direction == DMA_MEM_TO_DEV)
                        hw_node->saddr = sg_dma_address(sg);
                else
                        hw_node->daddr = sg_dma_address(sg);

                hw_node->nbytes = sg_dma_len(sg);
                hw_node->generic.length = sg_dma_len(sg);
        }

        /* interrupt at end of last node */
        hw_node->control |= FDMA_NODE_CTRL_INT_EON;

        return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

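/*
 * Compute the residue for @vdesc. For the in-flight descriptor the channel
 * command register holds the physical address of the node being executed:
 * node sizes are summed from the tail backwards until that node is found,
 * whose remaining byte count is then read from the hardware.
 */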
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
                                   struct virt_dma_desc *vdesc,
                                   bool in_progress)
{
        struct st_fdma_desc *fdesc = to_st_fdma_desc(vdesc);
        size_t residue = 0;
        dma_addr_t cur_addr = 0;
        int i;

        if (in_progress) {
                cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
                cur_addr &= FDMA_CH_CMD_DATA_MASK;
        }

        for (i = fdesc->n_nodes - 1; i >= 0; i--) {
                if (cur_addr == fdesc->node[i].pdesc) {
                        residue += fnode_read(fchan, FDMA_CNTN_OFST);
                        break;
                }
                residue += fdesc->node[i].desc->nbytes;
        }

        return residue;
}

static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        vd = vchan_find_desc(&fchan->vchan, cookie);
        if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
                txstate->residue = st_fdma_desc_residue(fchan, vd, true);
        else if (vd)
                txstate->residue = st_fdma_desc_residue(fchan, vd, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        return ret;
}

static void st_fdma_issue_pending(struct dma_chan *chan)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fchan->vchan.lock, flags);

        if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
                st_fdma_xfer_desc(fchan);

        spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}

static int st_fdma_pause(struct dma_chan *chan)
{
        unsigned long flags;
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        int ch_id = fchan->vchan.chan.chan_id;
        unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

        dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        if (fchan->fdesc)
                fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        return 0;
}

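/*
 * Resume a paused channel: the channel command word is re-written with
 * only its node-address bits kept, clearing the pause state so the channel
 * continues from the current node.
 */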
static int st_fdma_resume(struct dma_chan *chan)
{
        unsigned long flags;
        unsigned long val;
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        int ch_id = fchan->vchan.chan.chan_id;

        dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        if (fchan->fdesc) {
                val = fchan_read(fchan, FDMA_CH_CMD_OFST);
                val &= FDMA_CH_CMD_DATA_MASK;
                fchan_write(fchan, val, FDMA_CH_CMD_OFST);
        }
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        return 0;
}

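/*
 * Terminate all transfers: pause the channel, forget the in-flight
 * descriptor and free every descriptor still queued on the virtual
 * channel.
 */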
static int st_fdma_terminate_all(struct dma_chan *chan)
{
        unsigned long flags;
        LIST_HEAD(head);
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        int ch_id = fchan->vchan.chan.chan_id;
        unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

        dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
        fchan->fdesc = NULL;
        vchan_get_all_descriptors(&fchan->vchan, &head);
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fchan->vchan, &head);

        return 0;
}

static int st_fdma_slave_config(struct dma_chan *chan,
                                struct dma_slave_config *slave_cfg)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

        memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
        return 0;
}

static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
        .name = "STiH407",
        .id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
        .name = "STiH407",
        .id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
        .name = "STiH407",
        .id = 2,
};

static const struct of_device_id st_fdma_match[] = {
        { .compatible = "st,stih407-fdma-mpe31-11",
          .data = &fdma_mpe31_stih407_11 },
        { .compatible = "st,stih407-fdma-mpe31-12",
          .data = &fdma_mpe31_stih407_12 },
        { .compatible = "st,stih407-fdma-mpe31-13",
          .data = &fdma_mpe31_stih407_13 },
        {},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);

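/*
 * Build the SLIM firmware name from the SoC name and FDMA instance id
 * (e.g. "fdma_STiH407_0.elf") and read the channel count from the
 * "dma-channels" DT property.
 */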
static int st_fdma_parse_dt(struct platform_device *pdev,
                        const struct st_fdma_driverdata *drvdata,
                        struct st_fdma_dev *fdev)
{
        snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
                drvdata->name, drvdata->id);

        return of_property_read_u32(pdev->dev.of_node, "dma-channels",
                                    &fdev->nr_channels);
}

#define FDMA_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void st_fdma_free(struct st_fdma_dev *fdev)
{
        struct st_fdma_chan *fchan;
        int i;

        for (i = 0; i < fdev->nr_channels; i++) {
                fchan = &fdev->chans[i];
                list_del(&fchan->vchan.chan.device_node);
                tasklet_kill(&fchan->vchan.task);
        }
}

static int st_fdma_probe(struct platform_device *pdev)
{
        struct st_fdma_dev *fdev;
        const struct of_device_id *match;
        struct device_node *np = pdev->dev.of_node;
        const struct st_fdma_driverdata *drvdata;
        int ret, i;

        match = of_match_device(st_fdma_match, &pdev->dev);
        if (!match || !match->data) {
                dev_err(&pdev->dev, "No device match found\n");
                return -ENODEV;
        }

        drvdata = match->data;

        fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return -ENOMEM;

        ret = st_fdma_parse_dt(pdev, drvdata, fdev);
        if (ret) {
                dev_err(&pdev->dev, "unable to find platform data\n");
                goto err;
        }

        fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
                                   sizeof(struct st_fdma_chan), GFP_KERNEL);
        if (!fdev->chans)
                return -ENOMEM;

        fdev->dev = &pdev->dev;
        fdev->drvdata = drvdata;
        platform_set_drvdata(pdev, fdev);

        fdev->irq = platform_get_irq(pdev, 0);
        if (fdev->irq < 0)
                return fdev->irq;

        ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
                               dev_name(&pdev->dev), fdev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
                goto err;
        }

        fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
        if (IS_ERR(fdev->slim_rproc)) {
                ret = PTR_ERR(fdev->slim_rproc);
                dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
                goto err;
        }

        /* Initialise list of FDMA channels */
        INIT_LIST_HEAD(&fdev->dma_device.channels);
        for (i = 0; i < fdev->nr_channels; i++) {
                struct st_fdma_chan *fchan = &fdev->chans[i];

                fchan->fdev = fdev;
                fchan->vchan.desc_free = st_fdma_free_desc;
                vchan_init(&fchan->vchan, &fdev->dma_device);
        }

        /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
        fdev->dreq_mask = BIT(0) | BIT(31);

        dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
        dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);

        fdev->dma_device.dev = &pdev->dev;
        fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
        fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
        fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
        fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
        fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
        fdev->dma_device.device_tx_status = st_fdma_tx_status;
        fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
        fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
        fdev->dma_device.device_config = st_fdma_slave_config;
        fdev->dma_device.device_pause = st_fdma_pause;
        fdev->dma_device.device_resume = st_fdma_resume;

        fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
        fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
        fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        ret = dmaenginem_async_device_register(&fdev->dma_device);
        if (ret) {
                dev_err(&pdev->dev,
                        "Failed to register DMA device (%d)\n", ret);
                goto err_rproc;
        }

        ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Failed to register controller (%d)\n", ret);
                goto err_rproc;
        }

        dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);

        return 0;

err_rproc:
        st_fdma_free(fdev);
        st_slim_rproc_put(fdev->slim_rproc);
err:
        return ret;
}

static int st_fdma_remove(struct platform_device *pdev)
{
        struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

        devm_free_irq(&pdev->dev, fdev->irq, fdev);
        st_slim_rproc_put(fdev->slim_rproc);
        of_dma_controller_free(pdev->dev.of_node);

        return 0;
}

static struct platform_driver st_fdma_platform_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = st_fdma_match,
        },
        .probe = st_fdma_probe,
        .remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic Barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform:" DRIVER_NAME);
