/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD		BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM		0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR		0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP		BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff	/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

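/*
 * Worked example for the limit above: PXA_DCMD_LENGTH is 0x1fff (8191)
 * and PDMA_ALIGNMENT is 3 (8-byte units), so
 *
 *	PDMA_MAX_DESC_BYTES = 0x1fff & ~0x7 = 0x1ff8 = 8184 bytes
 *
 * is the largest 8-byte-aligned length a single hw descriptor can carry.
 */
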
struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc vd;	/* Virtual descriptor */
	int nb_desc;			/* Number of hw. descriptors */
	size_t len;			/* Number of bytes xfered */
	dma_addr_t first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool misaligned;
	bool cyclic;
	struct dma_pool *desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw *hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int idx;
	void __iomem *base;
	struct pxad_chan *vchan;
};

struct pxad_chan {
	struct virt_dma_chan vc;	/* Virtual channel */
	u32 drcmr;			/* Requestor of the channel */
	enum pxad_chan_prio prio;	/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool misaligned;
	struct dma_slave_config cfg;	/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy *phy;
	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct pxad_device {
	struct dma_device slave;
	int nr_chans;
	int nr_requestors;
	void __iomem *base;
	struct pxad_phy *phys;
	spinlock_t phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry *dbgfs_root;
	struct dentry *dbgfs_state;
	struct dentry **dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

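/*
 * Reading aid for pxad_drcmr(): requestor lines below 64 sit in the first
 * DRCMR bank, e.g. line 24 maps to offset 0x100 + 24 * 4 = 0x160; higher
 * lines move to the second bank, e.g. line 64 maps to 0x1000 + 64 * 4 =
 * 0x1100.
 */
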
/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int dbg_show_descriptors(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

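/*
 * Note for the helpers above and below: the 2-bit DCMD burst field decodes
 * as 4 << burst (1 -> 8, 2 -> 16, 3 -> 32 bytes), 0 meaning no burst, and
 * the str_prio[] lookup in dbg_show_chan_state() relies on the phys being
 * grouped by priority four at a time (see the table in lookup_phy()).
 */
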
static int dbg_show_chan_state(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int dbg_show_state(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
	.owner		= THIS_MODULE, \
	.open		= dbg_open_##name, \
	.llseek		= seq_lseek, \
	.read		= seq_read, \
	.release	= single_release, \
}

DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &dbg_fops_chan_state);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &dbg_fops_descriptors);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&dbg_fops_requester_chan);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}

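/*
 * The resulting debugfs layout, assuming the standard mount point
 * (illustrative):
 *
 *	/sys/kernel/debug/<dev_name>/state
 *	/sys/kernel/debug/<dev_name>/channels/<n>/state
 *	/sys/kernel/debug/<dev_name>/channels/<n>/descriptors
 *	/sys/kernel/debug/<dev_name>/channels/<n>/requesters
 */
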
static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
		goto err_root;

	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
						pdev, &dbg_fops_state);
	if (!pdev->dbgfs_state)
		goto err_state;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_chan),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < pdev->nr_chans; i++) {
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
		if (!pdev->dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
	debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
	pr_err("pxad: debugfs is not available\n");
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

/*
 * In the transition phase where legacy pxa handling is done at the same time as
 * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
 * through legacy_reserved. Legacy code reserves DMA channels by setting the
 * corresponding bits in legacy_reserved.
 */
static u32 legacy_reserved;
static u32 legacy_unavailable;

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			if ((i < 32) && (legacy_reserved & BIT(i)))
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				if (i < 32)
					legacy_unavailable |= BIT(i);
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

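/*
 * Note on lookup_phy(): the outer loop walks from the requested priority
 * down to PXAD_PRIO_HIGHEST (0), so a channel asking for a low-priority phy
 * may be bound to a higher-priority one when none is free at its own level.
 */
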
static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;
	int i;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (i = 0; i < 32; i++)
		if (chan->phy == &pdev->phys[i])
			legacy_unavailable &= ~BIT(i);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
}

static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

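/*
 * Completion detection explained (derived from set_updater_desc() above and
 * is_desc_completed() below): the last hw descriptor of every transfer is an
 * "updater" which copies one u32 from its own base address (its ddadr field,
 * holding DDADR_STOP) to base + 8 (its dtadr field). Until the updater runs,
 * dtadr reads back as dsadr + 8; afterwards the copied value has overwritten
 * it, which is exactly what is_desc_completed() tests.
 */
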
static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
			return true;
	}

	return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d\n",
			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (is_desc_completed(vd)) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return IRQ_HANDLED;
}

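/*
 * The demultiplexer below consumes DINT one set bit at a time:
 * "dint &= (dint - 1)" clears the lowest set bit, so e.g. dint = 0x14
 * dispatches to phys 2 and 4, then the loop terminates.
 */
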
static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if ((i < 32) && (legacy_reserved & BIT(i)))
			continue;
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

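/*
 * Shape of a transfer built by pxad_alloc_desc() and finished by
 * set_updater_desc() (illustrative):
 *
 *	sw_desc->first -> hw_desc[0] -> hw_desc[1] -> ... -> updater (DDADR_STOP)
 *
 * Each arrow is the ddadr field of the previous descriptor holding the DMA
 * address returned by dma_pool_alloc(). For cyclic transfers the descriptor
 * before the updater is pointed back at sw_desc->first instead.
 */
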
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			 PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}

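/*
 * Example of a dcmd composed by pxad_get_config() (values follow from the
 * definitions at the top of this file): a DMA_MEM_TO_DEV config with
 * dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES and dst_maxburst = 32 on a
 * flow-controlled requestor yields
 *
 *	dcmd = PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG |
 *	       PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32;
 *
 * with *dev_dst set to cfg.dst_addr; the memory-side address comes from the
 * sg list or buffer handed to the prep functions below.
 */
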
static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

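/*
 * Note on the misaligned flags set in the two prep functions above: any
 * address with (addr & 0x7) != 0 violates PDMA_ALIGNMENT, and will force the
 * phy's DALGN bit when the transfer is launched (see phy_enable()).
 */
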
static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

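/*
 * Residue accounting (for pxad_residue() below): the phy's current DSADR or
 * DTADR is located within the descriptor chain; every descriptor after the
 * partially-completed one contributes its full length, and the walk stops at
 * nb_desc - 1 so the trailing updater descriptor is never counted.
 */
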
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	dma_async_device_unregister(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

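/*
 * Two interrupt wirings are supported by pxad_init_phys() above: one line
 * per phy (nr_irq > 1), where pxad_chan_handler() is attached per channel,
 * or a single line for the whole controller (nr_irq == 1), where
 * pxad_int_handler() demultiplexes DINT.
 */
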
static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
	}

	return dma_async_device_register(&pdev->slave);
}

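/*
 * Illustrative device tree consumer of this driver; the property names match
 * what pxad_probe() below parses, while the addresses and counts are made up:
 *
 *	pdma: dma-controller@40000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0x40000000 0x10000>;
 *		interrupts = <25>;
 *		#dma-cells = <2>;	// <drcmr, prio>, see pxad_dma_xlate()
 *		#dma-channels = <32>;
 *		#dma-requests = <75>;
 *	};
 */
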
static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(&op->dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
	} else {
		dma_channels = 32;	/* default 32 channels */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver = {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

int pxad_toggle_reserved_channel(int legacy_channel)
{
	if (legacy_unavailable & (BIT(legacy_channel)))
		return -EBUSY;
	legacy_reserved ^= BIT(legacy_channel);
	return 0;
}
EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");

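/*
 * Sketch of how a legacy (non-DT) client can use the exported filter,
 * assuming the pxad_param layout from <linux/dma/pxa-dma.h>; the requestor
 * number 24 is arbitrary:
 *
 *	struct pxad_param param = {
 *		.prio = PXAD_PRIO_LOWEST,
 *		.drcmr = 24,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
 *						&param, dev, "tx");
 */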