arch/sparc/kernel/iommu.c


DEFINITIONS

This source file includes the following definitions.
  1. iommu_flushall
  2. iopte_make_dummy
  3. iommu_table_init
  4. alloc_npages
  5. iommu_alloc_ctx
  6. iommu_free_ctx
  7. dma_4u_alloc_coherent
  8. dma_4u_free_coherent
  9. dma_4u_map_page
  10. strbuf_flush
  11. dma_4u_unmap_page
  12. dma_4u_map_sg
  13. fetch_sg_ctx
  14. dma_4u_unmap_sg
  15. dma_4u_sync_single_for_cpu
  16. dma_4u_sync_sg_for_cpu
  17. dma_4u_supported

// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)     \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
        (*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
        (*((STC)->strbuf_flushflag) != 0UL)

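/* Register accessors: IOMMU and streaming-buffer control registers are
 * reached with physically addressed loads/stores that bypass the external
 * cache (ASI_PHYS_BYPASS_EC_E), so no kernel mapping of the register space
 * is needed.
 */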
#define iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
        struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
        if (iommu->iommu_flushinv) {
                iommu_write(iommu->iommu_flushinv, ~(u64)0);
        } else {
                unsigned long tag;
                int entry;

                tag = iommu->iommu_tags;
                for (entry = 0; entry < 16; entry++) {
                        iommu_write(tag, 0);
                        tag += 8;
                }

                /* Ensure completion of previous PIO writes. */
                (void) iommu_read(iommu->write_complete_reg);
        }
}

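/* IOPTE protection templates.  A consistent mapping is valid and cacheable;
 * a streaming mapping additionally routes accesses through the streaming
 * buffer.  The DMA context, when one is used, occupies the IOPTE_CONTEXT
 * field starting at bit 47, which is why the unmap and sync paths extract
 * it with ">> 47UL".
 */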
#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)    \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

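/* Set up the software state for one IOMMU: the allocation bitmap backing
 * iommu->tbl, the dummy page that inactive IOPTEs point at, and the TSB
 * (the IOMMU page table itself) with every entry initialized to the dummy
 * page.  Context 0 is reserved as "no context", hence ctx_lowest_free
 * starts at 1.
 */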
int iommu_table_init(struct iommu *iommu, int tsbsize,
                     u32 dma_offset, u32 dma_addr_mask,
                     int numa_node)
{
        unsigned long i, order, sz, num_tsb_entries;
        struct page *page;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->tbl.table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map.  */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
        if (!iommu->tbl.map)
                return -ENOMEM;

        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
                            (tlb_type != hypervisor ? iommu_flushall : NULL),
                            false, 1, false);

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
                goto out_free_map;
        }
        iommu->dummy_page = (unsigned long) page_address(page);
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself.  */
        order = get_order(tsbsize);
        page = alloc_pages_node(numa_node, GFP_KERNEL, order);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
                goto out_free_dummy_page;
        }
        iommu->page_table = (iopte_t *)page_address(page);

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);

        return 0;

out_free_dummy_page:
        free_page(iommu->dummy_page);
        iommu->dummy_page = 0UL;

out_free_map:
        kfree(iommu->tbl.map);
        iommu->tbl.map = NULL;

        return -ENOMEM;
}

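/* Allocate a run of 'npages' consecutive IOPTEs from the map table and
 * return a pointer to the first one, or NULL if the table is exhausted.
 */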
static inline iopte_t *alloc_npages(struct device *dev,
                                    struct iommu *iommu,
                                    unsigned long npages)
{
        unsigned long entry;

        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
        if (unlikely(entry == IOMMU_ERROR_CODE))
                return NULL;

        return iommu->page_table + entry;
}

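/* DMA context allocation.  Contexts allow the streaming buffer to be
 * flushed by context tag rather than page by page.  The bitmap is searched
 * circularly starting at ctx_lowest_free; context 0 means "no context" and
 * is returned when the space is exhausted.
 */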
static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

        if (unlikely(n == IOMMU_NUM_CTXS)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

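/* Allocate 'size' bytes of zeroed, physically contiguous memory and map it
 * consistent (no streaming buffer) in the IOMMU.  The returned DMA address
 * is the offset of the allocated IOPTE run within the table, relative to
 * table_map_base.
 */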
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
                                   unsigned long attrs)
{
        unsigned long order, first_page;
        struct iommu *iommu;
        struct page *page;
        int npages, nid;
        iopte_t *iopte;
        void *ret;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->tbl.table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

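/* Undo dma_4u_alloc_coherent(): release the IOPTE range and free the
 * pages.  The "order < 10" check mirrors the allocation-side limit.
 */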
static void dma_4u_free_coherent(struct device *dev, size_t size,
                                 void *cpu, dma_addr_t dvma,
                                 unsigned long attrs)
{
        struct iommu *iommu;
        unsigned long order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;

        iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

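/* Map a single page range for streaming DMA: size the request in IOMMU
 * pages, grab IOPTEs (and a context if the hardware supports context
 * flushing), then fill the entries with the chosen protection bits.
 * Write permission is only granted when data may flow to memory, i.e. for
 * any direction other than DMA_TO_DEVICE.
 */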
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
                                  unsigned long attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (unlikely(direction == DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        base = alloc_npages(dev, iommu, npages);
        spin_lock_irqsave(&iommu->lock, flags);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->tbl.table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_MAPPING_ERROR;
}

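/* Push dirty data out of the streaming buffer before the CPU looks at it.
 * When both the IOMMU and the streaming buffer support contexts, flushing
 * is done by context tag; otherwise (or if the context flush times out)
 * each IO page is flushed individually.  For directions that can dirty
 * memory, completion is confirmed by arming the flush flag and polling it
 * with a bounded busy-wait.
 */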
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
                         u32 vaddr, unsigned long ctx, unsigned long npages,
                         enum dma_data_direction direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

                iommu_write(flushreg, ctx);
                val = iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "strbuf_flush: ctx flush "
                               "timeout matchreg[%llx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == DMA_TO_DEVICE)
                return;

        STC_FLUSHFLAG_INIT(strbuf);
        iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

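/* Tear down a dma_4u_map_page() mapping: flush the streaming buffer if
 * needed, point the IOPTEs back at the dummy page, release the context,
 * and return the range to the allocator.
 */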
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
                              unsigned long attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                strbuf_flush(strbuf, iommu, bus_addr, ctx,
                             npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        iommu_free_ctx(iommu, ctx);
        spin_unlock_irqrestore(&iommu->lock, flags);

        iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

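/* Map a scatterlist.  Each element gets its own IOPTE run, but adjacent
 * elements are merged into one DMA segment when the allocated bus
 * addresses are contiguous and the merge would neither exceed the
 * device's maximum segment size nor cross its segment boundary.  On
 * failure all partially built mappings are backed out and 0 is returned.
 */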
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
                         unsigned long attrs)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot, ctx;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct strbuf *strbuf;
        struct iommu *iommu;
        unsigned long base_shift;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;
        if (nelems == 0 || !iommu)
                return 0;

        spin_lock_irqsave(&iommu->lock, flags);

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        if (strbuf->strbuf_enabled)
                prot = IOPTE_STREAMING(ctx);
        else
                prot = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                prot |= IOPTE_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
                iopte_t *base;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
                                              &handle, (unsigned long)(-1), 0);

                /* Handle failure */
                if (unlikely(entry == IOMMU_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                base = iommu->page_table + entry;

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->tbl.table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        iopte_val(*base) = prot | paddr;
                        base++;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_MAPPING_ERROR;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages, entry, j;
                        iopte_t *base;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);

                        entry = (vaddr - iommu->tbl.table_map_base)
                                >> IO_PAGE_SHIFT;
                        base = iommu->page_table + entry;

                        for (j = 0; j < npages; j++)
                                iopte_make_dummy(iommu, base + j);

                        iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
                                             IOMMU_ERROR_CODE);

                        s->dma_address = DMA_MAPPING_ERROR;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
        unsigned long ctx = 0;

        if (iommu->iommu_ctxflush) {
                iopte_t *base;
                u32 bus_addr;
                struct iommu_map_table *tbl = &iommu->tbl;

                bus_addr = sg->dma_address & IO_PAGE_MASK;
                base = iommu->page_table +
                        ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
        }
        return ctx;
}

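/* Unmap a scatterlist previously mapped with dma_4u_map_sg().  The context
 * recorded in the first mapping covers the whole list; each segment is
 * flushed (unless the caller skips CPU sync), reset to the dummy page and
 * returned to the allocator.
 */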
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        unsigned long flags, ctx;
        struct scatterlist *sg;
        struct strbuf *strbuf;
        struct iommu *iommu;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        ctx = fetch_sg_ctx(iommu, sglist);

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;
                iopte_t *base;
                int i;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

                entry = ((dma_handle - iommu->tbl.table_map_base)
                         >> IO_PAGE_SHIFT);
                base = iommu->page_table + entry;

                dma_handle &= IO_PAGE_MASK;
                if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        strbuf_flush(strbuf, iommu, dma_handle, ctx,
                                     npages, direction);

                for (i = 0; i < npages; i++)
                        iopte_make_dummy(iommu, base + i);

                iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
                                     IOMMU_ERROR_CODE);
                sg = sg_next(sg);
        }

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

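/* Make device-written data visible to the CPU for a single mapping by
 * flushing the streaming buffer covering it.  Nothing to do when the
 * streaming buffer is disabled.
 */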
static void dma_4u_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;
                struct iommu_map_table *tbl = &iommu->tbl;

                iopte = iommu->page_table +
                        ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

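/* As above, but for a scatterlist: flush one span that stretches from the
 * first segment's bus address to the end of the last segment with a
 * non-zero dma_length.
 */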
static void dma_4u_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        struct scatterlist *sg, *sgprv;
        u32 bus_addr;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;
                struct iommu_map_table *tbl = &iommu->tbl;

                iopte = iommu->page_table + ((sglist[0].dma_address -
                        tbl->table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;
                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

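/* A device can use this IOMMU if its DMA mask covers the address range the
 * IOMMU hands out (dma_addr_mask), modulo the ALi sound-device workaround.
 */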
static int dma_4u_supported(struct device *dev, u64 device_mask)
{
        struct iommu *iommu = dev->archdata.iommu;

        if (ali_sound_dma_hack(dev, device_mask))
                return 1;

        if (device_mask < iommu->dma_addr_mask)
                return 0;
        return 1;
}

static const struct dma_map_ops sun4u_dma_ops = {
        .alloc                  = dma_4u_alloc_coherent,
        .free                   = dma_4u_free_coherent,
        .map_page               = dma_4u_map_page,
        .unmap_page             = dma_4u_unmap_page,
        .map_sg                 = dma_4u_map_sg,
        .unmap_sg               = dma_4u_unmap_sg,
        .sync_single_for_cpu    = dma_4u_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_4u_sync_sg_for_cpu,
        .dma_supported          = dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
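
/* Usage sketch (illustrative only): drivers never call the dma_4u_*
 * routines directly; they go through the generic DMA API, which on sun4u
 * dispatches via the dma_ops pointer above.  Roughly:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... program the device with 'dma', wait for completion ...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 *
 * which ends up in dma_4u_map_page()/dma_4u_unmap_page() here.
 */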
