root/drivers/staging/gasket/gasket_page_table.c

DEFINITIONS

This source file includes the following definitions:
  1. gasket_page_table_init
  2. gasket_is_pte_range_free
  3. gasket_free_extended_subtable
  4. gasket_page_table_garbage_collect_nolock
  5. gasket_page_table_garbage_collect
  6. gasket_page_table_cleanup
  7. gasket_page_table_partition
  8. is_coherent
  9. gasket_release_page
  10. gasket_perform_mapping
  11. gasket_simple_page_idx
  12. gasket_extended_lvl0_page_idx
  13. gasket_extended_lvl1_page_idx
  14. gasket_alloc_simple_entries
  15. gasket_perform_unmapping
  16. gasket_unmap_simple_pages
  17. gasket_unmap_extended_pages
  18. gasket_addr_is_simple
  19. gasket_components_to_dev_address
  20. gasket_is_simple_dev_addr_bad
  21. gasket_is_extended_dev_addr_bad
  22. gasket_page_table_unmap_nolock
  23. gasket_map_simple_pages
  24. gasket_alloc_extended_subtable
  25. gasket_alloc_extended_entries
  26. gasket_map_extended_pages
  27. gasket_page_table_map
  28. gasket_page_table_unmap
  29. gasket_page_table_unmap_all_nolock
  30. gasket_page_table_unmap_all
  31. gasket_page_table_reset
  32. gasket_page_table_lookup_page
  33. gasket_page_table_are_addrs_bad
  34. gasket_page_table_is_dev_addr_bad
  35. gasket_page_table_max_size
  36. gasket_page_table_num_entries
  37. gasket_page_table_num_simple_entries
  38. gasket_page_table_num_active_pages
  39. gasket_page_table_system_status
  40. gasket_set_user_virt
  41. gasket_alloc_coherent_memory
  42. gasket_free_coherent_memory
  43. gasket_free_coherent_memory_all

// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of Gasket page table support.
 *
 * Copyright (C) 2018 Google, Inc.
 */

/*
 * This file assumes 4kB pages throughout; this can be factored out when
 * necessary.
 *
 * There is a configurable number of page table entries, as well as a
 * configurable bit index for the extended address flag. Both of these are
 * specified in gasket_page_table_init through the page_table_config parameter.
 *
 * The following example assumes:
 *   page_table_config->total_entries = 8192
 *   page_table_config->extended_bit = 63
 *
 * Address format:
 * Simple addresses - those whose containing pages are directly placed in the
 * device's address translation registers - are laid out as:
 * [ 63 - 25: 0 | 24 - 12: page index | 11 - 0: page offset ]
 * page index:  The index of the containing page in the device's address
 *              translation registers.
 * page offset: The index of the address into the containing page.
 *
 * Extended addresses - those whose containing pages are contained in a
 * second-level page table whose address is present in the device's address
 * translation registers - are laid out as:
 * [ 63: flag | 62 - 34: 0 | 33 - 21: dev/level 0 index |
 *   20 - 12: host/level 1 index | 11 - 0: page offset ]
 * flag:        Marker indicating that this is an extended address. Always 1.
 * dev index:   The index of the first-level page in the device's extended
 *              address translation registers.
 * host index:  The index of the containing page in the [host-resident] second-
 *              level page table.
 * page offset: The index of the address into the containing [second-level]
 *              page.
 */
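
/*
 * Worked example, using the example config above: the extended address
 * 0x8000000000405123 decomposes as
 *   flag:        bit 63 is set, so this is an extended address;
 *   lvl0 index:  (0x405123 >> 21) & (8192 - 1) = 2;
 *   lvl1 index:  (0x405123 >> 12) & (512 - 1)  = 5;
 *   page offset:  0x405123 & (4096 - 1)        = 0x123.
 * The page is thus reached through entry 2 of the device's extended address
 * translation registers, then entry 5 of that second-level table.
 */
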
#include "gasket_page_table.h"

#include <linux/device.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "gasket_constants.h"
#include "gasket_core.h"

/* Constants & utility macros */
/* The number of pages that can be mapped into each second-level page table. */
#define GASKET_PAGES_PER_SUBTABLE 512

/* The starting position of the page index in a simple virtual address. */
#define GASKET_SIMPLE_PAGE_SHIFT 12

/* Flag indicating that a [device] slot is valid for use. */
#define GASKET_VALID_SLOT_FLAG 1

/*
 * The starting position of the level 0 page index (i.e., the entry in the
 * device's extended address registers) in an extended address.
 * Also can be thought of as (log2(PAGE_SIZE) + log2(PAGES_PER_SUBTABLE)),
 * or (12 + 9).
 */
#define GASKET_EXTENDED_LVL0_SHIFT 21

/*
 * Number of first-level pages that Gasket chips support. Equivalent to
 * log2(NUM_LVL0_PAGE_TABLES).
 *
 * At a maximum, this allows a 34-bit address space (i.e., 16 GB), since
 *   34 = GASKET_EXTENDED_LVL0_WIDTH + log2(PAGES_PER_SUBTABLE) + log2(PAGE_SIZE)
 *      = 13 + 9 + 12
 */
#define GASKET_EXTENDED_LVL0_WIDTH 13
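
/*
 * Equivalently, in capacity terms: 2^13 first-level entries, each mapping a
 * 512-entry subtable of 4kB pages, cover 8192 * 512 * 4096 bytes
 * = 2^34 bytes = 16 GB of extended address space.
 */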

/*
 * The starting position of the level 1 page index (i.e., the entry in the
 * host second-level/sub-table) in an extended address.
 */
#define GASKET_EXTENDED_LVL1_SHIFT 12

/* Type declarations */
/* Valid states for a struct gasket_page_table_entry. */
enum pte_status {
        PTE_FREE,
        PTE_INUSE,
};

/*
 * Mapping metadata for a single page.
 *
 * In this file, host-side page table entries are referred to as that (or PTEs).
 * Where device vs. host entries are differentiated, device-side or -visible
 * entries are called "slots". A slot may be either an entry in the device's
 * address translation table registers or an entry in a second-level page
 * table ("subtable").
 *
 * The full data in this structure is visible on the host [of course]. Only
 * the address contained in dma_addr is communicated to the device; that points
 * to the actual page mapped and described by this structure.
 */
struct gasket_page_table_entry {
        /* The status of this entry/slot: free or in use. */
        enum pte_status status;

        /*
         * Index for alignment into host vaddrs.
         * When a user specifies a host address for a mapping, that address may
         * not be page-aligned. Offset is the index into the containing page of
         * the host address (i.e., host_vaddr & (PAGE_SIZE - 1)).
         * This is necessary for translating between user-specified addresses
         * and page-aligned addresses.
         */
        int offset;

        /* Address of the page in DMA space. */
        dma_addr_t dma_addr;

        /* Linux page descriptor for the page described by this structure. */
        struct page *page;

        /*
         * If this is an extended and first-level entry, sublevel points
         * to the second-level entries underneath this entry.
         */
        struct gasket_page_table_entry *sublevel;
};

/*
 * Maintains the virtual-to-physical address mapping for a coherent page that
 * is allocated by this module for a given device.
 * Note that the Linux kernel cannot track user virtual mappings of coherent
 * pages: a coherent page has no associated struct page, so the kernel cannot
 * perform a get_user_pages_xx() call on a physical address that was allocated
 * coherent. This structure trivially implements that tracking instead.
 */
struct gasket_coherent_page_entry {
        /* Phys address, dma'able by the owner device */
        dma_addr_t paddr;

        /* User virtual address that was mapped by the mmap kernel subsystem */
        u64 user_virt;

        /* Kernel virtual address */
        u64 kernel_virt;

        /*
         * Whether this page has been mapped into a user land process virtual
         * space
         */
        u32 in_use;
};

/*
 * [Host-side] page table descriptor.
 *
 * This structure tracks the metadata necessary to manage both simple and
 * extended page tables.
 */
struct gasket_page_table {
        /* The config used to create this page table. */
        struct gasket_page_table_config config;

        /* The number of simple (single-level) entries in the page table. */
        uint num_simple_entries;

        /* The number of extended (two-level) entries in the page table. */
        uint num_extended_entries;

        /* Array of [host-side] page table entries. */
        struct gasket_page_table_entry *entries;

        /* Number of actively mapped kernel pages in this table. */
        uint num_active_pages;

        /* Device register: base of/first slot in the page table. */
        u64 __iomem *base_slot;

        /*
         * Device register: holds the offset indicating the start of the
         * extended address region of the device's address translation table.
         */
        u64 __iomem *extended_offset_reg;

        /* Device structure for the underlying device. Only used for logging. */
        struct device *device;

        /* PCI system descriptor for the underlying device. */
        struct pci_dev *pci_dev;

        /* Location of the extended address bit for this Gasket device. */
        u64 extended_flag;

        /* Mutex to protect page table internals. */
        struct mutex mutex;

        /* Number of coherent pages accessible through this page table */
        int num_coherent_pages;

        /*
         * List of coherent memory (physical) allocated for a device.
         *
         * This structure also remembers the user virtual mapping; this is
         * hacky, but we need to do it because the kernel doesn't keep track
         * of the user coherent (pfn) pages or of the virt-to-coherent-page
         * mapping.
         * TODO: use the find_vma() APIs to convert a host address to a
         * vm_area and then to a dma_addr_t, instead of storing the user
         * virtual address in gasket_coherent_page_entry.
         *
         * Note that the user virtual mapping is created by the driver, in the
         * gasket_mmap function, so user_virt belongs in the driver anyhow.
         */
        struct gasket_coherent_page_entry *coherent_pages;
};

/* See gasket_page_table.h for description. */
int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
                           const struct gasket_bar_data *bar_data,
                           const struct gasket_page_table_config *page_table_config,
                           struct device *device, struct pci_dev *pci_dev)
{
        ulong bytes;
        struct gasket_page_table *pg_tbl;
        ulong total_entries = page_table_config->total_entries;

        /*
         * TODO: Verify config->total_entries against value read from the
         * hardware register that contains the page table size.
         */
        if (total_entries == ULONG_MAX) {
                dev_dbg(device,
                        "Error reading page table size. Initializing page table with size 0\n");
                total_entries = 0;
        }

        dev_dbg(device,
                "Attempting to initialize page table of size 0x%lx\n",
                total_entries);

        dev_dbg(device,
                "Table has base reg 0x%x, extended offset reg 0x%x\n",
                page_table_config->base_reg,
                page_table_config->extended_reg);

        *ppg_tbl = kzalloc(sizeof(**ppg_tbl), GFP_KERNEL);
        if (!*ppg_tbl) {
                dev_dbg(device, "No memory for page table\n");
                return -ENOMEM;
        }

        pg_tbl = *ppg_tbl;
        bytes = total_entries * sizeof(struct gasket_page_table_entry);
        if (bytes != 0) {
                pg_tbl->entries = vzalloc(bytes);
                if (!pg_tbl->entries) {
                        dev_dbg(device,
                                "No memory for address translation metadata\n");
                        kfree(pg_tbl);
                        *ppg_tbl = NULL;
                        return -ENOMEM;
                }
        }

        mutex_init(&pg_tbl->mutex);
        memcpy(&pg_tbl->config, page_table_config, sizeof(*page_table_config));
        if (pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_NORMAL ||
            pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_SIMPLE) {
                pg_tbl->num_simple_entries = total_entries;
                pg_tbl->num_extended_entries = 0;
                pg_tbl->extended_flag = 1ull << page_table_config->extended_bit;
        } else {
                pg_tbl->num_simple_entries = 0;
                pg_tbl->num_extended_entries = total_entries;
                pg_tbl->extended_flag = 0;
        }
        pg_tbl->num_active_pages = 0;
        pg_tbl->base_slot =
                (u64 __iomem *)&bar_data->virt_base[page_table_config->base_reg];
        pg_tbl->extended_offset_reg =
                (u64 __iomem *)&bar_data->virt_base[page_table_config->extended_reg];
        pg_tbl->device = get_device(device);
        pg_tbl->pci_dev = pci_dev;

        dev_dbg(device, "Page table initialized successfully\n");

        return 0;
}
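
/*
 * Minimal usage sketch (illustrative only; bar_data, page_table_config, dev
 * and pci_dev are assumed to come from the caller's probe path, and error
 * handling beyond init is elided):
 *
 *      struct gasket_page_table *pg_tbl;
 *      int ret;
 *
 *      ret = gasket_page_table_init(&pg_tbl, bar_data, page_table_config,
 *                                   dev, pci_dev);
 *      if (ret)
 *              return ret;
 *      ...
 *      gasket_page_table_cleanup(pg_tbl);
 */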

/*
 * Check if a range of PTEs is free.
 * The page table mutex must be held by the caller.
 */
static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,
                                     uint num_entries)
{
        int i;

        for (i = 0; i < num_entries; i++) {
                if (ptes[i].status != PTE_FREE)
                        return false;
        }

        return true;
}

/*
 * Free a second level page [sub]table.
 * The page table mutex must be held before this call.
 */
static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
                                          struct gasket_page_table_entry *pte,
                                          u64 __iomem *slot)
{
        /* Release the page table from the driver */
        pte->status = PTE_FREE;

        /* Release the page table from the device */
        writeq(0, slot);

        if (pte->dma_addr)
                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
                               DMA_TO_DEVICE);

        vfree(pte->sublevel);

        if (pte->page)
                free_page((ulong)page_address(pte->page));

        memset(pte, 0, sizeof(struct gasket_page_table_entry));
}

/*
 * Actually perform collection.
 * The page table mutex must be held by the caller.
 */
static void
gasket_page_table_garbage_collect_nolock(struct gasket_page_table *pg_tbl)
{
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot;

        /*
         * XXX FIX ME XXX -- more efficient to keep a usage count
         * rather than scanning the second level page tables
         */
        for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
             slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
             pte < pg_tbl->entries + pg_tbl->config.total_entries;
             pte++, slot++) {
                if (pte->status == PTE_INUSE) {
                        if (gasket_is_pte_range_free(pte->sublevel,
                                                     GASKET_PAGES_PER_SUBTABLE))
                                gasket_free_extended_subtable(pg_tbl, pte,
                                                              slot);
                }
        }
}

/* See gasket_page_table.h for description. */
void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_garbage_collect_nolock(pg_tbl);
        mutex_unlock(&pg_tbl->mutex);
}

/* See gasket_page_table.h for description. */
void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
{
        /* Deallocate free second-level tables. */
        gasket_page_table_garbage_collect(pg_tbl);

        /* TODO: Check that all PTEs have been freed? */

        vfree(pg_tbl->entries);
        pg_tbl->entries = NULL;

        put_device(pg_tbl->device);
        kfree(pg_tbl);
}

/* See gasket_page_table.h for description. */
int gasket_page_table_partition(struct gasket_page_table *pg_tbl,
                                uint num_simple_entries)
{
        int i, start;

        mutex_lock(&pg_tbl->mutex);
        if (num_simple_entries > pg_tbl->config.total_entries) {
                mutex_unlock(&pg_tbl->mutex);
                return -EINVAL;
        }

        gasket_page_table_garbage_collect_nolock(pg_tbl);

        start = min(pg_tbl->num_simple_entries, num_simple_entries);

        for (i = start; i < pg_tbl->config.total_entries; i++) {
                if (pg_tbl->entries[i].status != PTE_FREE) {
                        dev_err(pg_tbl->device, "entry %d is not free\n", i);
                        mutex_unlock(&pg_tbl->mutex);
                        return -EBUSY;
                }
        }

        pg_tbl->num_simple_entries = num_simple_entries;
        pg_tbl->num_extended_entries =
                pg_tbl->config.total_entries - num_simple_entries;
        writeq(num_simple_entries, pg_tbl->extended_offset_reg);

        mutex_unlock(&pg_tbl->mutex);
        return 0;
}
EXPORT_SYMBOL(gasket_page_table_partition);

/*
 * Return whether a host buffer was mapped as coherent memory.
 *
 * A Gasket page table currently supports one contiguous DMA range, mapped to
 * one contiguous virtual memory range. Check whether host_addr lies within
 * that range.
 */
static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
{
        u64 min, max;

        /* whether the host address is within user virt range */
        if (!pg_tbl->coherent_pages)
                return 0;

        min = (u64)pg_tbl->coherent_pages[0].user_virt;
        max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;

        return min <= host_addr && host_addr < max;
}
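
/*
 * For example, with four coherent pages whose user mapping starts at
 * 0x10000000 (and 4kB pages), is_coherent() returns true exactly for host
 * addresses in [0x10000000, 0x10004000), i.e. min <= host_addr < max.
 */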

/* Safely return a page to the OS. */
static bool gasket_release_page(struct page *page)
{
        if (!page)
                return false;

        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);

        return true;
}

/*
 * Get and map last level page table buffers.
 *
 * slots is the location(s) to write the device-mapped page address(es). If
 * this is a simple mapping, these will be address translation registers. If
 * this is an extended mapping, these will be within a second-level page table
 * allocated by the host and so must have their __iomem attribute cast away.
 */
static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
                                  struct gasket_page_table_entry *ptes,
                                  u64 __iomem *slots, ulong host_addr,
                                  uint num_pages, int is_simple_mapping)
{
        int ret;
        ulong offset;
        struct page *page;
        dma_addr_t dma_addr;
        ulong page_addr;
        int i;

        for (i = 0; i < num_pages; i++) {
                page_addr = host_addr + i * PAGE_SIZE;
                offset = page_addr & (PAGE_SIZE - 1);
                if (is_coherent(pg_tbl, host_addr)) {
                        u64 off =
                                (u64)host_addr -
                                (u64)pg_tbl->coherent_pages[0].user_virt;
                        ptes[i].page = NULL;
                        ptes[i].offset = offset;
                        ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
                                           off + i * PAGE_SIZE;
                } else {
                        ret = get_user_pages_fast(page_addr - offset, 1,
                                                  FOLL_WRITE, &page);

                        if (ret <= 0) {
                                dev_err(pg_tbl->device,
                                        "get user pages failed for addr=0x%lx, offset=0x%lx [ret=%d]\n",
                                        page_addr, offset, ret);
                                return ret ? ret : -ENOMEM;
                        }
                        ++pg_tbl->num_active_pages;

                        ptes[i].page = page;
                        ptes[i].offset = offset;

                        /* Map the page into DMA space. */
                        ptes[i].dma_addr =
                                dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(pg_tbl->device,
                                              ptes[i].dma_addr)) {
                                if (gasket_release_page(ptes[i].page))
                                        --pg_tbl->num_active_pages;

                                memset(&ptes[i], 0,
                                       sizeof(struct gasket_page_table_entry));
                                return -EINVAL;
                        }
                }

                /* Make the DMA-space address available to the device. */
                dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;

                if (is_simple_mapping) {
                        writeq(dma_addr, &slots[i]);
                } else {
                        ((u64 __force *)slots)[i] = dma_addr;
                        /*
                         * Extended page table vectors are in DRAM,
                         * and so need to be synced each time they are updated.
                         */
                        dma_map_single(pg_tbl->device,
                                       (void *)&((u64 __force *)slots)[i],
                                       sizeof(u64), DMA_TO_DEVICE);
                }
                ptes[i].status = PTE_INUSE;
        }
        return 0;
}

/*
 * Return the index of the page for the address in the simple table.
 * Does not perform validity checking.
 */
static int gasket_simple_page_idx(struct gasket_page_table *pg_tbl,
                                  ulong dev_addr)
{
        return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
                (pg_tbl->config.total_entries - 1);
}

/*
 * Return the level 0 page index for the given address.
 * Does not perform validity checking.
 */
static ulong gasket_extended_lvl0_page_idx(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr)
{
        return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
                (pg_tbl->config.total_entries - 1);
}

/*
 * Return the level 1 page index for the given address.
 * Does not perform validity checking.
 */
static ulong gasket_extended_lvl1_page_idx(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr)
{
        return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
               (GASKET_PAGES_PER_SUBTABLE - 1);
}

/*
 * Allocate page table entries in a simple table.
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_simple_entries(struct gasket_page_table *pg_tbl,
                                       ulong dev_addr, uint num_pages)
{
        if (!gasket_is_pte_range_free(pg_tbl->entries +
                                      gasket_simple_page_idx(pg_tbl, dev_addr),
                                      num_pages))
                return -EBUSY;

        return 0;
}

/*
 * Unmap and release mapped pages.
 * The page table mutex must be held by the caller.
 */
static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
                                     struct gasket_page_table_entry *ptes,
                                     u64 __iomem *slots, uint num_pages,
                                     int is_simple_mapping)
{
        int i;
        /*
         * For each page table entry and corresponding entry in the device's
         * address translation table:
         */
        for (i = 0; i < num_pages; i++) {
                /* release the address from the device, */
                if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
                        writeq(0, &slots[i]);
                } else {
                        ((u64 __force *)slots)[i] = 0;
                        /* sync above PTE update before updating mappings */
                        wmb();
                }

                /* release the address from the driver, */
                if (ptes[i].status == PTE_INUSE) {
                        if (ptes[i].page && ptes[i].dma_addr) {
                                dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                        }
                        if (gasket_release_page(ptes[i].page))
                                --pg_tbl->num_active_pages;
                }

                /* and clear the PTE. */
                memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
        }
}

/*
 * Unmap and release pages mapped to simple addresses.
 * The page table mutex must be held by the caller.
 */
static void gasket_unmap_simple_pages(struct gasket_page_table *pg_tbl,
                                      ulong dev_addr, uint num_pages)
{
        uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);

        gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot,
                                 pg_tbl->base_slot + slot, num_pages, 1);
}

/*
 * Unmap and release buffers mapped to extended addresses.
 * The page table mutex must be held by the caller.
 */
static void gasket_unmap_extended_pages(struct gasket_page_table *pg_tbl,
                                        ulong dev_addr, uint num_pages)
{
        uint slot_idx, remain, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot_base;

        remain = num_pages;
        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                /* TODO: Add check to ensure pte remains valid? */
                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);

                if (pte->status == PTE_INUSE) {
                        slot_base = (u64 __iomem *)(page_address(pte->page) +
                                                    pte->offset);
                        gasket_perform_unmapping(pg_tbl,
                                                 pte->sublevel + slot_idx,
                                                 slot_base + slot_idx, len, 0);
                }

                remain -= len;
                slot_idx = 0;
                pte++;
        }
}

/* Returns true if the specified device address is simple. */
static inline bool gasket_addr_is_simple(struct gasket_page_table *pg_tbl,
                                         ulong addr)
{
        return !((addr) & (pg_tbl)->extended_flag);
}

/*
 * Convert (simple, page, offset) into a device address.
 * Examples (the extended examples assume extended_flag = 1ull << 39):
 * Simple page 0, offset 32:
 *  Input (1, 0, 32), Output 0x20
 * Simple page 1000, offset 511:
 *  Input (1, 1000, 511), Output 0x3E81FF
 * Extended page 0, offset 32:
 *  Input (0, 0, 32), Output 0x8000000020
 * Extended page 1000, offset 511:
 *  Input (0, 1000, 511), Output 0x80003E81FF
 */
static ulong gasket_components_to_dev_address(struct gasket_page_table *pg_tbl,
                                              int is_simple, uint page_index,
                                              uint offset)
{
        ulong dev_addr = (page_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;

        return is_simple ? dev_addr : (pg_tbl->extended_flag | dev_addr);
}

/*
 * Validity checking for simple addresses.
 *
 * Verify that address translation commutes (from address to/from page + offset)
 * and that the requested page range starts and ends within the set of
 * currently-partitioned simple pages.
 */
static bool gasket_is_simple_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                          ulong dev_addr, uint num_pages)
{
        ulong page_offset = dev_addr & (PAGE_SIZE - 1);
        ulong page_index =
                (dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);

        if (gasket_components_to_dev_address(pg_tbl, 1, page_index,
                                             page_offset) != dev_addr) {
                dev_err(pg_tbl->device, "address is invalid, 0x%lX\n",
                        dev_addr);
                return true;
        }

        if (page_index >= pg_tbl->num_simple_entries) {
                dev_err(pg_tbl->device,
                        "starting slot at %lu is too large, max is < %u\n",
                        page_index, pg_tbl->num_simple_entries);
                return true;
        }

        if (page_index + num_pages > pg_tbl->num_simple_entries) {
                dev_err(pg_tbl->device,
                        "ending slot at %lu is too large, max is <= %u\n",
                        page_index + num_pages, pg_tbl->num_simple_entries);
                return true;
        }

        return false;
}

/*
 * Validity checking for extended addresses.
 *
 * Verify that address translation commutes (from address to/from page +
 * offset) and that the requested page range starts and ends within the set of
 * currently-partitioned extended pages.
 */
static bool gasket_is_extended_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                            ulong dev_addr, uint num_pages)
{
        /* Starting byte index of dev_addr into the first mapped page */
        ulong page_offset = dev_addr & (PAGE_SIZE - 1);
        ulong page_global_idx, page_lvl0_idx;
        ulong num_lvl0_pages;
        ulong addr;

        /* check if the device address is out of bounds */
        addr = dev_addr & ~((pg_tbl)->extended_flag);
        if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
                dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
                        dev_addr);
                return true;
        }

        /* Find the starting sub-page index in the space of all sub-pages. */
        page_global_idx = (dev_addr / PAGE_SIZE) &
                (pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1);

        /* Find the starting level 0 index. */
        page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        /* Get the count of affected level 0 pages. */
        num_lvl0_pages = DIV_ROUND_UP(num_pages, GASKET_PAGES_PER_SUBTABLE);

        if (gasket_components_to_dev_address(pg_tbl, 0, page_global_idx,
                                             page_offset) != dev_addr) {
                dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
                        dev_addr);
                return true;
        }

        if (page_lvl0_idx >= pg_tbl->num_extended_entries) {
                dev_err(pg_tbl->device,
                        "starting level 0 slot at %lu is too large, max is < %u\n",
                        page_lvl0_idx, pg_tbl->num_extended_entries);
                return true;
        }

        if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) {
                dev_err(pg_tbl->device,
                        "ending level 0 slot at %lu is too large, max is <= %u\n",
                        page_lvl0_idx + num_lvl0_pages,
                        pg_tbl->num_extended_entries);
                return true;
        }

        return false;
}

/*
 * Non-locking entry to unmapping routines.
 * The page table mutex must be held by the caller.
 */
static void gasket_page_table_unmap_nolock(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr, uint num_pages)
{
        if (!num_pages)
                return;

        if (gasket_addr_is_simple(pg_tbl, dev_addr))
                gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
        else
                gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
}

/*
 * Allocate and map pages to simple addresses.
 * If there is an error, no pages are mapped.
 */
static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
                                   ulong host_addr, ulong dev_addr,
                                   uint num_pages)
{
        int ret;
        uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);

        ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
        if (ret) {
                dev_err(pg_tbl->device,
                        "page table slots %u (@ 0x%lx) to %u are not available\n",
                        slot_idx, dev_addr, slot_idx + num_pages - 1);
                return ret;
        }

        ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx,
                                     pg_tbl->base_slot + slot_idx, host_addr,
                                     num_pages, 1);

        if (ret) {
                gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
                dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
        }
        return ret;
}

/*
 * Allocate a second level page table.
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
                                          struct gasket_page_table_entry *pte,
                                          u64 __iomem *slot)
{
        ulong page_addr, subtable_bytes;
        dma_addr_t dma_addr;

        /* XXX FIX ME XXX this is inefficient for non-4K page sizes */

        /*
         * The GFP_DMA flag must be passed on architectures for which part
         * of the memory range is not considered DMA'able.
         * This seems to be the case for the Juno board with a 4.5.0 Linaro
         * kernel.
         */
        page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page_addr)
                return -ENOMEM;
        pte->page = virt_to_page((void *)page_addr);
        pte->offset = 0;

        subtable_bytes = sizeof(struct gasket_page_table_entry) *
                GASKET_PAGES_PER_SUBTABLE;
        pte->sublevel = vzalloc(subtable_bytes);
        if (!pte->sublevel) {
                free_page(page_addr);
                memset(pte, 0, sizeof(struct gasket_page_table_entry));
                return -ENOMEM;
        }

        /* Map the page into DMA space. */
        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(pg_tbl->device, pte->dma_addr)) {
                free_page(page_addr);
                vfree(pte->sublevel);
                memset(pte, 0, sizeof(struct gasket_page_table_entry));
                return -ENOMEM;
        }

        /* make the addresses available to the device */
        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
        writeq(dma_addr, slot);

        pte->status = PTE_INUSE;

        return 0;
}

/*
 * Allocate slots in an extended page table.  Check to see if a range of page
 * table slots is available. If necessary, memory is allocated for second level
 * page tables.
 *
 * Note that memory for second level page tables is allocated as needed, but
 * that memory is only freed on the final close of the device file, when the
 * page tables are repartitioned, or the device is removed.  If there is an
 * error or if the full range of slots is not available, any memory
 * allocated for second level page tables remains allocated until final close,
 * repartition, or device removal.
 *
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
                                         ulong dev_addr, uint num_entries)
{
        int ret = 0;
        uint remain, subtable_slot_idx, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot;

        remain = num_entries;
        subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
        slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
               gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                len = min(remain,
                          GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);

                if (pte->status == PTE_FREE) {
                        ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
                        if (ret) {
                                dev_err(pg_tbl->device,
                                        "no memory for extended addr subtable\n");
                                return ret;
                        }
                } else {
                        if (!gasket_is_pte_range_free(pte->sublevel +
                                                      subtable_slot_idx, len))
                                return -EBUSY;
                }

                remain -= len;
                subtable_slot_idx = 0;
                pte++;
                slot++;
        }

        return 0;
}

/*
 * gasket_map_extended_pages - Get and map buffers to extended addresses.
 * If there is an error, no pages are mapped.
 */
static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
                                     ulong host_addr, ulong dev_addr,
                                     uint num_pages)
{
        int ret;
        ulong dev_addr_end;
        uint slot_idx, remain, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot_base;

        ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
        if (ret) {
                dev_addr_end = dev_addr + (num_pages * PAGE_SIZE) - 1;
                dev_err(pg_tbl->device,
                        "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are not available\n",
                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
                        dev_addr,
                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
                return ret;
        }

        remain = num_pages;
        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);

                slot_base =
                        (u64 __iomem *)(page_address(pte->page) + pte->offset);
                ret = gasket_perform_mapping(pg_tbl, pte->sublevel + slot_idx,
                                             slot_base + slot_idx, host_addr,
                                             len, 0);
                if (ret) {
                        gasket_page_table_unmap_nolock(pg_tbl, dev_addr,
                                                       num_pages);
                        return ret;
                }

                remain -= len;
                slot_idx = 0;
                pte++;
                host_addr += len * PAGE_SIZE;
        }

        return 0;
}

/*
 * See gasket_page_table.h for general description.
 *
 * gasket_page_table_map calls either gasket_map_simple_pages() or
 * gasket_map_extended_pages() to actually perform the mapping.
 *
 * The page table mutex is held for the entire operation.
 */
int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
                          ulong dev_addr, uint num_pages)
{
        int ret;

        if (!num_pages)
                return 0;

        mutex_lock(&pg_tbl->mutex);

        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
                ret = gasket_map_simple_pages(pg_tbl, host_addr, dev_addr,
                                              num_pages);
        } else {
                ret = gasket_map_extended_pages(pg_tbl, host_addr, dev_addr,
                                                num_pages);
        }

        mutex_unlock(&pg_tbl->mutex);
        return ret;
}
EXPORT_SYMBOL(gasket_page_table_map);
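
/*
 * Typical caller sequence (sketch; variable names are illustrative):
 * validate the addresses first, then map, and later unmap the same range:
 *
 *      if (gasket_page_table_are_addrs_bad(pg_tbl, host_addr, dev_addr,
 *                                          bytes))
 *              return -EINVAL;
 *      ret = gasket_page_table_map(pg_tbl, host_addr, dev_addr,
 *                                  bytes / PAGE_SIZE);
 *      ...
 *      gasket_page_table_unmap(pg_tbl, dev_addr, bytes / PAGE_SIZE);
 */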

/*
 * See gasket_page_table.h for general description.
 *
 * gasket_page_table_unmap takes the page table lock and calls either
 * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
 * actually unmap the pages from device space.
 *
 * The page table mutex is held for the entire operation.
 */
void gasket_page_table_unmap(struct gasket_page_table *pg_tbl, ulong dev_addr,
                             uint num_pages)
{
        if (!num_pages)
                return;

        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
        mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap);

static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
{
        gasket_unmap_simple_pages(pg_tbl,
                                  gasket_components_to_dev_address(pg_tbl, 1, 0,
                                                                   0),
                                  pg_tbl->num_simple_entries);
        gasket_unmap_extended_pages(pg_tbl,
                                    gasket_components_to_dev_address(pg_tbl, 0,
                                                                     0, 0),
                                    pg_tbl->num_extended_entries *
                                    GASKET_PAGES_PER_SUBTABLE);
}

/* See gasket_page_table.h for description. */
void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_all_nolock(pg_tbl);
        mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap_all);

/* See gasket_page_table.h for description. */
void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_all_nolock(pg_tbl);
        writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
        mutex_unlock(&pg_tbl->mutex);
}

/* See gasket_page_table.h for description. */
int gasket_page_table_lookup_page(struct gasket_page_table *pg_tbl,
                                  ulong dev_addr, struct page **ppage,
                                  ulong *poffset)
{
        uint page_num;
        struct gasket_page_table_entry *pte;

        mutex_lock(&pg_tbl->mutex);
        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
                page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
                if (page_num >= pg_tbl->num_simple_entries)
                        goto fail;

                pte = pg_tbl->entries + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;
        } else {
                /* Find the level 0 entry, */
                page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
                if (page_num >= pg_tbl->num_extended_entries)
                        goto fail;

                pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;

                /* and its contained level 1 entry. */
                page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
                pte = pte->sublevel + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;
        }

        *ppage = pte->page;
        *poffset = pte->offset;
        mutex_unlock(&pg_tbl->mutex);
        return 0;

fail:
        *ppage = NULL;
        *poffset = 0;
        mutex_unlock(&pg_tbl->mutex);
        return -EINVAL;
}

/* See gasket_page_table.h for description. */
bool gasket_page_table_are_addrs_bad(struct gasket_page_table *pg_tbl,
                                     ulong host_addr, ulong dev_addr,
                                     ulong bytes)
{
        if (host_addr & (PAGE_SIZE - 1)) {
                dev_err(pg_tbl->device,
                        "host mapping address 0x%lx must be page aligned\n",
                        host_addr);
                return true;
        }

        return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
}
EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);

/* See gasket_page_table.h for description. */
bool gasket_page_table_is_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                       ulong dev_addr, ulong bytes)
{
        uint num_pages = bytes / PAGE_SIZE;

        if (bytes & (PAGE_SIZE - 1)) {
                dev_err(pg_tbl->device,
                        "mapping size 0x%lX must be page aligned\n", bytes);
                return true;
        }

        if (num_pages == 0) {
                dev_err(pg_tbl->device,
                        "requested mapping is less than one page: %lu / %lu\n",
                        bytes, PAGE_SIZE);
                return true;
        }

        if (gasket_addr_is_simple(pg_tbl, dev_addr))
                return gasket_is_simple_dev_addr_bad(pg_tbl, dev_addr,
                                                     num_pages);
        return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
}
EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);

/* See gasket_page_table.h for description. */
uint gasket_page_table_max_size(struct gasket_page_table *page_table)
{
        if (!page_table)
                return 0;
        return page_table->config.total_entries;
}
EXPORT_SYMBOL(gasket_page_table_max_size);

/* See gasket_page_table.h for description. */
uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
}
EXPORT_SYMBOL(gasket_page_table_num_entries);

/* See gasket_page_table.h for description. */
uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_simple_entries;
}
EXPORT_SYMBOL(gasket_page_table_num_simple_entries);

/* See gasket_page_table.h for description. */
uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_active_pages;
}
EXPORT_SYMBOL(gasket_page_table_num_active_pages);

/* See gasket_page_table.h */
int gasket_page_table_system_status(struct gasket_page_table *page_table)
{
        if (!page_table)
                return GASKET_STATUS_LAMED;

        if (gasket_page_table_num_entries(page_table) == 0) {
                dev_dbg(page_table->device, "Page table size is 0\n");
                return GASKET_STATUS_LAMED;
        }

        return GASKET_STATUS_ALIVE;
}

/* Record the user virtual address to coherent DMA memory mapping. */
int gasket_set_user_virt(struct gasket_dev *gasket_dev, u64 size,
                         dma_addr_t dma_address, ulong vma)
{
        int j;
        struct gasket_page_table *pg_tbl;

        unsigned int num_pages = size / PAGE_SIZE;

        /*
         * TODO: for future chipsets, better handling of the case where
         * multiple page tables are supported on a given device
         */
        pg_tbl = gasket_dev->page_table[0];
        if (!pg_tbl) {
                dev_dbg(gasket_dev->dev, "%s: invalid page table index\n",
                        __func__);
                return 0;
        }
        for (j = 0; j < num_pages; j++) {
                pg_tbl->coherent_pages[j].user_virt =
                        (u64)vma + j * PAGE_SIZE;
        }
        return 0;
}

/* Allocate a block of coherent memory. */
int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
                                 dma_addr_t *dma_address, u64 index)
{
        dma_addr_t handle;
        void *mem;
        int j;
        unsigned int num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        const struct gasket_driver_desc *driver_desc =
                gasket_get_driver_desc(gasket_dev);

        if (!gasket_dev->page_table[index])
                return -EFAULT;

        if (num_pages == 0)
                return -EINVAL;

        mem = dma_alloc_coherent(gasket_get_device(gasket_dev),
                                 num_pages * PAGE_SIZE, &handle, GFP_KERNEL);
        if (!mem)
                goto nomem;

        gasket_dev->page_table[index]->num_coherent_pages = num_pages;

        /* allocate the per-page tracking array */
        gasket_dev->page_table[index]->coherent_pages =
                kcalloc(num_pages,
                        sizeof(*gasket_dev->page_table[index]->coherent_pages),
                        GFP_KERNEL);
        if (!gasket_dev->page_table[index]->coherent_pages)
                goto nomem;

        gasket_dev->coherent_buffer.length_bytes =
                PAGE_SIZE * (num_pages);
        gasket_dev->coherent_buffer.phys_base = handle;
        gasket_dev->coherent_buffer.virt_base = mem;

        *dma_address = driver_desc->coherent_buffer_description.base;
        for (j = 0; j < num_pages; j++) {
                gasket_dev->page_table[index]->coherent_pages[j].paddr =
                        handle + j * PAGE_SIZE;
                gasket_dev->page_table[index]->coherent_pages[j].kernel_virt =
                        (u64)mem + j * PAGE_SIZE;
        }

        return 0;

nomem:
        if (mem) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  num_pages * PAGE_SIZE, mem, handle);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }

        kfree(gasket_dev->page_table[index]->coherent_pages);
        gasket_dev->page_table[index]->coherent_pages = NULL;
        gasket_dev->page_table[index]->num_coherent_pages = 0;
        return -ENOMEM;
}

/* Free a block of coherent memory. */
int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
                                dma_addr_t dma_address, u64 index)
{
        const struct gasket_driver_desc *driver_desc;

        if (!gasket_dev->page_table[index])
                return -EFAULT;

        driver_desc = gasket_get_driver_desc(gasket_dev);

        if (driver_desc->coherent_buffer_description.base != dma_address)
                return -EADDRNOTAVAIL;

        if (gasket_dev->coherent_buffer.length_bytes) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  gasket_dev->coherent_buffer.length_bytes,
                                  gasket_dev->coherent_buffer.virt_base,
                                  gasket_dev->coherent_buffer.phys_base);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }

        kfree(gasket_dev->page_table[index]->coherent_pages);
        gasket_dev->page_table[index]->coherent_pages = NULL;
        gasket_dev->page_table[index]->num_coherent_pages = 0;

        return 0;
}

/* Release all coherent memory. */
void gasket_free_coherent_memory_all(struct gasket_dev *gasket_dev, u64 index)
{
        if (!gasket_dev->page_table[index])
                return;

        if (gasket_dev->coherent_buffer.length_bytes) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  gasket_dev->coherent_buffer.length_bytes,
                                  gasket_dev->coherent_buffer.virt_base,
                                  gasket_dev->coherent_buffer.phys_base);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }
}
