root/drivers/gpu/drm/ttm/ttm_page_alloc.c


DEFINITIONS

This source file includes the following definitions.
  1. ttm_pool_kobj_release
  2. ttm_pool_store
  3. ttm_pool_show
  4. ttm_get_pool
  5. ttm_pages_put
  6. ttm_pool_update_free_locked
  7. ttm_page_pool_free
  8. ttm_pool_shrink_scan
  9. ttm_pool_shrink_count
  10. ttm_pool_mm_shrink_init
  11. ttm_pool_mm_shrink_fini
  12. ttm_set_pages_caching
  13. ttm_handle_caching_state_failure
  14. ttm_alloc_new_pages
  15. ttm_page_pool_fill_locked
  16. ttm_page_pool_get_pages
  17. ttm_put_pages
  18. ttm_get_pages
  19. ttm_page_pool_init_locked
  20. ttm_page_alloc_init
  21. ttm_page_alloc_fini
  22. ttm_pool_unpopulate_helper
  23. ttm_pool_populate
  24. ttm_pool_unpopulate
  25. ttm_populate_and_map_pages
  26. ttm_unmap_and_unpopulate_pages
  27. ttm_page_alloc_debugfs

   1 /*
   2  * Copyright (c) Red Hat Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice (including the
  12  * next paragraph) shall be included in all copies or substantial portions
  13  * of the Software.
  14  *
  15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21  * DEALINGS IN THE SOFTWARE.
  22  *
  23  * Authors: Dave Airlie <airlied@redhat.com>
  24  *          Jerome Glisse <jglisse@redhat.com>
  25  *          Pauli Nieminen <suokkos@gmail.com>
  26  */
  27 
  28 /* Simple list-based uncached page pool.
  29  * - Pool collects recently freed pages for reuse.
  30  * - Uses page->lru to keep a free list.
  31  * - Doesn't track pages that are currently in use.
  32  */
  33 
  34 #define pr_fmt(fmt) "[TTM] " fmt
  35 
  36 #include <linux/list.h>
  37 #include <linux/spinlock.h>
  38 #include <linux/highmem.h>
  39 #include <linux/mm_types.h>
  40 #include <linux/module.h>
  41 #include <linux/mm.h>
  42 #include <linux/seq_file.h> /* for seq_printf */
  43 #include <linux/slab.h>
  44 #include <linux/dma-mapping.h>
  45 
  46 #include <linux/atomic.h>
  47 
  48 #include <drm/ttm/ttm_bo_driver.h>
  49 #include <drm/ttm/ttm_page_alloc.h>
  50 #include <drm/ttm/ttm_set_memory.h>
  51 
  52 #define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(struct page *))
  53 #define SMALL_ALLOCATION                16
  54 #define FREE_ALL_PAGES                  (~0U)
  55 /* times are in msecs */
  56 #define PAGE_FREE_INTERVAL              1000
  57 
   58 /**
   59  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
   60  *
   61  * @lock: Protects the shared pool from concurrent access. Must be used with
   62  * irqsave/irqrestore variants because the pool allocator may be called from
   63  * delayed work.
   64  * @fill_lock: Prevent concurrent calls to fill.
   65  * @list: Pool of free uc/wc pages for fast reuse.
   66  * @gfp_flags: Flags to pass to alloc_page.
   67  * @npages: Number of pages in pool.
       * @name: Pool name, shown in the debugfs output.
       * @nfrees: Statistics counter of pages freed from this pool.
       * @nrefills: Statistics counter of pool refills.
       * @order: Allocation order; each pool entry holds 2^order pages.
   68  */
  69 struct ttm_page_pool {
  70         spinlock_t              lock;
  71         bool                    fill_lock;
  72         struct list_head        list;
  73         gfp_t                   gfp_flags;
  74         unsigned                npages;
  75         char                    *name;
  76         unsigned long           nfrees;
  77         unsigned long           nrefills;
  78         unsigned int            order;
  79 };
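      /*
       * Editor's sketch (illustrative, not part of the driver): all pool state
       * is touched under the irqsave variants described above, because frees
       * can run from shrinker or delayed-work context:
       *
       *        unsigned long irq_flags;
       *
       *        spin_lock_irqsave(&pool->lock, irq_flags);
       *        ... modify pool->list / pool->npages / counters ...
       *        spin_unlock_irqrestore(&pool->lock, irq_flags);
       */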
  80 
   81 /**
   82  * Limits for the pool. They are handled without locks because the only place
   83  * where they can change is the sysfs store. They won't have an immediate
   84  * effect anyway, so forcing serialized access to them is pointless.
   85  */
  86 
  87 struct ttm_pool_opts {
  88         unsigned        alloc_size;
  89         unsigned        max_size;
  90         unsigned        small;
  91 };
  92 
  93 #define NUM_POOLS 6
  94 
   95 /**
   96  * struct ttm_pool_manager - Holds memory pools for fast allocation
   97  *
   98  * The manager is a read-only object for the pool code, so it doesn't need
   99  * locking.
  100  *
  101  * @kobj: sysfs object exposing the pool tunables.
  102  * @mm_shrink: Shrinker registered with the mm subsystem to reclaim pool
  103  * pages when memory is tight.
  104  * @options: Tunable limits; see struct ttm_pool_opts.
  105  *
  106  * @pools: All pool objects in use.
  107  **/
 108 struct ttm_pool_manager {
 109         struct kobject          kobj;
 110         struct shrinker         mm_shrink;
 111         struct ttm_pool_opts    options;
 112 
 113         union {
 114                 struct ttm_page_pool    pools[NUM_POOLS];
 115                 struct {
 116                         struct ttm_page_pool    wc_pool;
 117                         struct ttm_page_pool    uc_pool;
 118                         struct ttm_page_pool    wc_pool_dma32;
 119                         struct ttm_page_pool    uc_pool_dma32;
 120                         struct ttm_page_pool    wc_pool_huge;
 121                         struct ttm_page_pool    uc_pool_huge;
  122                 };
 123         };
 124 };
 125 
 126 static struct attribute ttm_page_pool_max = {
 127         .name = "pool_max_size",
 128         .mode = S_IRUGO | S_IWUSR
 129 };
 130 static struct attribute ttm_page_pool_small = {
 131         .name = "pool_small_allocation",
 132         .mode = S_IRUGO | S_IWUSR
 133 };
 134 static struct attribute ttm_page_pool_alloc_size = {
 135         .name = "pool_allocation_size",
 136         .mode = S_IRUGO | S_IWUSR
 137 };
 138 
 139 static struct attribute *ttm_pool_attrs[] = {
 140         &ttm_page_pool_max,
 141         &ttm_page_pool_small,
 142         &ttm_page_pool_alloc_size,
 143         NULL
 144 };
 145 
 146 static void ttm_pool_kobj_release(struct kobject *kobj)
 147 {
 148         struct ttm_pool_manager *m =
 149                 container_of(kobj, struct ttm_pool_manager, kobj);
 150         kfree(m);
 151 }
 152 
 153 static ssize_t ttm_pool_store(struct kobject *kobj,
 154                 struct attribute *attr, const char *buffer, size_t size)
 155 {
 156         struct ttm_pool_manager *m =
 157                 container_of(kobj, struct ttm_pool_manager, kobj);
 158         int chars;
 159         unsigned val;
 160         chars = sscanf(buffer, "%u", &val);
 161         if (chars == 0)
 162                 return size;
 163 
  164         /* Convert KiB to number of pages */
 165         val = val / (PAGE_SIZE >> 10);
 166 
 167         if (attr == &ttm_page_pool_max)
 168                 m->options.max_size = val;
 169         else if (attr == &ttm_page_pool_small)
 170                 m->options.small = val;
 171         else if (attr == &ttm_page_pool_alloc_size) {
 172                 if (val > NUM_PAGES_TO_ALLOC*8) {
 173                         pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
 174                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
 175                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 176                         return size;
 177                 } else if (val > NUM_PAGES_TO_ALLOC) {
 178                         pr_warn("Setting allocation size to larger than %lu is not recommended\n",
 179                                 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 180                 }
 181                 m->options.alloc_size = val;
 182         }
 183 
 184         return size;
 185 }
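      /*
       * Usage sketch (editor's addition): these attributes take values in KiB
       * and convert them to pages. With 4 KiB pages, writing 65536 to the
       * "pool_max_size" file (created under the memory-global kobject, see
       * ttm_page_alloc_init() below) yields:
       *
       *        val = 65536 / (PAGE_SIZE >> 10) = 65536 / 4 = 16384 pages
       */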
 186 
 187 static ssize_t ttm_pool_show(struct kobject *kobj,
 188                 struct attribute *attr, char *buffer)
 189 {
 190         struct ttm_pool_manager *m =
 191                 container_of(kobj, struct ttm_pool_manager, kobj);
 192         unsigned val = 0;
 193 
 194         if (attr == &ttm_page_pool_max)
 195                 val = m->options.max_size;
 196         else if (attr == &ttm_page_pool_small)
 197                 val = m->options.small;
 198         else if (attr == &ttm_page_pool_alloc_size)
 199                 val = m->options.alloc_size;
 200 
 201         val = val * (PAGE_SIZE >> 10);
 202 
 203         return snprintf(buffer, PAGE_SIZE, "%u\n", val);
 204 }
 205 
 206 static const struct sysfs_ops ttm_pool_sysfs_ops = {
 207         .show = &ttm_pool_show,
 208         .store = &ttm_pool_store,
 209 };
 210 
 211 static struct kobj_type ttm_pool_kobj_type = {
 212         .release = &ttm_pool_kobj_release,
 213         .sysfs_ops = &ttm_pool_sysfs_ops,
 214         .default_attrs = ttm_pool_attrs,
 215 };
 216 
 217 static struct ttm_pool_manager *_manager;
 218 
 219 /**
  220  * Select the right pool for the requested caching state and ttm flags. */
 221 static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 222                                           enum ttm_caching_state cstate)
 223 {
 224         int pool_index;
 225 
 226         if (cstate == tt_cached)
 227                 return NULL;
 228 
 229         if (cstate == tt_wc)
 230                 pool_index = 0x0;
 231         else
 232                 pool_index = 0x1;
 233 
 234         if (flags & TTM_PAGE_FLAG_DMA32) {
 235                 if (huge)
 236                         return NULL;
 237                 pool_index |= 0x2;
 238 
 239         } else if (huge) {
 240                 pool_index |= 0x4;
 241         }
 242 
 243         return &_manager->pools[pool_index];
 244 }
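      /*
       * Editor's note: the computed pool_index selects from the pools[] union
       * in struct ttm_pool_manager:
       *
       *        wc          -> 0 (wc_pool)         uc          -> 1 (uc_pool)
       *        wc + DMA32  -> 2 (wc_pool_dma32)   uc + DMA32  -> 3 (uc_pool_dma32)
       *        wc + huge   -> 4 (wc_pool_huge)    uc + huge   -> 5 (uc_pool_huge)
       *
       * tt_cached requests and huge DMA32 requests return NULL; those pages
       * bypass the pools and go straight to the page allocator.
       */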
 245 
 246 /* set memory back to wb and free the pages. */
 247 static void ttm_pages_put(struct page *pages[], unsigned npages,
 248                 unsigned int order)
 249 {
 250         unsigned int i, pages_nr = (1 << order);
 251 
 252         if (order == 0) {
 253                 if (ttm_set_pages_array_wb(pages, npages))
 254                         pr_err("Failed to set %d pages to wb!\n", npages);
 255         }
 256 
 257         for (i = 0; i < npages; ++i) {
 258                 if (order > 0) {
 259                         if (ttm_set_pages_wb(pages[i], pages_nr))
 260                                 pr_err("Failed to set %d pages to wb!\n", pages_nr);
 261                 }
 262                 __free_pages(pages[i], order);
 263         }
 264 }
 265 
 266 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
 267                 unsigned freed_pages)
 268 {
 269         pool->npages -= freed_pages;
 270         pool->nfrees += freed_pages;
 271 }
 272 
 273 /**
 274  * Free pages from pool.
 275  *
  276  * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
  277  * pages in one go.
  278  *
  279  * @pool: Pool to free the pages from.
  280  * @nr_free: Number of pages to free; FREE_ALL_PAGES empties the pool.
  281  * @use_static: Safe to use the static buffer.
  282  **/
 283 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 284                               bool use_static)
 285 {
 286         static struct page *static_buf[NUM_PAGES_TO_ALLOC];
 287         unsigned long irq_flags;
 288         struct page *p;
 289         struct page **pages_to_free;
 290         unsigned freed_pages = 0,
 291                  npages_to_free = nr_free;
 292 
 293         if (NUM_PAGES_TO_ALLOC < nr_free)
 294                 npages_to_free = NUM_PAGES_TO_ALLOC;
 295 
 296         if (use_static)
 297                 pages_to_free = static_buf;
 298         else
 299                 pages_to_free = kmalloc_array(npages_to_free,
 300                                               sizeof(struct page *),
 301                                               GFP_KERNEL);
 302         if (!pages_to_free) {
 303                 pr_debug("Failed to allocate memory for pool free operation\n");
 304                 return 0;
 305         }
 306 
 307 restart:
 308         spin_lock_irqsave(&pool->lock, irq_flags);
 309 
 310         list_for_each_entry_reverse(p, &pool->list, lru) {
 311                 if (freed_pages >= npages_to_free)
 312                         break;
 313 
 314                 pages_to_free[freed_pages++] = p;
 315                 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
 316                 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
 317                         /* remove range of pages from the pool */
 318                         __list_del(p->lru.prev, &pool->list);
 319 
 320                         ttm_pool_update_free_locked(pool, freed_pages);
 321                         /**
 322                          * Because changing page caching is costly
 323                          * we unlock the pool to prevent stalling.
 324                          */
 325                         spin_unlock_irqrestore(&pool->lock, irq_flags);
 326 
 327                         ttm_pages_put(pages_to_free, freed_pages, pool->order);
 328                         if (likely(nr_free != FREE_ALL_PAGES))
 329                                 nr_free -= freed_pages;
 330 
 331                         if (NUM_PAGES_TO_ALLOC >= nr_free)
 332                                 npages_to_free = nr_free;
 333                         else
 334                                 npages_to_free = NUM_PAGES_TO_ALLOC;
 335 
 336                         freed_pages = 0;
 337 
 338                         /* free all so restart the processing */
 339                         if (nr_free)
 340                                 goto restart;
 341 
  342                         /* Not allowed to fall through or break, because
  343                          * the code that follows runs under the spinlock
  344                          * while we are already outside it here.
  345                          */
 346                         goto out;
 347 
 348                 }
 349         }
 350 
 351         /* remove range of pages from the pool */
 352         if (freed_pages) {
 353                 __list_del(&p->lru, &pool->list);
 354 
 355                 ttm_pool_update_free_locked(pool, freed_pages);
 356                 nr_free -= freed_pages;
 357         }
 358 
 359         spin_unlock_irqrestore(&pool->lock, irq_flags);
 360 
 361         if (freed_pages)
 362                 ttm_pages_put(pages_to_free, freed_pages, pool->order);
 363 out:
 364         if (pages_to_free != static_buf)
 365                 kfree(pages_to_free);
 366         return nr_free;
 367 }
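      /*
       * Call sketch (editor's addition): emptying a pool completely, as the
       * teardown path in ttm_page_alloc_fini() does:
       *
       *        ttm_page_pool_free(pool, FREE_ALL_PAGES, true);
       *
       * For a bounded request, the return value is the part of nr_free that
       * was not freed, which is how the shrinker below computes its progress.
       */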
 368 
 369 /**
  370  * Callback for mm to request that the pool reduce the number of pages held.
 371  *
 372  * XXX: (dchinner) Deadlock warning!
 373  *
 374  * This code is crying out for a shrinker per pool....
 375  */
 376 static unsigned long
 377 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 378 {
 379         static DEFINE_MUTEX(lock);
 380         static unsigned start_pool;
 381         unsigned i;
 382         unsigned pool_offset;
 383         struct ttm_page_pool *pool;
 384         int shrink_pages = sc->nr_to_scan;
 385         unsigned long freed = 0;
 386         unsigned int nr_free_pool;
 387 
 388         if (!mutex_trylock(&lock))
 389                 return SHRINK_STOP;
 390         pool_offset = ++start_pool % NUM_POOLS;
 391         /* select start pool in round robin fashion */
 392         for (i = 0; i < NUM_POOLS; ++i) {
 393                 unsigned nr_free = shrink_pages;
 394                 unsigned page_nr;
 395 
 396                 if (shrink_pages == 0)
 397                         break;
 398 
 399                 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 400                 page_nr = (1 << pool->order);
 401                 /* OK to use static buffer since global mutex is held. */
 402                 nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
 403                 shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
 404                 freed += (nr_free_pool - shrink_pages) << pool->order;
 405                 if (freed >= sc->nr_to_scan)
 406                         break;
 407                 shrink_pages <<= pool->order;
 408         }
 409         mutex_unlock(&lock);
 410         return freed;
 411 }
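      /*
       * Editor's note on the unit conversion above: sc->nr_to_scan is counted
       * in single pages, while a pool holds entries of 2^order pages each.
       * Assuming HPAGE_PMD_ORDER == 9 (2 MiB huge pages with a 4 KiB
       * PAGE_SIZE) and nr_free == 1000:
       *
       *        nr_free_pool = roundup(1000, 512) >> 9 = 1024 >> 9 = 2 entries
       *
       * and the unfreed remainder is scaled back to pages with
       * "shrink_pages <<= pool->order" before trying the next pool.
       */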
 412 
 413 
 414 static unsigned long
 415 ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 416 {
 417         unsigned i;
 418         unsigned long count = 0;
 419         struct ttm_page_pool *pool;
 420 
 421         for (i = 0; i < NUM_POOLS; ++i) {
 422                 pool = &_manager->pools[i];
 423                 count += (pool->npages << pool->order);
 424         }
 425 
 426         return count;
 427 }
 428 
 429 static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 430 {
 431         manager->mm_shrink.count_objects = ttm_pool_shrink_count;
 432         manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 433         manager->mm_shrink.seeks = 1;
 434         return register_shrinker(&manager->mm_shrink);
 435 }
 436 
 437 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 438 {
 439         unregister_shrinker(&manager->mm_shrink);
 440 }
 441 
 442 static int ttm_set_pages_caching(struct page **pages,
 443                 enum ttm_caching_state cstate, unsigned cpages)
 444 {
 445         int r = 0;
 446         /* Set page caching */
 447         switch (cstate) {
 448         case tt_uncached:
 449                 r = ttm_set_pages_array_uc(pages, cpages);
 450                 if (r)
 451                         pr_err("Failed to set %d pages to uc!\n", cpages);
 452                 break;
 453         case tt_wc:
 454                 r = ttm_set_pages_array_wc(pages, cpages);
 455                 if (r)
 456                         pr_err("Failed to set %d pages to wc!\n", cpages);
 457                 break;
 458         default:
 459                 break;
 460         }
 461         return r;
 462 }
 463 
 464 /**
  465  * Free the pages that failed to change caching state. Any pages that have
  466  * already changed their caching state stay on the list so they can still
  467  * be put back into the pool.
 468  */
 469 static void ttm_handle_caching_state_failure(struct list_head *pages,
 470                 int ttm_flags, enum ttm_caching_state cstate,
 471                 struct page **failed_pages, unsigned cpages)
 472 {
 473         unsigned i;
 474         /* Failed pages have to be freed */
 475         for (i = 0; i < cpages; ++i) {
 476                 list_del(&failed_pages[i]->lru);
 477                 __free_page(failed_pages[i]);
 478         }
 479 }
 480 
 481 /**
 482  * Allocate new pages with correct caching.
 483  *
  484  * This function is reentrant if the caller updates count based on the number
  485  * of pages returned in the pages array.
 486  */
 487 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 488                                int ttm_flags, enum ttm_caching_state cstate,
 489                                unsigned count, unsigned order)
 490 {
 491         struct page **caching_array;
 492         struct page *p;
 493         int r = 0;
 494         unsigned i, j, cpages;
 495         unsigned npages = 1 << order;
 496         unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 497 
 498         /* allocate array for page caching change */
 499         caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
 500                                       GFP_KERNEL);
 501 
 502         if (!caching_array) {
 503                 pr_debug("Unable to allocate table for new pages\n");
 504                 return -ENOMEM;
 505         }
 506 
 507         for (i = 0, cpages = 0; i < count; ++i) {
 508                 p = alloc_pages(gfp_flags, order);
 509 
 510                 if (!p) {
 511                         pr_debug("Unable to get page %u\n", i);
 512 
 513                         /* store already allocated pages in the pool after
 514                          * setting the caching state */
 515                         if (cpages) {
 516                                 r = ttm_set_pages_caching(caching_array,
 517                                                           cstate, cpages);
 518                                 if (r)
 519                                         ttm_handle_caching_state_failure(pages,
 520                                                 ttm_flags, cstate,
 521                                                 caching_array, cpages);
 522                         }
 523                         r = -ENOMEM;
 524                         goto out;
 525                 }
 526 
 527                 list_add(&p->lru, pages);
 528 
 529 #ifdef CONFIG_HIGHMEM
  530                 /* gfp flags of a highmem page should never include dma32,
  531                  * so we should be fine in that case.
 532                  */
 533                 if (PageHighMem(p))
 534                         continue;
 535 
 536 #endif
 537                 for (j = 0; j < npages; ++j) {
 538                         caching_array[cpages++] = p++;
 539                         if (cpages == max_cpages) {
 540 
 541                                 r = ttm_set_pages_caching(caching_array,
 542                                                 cstate, cpages);
 543                                 if (r) {
 544                                         ttm_handle_caching_state_failure(pages,
 545                                                 ttm_flags, cstate,
 546                                                 caching_array, cpages);
 547                                         goto out;
 548                                 }
 549                                 cpages = 0;
 550                         }
 551                 }
 552         }
 553 
 554         if (cpages) {
 555                 r = ttm_set_pages_caching(caching_array, cstate, cpages);
 556                 if (r)
 557                         ttm_handle_caching_state_failure(pages,
 558                                         ttm_flags, cstate,
 559                                         caching_array, cpages);
 560         }
 561 out:
 562         kfree(caching_array);
 563 
 564         return r;
 565 }
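      /*
       * Usage sketch (editor's addition): allocating four order-0 uncached
       * pages onto a caller-owned list:
       *
       *        LIST_HEAD(new_pages);
       *        int r;
       *
       *        r = ttm_alloc_new_pages(&new_pages, GFP_HIGHUSER, 0,
       *                                tt_uncached, 4, 0);
       *
       * On failure, the pages that were allocated before the error are
       * already on new_pages, so the caller can still splice or free them.
       */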
 566 
 567 /**
 568  * Fill the given pool if there aren't enough pages and the requested number of
 569  * pages is small.
 570  */
 571 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
 572                                       enum ttm_caching_state cstate,
 573                                       unsigned count, unsigned long *irq_flags)
 574 {
 575         struct page *p;
 576         int r;
 577         unsigned cpages = 0;
 578         /**
 579          * Only allow one pool fill operation at a time.
  580          * If the pool doesn't have enough pages for the allocation, new
  581          * pages are allocated from outside of the pool.
 582          */
 583         if (pool->fill_lock)
 584                 return;
 585 
 586         pool->fill_lock = true;
 587 
  588         /* If the allocation request is small and there are not enough
  589          * pages in the pool, fill it up first. */
 590         if (count < _manager->options.small
 591                 && count > pool->npages) {
 592                 struct list_head new_pages;
 593                 unsigned alloc_size = _manager->options.alloc_size;
 594 
 595                 /**
 596                  * Can't change page caching if in irqsave context. We have to
 597                  * drop the pool->lock.
 598                  */
 599                 spin_unlock_irqrestore(&pool->lock, *irq_flags);
 600 
 601                 INIT_LIST_HEAD(&new_pages);
 602                 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
 603                                         cstate, alloc_size, 0);
 604                 spin_lock_irqsave(&pool->lock, *irq_flags);
 605 
 606                 if (!r) {
 607                         list_splice(&new_pages, &pool->list);
 608                         ++pool->nrefills;
 609                         pool->npages += alloc_size;
 610                 } else {
 611                         pr_debug("Failed to fill pool (%p)\n", pool);
 612                         /* If we have any pages left put them to the pool. */
 613                         list_for_each_entry(p, &new_pages, lru) {
 614                                 ++cpages;
 615                         }
 616                         list_splice(&new_pages, &pool->list);
 617                         pool->npages += cpages;
 618                 }
 619 
 620         }
 621         pool->fill_lock = false;
 622 }
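      /*
       * Editor's note: fill_lock is not a locking primitive; it is a plain
       * bool that is only read and written under pool->lock. A concurrent
       * filler simply returns, and its caller falls back to allocating
       * outside the pool via ttm_alloc_new_pages().
       */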
 623 
 624 /**
 625  * Allocate pages from the pool and put them on the return list.
 626  *
 627  * @return zero for success or negative error code.
 628  */
 629 static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 630                                    struct list_head *pages,
 631                                    int ttm_flags,
 632                                    enum ttm_caching_state cstate,
 633                                    unsigned count, unsigned order)
 634 {
 635         unsigned long irq_flags;
 636         struct list_head *p;
 637         unsigned i;
 638         int r = 0;
 639 
 640         spin_lock_irqsave(&pool->lock, irq_flags);
 641         if (!order)
 642                 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
 643                                           &irq_flags);
 644 
 645         if (count >= pool->npages) {
 646                 /* take all pages from the pool */
 647                 list_splice_init(&pool->list, pages);
 648                 count -= pool->npages;
 649                 pool->npages = 0;
 650                 goto out;
 651         }
  652         /* Find the last page to include for the requested number of pages.
  653          * Walk from whichever end of the list is closer to halve the search. */
 654         if (count <= pool->npages/2) {
 655                 i = 0;
 656                 list_for_each(p, &pool->list) {
 657                         if (++i == count)
 658                                 break;
 659                 }
 660         } else {
 661                 i = pool->npages + 1;
 662                 list_for_each_prev(p, &pool->list) {
 663                         if (--i == count)
 664                                 break;
 665                 }
 666         }
 667         /* Cut 'count' number of pages from the pool */
 668         list_cut_position(pages, &pool->list, p);
 669         pool->npages -= count;
 670         count = 0;
 671 out:
 672         spin_unlock_irqrestore(&pool->lock, irq_flags);
 673 
 674         /* clear the pages coming from the pool if requested */
 675         if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
 676                 struct page *page;
 677 
 678                 list_for_each_entry(page, pages, lru) {
 679                         if (PageHighMem(page))
 680                                 clear_highpage(page);
 681                         else
 682                                 clear_page(page_address(page));
 683                 }
 684         }
 685 
  686         /* If the pool didn't have enough pages, allocate new ones. */
 687         if (count) {
 688                 gfp_t gfp_flags = pool->gfp_flags;
 689 
 690                 /* set zero flag for page allocation if required */
 691                 if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 692                         gfp_flags |= __GFP_ZERO;
 693 
 694                 if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
 695                         gfp_flags |= __GFP_RETRY_MAYFAIL;
 696 
  697         /* ttm_alloc_new_pages doesn't reference the pool, so we can run
  698          * multiple requests in parallel.
  699          */
 700                 r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
 701                                         count, order);
 702         }
 703 
 704         return r;
 705 }
 706 
  707 /* Put all pages in the pages array into the correct pool to await reuse */
 708 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 709                           enum ttm_caching_state cstate)
 710 {
 711         struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
 712 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 713         struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 714 #endif
 715         unsigned long irq_flags;
 716         unsigned i;
 717 
 718         if (pool == NULL) {
 719                 /* No pool for this memory type so free the pages */
 720                 i = 0;
 721                 while (i < npages) {
 722 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 723                         struct page *p = pages[i];
 724 #endif
 725                         unsigned order = 0, j;
 726 
 727                         if (!pages[i]) {
 728                                 ++i;
 729                                 continue;
 730                         }
 731 
 732 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 733                         if (!(flags & TTM_PAGE_FLAG_DMA32) &&
 734                             (npages - i) >= HPAGE_PMD_NR) {
 735                                 for (j = 1; j < HPAGE_PMD_NR; ++j)
 736                                         if (++p != pages[i + j])
 737                                             break;
 738 
 739                                 if (j == HPAGE_PMD_NR)
 740                                         order = HPAGE_PMD_ORDER;
 741                         }
 742 #endif
 743 
 744                         if (page_count(pages[i]) != 1)
 745                                 pr_err("Erroneous page count. Leaking pages.\n");
 746                         __free_pages(pages[i], order);
 747 
 748                         j = 1 << order;
 749                         while (j) {
 750                                 pages[i++] = NULL;
 751                                 --j;
 752                         }
 753                 }
 754                 return;
 755         }
 756 
 757         i = 0;
 758 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 759         if (huge) {
 760                 unsigned max_size, n2free;
 761 
 762                 spin_lock_irqsave(&huge->lock, irq_flags);
 763                 while ((npages - i) >= HPAGE_PMD_NR) {
 764                         struct page *p = pages[i];
 765                         unsigned j;
 766 
 767                         if (!p)
 768                                 break;
 769 
 770                         for (j = 1; j < HPAGE_PMD_NR; ++j)
 771                                 if (++p != pages[i + j])
 772                                     break;
 773 
 774                         if (j != HPAGE_PMD_NR)
 775                                 break;
 776 
 777                         list_add_tail(&pages[i]->lru, &huge->list);
 778 
 779                         for (j = 0; j < HPAGE_PMD_NR; ++j)
 780                                 pages[i++] = NULL;
 781                         huge->npages++;
 782                 }
 783 
 784                 /* Check that we don't go over the pool limit */
 785                 max_size = _manager->options.max_size;
 786                 max_size /= HPAGE_PMD_NR;
 787                 if (huge->npages > max_size)
 788                         n2free = huge->npages - max_size;
 789                 else
 790                         n2free = 0;
 791                 spin_unlock_irqrestore(&huge->lock, irq_flags);
 792                 if (n2free)
 793                         ttm_page_pool_free(huge, n2free, false);
 794         }
 795 #endif
 796 
 797         spin_lock_irqsave(&pool->lock, irq_flags);
 798         while (i < npages) {
 799                 if (pages[i]) {
 800                         if (page_count(pages[i]) != 1)
 801                                 pr_err("Erroneous page count. Leaking pages.\n");
 802                         list_add_tail(&pages[i]->lru, &pool->list);
 803                         pages[i] = NULL;
 804                         pool->npages++;
 805                 }
 806                 ++i;
 807         }
 808         /* Check that we don't go over the pool limit */
 809         npages = 0;
 810         if (pool->npages > _manager->options.max_size) {
 811                 npages = pool->npages - _manager->options.max_size;
  812                 /* free at least NUM_PAGES_TO_ALLOC pages
  813                  * to reduce calls to set_memory_wb() */
 814                 if (npages < NUM_PAGES_TO_ALLOC)
 815                         npages = NUM_PAGES_TO_ALLOC;
 816         }
 817         spin_unlock_irqrestore(&pool->lock, irq_flags);
 818         if (npages)
 819                 ttm_page_pool_free(pool, npages, false);
 820 }
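      /*
       * Editor's note: the "++p != pages[i + j]" scans above detect runs of
       * HPAGE_PMD_NR struct pages that are consecutive in memory, which (for
       * contiguous struct-page layouts) means a physically contiguous huge
       * page that can be returned to the huge pool as a single entry.
       */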
 821 
 822 /*
  823  * On success the pages array will hold npages correctly
 824  * cached pages.
 825  */
 826 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 827                          enum ttm_caching_state cstate)
 828 {
 829         struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
 830 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 831         struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 832 #endif
 833         struct list_head plist;
 834         struct page *p = NULL;
 835         unsigned count, first;
 836         int r;
 837 
 838         /* No pool for cached pages */
 839         if (pool == NULL) {
 840                 gfp_t gfp_flags = GFP_USER;
 841                 unsigned i;
 842 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 843                 unsigned j;
 844 #endif
 845 
 846                 /* set zero flag for page allocation if required */
 847                 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 848                         gfp_flags |= __GFP_ZERO;
 849 
 850                 if (flags & TTM_PAGE_FLAG_NO_RETRY)
 851                         gfp_flags |= __GFP_RETRY_MAYFAIL;
 852 
 853                 if (flags & TTM_PAGE_FLAG_DMA32)
 854                         gfp_flags |= GFP_DMA32;
 855                 else
 856                         gfp_flags |= GFP_HIGHUSER;
 857 
 858                 i = 0;
 859 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 860                 if (!(gfp_flags & GFP_DMA32)) {
 861                         while (npages >= HPAGE_PMD_NR) {
 862                                 gfp_t huge_flags = gfp_flags;
 863 
 864                                 huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
 865                                         __GFP_KSWAPD_RECLAIM;
 866                                 huge_flags &= ~__GFP_MOVABLE;
 867                                 huge_flags &= ~__GFP_COMP;
 868                                 p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
 869                                 if (!p)
 870                                         break;
 871 
 872                                 for (j = 0; j < HPAGE_PMD_NR; ++j)
 873                                         pages[i++] = p++;
 874 
 875                                 npages -= HPAGE_PMD_NR;
 876                         }
 877                 }
 878 #endif
 879 
 880                 first = i;
 881                 while (npages) {
 882                         p = alloc_page(gfp_flags);
 883                         if (!p) {
 884                                 pr_debug("Unable to allocate page\n");
 885                                 return -ENOMEM;
 886                         }
 887 
 888                         /* Swap the pages if we detect consecutive order */
 889                         if (i > first && pages[i - 1] == p - 1)
 890                                 swap(p, pages[i - 1]);
 891 
 892                         pages[i++] = p;
 893                         --npages;
 894                 }
 895                 return 0;
 896         }
 897 
 898         count = 0;
 899 
 900 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 901         if (huge && npages >= HPAGE_PMD_NR) {
 902                 INIT_LIST_HEAD(&plist);
 903                 ttm_page_pool_get_pages(huge, &plist, flags, cstate,
 904                                         npages / HPAGE_PMD_NR,
 905                                         HPAGE_PMD_ORDER);
 906 
 907                 list_for_each_entry(p, &plist, lru) {
 908                         unsigned j;
 909 
 910                         for (j = 0; j < HPAGE_PMD_NR; ++j)
 911                                 pages[count++] = &p[j];
 912                 }
 913         }
 914 #endif
 915 
 916         INIT_LIST_HEAD(&plist);
 917         r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
 918                                     npages - count, 0);
 919 
 920         first = count;
 921         list_for_each_entry(p, &plist, lru) {
 922                 struct page *tmp = p;
 923 
 924                 /* Swap the pages if we detect consecutive order */
 925                 if (count > first && pages[count - 1] == tmp - 1)
 926                         swap(tmp, pages[count - 1]);
 927                 pages[count++] = tmp;
 928         }
 929 
 930         if (r) {
  931                 /* If there are any pages in the list, put them back into
 932                  * the pool.
 933                  */
 934                 pr_debug("Failed to allocate extra pages for large request\n");
 935                 ttm_put_pages(pages, count, flags, cstate);
 936                 return r;
 937         }
 938 
 939         return 0;
 940 }
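      /*
       * Editor's note: when the pooled path fails, the pages gathered so far
       * have already been returned via ttm_put_pages(), which also NULLs the
       * corresponding pages[] slots; the cached (pool == NULL) path instead
       * relies on the caller to free any pages left in the array.
       */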
 941 
 942 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 943                 char *name, unsigned int order)
 944 {
 945         spin_lock_init(&pool->lock);
 946         pool->fill_lock = false;
 947         INIT_LIST_HEAD(&pool->list);
 948         pool->npages = pool->nfrees = 0;
 949         pool->gfp_flags = flags;
 950         pool->name = name;
 951         pool->order = order;
 952 }
 953 
 954 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 955 {
 956         int ret;
 957 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 958         unsigned order = HPAGE_PMD_ORDER;
 959 #else
 960         unsigned order = 0;
 961 #endif
 962 
 963         WARN_ON(_manager);
 964 
 965         pr_info("Initializing pool allocator\n");
 966 
 967         _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 968         if (!_manager)
 969                 return -ENOMEM;
 970 
 971         ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
 972 
 973         ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
 974 
 975         ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
 976                                   GFP_USER | GFP_DMA32, "wc dma", 0);
 977 
 978         ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
 979                                   GFP_USER | GFP_DMA32, "uc dma", 0);
 980 
 981         ttm_page_pool_init_locked(&_manager->wc_pool_huge,
 982                                   (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
 983                                    __GFP_KSWAPD_RECLAIM) &
 984                                   ~(__GFP_MOVABLE | __GFP_COMP),
 985                                   "wc huge", order);
 986 
 987         ttm_page_pool_init_locked(&_manager->uc_pool_huge,
 988                                   (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
 989                                    __GFP_KSWAPD_RECLAIM) &
 990                                   ~(__GFP_MOVABLE | __GFP_COMP)
 991                                   , "uc huge", order);
 992 
 993         _manager->options.max_size = max_pages;
 994         _manager->options.small = SMALL_ALLOCATION;
 995         _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
 996 
 997         ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 998                                    &glob->kobj, "pool");
 999         if (unlikely(ret != 0))
1000                 goto error;
1001 
1002         ret = ttm_pool_mm_shrink_init(_manager);
1003         if (unlikely(ret != 0))
1004                 goto error;
1005         return 0;
1006 
1007 error:
1008         kobject_put(&_manager->kobj);
1009         _manager = NULL;
1010         return ret;
1011 }
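      /*
       * Usage sketch (editor's addition): the memory-global code initializes
       * the allocator once at start of day, with max_pages expressed in
       * pages, e.g.:
       *
       *        ret = ttm_page_alloc_init(glob, max_bytes >> PAGE_SHIFT);
       *
       * where "max_bytes" is a hypothetical stand-in for however the caller
       * sizes its pools.
       */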
1012 
1013 void ttm_page_alloc_fini(void)
1014 {
1015         int i;
1016 
1017         pr_info("Finalizing pool allocator\n");
1018         ttm_pool_mm_shrink_fini(_manager);
1019 
1020         /* OK to use static buffer since global mutex is no longer used. */
1021         for (i = 0; i < NUM_POOLS; ++i)
1022                 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
1023 
1024         kobject_put(&_manager->kobj);
1025         _manager = NULL;
1026 }
1027 
1028 static void
1029 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
1030 {
1031         struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
1032         unsigned i;
1033 
1034         if (mem_count_update == 0)
1035                 goto put_pages;
1036 
1037         for (i = 0; i < mem_count_update; ++i) {
1038                 if (!ttm->pages[i])
1039                         continue;
1040 
1041                 ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
1042         }
1043 
1044 put_pages:
1045         ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1046                       ttm->caching_state);
1047         ttm->state = tt_unpopulated;
1048 }
1049 
1050 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1051 {
1052         struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
1053         unsigned i;
1054         int ret;
1055 
1056         if (ttm->state != tt_unpopulated)
1057                 return 0;
1058 
1059         if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
1060                 return -ENOMEM;
1061 
1062         ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1063                             ttm->caching_state);
1064         if (unlikely(ret != 0)) {
1065                 ttm_pool_unpopulate_helper(ttm, 0);
1066                 return ret;
1067         }
1068 
1069         for (i = 0; i < ttm->num_pages; ++i) {
1070                 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
1071                                                 PAGE_SIZE, ctx);
1072                 if (unlikely(ret != 0)) {
1073                         ttm_pool_unpopulate_helper(ttm, i);
1074                         return -ENOMEM;
1075                 }
1076         }
1077 
1078         if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
1079                 ret = ttm_tt_swapin(ttm);
1080                 if (unlikely(ret != 0)) {
1081                         ttm_pool_unpopulate(ttm);
1082                         return ret;
1083                 }
1084         }
1085 
1086         ttm->state = tt_unbound;
1087         return 0;
1088 }
1089 EXPORT_SYMBOL(ttm_pool_populate);
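      /*
       * Usage sketch (editor's addition): a driver backend with no DMA
       * mapping needs can use this pair directly as its populate hooks;
       * the my_* names below are hypothetical:
       *
       *        static int my_ttm_populate(struct ttm_tt *ttm,
       *                                   struct ttm_operation_ctx *ctx)
       *        {
       *                return ttm_pool_populate(ttm, ctx);
       *        }
       */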
1090 
1091 void ttm_pool_unpopulate(struct ttm_tt *ttm)
1092 {
1093         ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
1094 }
1095 EXPORT_SYMBOL(ttm_pool_unpopulate);
1096 
1097 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
1098                                         struct ttm_operation_ctx *ctx)
1099 {
1100         unsigned i, j;
1101         int r;
1102 
1103         r = ttm_pool_populate(&tt->ttm, ctx);
1104         if (r)
1105                 return r;
1106 
1107         for (i = 0; i < tt->ttm.num_pages; ++i) {
1108                 struct page *p = tt->ttm.pages[i];
1109                 size_t num_pages = 1;
1110 
1111                 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1112                         if (++p != tt->ttm.pages[j])
1113                                 break;
1114 
1115                         ++num_pages;
1116                 }
1117 
1118                 tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
1119                                                   0, num_pages * PAGE_SIZE,
1120                                                   DMA_BIDIRECTIONAL);
1121                 if (dma_mapping_error(dev, tt->dma_address[i])) {
1122                         while (i--) {
1123                                 dma_unmap_page(dev, tt->dma_address[i],
1124                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
1125                                 tt->dma_address[i] = 0;
1126                         }
1127                         ttm_pool_unpopulate(&tt->ttm);
1128                         return -EFAULT;
1129                 }
1130 
1131                 for (j = 1; j < num_pages; ++j) {
1132                         tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
1133                         ++i;
1134                 }
1135         }
1136         return 0;
1137 }
1138 EXPORT_SYMBOL(ttm_populate_and_map_pages);
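      /*
       * Editor's note: the loop above coalesces runs of physically contiguous
       * pages into one dma_map_page() call covering num_pages * PAGE_SIZE,
       * then fans the bus address back out so dma_address[] still holds one
       * address per page. ttm_unmap_and_unpopulate_pages() below re-detects
       * the same runs to undo the mapping with matching sizes.
       */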
1139 
1140 void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
1141 {
1142         unsigned i, j;
1143 
1144         for (i = 0; i < tt->ttm.num_pages;) {
1145                 struct page *p = tt->ttm.pages[i];
1146                 size_t num_pages = 1;
1147 
1148                 if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
1149                         ++i;
1150                         continue;
1151                 }
1152 
1153                 for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1154                         if (++p != tt->ttm.pages[j])
1155                                 break;
1156 
1157                         ++num_pages;
1158                 }
1159 
1160                 dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
1161                                DMA_BIDIRECTIONAL);
1162 
1163                 i += num_pages;
1164         }
1165         ttm_pool_unpopulate(&tt->ttm);
1166 }
1167 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
1168 
1169 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
1170 {
1171         struct ttm_page_pool *p;
1172         unsigned i;
1173         char *h[] = {"pool", "refills", "pages freed", "size"};
1174         if (!_manager) {
1175                 seq_printf(m, "No pool allocator running.\n");
1176                 return 0;
1177         }
1178         seq_printf(m, "%7s %12s %13s %8s\n",
1179                         h[0], h[1], h[2], h[3]);
1180         for (i = 0; i < NUM_POOLS; ++i) {
1181                 p = &_manager->pools[i];
1182 
 1183                 seq_printf(m, "%7s %12lu %13lu %8u\n",
1184                                 p->name, p->nrefills,
1185                                 p->nfrees, p->npages);
1186         }
1187         return 0;
1188 }
1189 EXPORT_SYMBOL(ttm_page_alloc_debugfs);
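      /*
       * Sample output sketch (editor's addition; the numbers are made up,
       * the column layout follows the format strings above):
       *
       *           pool      refills   pages freed     size
       *             wc          240          8192      512
       *         uc dma            3            64       16
       */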
