root/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c

DEFINITIONS

This source file includes the following definitions.
  1. cleanup_freed_objects
  2. fake_free_pages
  3. fake_get_pages
  4. fake_put_pages
  5. fake_dma_object
  6. igt_ppgtt_alloc
  7. lowlevel_hole
  8. close_object_list
  9. fill_hole
  10. walk_hole
  11. pot_hole
  12. drunk_hole
  13. __shrink_hole
  14. shrink_hole
  15. shrink_boom
  16. exercise_ppgtt
  17. igt_ppgtt_fill
  18. igt_ppgtt_walk
  19. igt_ppgtt_pot
  20. igt_ppgtt_drunk
  21. igt_ppgtt_lowlevel
  22. igt_ppgtt_shrink
  23. igt_ppgtt_shrink_boom
  24. sort_holes
  25. exercise_ggtt
  26. igt_ggtt_fill
  27. igt_ggtt_walk
  28. igt_ggtt_pot
  29. igt_ggtt_drunk
  30. igt_ggtt_lowlevel
  31. igt_ggtt_page
  32. track_vma_bind
  33. exercise_mock
  34. igt_mock_fill
  35. igt_mock_walk
  36. igt_mock_pot
  37. igt_mock_drunk
  38. igt_gtt_reserve
  39. igt_gtt_insert
  40. i915_gem_gtt_mock_selftests
  41. i915_gem_gtt_live_selftests

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/selftests/mock_context.h"

#include "i915_random.h"
#include "i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        /*
         * As we may hold onto the struct_mutex for inordinate lengths of
         * time, the NMI khungtaskd detector may fire for the free objects
         * worker.
         */
        mutex_unlock(&i915->drm.struct_mutex);

        i915_gem_drain_freed_objects(i915);

        mutex_lock(&i915->drm.struct_mutex);
}

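/*
 * "Fake" objects: their scatterlists all point at a single dummy pfn
 * (PFN_BIAS) rather than real memory, letting these tests exercise
 * enormous GTT ranges without allocating real backing storage.
 */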
static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        /* One scatterlist entry for each 2GiB (BIT(31)) chunk of the object */
        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        /* Mark as purgeable: there is no real backing store to preserve */
        obj->mm.madv = I915_MADV_DONTNEED;

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
        obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

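/* Create a GEM object of the given size backed only by the fake pages. */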
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_ppgtt *ppgtt;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!HAS_PPGTT(dev_priv))
                return 0;

        ppgtt = __ppgtt_create(dev_priv);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here and so we could
         * address a much larger GTT than we could actually fit into
         * RAM, a practical limit is the number of physical pages in the
         * system. This should ensure that we do not run into the oomkiller
         * during the test and take down the machine wilfully.
         */
        limit = totalram_pages() << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
                                        size, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
                                                  last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
                                        last, size - last, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();
        }

err_ppgtt_cleanup:
        i915_vm_put(&ppgtt->vm);
        return err;
}

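/*
 * Exercise the low-level insert_entries/clear_range hooks directly,
 * bypassing the usual vma binding machinery: a zeroed stack vma carries
 * only the fields (pages, node.start, node.size) that the backend reads.
 */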
static int lowlevel_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        unsigned int size;
        struct i915_vma mock_vma;

        memset(&mock_vma, 0, sizeof(struct i915_vma));

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);
                        intel_wakeref_t wakeref;

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range &&
                            vm->allocate_va_range(vm, addr, BIT_ULL(size)))
                                break;

                        mock_vma.pages = obj->mm.pages;
                        mock_vma.node.size = BIT_ULL(size);
                        mock_vma.node.start = addr;

                        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
                        vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
                        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(i915);
        }

        return 0;
}

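/* Unbind, close and free every object left on the list. */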
static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);
                /* Only ppgtt vma may be closed before the object is freed */
                if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

static int fill_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMA working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                /* Pass 1: pin each object at its expected offset */
                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                /* Pass 2: check nothing has moved, then unbind */
                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                /* Pass 3: as pass 1, but walking the list backwards */
                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                /* Pass 4: as pass 2, but walking the list backwards */
                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

static int walk_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_close;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_close;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_close;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_close;
                        }
                }

err_close:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(i915);
        }

        return 0;
}

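/*
 * "pot" = power-of-two: straddle a pair of pages across every
 * power-of-two boundary within the hole, checking that each binding
 * lands exactly where requested.
 */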
static int pot_hole(struct drm_i915_private *i915,
                    struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err;
                }
        }

err:
        if (!i915_vma_is_ggtt(vma))
                i915_vma_close(vma);
err_obj:
        i915_gem_object_put(obj);
        return err;
}

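/*
 * Bind a single object at pseudo-randomly ordered offsets throughout
 * the hole, for every power-of-two object size that fits.
 */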
static int drunk_hole(struct drm_i915_private *i915,
                      struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err;
                        }
                }

err:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(i915);
        }

        return 0;
}

static int __shrink_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(i915);
        return err;
}

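/*
 * Re-run __shrink_hole with fault injection enabled on the vm, so that
 * allocations fail at every prime-numbered interval and the error
 * unwind paths get exercised.
 */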
static int shrink_hole(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

static int shrink_boom(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct drm_i915_private *i915,
                                      struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct drm_file *file;
        struct i915_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        int err;

        if (!HAS_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        mutex_lock(&dev_priv->drm.struct_mutex);
        ppgtt = i915_ppgtt_create(dev_priv);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(ppgtt->vm.closed);

        err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_vm_put(&ppgtt->vm);
out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        mock_file_free(dev_priv, file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

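/* list_sort() comparator: order drm_mm holes by ascending start address. */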
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

        mutex_lock(&i915->drm.struct_mutex);
restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

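/*
 * Bind the same physical page into many GGTT slots via vm->insert_page,
 * write a distinct dword through each slot in random order, then read
 * each dword back through another slot: since every PTE aliases the one
 * page, any misdirected PTE shows up as a mismatched value.
 */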
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        mutex_lock(&i915->drm.struct_mutex);

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_unlock;
        }

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        if (err)
                goto out_unpin;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        drm_mm_remove_node(&tmp);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

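/*
 * Mimic just enough of i915_vma_bind() for a mock vma: take the
 * bookkeeping references and move the vma onto the vm's bound list.
 */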
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        atomic_inc(&obj->bind_count); /* track for eviction later */
        __i915_gem_object_pin_pages(obj);

        vma->pages = obj->mm.pages;

        mutex_lock(&vma->vm->mutex);
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);
        mutex_unlock(&vma->vm->mutex);
}

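/*
 * Run a hole exerciser over the ppgtt of a mock context, capping the
 * range at the amount of physical RAM available.
 */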
1244 static int exercise_mock(struct drm_i915_private *i915,
1245                          int (*func)(struct drm_i915_private *i915,
1246                                      struct i915_address_space *vm,
1247                                      u64 hole_start, u64 hole_end,
1248                                      unsigned long end_time))
1249 {
1250         const u64 limit = totalram_pages() << PAGE_SHIFT;
1251         struct i915_gem_context *ctx;
1252         IGT_TIMEOUT(end_time);
1253         int err;
1254 
1255         ctx = mock_context(i915, "mock");
1256         if (!ctx)
1257                 return -ENOMEM;
1258 
1259         err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
1260 
1261         mock_context_close(ctx);
1262         return err;
1263 }
1264 
1265 static int igt_mock_fill(void *arg)
1266 {
1267         struct i915_ggtt *ggtt = arg;
1268 
1269         return exercise_mock(ggtt->vm.i915, fill_hole);
1270 }
1271 
1272 static int igt_mock_walk(void *arg)
1273 {
1274         struct i915_ggtt *ggtt = arg;
1275 
1276         return exercise_mock(ggtt->vm.i915, walk_hole);
1277 }
1278 
1279 static int igt_mock_pot(void *arg)
1280 {
1281         struct i915_ggtt *ggtt = arg;
1282 
1283         return exercise_mock(ggtt->vm.i915, pot_hole);
1284 }
1285 
1286 static int igt_mock_drunk(void *arg)
1287 {
1288         struct i915_ggtt *ggtt = arg;
1289 
1290         return exercise_mock(ggtt->vm.i915, drunk_hole);
1291 }
1292 
1293 static int igt_gtt_reserve(void *arg)
1294 {
1295         struct i915_ggtt *ggtt = arg;
1296         struct drm_i915_gem_object *obj, *on;
1297         LIST_HEAD(objects);
1298         u64 total;
1299         int err = -ENODEV;
1300 
1301         /* i915_gem_gtt_reserve() tries to reserve the precise range
1302          * for the node, and evicts if it has to. So our test checks that
1303          * it can give us the requsted space and prevent overlaps.
1304          */
1305 
1306         /* Start by filling the GGTT */
1307         for (total = 0;
1308              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1309              total += 2 * I915_GTT_PAGE_SIZE) {
1310                 struct i915_vma *vma;
1311 
1312                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1313                                                       2 * PAGE_SIZE);
1314                 if (IS_ERR(obj)) {
1315                         err = PTR_ERR(obj);
1316                         goto out;
1317                 }
1318 
1319                 err = i915_gem_object_pin_pages(obj);
1320                 if (err) {
1321                         i915_gem_object_put(obj);
1322                         goto out;
1323                 }
1324 
1325                 list_add(&obj->st_link, &objects);
1326 
1327                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1328                 if (IS_ERR(vma)) {
1329                         err = PTR_ERR(vma);
1330                         goto out;
1331                 }
1332 
1333                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1334                                            obj->base.size,
1335                                            total,
1336                                            obj->cache_level,
1337                                            0);
1338                 if (err) {
1339                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1340                                total, ggtt->vm.total, err);
1341                         goto out;
1342                 }
1343                 track_vma_bind(vma);
1344 
1345                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1346                 if (vma->node.start != total ||
1347                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1348                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1349                                vma->node.start, vma->node.size,
1350                                total, 2*I915_GTT_PAGE_SIZE);
1351                         err = -EINVAL;
1352                         goto out;
1353                 }
1354         }
1355 
1356         /* Now force evictions: offset by a page so each node overlaps two */
1357         for (total = I915_GTT_PAGE_SIZE;
1358              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1359              total += 2 * I915_GTT_PAGE_SIZE) {
1360                 struct i915_vma *vma;
1361 
1362                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1363                                                       2 * PAGE_SIZE);
1364                 if (IS_ERR(obj)) {
1365                         err = PTR_ERR(obj);
1366                         goto out;
1367                 }
1368 
1369                 err = i915_gem_object_pin_pages(obj);
1370                 if (err) {
1371                         i915_gem_object_put(obj);
1372                         goto out;
1373                 }
1374 
1375                 list_add(&obj->st_link, &objects);
1376 
1377                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1378                 if (IS_ERR(vma)) {
1379                         err = PTR_ERR(vma);
1380                         goto out;
1381                 }
1382 
1383                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1384                                            obj->base.size,
1385                                            total,
1386                                            obj->cache_level,
1387                                            0);
1388                 if (err) {
1389                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1390                                total, ggtt->vm.total, err);
1391                         goto out;
1392                 }
1393                 track_vma_bind(vma);
1394 
1395                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1396                 if (vma->node.start != total ||
1397                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1398                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1399                                vma->node.start, vma->node.size,
1400                                total, 2*I915_GTT_PAGE_SIZE);
1401                         err = -EINVAL;
1402                         goto out;
1403                 }
1404         }
1405 
1406         /* And then try rebinding each object at a random offset */
1407         list_for_each_entry_safe(obj, on, &objects, st_link) {
1408                 struct i915_vma *vma;
1409                 u64 offset;
1410 
1411                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1412                 if (IS_ERR(vma)) {
1413                         err = PTR_ERR(vma);
1414                         goto out;
1415                 }
1416 
1417                 err = i915_vma_unbind(vma);
1418                 if (err) {
1419                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1420                         goto out;
1421                 }
1422 
1423                 offset = random_offset(0, ggtt->vm.total,
1424                                        2*I915_GTT_PAGE_SIZE,
1425                                        I915_GTT_MIN_ALIGNMENT);
1426 
1427                 err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
1428                                            obj->base.size,
1429                                            offset,
1430                                            obj->cache_level,
1431                                            0);
1432                 if (err) {
1433                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1434                                offset, ggtt->vm.total, err);
1435                         goto out;
1436                 }
1437                 track_vma_bind(vma);
1438 
1439                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1440                 if (vma->node.start != offset ||
1441                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1442                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1443                                vma->node.start, vma->node.size,
1444                                offset, 2*I915_GTT_PAGE_SIZE);
1445                         err = -EINVAL;
1446                         goto out;
1447                 }
1448         }
1449 
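             /* Common exit: drop the pages and references taken for each object */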
1450 out:
1451         list_for_each_entry_safe(obj, on, &objects, st_link) {
1452                 i915_gem_object_unpin_pages(obj);
1453                 i915_gem_object_put(obj);
1454         }
1455         return err;
1456 }
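
     /*
      * Illustrative only: a minimal sketch of the create/instance/reserve
      * sequence the three passes above repeat. reserve_obj_at() is a
      * hypothetical helper, not an existing i915 API, and is not wired
      * into the subtest list; it assumes the caller has already pinned
      * the object's pages and holds struct_mutex, as the test does.
      */
     static int __maybe_unused reserve_obj_at(struct i915_ggtt *ggtt,
                                              struct drm_i915_gem_object *obj,
                                              u64 offset)
     {
             struct i915_vma *vma;
             int err;

             vma = i915_vma_instance(obj, &ggtt->vm, NULL);
             if (IS_ERR(vma))
                     return PTR_ERR(vma);

             /* Claim exactly [offset, offset + size), evicting any overlap */
             err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                        obj->base.size, offset,
                                        obj->cache_level, 0);
             if (err)
                     return err;

             track_vma_bind(vma);
             GEM_BUG_ON(vma->node.start != offset);
             return 0;
     }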
1457 
1458 static int igt_gtt_insert(void *arg)
1459 {
1460         struct i915_ggtt *ggtt = arg;
1461         struct drm_i915_gem_object *obj, *on;
1462         struct drm_mm_node tmp = {};
1463         const struct invalid_insert {
1464                 u64 size;
1465                 u64 alignment;
1466                 u64 start, end;
1467         } invalid_insert[] = {
1468                 {
1469                         ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1470                         0, ggtt->vm.total,
1471                 },
1472                 {
1473                         2*I915_GTT_PAGE_SIZE, 0,
1474                         0, I915_GTT_PAGE_SIZE,
1475                 },
1476                 {
1477                         -(u64)I915_GTT_PAGE_SIZE, 0,
1478                         0, 4*I915_GTT_PAGE_SIZE,
1479                 },
1480                 {
1481                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1482                         0, 4*I915_GTT_PAGE_SIZE,
1483                 },
1484                 {
1485                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1486                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1487                 },
1488                 {}
1489         }, *ii;
1490         LIST_HEAD(objects);
1491         u64 total;
1492         int err = -ENODEV;
1493 
1494         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1495          * for the node, evicting if required.
1496          */
1497 
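             /*
              * Each invalid_insert[] entry must be rejected with -ENOSPC: a
              * node larger than the whole GTT, a node larger than its range,
              * two wrap-around sizes near U64_MAX, and an alignment that
              * leaves no legal offset inside [start, end).
              */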
1498         /* Check a few obviously invalid requests */
1499         for (ii = invalid_insert; ii->size; ii++) {
1500                 err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
1501                                           ii->size, ii->alignment,
1502                                           I915_COLOR_UNEVICTABLE,
1503                                           ii->start, ii->end,
1504                                           0);
1505                 if (err != -ENOSPC) {
1506                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) did not return -ENOSPC (err=%d)\n",
1507                                ii->size, ii->alignment, ii->start, ii->end,
1508                                err);
1509                         return -EINVAL;
1510                 }
1511         }
1512 
1513         /* Start by filling the GGTT */
1514         for (total = 0;
1515              total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1516              total += I915_GTT_PAGE_SIZE) {
1517                 struct i915_vma *vma;
1518 
1519                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1520                                                       I915_GTT_PAGE_SIZE);
1521                 if (IS_ERR(obj)) {
1522                         err = PTR_ERR(obj);
1523                         goto out;
1524                 }
1525 
1526                 err = i915_gem_object_pin_pages(obj);
1527                 if (err) {
1528                         i915_gem_object_put(obj);
1529                         goto out;
1530                 }
1531 
1532                 list_add(&obj->st_link, &objects);
1533 
1534                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1535                 if (IS_ERR(vma)) {
1536                         err = PTR_ERR(vma);
1537                         goto out;
1538                 }
1539 
1540                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1541                                           obj->base.size, 0, obj->cache_level,
1542                                           0, ggtt->vm.total,
1543                                           0);
1544                 if (err == -ENOSPC) {
1545                         /* maxed out the GGTT space */
1546                         i915_gem_object_put(obj);
1547                         break;
1548                 }
1549                 if (err) {
1550                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1551                                total, ggtt->vm.total, err);
1552                         goto out;
1553                 }
1554                 track_vma_bind(vma);
1555                 __i915_vma_pin(vma);
1556 
1557                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1558         }
1559 
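             /* The pin taken above must have kept every node in place */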
1560         list_for_each_entry(obj, &objects, st_link) {
1561                 struct i915_vma *vma;
1562 
1563                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1564                 if (IS_ERR(vma)) {
1565                         err = PTR_ERR(vma);
1566                         goto out;
1567                 }
1568 
1569                 if (!drm_mm_node_allocated(&vma->node)) {
1570                         pr_err("VMA was unexpectedly evicted!\n");
1571                         err = -EINVAL;
1572                         goto out;
1573                 }
1574 
1575                 __i915_vma_unpin(vma);
1576         }
1577 
1578         /* If we then reinsert, we should find the same hole */
1579         list_for_each_entry_safe(obj, on, &objects, st_link) {
1580                 struct i915_vma *vma;
1581                 u64 offset;
1582 
1583                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1584                 if (IS_ERR(vma)) {
1585                         err = PTR_ERR(vma);
1586                         goto out;
1587                 }
1588 
1589                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1590                 offset = vma->node.start;
1591 
1592                 err = i915_vma_unbind(vma);
1593                 if (err) {
1594                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1595                         goto out;
1596                 }
1597 
1598                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1599                                           obj->base.size, 0, obj->cache_level,
1600                                           0, ggtt->vm.total,
1601                                           0);
1602                 if (err) {
1603                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1604                                total, ggtt->vm.total, err);
1605                         goto out;
1606                 }
1607                 track_vma_bind(vma);
1608 
1609                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1610                 if (vma->node.start != offset) {
1611                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1612                                offset, vma->node.start);
1613                         err = -EINVAL;
1614                         goto out;
1615                 }
1616         }
1617 
1618         /* And then force evictions; with the GGTT full, each insert must evict */
1619         for (total = 0;
1620              total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1621              total += 2 * I915_GTT_PAGE_SIZE) {
1622                 struct i915_vma *vma;
1623 
1624                 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1625                                                       2 * I915_GTT_PAGE_SIZE);
1626                 if (IS_ERR(obj)) {
1627                         err = PTR_ERR(obj);
1628                         goto out;
1629                 }
1630 
1631                 err = i915_gem_object_pin_pages(obj);
1632                 if (err) {
1633                         i915_gem_object_put(obj);
1634                         goto out;
1635                 }
1636 
1637                 list_add(&obj->st_link, &objects);
1638 
1639                 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1640                 if (IS_ERR(vma)) {
1641                         err = PTR_ERR(vma);
1642                         goto out;
1643                 }
1644 
1645                 err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
1646                                           obj->base.size, 0, obj->cache_level,
1647                                           0, ggtt->vm.total,
1648                                           0);
1649                 if (err) {
1650                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1651                                total, ggtt->vm.total, err);
1652                         goto out;
1653                 }
1654                 track_vma_bind(vma);
1655 
1656                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1657         }
1658 
1659 out:
1660         list_for_each_entry_safe(obj, on, &objects, st_link) {
1661                 i915_gem_object_unpin_pages(obj);
1662                 i915_gem_object_put(obj);
1663         }
1664         return err;
1665 }
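
     /*
      * Illustrative only: the matching sketch for i915_gem_gtt_insert(),
      * which searches for any suitable free offset in [0, vm.total)
      * rather than taking a caller-chosen one. insert_obj_anywhere() is
      * a hypothetical name, not an existing i915 API, and is not part
      * of the subtest list.
      */
     static int __maybe_unused insert_obj_anywhere(struct i915_ggtt *ggtt,
                                                   struct drm_i915_gem_object *obj)
     {
             struct i915_vma *vma;
             int err;

             vma = i915_vma_instance(obj, &ggtt->vm, NULL);
             if (IS_ERR(vma))
                     return PTR_ERR(vma);

             /* Let the allocator pick the offset, evicting if the GTT is full */
             err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
                                       obj->base.size, 0, obj->cache_level,
                                       0, ggtt->vm.total, 0);
             if (err)
                     return err;

             track_vma_bind(vma);
             return 0;
     }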
1666 
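     /*
      * The mock entry point builds a fake device and a standalone GGTT,
      * then runs the subtests under struct_mutex, which the exercisers
      * expect to be held.
      */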
1667 int i915_gem_gtt_mock_selftests(void)
1668 {
1669         static const struct i915_subtest tests[] = {
1670                 SUBTEST(igt_mock_drunk),
1671                 SUBTEST(igt_mock_walk),
1672                 SUBTEST(igt_mock_pot),
1673                 SUBTEST(igt_mock_fill),
1674                 SUBTEST(igt_gtt_reserve),
1675                 SUBTEST(igt_gtt_insert),
1676         };
1677         struct drm_i915_private *i915;
1678         struct i915_ggtt *ggtt;
1679         int err;
1680 
1681         i915 = mock_gem_device();
1682         if (!i915)
1683                 return -ENOMEM;
1684 
1685         ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
1686         if (!ggtt) {
1687                 err = -ENOMEM;
1688                 goto out_put;
1689         }
1690         mock_init_ggtt(i915, ggtt);
1691 
1692         mutex_lock(&i915->drm.struct_mutex);
1693         err = i915_subtests(tests, ggtt);
1694         mock_device_flush(i915);
1695         mutex_unlock(&i915->drm.struct_mutex);
1696 
1697         i915_gem_drain_freed_objects(i915);
1698 
1699         mock_fini_ggtt(ggtt);
1700         kfree(ggtt);
1701 out_put:
1702         drm_dev_put(&i915->drm);
1703         return err;
1704 }
1705 
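     /* The live entry point runs against the real device's ppGTT and GGTT */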
1706 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1707 {
1708         static const struct i915_subtest tests[] = {
1709                 SUBTEST(igt_ppgtt_alloc),
1710                 SUBTEST(igt_ppgtt_lowlevel),
1711                 SUBTEST(igt_ppgtt_drunk),
1712                 SUBTEST(igt_ppgtt_walk),
1713                 SUBTEST(igt_ppgtt_pot),
1714                 SUBTEST(igt_ppgtt_fill),
1715                 SUBTEST(igt_ppgtt_shrink),
1716                 SUBTEST(igt_ppgtt_shrink_boom),
1717                 SUBTEST(igt_ggtt_lowlevel),
1718                 SUBTEST(igt_ggtt_drunk),
1719                 SUBTEST(igt_ggtt_walk),
1720                 SUBTEST(igt_ggtt_pot),
1721                 SUBTEST(igt_ggtt_fill),
1722                 SUBTEST(igt_ggtt_page),
1723         };
1724 
1725         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
1726 
1727         return i915_subtests(tests, i915);
1728 }
