This source file includes the following definitions:
- save_stack
- show_leaks
- INTERVAL_TREE_DEFINE
- drm_mm_interval_tree_add_node
- rb_to_hole_size
- insert_hole_size
- add_hole
- rm_hole
- rb_hole_size_to_node
- rb_hole_addr_to_node
- rb_hole_size
- best_hole
- find_hole
- first_hole
- next_hole
- drm_mm_reserve_node
- rb_to_hole_size_or_zero
- drm_mm_insert_node_in_range
- drm_mm_remove_node
- drm_mm_replace_node
- drm_mm_scan_init_with_range
- drm_mm_scan_add_block
- drm_mm_scan_remove_block
- drm_mm_scan_color_evict
- drm_mm_init
- drm_mm_takedown
- drm_mm_dump_hole
- drm_mm_print
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

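/*
 * drm_mm is the DRM range allocator: a small allocator for managing an
 * address range (GPU virtual address space, VRAM, aperture space and the
 * like). Nodes are embedded in driver-side structures, so the allocator
 * itself performs no memory allocation on insert or remove. The free
 * space ("holes") following each node is indexed by size and by address
 * in rbtrees, plus a list used by the eviction scan helpers.
 */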
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
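
	/* May be called under a spinlock, so avoid a sleeping allocation */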
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

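/*
 * Instantiate an augmented interval tree over all allocated nodes, keyed
 * on [START(node), LAST(node)]; this generates the
 * drm_mm_interval_tree_iter_first/next() helpers used below.
 */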
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

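/*
 * Each hole (the free span following a node) is tracked three ways at
 * once: in the size-sorted mm->holes_size rbtree for best-fit searches,
 * in the address-sorted mm->holes_addr rbtree for lookups by offset, and
 * on the mm->hole_stack list for the eviction scan.
 */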
static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

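/*
 * Pick the hole to start the search from, according to the requested
 * insertion mode: best fit by size, lowest or highest address, or the
 * most recently freed hole when evicting.
 */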
static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

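/*
 * drm_mm_reserve_node - insert a pre-initialized node into the allocator
 *
 * The caller fills in @node->start and @node->size (and optionally
 * @node->color); this is typically used to claim fixed ranges such as an
 * inherited firmware framebuffer. Returns -ENOSPC if the range wraps, is
 * already occupied, or is shrunk away by the color adjustment.
 */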
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

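	/* Find the relevant hole to add our node to */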
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
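
/*
 * A minimal usage sketch for drm_mm_reserve_node(); "my_mm" and the
 * offsets are hypothetical. The node must stay allocated for as long as
 * the reservation is held:
 *
 *	static struct drm_mm_node fw_fb = {
 *		.start = 0x100000,
 *		.size = 0x80000,
 *	};
 *	int err;
 *
 *	err = drm_mm_reserve_node(&my_mm, &fw_fb);
 *	if (err)
 *		return err;
 */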

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

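/*
 * drm_mm_insert_node_in_range - search for a hole and claim it
 *
 * Searches the holes of @mm restricted to [@range_start, @range_end) for
 * space of @size at @alignment (with @color passed to the optional
 * color_adjust callback), walking them in the order dictated by @mode,
 * and fills in @node on success. Returns -ENOSPC if nothing fits.
 */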
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

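/*
 * A minimal allocation sketch; "my_mm" is hypothetical and the node is
 * freed again on failure:
 *
 *	struct drm_mm_node *node;
 *	int err;
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	if (!node)
 *		return -ENOMEM;
 *
 *	err = drm_mm_insert_node_in_range(&my_mm, node, 4096, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (err) {
 *		kfree(node);
 *		return err;
 *	}
 */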
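/*
 * drm_mm_remove_node - take @node out of the allocator
 *
 * The freed range is merged back into the hole preceding @node. The node
 * must be currently allocated and must not be part of an active eviction
 * scan; the memory backing the node itself remains the caller's to free.
 */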
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

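/*
 * drm_mm_replace_node - move an allocation from @old to @new
 *
 * @new takes over @old's position in the node list, the interval tree
 * and, if a hole follows, the hole rbtrees and hole_stack; @old is left
 * unallocated.
 */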
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);

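/*
 * Eviction scanning: when an insertion fails, a driver can build up a
 * roster of eviction candidates. drm_mm_scan_init_with_range() primes
 * @scan with the same size/alignment/color/mode parameters as the failed
 * insertion, restricted to [@start, @end). Candidates are then fed to
 * drm_mm_scan_add_block() until it reports a hit, after which every
 * candidate must be passed back through drm_mm_scan_remove_block() in
 * reverse order; the ones for which it returns true overlap the selected
 * hole and must be evicted. No other modifications of @mm are allowed
 * while a scan is active.
 */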
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

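/*
 * A sketch of the scan protocol; the driver-side pieces (my_mm, my_lru,
 * struct my_obj with its embedded drm_mm_node and evict_link, and
 * my_evict()) are hypothetical:
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(evict);
 *
 *	drm_mm_scan_init_with_range(&scan, &my_mm, size, 0, 0,
 *				    0, U64_MAX, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &my_lru, lru) {
 *		list_add(&obj->evict_link, &evict);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *
 *	(Prepending to "evict" means walking it forwards visits the
 *	blocks in reverse order of addition, as required. If the scan
 *	never hit, drm_mm_scan_remove_block() returns false throughout.)
 *
 *	list_for_each_entry_safe(obj, next, &evict, evict_link)
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			my_evict(obj);
 */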
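/*
 * drm_mm_scan_add_block - offer @node as an eviction candidate
 *
 * Temporarily unlinks @node so that the hole it leaves behind merges
 * with its neighbours, then checks whether the combined hole satisfies
 * the scan. Returns true once a suitable hole has been assembled.
 */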
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

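	/*
	 * Remove this block from the node_list so that the hole grows to
	 * span the gap between its neighbours, but without poisoning the
	 * links, so that drm_mm_scan_remove_block() can restore them.
	 */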
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

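/*
 * drm_mm_scan_remove_block - restore a candidate after a scan
 *
 * Must be called on every node previously passed to
 * drm_mm_scan_add_block(), in the reverse order of addition. Returns
 * true if the node overlaps the hole selected by the scan and therefore
 * needs to be evicted to make room.
 */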
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

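	/*
	 * drm_mm_scan_add_block() decoupled this node but left its link
	 * pointers intact; splice it back in, and assert that the caller
	 * really is unwinding in the reverse order.
	 */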
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

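/*
 * drm_mm_scan_color_evict - check for a color-induced straggler
 *
 * After a successful scan, color adjustment may shrink the chosen hole
 * so that one more neighbouring node has to go. Returns that node, or
 * NULL if the hole is usable as-is.
 */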
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

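	/*
	 * The hole found during scanning should ideally be the first
	 * element in the hole_stack list, but due to side effects in the
	 * driver it may not be.
	 */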
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

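	/* We should only be called after having found a hole previously */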
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

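/*
 * drm_mm_init - set up @mm to manage the range [@start, @start + @size)
 *
 * All state lives in the embedded structure; nothing is allocated here.
 * Drivers that need color adjustment hook up mm->color_adjust afterwards.
 */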
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

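	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * the head node is placed at the end of the range with a size that
	 * wraps around to the start, so the whole range appears as the
	 * single hole following it.
	 */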
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

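/*
 * drm_mm_takedown - tear down @mm, which must be empty
 *
 * Warns if allocations are still present; with CONFIG_DRM_DEBUG_MM the
 * saved allocation stack of each leaked node is dumped as well.
 */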
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

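/*
 * drm_mm_print - dump every node and hole in @mm to @p, followed by a
 * summary of the total, used and free space.
 */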
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);