iova.c — excerpts from the Linux IOVA (I/O virtual address) allocator.
Elided source is marked /* ... */.

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/* ... granule sanity checks ... */
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node **entry, *curr;
	unsigned long flags, saved_pfn;
	unsigned int pad_size = 0;	/* set in the elided scan when size_aligned */

	/* Walk the tree backwards under the lock. */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	/* ... scan down from curr for a gap big enough for size plus any
	   alignment padding, lowering limit_pfn past allocated ranges ... */

	if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}

	/* ... fill in new->pfn_lo/pfn_hi, then insert @new keyed by pfn_lo: */
	entry = &iovad->rbroot.rb_node;
	/* ... descend to a leaf, rb_link_node(), then rebalance ... */
	rb_insert_color(&new->node, &iovad->rbroot);
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * ...
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set, the allocation is naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn, bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	/* ... allocate new_iova; return NULL if that fails ... */
	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);
	/* ... on failure, free new_iova and return NULL ... */
	return new_iova;
}

/**
 * find_iova - finds the iova covering a given pfn
 * @iovad: - iova domain in question.
 * ...
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return iova;
		}
		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
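For orientation, a minimal usage sketch of this API, written as a hypothetical
driver-setup function (my_dma_setup, the 4 KiB granule, the start pfn of 1, and
the reserved window are all illustrative, not from iova.c; reserve_iova() and
the free/teardown entry points appear further down in the excerpt):

#include <linux/iova.h>
#include <linux/dma-mapping.h>

static struct iova_domain my_domain;	/* hypothetical domain */

static int my_dma_setup(void)
{
	unsigned long shift = 12;	/* 4 KiB granule, matching the IOMMU page size */
	unsigned long pfn_32bit = DMA_BIT_MASK(32) >> shift;
	struct iova *iova;

	/* One-time setup: empty rbtree, no cached node, granule bookkeeping. */
	init_iova_domain(&my_domain, 1UL << shift, 1, pfn_32bit);

	/* Keep alloc_iova() away from a range the hardware owns. */
	reserve_iova(&my_domain, 0xfee00, 0xfeeff);	/* illustrative pfns */

	/* 16 size-aligned page frames of IOVA space below 4 GiB. */
	iova = alloc_iova(&my_domain, 16, pfn_32bit, true);
	if (!iova)
		return -ENOMEM;

	/* ... program the IOMMU for iova->pfn_lo..iova->pfn_hi, do DMA ... */

	free_iova(&my_domain, iova->pfn_lo);	/* find_iova() + __free_iova() */
	put_iova_domain(&my_domain);
	return 0;
}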
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * ...
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	/* ... allocate and initialize an iova covering [pfn_lo, pfn_hi] ... */
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * Reserves the range pfn_lo..pfn_hi so that it is never handed out
 * by alloc_iova().
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		/* ... trim pfn_lo..pfn_hi against overlapping nodes; if the
		   range is already fully contained, skip the insert ... */
	}

	/* Insert whatever part of the range is not yet covered. */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	/* ... if pfn_lo/pfn_hi cut into @iova, allocate prev/next nodes for
	   the leftover head and tail; on allocation failure, goto error ... */

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	if (prev)
		free_iova_mem(prev);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
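Note that split_and_remove_iova() returns the trimmed middle piece already
erased from the rbtree, with the leftover head and tail re-inserted as nodes
of their own. A hedged sketch of how a caller might use that to release part
of an existing allocation (punch_hole and its error codes are hypothetical;
the pattern resembles the memory-hotplug user in intel-iommu):

/* Hypothetical helper: release [hole_lo, hole_hi] out of whatever
 * allocation currently covers hole_lo. */
static int punch_hole(struct iova_domain *iovad,
		      unsigned long hole_lo, unsigned long hole_hi)
{
	struct iova *iova = find_iova(iovad, hole_lo);

	if (!iova)
		return -ENOENT;

	iova = split_and_remove_iova(iovad, iova, hole_lo, hole_hi);
	if (!iova)
		return -ENOMEM;	/* couldn't allocate the leftover nodes */

	/*
	 * The returned node is already out of the tree (rb_erase() above),
	 * so free only its memory; __free_iova() would erase it twice.
	 */
	free_iova_mem(iova);
	return 0;
}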
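The excerpt calls iova_insert_rbtree() without showing its body. A sketch of
such an insert helper, keyed by pfn_lo like the lookup in find_iova() and
following the standard pattern from Documentation/rbtree.txt (this body is a
reconstruction under those assumptions, not the verbatim iova.c helper):

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Descend to the leaf where pfn_lo ordering places the node. */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;
		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG();	/* ranges in the tree never share a pfn_lo */
	}
	/* Link the new node, then rebalance. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}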