root/drivers/misc/habanalabs/memory.c

DEFINITIONS

This source file includes the following definitions:
  1. alloc_device_memory
  2. get_userptr_from_host_va
  3. free_userptr
  4. dram_pg_pool_do_release
  5. free_phys_pg_pack
  6. free_device_memory
  7. clear_va_list_locked
  8. print_va_list_locked
  9. merge_va_blocks_locked
  10. add_va_block_locked
  11. add_va_block
  12. get_va_block
  13. get_sg_info
  14. init_phys_pg_pack_from_userptr
  15. map_phys_page_pack
  16. get_paddr_from_handle
  17. map_device_va
  18. unmap_device_va
  19. mem_ioctl_no_mmu
  20. hl_mem_ioctl
  21. hl_pin_host_memory
  22. hl_unpin_host_memory
  23. hl_userptr_delete_list
  24. hl_userptr_is_pinned
  25. hl_va_range_init
  26. hl_vm_ctx_init_with_ranges
  27. hl_vm_ctx_init
  28. hl_va_range_fini
  29. hl_vm_ctx_fini
  30. hl_vm_init
  31. hl_vm_fini

   1 // SPDX-License-Identifier: GPL-2.0
   2 
   3 /*
   4  * Copyright 2016-2019 HabanaLabs, Ltd.
   5  * All Rights Reserved.
   6  */
   7 
   8 #include <uapi/misc/habanalabs.h>
   9 #include "habanalabs.h"
  10 #include "include/hw_ip/mmu/mmu_general.h"
  11 
  12 #include <linux/uaccess.h>
  13 #include <linux/slab.h>
  14 #include <linux/genalloc.h>
  15 
  16 #define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
  17 #define HL_MMU_DEBUG    0
  18 
  19 /*
  20  * The va ranges in context object contain a list with the available chunks of
  21  * device virtual memory.
  22  * There is one range for host allocations and one for DRAM allocations.
  23  *
  24  * On initialization, each range contains one chunk that spans its entire
  25  * available virtual space, which is half of the total device virtual range.
  26  *
  27  * On each mapping of physical pages, a suitable virtual range chunk (of at
  28  * least the requested size) is selected from the list. If the chunk size
  29  * equals the requested size, the chunk is returned. Otherwise, the chunk is
  30  * split in two - one part is returned and the remainder stays in the list.
  31  *
  32  * On each unmapping of a virtual address, the relevant virtual chunk is
  33  * returned to the list, and if its edges match those of the adjacent chunks
  34  * (meaning a contiguous chunk can be created), the chunks are merged.
  35  *
  36  * On context teardown, the list is checked to contain only one chunk that
  37  * spans the whole relevant virtual range (half of the device total virtual
  38  * range). If not (meaning some mappings were not unmapped), a warning is
  39  * printed.
  40  */
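/*
 * Illustration (hypothetical numbers): if the host range initially holds one
 * chunk [0x1000000, 0x1FFFFFF] and a 0x200000-byte mapping is requested, the
 * chunk is split into the returned block [0x1000000, 0x11FFFFF] and the
 * remainder [0x1200000, 0x1FFFFFF], which stays in the list. When that block
 * is later unmapped, its end + 1 equals the remainder's start, so the two are
 * merged back into the single chunk [0x1000000, 0x1FFFFFF].
 */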
  41 
  42 /*
  43  * alloc_device_memory - allocate device memory
  44  *
  45  * @ctx                 : current context
  46  * @args                : host parameters containing the requested size
  47  * @ret_handle          : result handle
  48  *
  49  * This function does the following:
  50  * - Allocate the requested size rounded up to whole DRAM pages
  51  * - Return unique handle
  52  */
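/*
 * For example, with a hypothetical DRAM page size of 2MB, a 3MB allocation
 * request is rounded up to two pages, i.e. a total_size of 4MB.
 */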
  53 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
  54                                 u32 *ret_handle)
  55 {
  56         struct hl_device *hdev = ctx->hdev;
  57         struct hl_vm *vm = &hdev->vm;
  58         struct hl_vm_phys_pg_pack *phys_pg_pack;
  59         u64 paddr = 0, total_size, num_pgs, i;
  60         u32 num_curr_pgs, page_size, page_shift;
  61         int handle, rc;
  62         bool contiguous;
  63 
  64         num_curr_pgs = 0;
  65         page_size = hdev->asic_prop.dram_page_size;
  66         page_shift = __ffs(page_size);
  67         num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
  68         total_size = num_pgs << page_shift;
  69 
  70         contiguous = args->flags & HL_MEM_CONTIGUOUS;
  71 
  72         if (contiguous) {
  73                 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
  74                 if (!paddr) {
  75                         dev_err(hdev->dev,
  76                                 "failed to allocate %llu huge contiguous pages\n",
  77                                 num_pgs);
  78                         return -ENOMEM;
  79                 }
  80         }
  81 
  82         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
  83         if (!phys_pg_pack) {
  84                 rc = -ENOMEM;
  85                 goto pages_pack_err;
  86         }
  87 
  88         phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
  89         phys_pg_pack->asid = ctx->asid;
  90         phys_pg_pack->npages = num_pgs;
  91         phys_pg_pack->page_size = page_size;
  92         phys_pg_pack->total_size = total_size;
  93         phys_pg_pack->flags = args->flags;
  94         phys_pg_pack->contiguous = contiguous;
  95 
  96         phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
  97         if (!phys_pg_pack->pages) {
  98                 rc = -ENOMEM;
  99                 goto pages_arr_err;
 100         }
 101 
 102         if (phys_pg_pack->contiguous) {
 103                 for (i = 0 ; i < num_pgs ; i++)
 104                         phys_pg_pack->pages[i] = paddr + i * page_size;
 105         } else {
 106                 for (i = 0 ; i < num_pgs ; i++) {
 107                         phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
 108                                                         vm->dram_pg_pool,
 109                                                         page_size);
 110                         if (!phys_pg_pack->pages[i]) {
 111                                 dev_err(hdev->dev,
 112                                         "Failed to allocate device memory (out of memory)\n");
 113                                 rc = -ENOMEM;
 114                                 goto page_err;
 115                         }
 116 
 117                         num_curr_pgs++;
 118                 }
 119         }
 120 
 121         spin_lock(&vm->idr_lock);
 122         handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
 123                                 GFP_ATOMIC);
 124         spin_unlock(&vm->idr_lock);
 125 
 126         if (handle < 0) {
 127                 dev_err(hdev->dev, "Failed to get handle for page\n");
 128                 rc = -EFAULT;
 129                 goto idr_err;
 130         }
 131 
 132         for (i = 0 ; i < num_pgs ; i++)
 133                 kref_get(&vm->dram_pg_pool_refcount);
 134 
 135         phys_pg_pack->handle = handle;
 136 
 137         atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
 138         atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
 139 
 140         *ret_handle = handle;
 141 
 142         return 0;
 143 
 144 idr_err:
 145 page_err:
 146         if (!phys_pg_pack->contiguous)
 147                 for (i = 0 ; i < num_curr_pgs ; i++)
 148                         gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
 149                                         page_size);
 150 
 151         kvfree(phys_pg_pack->pages);
 152 pages_arr_err:
 153         kfree(phys_pg_pack);
 154 pages_pack_err:
 155         if (contiguous)
 156                 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
 157 
 158         return rc;
 159 }
 160 
 161 /*
 162  * get_userptr_from_host_va - initialize userptr structure from given host
 163  *                            virtual address
 164  *
 165  * @hdev                : habanalabs device structure
 166  * @args                : parameters containing the virtual address and size
 167  * @p_userptr           : pointer to result userptr structure
 168  *
 169  * This function does the following:
 170  * - Allocate userptr structure
 171  * - Pin the given host memory using the userptr structure
 172  * - Perform DMA mapping to have the DMA addresses of the pages
 173  */
 174 static int get_userptr_from_host_va(struct hl_device *hdev,
 175                 struct hl_mem_in *args, struct hl_userptr **p_userptr)
 176 {
 177         struct hl_userptr *userptr;
 178         int rc;
 179 
 180         userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
 181         if (!userptr) {
 182                 rc = -ENOMEM;
 183                 goto userptr_err;
 184         }
 185 
 186         rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
 187                         args->map_host.mem_size, userptr);
 188         if (rc) {
 189                 dev_err(hdev->dev, "Failed to pin host memory\n");
 190                 goto pin_err;
 191         }
 192 
 193         rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
 194                                         userptr->sgt->nents, DMA_BIDIRECTIONAL);
 195         if (rc) {
 196                 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
 197                 goto dma_map_err;
 198         }
 199 
 200         userptr->dma_mapped = true;
 201         userptr->dir = DMA_BIDIRECTIONAL;
 202         userptr->vm_type = VM_TYPE_USERPTR;
 203 
 204         *p_userptr = userptr;
 205 
 206         return 0;
 207 
 208 dma_map_err:
 209         hl_unpin_host_memory(hdev, userptr);
 210 pin_err:
 211         kfree(userptr);
 212 userptr_err:
 213 
 214         return rc;
 215 }
 216 
 217 /*
 218  * free_userptr - free userptr structure
 219  *
 220  * @hdev                : habanalabs device structure
 221  * @userptr             : userptr to free
 222  *
 223  * This function does the following:
 224  * - Unpins the physical pages
 225  * - Frees the userptr structure
 226  */
 227 static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
 228 {
 229         hl_unpin_host_memory(hdev, userptr);
 230         kfree(userptr);
 231 }
 232 
 233 /*
 234  * dram_pg_pool_do_release - free DRAM pages pool
 235  *
 236  * @ref                 : pointer to reference object
 237  *
 238  * This function does the following:
 239  * - Frees the idr structure of physical pages handles
 240  * - Frees the generic pool of DRAM physical pages
 241  */
 242 static void dram_pg_pool_do_release(struct kref *ref)
 243 {
 244         struct hl_vm *vm = container_of(ref, struct hl_vm,
 245                         dram_pg_pool_refcount);
 246 
 247         /*
 248          * free the idr here as only here we know for sure that there are no
 249          * allocated physical pages and hence there are no handles in use
 250          */
 251         idr_destroy(&vm->phys_pg_pack_handles);
 252         gen_pool_destroy(vm->dram_pg_pool);
 253 }
 254 
 255 /*
 256  * free_phys_pg_pack   - free physical page pack
 257  *
 258  * @hdev               : habanalabs device structure
 259  * @phys_pg_pack       : physical page pack to free
 260  *
 261  * This function does the following:
 262  * - For DRAM memory only, iterate over the pack and free each physical block
 263  *   structure by returning it to the general pool
 264  * - Free the hl_vm_phys_pg_pack structure
 265  */
 266 static void free_phys_pg_pack(struct hl_device *hdev,
 267                 struct hl_vm_phys_pg_pack *phys_pg_pack)
 268 {
 269         struct hl_vm *vm = &hdev->vm;
 270         u64 i;
 271 
 272         if (!phys_pg_pack->created_from_userptr) {
 273                 if (phys_pg_pack->contiguous) {
 274                         gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
 275                                         phys_pg_pack->total_size);
 276 
 277                         for (i = 0; i < phys_pg_pack->npages ; i++)
 278                                 kref_put(&vm->dram_pg_pool_refcount,
 279                                         dram_pg_pool_do_release);
 280                 } else {
 281                         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 282                                 gen_pool_free(vm->dram_pg_pool,
 283                                                 phys_pg_pack->pages[i],
 284                                                 phys_pg_pack->page_size);
 285                                 kref_put(&vm->dram_pg_pool_refcount,
 286                                         dram_pg_pool_do_release);
 287                         }
 288                 }
 289         }
 290 
 291         kvfree(phys_pg_pack->pages);
 292         kfree(phys_pg_pack);
 293 }
 294 
 295 /*
 296  * free_device_memory - free device memory
 297  *
 298  * @ctx                 : current context
 299  * @handle              : handle of the memory chunk to free
 300  *
 301  * This function does the following:
 302  * - Free the device memory related to the given handle
 303  */
 304 static int free_device_memory(struct hl_ctx *ctx, u32 handle)
 305 {
 306         struct hl_device *hdev = ctx->hdev;
 307         struct hl_vm *vm = &hdev->vm;
 308         struct hl_vm_phys_pg_pack *phys_pg_pack;
 309 
 310         spin_lock(&vm->idr_lock);
 311         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
 312         if (phys_pg_pack) {
 313                 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
 314                         dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
 315                                 handle);
 316                         spin_unlock(&vm->idr_lock);
 317                         return -EINVAL;
 318                 }
 319 
 320                 /*
 321                  * must remove from idr before the freeing of the physical
 322                  * pages as the refcount of the pool is also the trigger of the
 323                  * idr destroy
 324                  */
 325                 idr_remove(&vm->phys_pg_pack_handles, handle);
 326                 spin_unlock(&vm->idr_lock);
 327 
 328                 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
 329                 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
 330 
 331                 free_phys_pg_pack(hdev, phys_pg_pack);
 332         } else {
 333                 spin_unlock(&vm->idr_lock);
 334                 dev_err(hdev->dev,
 335                         "free device memory failed, no match for handle %u\n",
 336                         handle);
 337                 return -EINVAL;
 338         }
 339 
 340         return 0;
 341 }
 342 
 343 /*
 344  * clear_va_list_locked - free virtual addresses list
 345  *
 346  * @hdev                : habanalabs device structure
 347  * @va_list             : list of virtual addresses to free
 348  *
 349  * This function does the following:
 350  * - Iterate over the list and free each virtual addresses block
 351  *
 352  * This function should be called only when va_list lock is taken
 353  */
 354 static void clear_va_list_locked(struct hl_device *hdev,
 355                 struct list_head *va_list)
 356 {
 357         struct hl_vm_va_block *va_block, *tmp;
 358 
 359         list_for_each_entry_safe(va_block, tmp, va_list, node) {
 360                 list_del(&va_block->node);
 361                 kfree(va_block);
 362         }
 363 }
 364 
 365 /*
 366  * print_va_list_locked    - print virtual addresses list
 367  *
 368  * @hdev                : habanalabs device structure
 369  * @va_list             : list of virtual addresses to print
 370  *
 371  * This function does the following:
 372  * - Iterate over the list and print each virtual addresses block
 373  *
 374  * This function should be called only when va_list lock is taken
 375  */
 376 static void print_va_list_locked(struct hl_device *hdev,
 377                 struct list_head *va_list)
 378 {
 379 #if HL_MMU_DEBUG
 380         struct hl_vm_va_block *va_block;
 381 
 382         dev_dbg(hdev->dev, "print va list:\n");
 383 
 384         list_for_each_entry(va_block, va_list, node)
 385                 dev_dbg(hdev->dev,
 386                         "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
 387                         va_block->start, va_block->end, va_block->size);
 388 #endif
 389 }
 390 
 391 /*
 392  * merge_va_blocks_locked - merge a virtual block if possible
 393  *
 394  * @hdev                : pointer to the habanalabs device structure
 395  * @va_list             : pointer to the virtual addresses block list
 396  * @va_block            : virtual block to merge with adjacent blocks
 397  *
 398  * This function does the following:
 399  * - Merge the given block with its adjacent blocks if their virtual ranges
 400  *   form a contiguous virtual range
 401  *
 402  * This function should be called only when va_list lock is taken
 403  */
 404 static void merge_va_blocks_locked(struct hl_device *hdev,
 405                 struct list_head *va_list, struct hl_vm_va_block *va_block)
 406 {
 407         struct hl_vm_va_block *prev, *next;
 408 
 409         prev = list_prev_entry(va_block, node);
 410         if (&prev->node != va_list && prev->end + 1 == va_block->start) {
 411                 prev->end = va_block->end;
 412                 prev->size = prev->end - prev->start;
 413                 list_del(&va_block->node);
 414                 kfree(va_block);
 415                 va_block = prev;
 416         }
 417 
 418         next = list_next_entry(va_block, node);
 419         if (&next->node != va_list && va_block->end + 1 == next->start) {
 420                 next->start = va_block->start;
 421                 next->size = next->end - next->start;
 422                 list_del(&va_block->node);
 423                 kfree(va_block);
 424         }
 425 }
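/*
 * Example for merge_va_blocks_locked (hypothetical addresses): if the list
 * holds [0x1000, 0x1FFF] and [0x3000, 0x3FFF] and the block [0x2000, 0x2FFF]
 * is added between them, the previous block is first extended to
 * [0x1000, 0x2FFF]; its end + 1 then matches the next block's start, so the
 * three blocks end up as the single chunk [0x1000, 0x3FFF].
 */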
 426 
 427 /*
 428  * add_va_block_locked - add a virtual block to the virtual addresses list
 429  *
 430  * @hdev                : pointer to the habanalabs device structure
 431  * @va_list             : pointer to the virtual addresses block list
 432  * @start               : start virtual address
 433  * @end                 : end virtual address
 434  *
 435  * This function does the following:
 436  * - Add the given block to the virtual blocks list and merge with other
 437  * blocks if a contiguous virtual block can be created
 438  *
 439  * This function should be called only when va_list lock is taken
 440  */
 441 static int add_va_block_locked(struct hl_device *hdev,
 442                 struct list_head *va_list, u64 start, u64 end)
 443 {
 444         struct hl_vm_va_block *va_block, *res = NULL;
 445         u64 size = end - start;
 446 
 447         print_va_list_locked(hdev, va_list);
 448 
 449         list_for_each_entry(va_block, va_list, node) {
 450                 /* TODO: remove once the code is mature */
 451                 if (hl_mem_area_crosses_range(start, size, va_block->start,
 452                                 va_block->end)) {
 453                         dev_err(hdev->dev,
 454                                 "block crossing ranges at start 0x%llx, end 0x%llx\n",
 455                                 va_block->start, va_block->end);
 456                         return -EINVAL;
 457                 }
 458 
 459                 if (va_block->end < start)
 460                         res = va_block;
 461         }
 462 
 463         va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
 464         if (!va_block)
 465                 return -ENOMEM;
 466 
 467         va_block->start = start;
 468         va_block->end = end;
 469         va_block->size = size;
 470 
 471         if (!res)
 472                 list_add(&va_block->node, va_list);
 473         else
 474                 list_add(&va_block->node, &res->node);
 475 
 476         merge_va_blocks_locked(hdev, va_list, va_block);
 477 
 478         print_va_list_locked(hdev, va_list);
 479 
 480         return 0;
 481 }
 482 
 483 /*
 484  * add_va_block - wrapper for add_va_block_locked
 485  *
 486  * @hdev                : pointer to the habanalabs device structure
 487  * @va_range            : pointer to the virtual addresses range
 488  * @start               : start virtual address
 489  * @end                 : end virtual address
 490  *
 491  * This function does the following:
 492  * - Takes the list lock and calls add_va_block_locked
 493  */
 494 static inline int add_va_block(struct hl_device *hdev,
 495                 struct hl_va_range *va_range, u64 start, u64 end)
 496 {
 497         int rc;
 498 
 499         mutex_lock(&va_range->lock);
 500         rc = add_va_block_locked(hdev, &va_range->list, start, end);
 501         mutex_unlock(&va_range->lock);
 502 
 503         return rc;
 504 }
 505 
 506 /*
 507  * get_va_block - get a virtual block with the requested size
 508  *
 509  * @hdev            : pointer to the habanalabs device structure
 510  * @va_range        : pointer to the virtual addresses range
 511  * @size            : requested block size
 512  * @hint_addr       : address hint requested by the user
 513  * @is_userptr      : true for host (userptr) memory, false for DRAM memory
 514  *
 515  * This function does the following:
 516  * - Iterate over the virtual block list to find a suitable virtual block for
 517  *   the requested size
 518  * - Reserve the requested block and update the list
 519  * - Return the start address of the virtual block
 520  */
 521 static u64 get_va_block(struct hl_device *hdev,
 522                 struct hl_va_range *va_range, u64 size, u64 hint_addr,
 523                 bool is_userptr)
 524 {
 525         struct hl_vm_va_block *va_block, *new_va_block = NULL;
 526         u64 valid_start, valid_size, prev_start, prev_end, page_mask,
 527                 res_valid_start = 0, res_valid_size = 0;
 528         u32 page_size;
 529         bool add_prev = false;
 530 
 531         if (is_userptr) {
 532                 /*
 533                  * We cannot know if the user allocated memory with huge pages
 534                  * or not, hence we continue with the biggest possible
 535                  * granularity.
 536                  */
 537                 page_size = PAGE_SIZE_2MB;
 538                 page_mask = PAGE_MASK_2MB;
 539         } else {
 540                 page_size = hdev->asic_prop.dram_page_size;
 541                 page_mask = ~((u64)page_size - 1);
 542         }
 543 
 544         mutex_lock(&va_range->lock);
 545 
 546         print_va_list_locked(hdev, &va_range->list);
 547 
 548         list_for_each_entry(va_block, &va_range->list, node) {
 549                 /* calc the first possible aligned addr */
 550                 valid_start = va_block->start;
 551 
 552 
 553                 if (valid_start & (page_size - 1)) {
 554                         valid_start &= page_mask;
 555                         valid_start += page_size;
 556                         if (valid_start > va_block->end)
 557                                 continue;
 558                 }
 559 
 560                 valid_size = va_block->end - valid_start;
 561 
 562                 if (valid_size >= size &&
 563                         (!new_va_block || valid_size < res_valid_size)) {
 564 
 565                         new_va_block = va_block;
 566                         res_valid_start = valid_start;
 567                         res_valid_size = valid_size;
 568                 }
 569 
 570                 if (hint_addr && hint_addr >= valid_start &&
 571                                 ((hint_addr + size) <= va_block->end)) {
 572                         new_va_block = va_block;
 573                         res_valid_start = hint_addr;
 574                         res_valid_size = valid_size;
 575                         break;
 576                 }
 577         }
 578 
 579         if (!new_va_block) {
 580                 dev_err(hdev->dev, "no available va block for size %llu\n",
 581                                 size);
 582                 goto out;
 583         }
 584 
 585         if (res_valid_start > new_va_block->start) {
 586                 prev_start = new_va_block->start;
 587                 prev_end = res_valid_start - 1;
 588 
 589                 new_va_block->start = res_valid_start;
 590                 new_va_block->size = res_valid_size;
 591 
 592                 add_prev = true;
 593         }
 594 
 595         if (new_va_block->size > size) {
 596                 new_va_block->start += size;
 597                 new_va_block->size = new_va_block->end - new_va_block->start;
 598         } else {
 599                 list_del(&new_va_block->node);
 600                 kfree(new_va_block);
 601         }
 602 
 603         if (add_prev)
 604                 add_va_block_locked(hdev, &va_range->list, prev_start,
 605                                 prev_end);
 606 
 607         print_va_list_locked(hdev, &va_range->list);
 608 out:
 609         mutex_unlock(&va_range->lock);
 610 
 611         return res_valid_start;
 612 }
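/*
 * Example for get_va_block (hypothetical sizes): with free blocks of 3MB and
 * 8MB and a 2MB request, the 3MB block is chosen as the smallest block that
 * still fits. If hint_addr falls inside the 8MB block and the request fits
 * there, the hint is honored instead.
 */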
 613 
 614 /*
 615  * get_sg_info - get number of pages and the DMA address from SG list
 616  *
 617  * @sg                 : the SG list
 618  * @dma_addr           : pointer to DMA address to return
 619  *
 620  * Calculate the number of consecutive pages described by the SG list. Take
 621  * the offset of the address within the first page, add the length to it and
 622  * round the result up to the number of pages needed.
 623  */
 624 static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
 625 {
 626         *dma_addr = sg_dma_address(sg);
 627 
 628         return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
 629                         (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 630 }
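/*
 * Example for get_sg_info (hypothetical values, 4KB pages): a chunk whose DMA
 * address is 0x1000800 and whose length is 0x2000 covers offset 0x800 plus
 * 0x2000 bytes, i.e. 0x2800, which rounds up to 3 pages.
 */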
 631 
 632 /*
 633  * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 634  *                                   memory
 635  *
 636  * @ctx                : current context
 637  * @userptr            : userptr to initialize from
 638  * @pphys_pg_pack      : pointer to return the resulting page pack through
 639  *
 640  * This function does the following:
 641  * - Create a physical page pack from the pinned physical pages of the given
 642  *   userptr
 643  * - Use a 2MB page size if all chunks are 2MB-aligned multiples of 2MB
 644  */
 645 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 646                 struct hl_userptr *userptr,
 647                 struct hl_vm_phys_pg_pack **pphys_pg_pack)
 648 {
 649         struct hl_vm_phys_pg_pack *phys_pg_pack;
 650         struct scatterlist *sg;
 651         dma_addr_t dma_addr;
 652         u64 page_mask, total_npages;
 653         u32 npages, page_size = PAGE_SIZE;
 654         bool first = true, is_huge_page_opt = true;
 655         int rc, i, j;
 656 
 657         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
 658         if (!phys_pg_pack)
 659                 return -ENOMEM;
 660 
 661         phys_pg_pack->vm_type = userptr->vm_type;
 662         phys_pg_pack->created_from_userptr = true;
 663         phys_pg_pack->asid = ctx->asid;
 664         atomic_set(&phys_pg_pack->mapping_cnt, 1);
 665 
 666         /* We can use huge page mapping only if all DMA addresses are
 667          * aligned to 2MB and all chunk sizes are multiples of 2MB.
 668          * We limit the 2MB optimization to this condition,
 669          * since later on we acquire the related VA range as one
 670          * consecutive block.
 671          */
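        /*
         * For example (hypothetical chunks): two 2MB-aligned chunks of 4MB and
         * 2MB allow the optimization (three 2MB pages in total), while a single
         * 3MB chunk does not, since its page count is not a multiple of
         * PGS_IN_2MB_PAGE.
         */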
 672         total_npages = 0;
 673         for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
 674                 npages = get_sg_info(sg, &dma_addr);
 675 
 676                 total_npages += npages;
 677 
 678                 if ((npages % PGS_IN_2MB_PAGE) ||
 679                                         (dma_addr & (PAGE_SIZE_2MB - 1)))
 680                         is_huge_page_opt = false;
 681         }
 682 
 683         if (is_huge_page_opt) {
 684                 page_size = PAGE_SIZE_2MB;
 685                 total_npages /= PGS_IN_2MB_PAGE;
 686         }
 687 
 688         page_mask = ~(((u64) page_size) - 1);
 689 
 690         phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
 691                                                 GFP_KERNEL);
 692         if (!phys_pg_pack->pages) {
 693                 rc = -ENOMEM;
 694                 goto page_pack_arr_mem_err;
 695         }
 696 
 697         phys_pg_pack->npages = total_npages;
 698         phys_pg_pack->page_size = page_size;
 699         phys_pg_pack->total_size = total_npages * page_size;
 700 
 701         j = 0;
 702         for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
 703                 npages = get_sg_info(sg, &dma_addr);
 704 
 705                 /* align down to physical page size and save the offset */
 706                 if (first) {
 707                         first = false;
 708                         phys_pg_pack->offset = dma_addr & (page_size - 1);
 709                         dma_addr &= page_mask;
 710                 }
 711 
 712                 while (npages) {
 713                         phys_pg_pack->pages[j++] = dma_addr;
 714                         dma_addr += page_size;
 715 
 716                         if (is_huge_page_opt)
 717                                 npages -= PGS_IN_2MB_PAGE;
 718                         else
 719                                 npages--;
 720                 }
 721         }
 722 
 723         *pphys_pg_pack = phys_pg_pack;
 724 
 725         return 0;
 726 
 727 page_pack_arr_mem_err:
 728         kfree(phys_pg_pack);
 729 
 730         return rc;
 731 }
 732 
 733 /*
 734  * map_phys_page_pack - maps the physical page pack
 735  *
 736  * @ctx                : current context
 737  * @vaddr              : start address of the virtual area to map from
 738  * @phys_pg_pack       : the pack of physical pages to map to
 739  *
 740  * This function does the following:
 741  * - Maps each chunk of virtual memory to matching physical chunk
 742  * - Unmaps any pages that were already mapped if a mapping fails
 743  * - Returns 0 on success, error code otherwise.
 744  */
 745 static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 746                 struct hl_vm_phys_pg_pack *phys_pg_pack)
 747 {
 748         struct hl_device *hdev = ctx->hdev;
 749         u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
 750         u32 page_size = phys_pg_pack->page_size;
 751         int rc = 0;
 752 
 753         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 754                 paddr = phys_pg_pack->pages[i];
 755 
 756                 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
 757                 if (rc) {
 758                         dev_err(hdev->dev,
 759                                 "map failed for handle %u, npages: %llu, mapped: %llu\n",
 760                                 phys_pg_pack->handle, phys_pg_pack->npages,
 761                                 mapped_pg_cnt);
 762                         goto err;
 763                 }
 764 
 765                 mapped_pg_cnt++;
 766                 next_vaddr += page_size;
 767         }
 768 
 769         return 0;
 770 
 771 err:
 772         next_vaddr = vaddr;
 773         for (i = 0 ; i < mapped_pg_cnt ; i++) {
 774                 if (hl_mmu_unmap(ctx, next_vaddr, page_size))
 775                         dev_warn_ratelimited(hdev->dev,
 776                                 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
 777                                         phys_pg_pack->handle, next_vaddr,
 778                                         phys_pg_pack->pages[i], page_size);
 779 
 780                 next_vaddr += page_size;
 781         }
 782 
 783         return rc;
 784 }
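/*
 * Example for map_phys_page_pack (hypothetical pack): a pack of three 2MB
 * pages is mapped as vaddr -> pages[0], vaddr + 2MB -> pages[1] and
 * vaddr + 4MB -> pages[2]; if the third mapping fails, the first two are
 * unmapped before returning the error.
 */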
 785 
 786 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
 787                                 u64 *paddr)
 788 {
 789         struct hl_device *hdev = ctx->hdev;
 790         struct hl_vm *vm = &hdev->vm;
 791         struct hl_vm_phys_pg_pack *phys_pg_pack;
 792         u32 handle;
 793 
 794         handle = lower_32_bits(args->map_device.handle);
 795         spin_lock(&vm->idr_lock);
 796         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
 797         if (!phys_pg_pack) {
 798                 spin_unlock(&vm->idr_lock);
 799                 dev_err(hdev->dev, "no match for handle %u\n", handle);
 800                 return -EINVAL;
 801         }
 802 
 803         *paddr = phys_pg_pack->pages[0];
 804 
 805         spin_unlock(&vm->idr_lock);
 806 
 807         return 0;
 808 }
 809 
 810 /*
 811  * map_device_va - map the given memory
 812  *
 813  * @ctx          : current context
 814  * @args         : host parameters with handle/host virtual address
 815  * @device_addr  : pointer to result device virtual address
 816  *
 817  * This function does the following:
 818  * - If given a physical device memory handle, map to a device virtual block
 819  *   and return the start address of this block
 820  * - If given a host virtual address and size, find the related physical pages,
 821  *   map a device virtual block to these pages and return the start address of
 822  *   this block
 823  */
 824 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 825                 u64 *device_addr)
 826 {
 827         struct hl_device *hdev = ctx->hdev;
 828         struct hl_vm *vm = &hdev->vm;
 829         struct hl_vm_phys_pg_pack *phys_pg_pack;
 830         struct hl_userptr *userptr = NULL;
 831         struct hl_vm_hash_node *hnode;
 832         enum vm_type_t *vm_type;
 833         u64 ret_vaddr, hint_addr;
 834         u32 handle = 0;
 835         int rc;
 836         bool is_userptr = args->flags & HL_MEM_USERPTR;
 837 
 838         /* Assume failure */
 839         *device_addr = 0;
 840 
 841         if (is_userptr) {
 842                 rc = get_userptr_from_host_va(hdev, args, &userptr);
 843                 if (rc) {
 844                         dev_err(hdev->dev, "failed to get userptr from va\n");
 845                         return rc;
 846                 }
 847 
 848                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
 849                                 &phys_pg_pack);
 850                 if (rc) {
 851                         dev_err(hdev->dev,
 852                                 "unable to init page pack for vaddr 0x%llx\n",
 853                                 args->map_host.host_virt_addr);
 854                         goto init_page_pack_err;
 855                 }
 856 
 857                 vm_type = (enum vm_type_t *) userptr;
 858                 hint_addr = args->map_host.hint_addr;
 859         } else {
 860                 handle = lower_32_bits(args->map_device.handle);
 861 
 862                 spin_lock(&vm->idr_lock);
 863                 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
 864                 if (!phys_pg_pack) {
 865                         spin_unlock(&vm->idr_lock);
 866                         dev_err(hdev->dev,
 867                                 "no match for handle %u\n", handle);
 868                         return -EINVAL;
 869                 }
 870 
 871                 /* increment now to avoid freeing device memory while mapping */
 872                 atomic_inc(&phys_pg_pack->mapping_cnt);
 873 
 874                 spin_unlock(&vm->idr_lock);
 875 
 876                 vm_type = (enum vm_type_t *) phys_pg_pack;
 877 
 878                 hint_addr = args->map_device.hint_addr;
 879         }
 880 
 881         /*
 882          * relevant for mapping device physical memory only, as host memory is
 883          * implicitly shared
 884          */
 885         if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
 886                         phys_pg_pack->asid != ctx->asid) {
 887                 dev_err(hdev->dev,
 888                         "Failed to map memory, handle %u is not shared\n",
 889                         handle);
 890                 rc = -EPERM;
 891                 goto shared_err;
 892         }
 893 
 894         hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
 895         if (!hnode) {
 896                 rc = -ENOMEM;
 897                 goto hnode_err;
 898         }
 899 
 900         ret_vaddr = get_va_block(hdev,
 901                         is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
 902                         phys_pg_pack->total_size, hint_addr, is_userptr);
 903         if (!ret_vaddr) {
 904                 dev_err(hdev->dev, "no available va block for handle %u\n",
 905                                 handle);
 906                 rc = -ENOMEM;
 907                 goto va_block_err;
 908         }
 909 
 910         mutex_lock(&ctx->mmu_lock);
 911 
 912         rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
 913         if (rc) {
 914                 mutex_unlock(&ctx->mmu_lock);
 915                 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
 916                                 handle);
 917                 goto map_err;
 918         }
 919 
 920         hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
 921 
 922         mutex_unlock(&ctx->mmu_lock);
 923 
 924         ret_vaddr += phys_pg_pack->offset;
 925 
 926         hnode->ptr = vm_type;
 927         hnode->vaddr = ret_vaddr;
 928 
 929         mutex_lock(&ctx->mem_hash_lock);
 930         hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
 931         mutex_unlock(&ctx->mem_hash_lock);
 932 
 933         *device_addr = ret_vaddr;
 934 
 935         if (is_userptr)
 936                 free_phys_pg_pack(hdev, phys_pg_pack);
 937 
 938         return 0;
 939 
 940 map_err:
 941         if (add_va_block(hdev,
 942                         is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
 943                         ret_vaddr,
 944                         ret_vaddr + phys_pg_pack->total_size - 1))
 945                 dev_warn(hdev->dev,
 946                         "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
 947                                 handle, ret_vaddr);
 948 
 949 va_block_err:
 950         kfree(hnode);
 951 hnode_err:
 952 shared_err:
 953         atomic_dec(&phys_pg_pack->mapping_cnt);
 954         if (is_userptr)
 955                 free_phys_pg_pack(hdev, phys_pg_pack);
 956 init_page_pack_err:
 957         if (is_userptr)
 958                 free_userptr(hdev, userptr);
 959 
 960         return rc;
 961 }
 962 
 963 /*
 964  * unmap_device_va      - unmap the given device virtual address
 965  *
 966  * @ctx                 : current context
 967  * @vaddr               : device virtual address to unmap
 968  * @ctx_free            : true if in context free flow, false otherwise.
 969  *
 970  * This function does the following:
 971  * - Unmap the physical pages related to the given virtual address
 972  * - Return the device virtual block to the virtual block list
 973  */
 974 static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
 975 {
 976         struct hl_device *hdev = ctx->hdev;
 977         struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 978         struct hl_vm_hash_node *hnode = NULL;
 979         struct hl_userptr *userptr = NULL;
 980         struct hl_va_range *va_range;
 981         enum vm_type_t *vm_type;
 982         u64 next_vaddr, i;
 983         u32 page_size;
 984         bool is_userptr;
 985         int rc;
 986 
 987         /* protect from double entry */
 988         mutex_lock(&ctx->mem_hash_lock);
 989         hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
 990                 if (vaddr == hnode->vaddr)
 991                         break;
 992 
 993         if (!hnode) {
 994                 mutex_unlock(&ctx->mem_hash_lock);
 995                 dev_err(hdev->dev,
 996                         "unmap failed, no mem hnode for vaddr 0x%llx\n",
 997                         vaddr);
 998                 return -EINVAL;
 999         }
1000 
1001         hash_del(&hnode->node);
1002         mutex_unlock(&ctx->mem_hash_lock);
1003 
1004         vm_type = hnode->ptr;
1005 
1006         if (*vm_type == VM_TYPE_USERPTR) {
1007                 is_userptr = true;
1008                 va_range = &ctx->host_va_range;
1009                 userptr = hnode->ptr;
1010                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1011                                 &phys_pg_pack);
1012                 if (rc) {
1013                         dev_err(hdev->dev,
1014                                 "unable to init page pack for vaddr 0x%llx\n",
1015                                 vaddr);
1016                         goto vm_type_err;
1017                 }
1018         } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1019                 is_userptr = false;
1020                 va_range = &ctx->dram_va_range;
1021                 phys_pg_pack = hnode->ptr;
1022         } else {
1023                 dev_warn(hdev->dev,
1024                         "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1025                                 vaddr);
1026                 rc = -EFAULT;
1027                 goto vm_type_err;
1028         }
1029 
1030         if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1031                 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1032                 rc = -EINVAL;
1033                 goto mapping_cnt_err;
1034         }
1035 
1036         page_size = phys_pg_pack->page_size;
1037         vaddr &= ~(((u64) page_size) - 1);
1038 
1039         next_vaddr = vaddr;
1040 
1041         mutex_lock(&ctx->mmu_lock);
1042 
1043         for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
1044                 if (hl_mmu_unmap(ctx, next_vaddr, page_size))
1045                         dev_warn_ratelimited(hdev->dev,
1046                         "unmap failed for vaddr: 0x%llx\n", next_vaddr);
1047 
1048                 /* unmapping on Palladium can be really long, so avoid a CPU
1049                  * soft lockup bug by sleeping a little between unmapping pages
1050                  */
1051                 if (hdev->pldm)
1052                         usleep_range(500, 1000);
1053         }
1054 
1055         hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
1056 
1057         mutex_unlock(&ctx->mmu_lock);
1058 
1059         /*
1060          * No point in maintaining the free VA block list if the context is
1061          * closing as the list will be freed anyway
1062          */
1063         if (!ctx_free) {
1064                 rc = add_va_block(hdev, va_range, vaddr,
1065                                         vaddr + phys_pg_pack->total_size - 1);
1066                 if (rc)
1067                         dev_warn(hdev->dev,
1068                                         "add va block failed for vaddr: 0x%llx\n",
1069                                         vaddr);
1070         }
1071 
1072         atomic_dec(&phys_pg_pack->mapping_cnt);
1073         kfree(hnode);
1074 
1075         if (is_userptr) {
1076                 free_phys_pg_pack(hdev, phys_pg_pack);
1077                 free_userptr(hdev, userptr);
1078         }
1079 
1080         return 0;
1081 
1082 mapping_cnt_err:
1083         if (is_userptr)
1084                 free_phys_pg_pack(hdev, phys_pg_pack);
1085 vm_type_err:
1086         mutex_lock(&ctx->mem_hash_lock);
1087         hash_add(ctx->mem_hash, &hnode->node, vaddr);
1088         mutex_unlock(&ctx->mem_hash_lock);
1089 
1090         return rc;
1091 }
1092 
1093 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
1094 {
1095         struct hl_device *hdev = hpriv->hdev;
1096         struct hl_ctx *ctx = hpriv->ctx;
1097         u64 device_addr = 0;
1098         u32 handle = 0;
1099         int rc;
1100 
1101         switch (args->in.op) {
1102         case HL_MEM_OP_ALLOC:
1103                 if (args->in.alloc.mem_size == 0) {
1104                         dev_err(hdev->dev,
1105                                 "alloc size must be larger than 0\n");
1106                         rc = -EINVAL;
1107                         goto out;
1108                 }
1109 
1110                 /* Force contiguous as there are no real MMU
1111                  * translations to overcome physical memory gaps
1112                  */
1113                 args->in.flags |= HL_MEM_CONTIGUOUS;
1114                 rc = alloc_device_memory(ctx, &args->in, &handle);
1115 
1116                 memset(args, 0, sizeof(*args));
1117                 args->out.handle = (__u64) handle;
1118                 break;
1119 
1120         case HL_MEM_OP_FREE:
1121                 rc = free_device_memory(ctx, args->in.free.handle);
1122                 break;
1123 
1124         case HL_MEM_OP_MAP:
1125                 if (args->in.flags & HL_MEM_USERPTR) {
1126                         device_addr = args->in.map_host.host_virt_addr;
1127                         rc = 0;
1128                 } else {
1129                         rc = get_paddr_from_handle(ctx, &args->in,
1130                                         &device_addr);
1131                 }
1132 
1133                 memset(args, 0, sizeof(*args));
1134                 args->out.device_virt_addr = device_addr;
1135                 break;
1136 
1137         case HL_MEM_OP_UNMAP:
1138                 rc = 0;
1139                 break;
1140 
1141         default:
1142                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1143                 rc = -ENOTTY;
1144                 break;
1145         }
1146 
1147 out:
1148         return rc;
1149 }
1150 
1151 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
1152 {
1153         union hl_mem_args *args = data;
1154         struct hl_device *hdev = hpriv->hdev;
1155         struct hl_ctx *ctx = hpriv->ctx;
1156         u64 device_addr = 0;
1157         u32 handle = 0;
1158         int rc;
1159 
1160         if (hl_device_disabled_or_in_reset(hdev)) {
1161                 dev_warn_ratelimited(hdev->dev,
1162                         "Device is %s. Can't execute MEMORY IOCTL\n",
1163                         atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
1164                 return -EBUSY;
1165         }
1166 
1167         if (!hdev->mmu_enable)
1168                 return mem_ioctl_no_mmu(hpriv, args);
1169 
1170         switch (args->in.op) {
1171         case HL_MEM_OP_ALLOC:
1172                 if (!hdev->dram_supports_virtual_memory) {
1173                         dev_err(hdev->dev, "DRAM alloc is not supported\n");
1174                         rc = -EINVAL;
1175                         goto out;
1176                 }
1177 
1178                 if (args->in.alloc.mem_size == 0) {
1179                         dev_err(hdev->dev,
1180                                 "alloc size must be larger than 0\n");
1181                         rc = -EINVAL;
1182                         goto out;
1183                 }
1184                 rc = alloc_device_memory(ctx, &args->in, &handle);
1185 
1186                 memset(args, 0, sizeof(*args));
1187                 args->out.handle = (__u64) handle;
1188                 break;
1189 
1190         case HL_MEM_OP_FREE:
1191                 rc = free_device_memory(ctx, args->in.free.handle);
1192                 break;
1193 
1194         case HL_MEM_OP_MAP:
1195                 rc = map_device_va(ctx, &args->in, &device_addr);
1196 
1197                 memset(args, 0, sizeof(*args));
1198                 args->out.device_virt_addr = device_addr;
1199                 break;
1200 
1201         case HL_MEM_OP_UNMAP:
1202                 rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
1203                                         false);
1204                 break;
1205 
1206         default:
1207                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1208                 rc = -ENOTTY;
1209                 break;
1210         }
1211 
1212 out:
1213         return rc;
1214 }
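/*
 * Minimal user-space sketch of the memory IOCTL flow handled above, assuming
 * the included uapi header defines HL_IOCTL_MEMORY; the device node path
 * (/dev/hl0) is hypothetical, and headers and error checking are omitted:
 *
 *        union hl_mem_args args = {0};
 *        int fd = open("/dev/hl0", O_RDWR);
 *
 *        args.in.op = HL_MEM_OP_ALLOC;
 *        args.in.flags = HL_MEM_CONTIGUOUS;
 *        args.in.alloc.mem_size = 2 * 1024 * 1024;
 *        ioctl(fd, HL_IOCTL_MEMORY, &args);
 *        __u64 handle = args.out.handle;
 *
 *        memset(&args, 0, sizeof(args));
 *        args.in.op = HL_MEM_OP_MAP;
 *        args.in.map_device.handle = handle;
 *        ioctl(fd, HL_IOCTL_MEMORY, &args);
 *        __u64 device_va = args.out.device_virt_addr;
 *
 *        memset(&args, 0, sizeof(args));
 *        args.in.op = HL_MEM_OP_UNMAP;
 *        args.in.unmap.device_virt_addr = device_va;
 *        ioctl(fd, HL_IOCTL_MEMORY, &args);
 *
 *        memset(&args, 0, sizeof(args));
 *        args.in.op = HL_MEM_OP_FREE;
 *        args.in.free.handle = handle;
 *        ioctl(fd, HL_IOCTL_MEMORY, &args);
 */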
1215 
1216 /*
1217  * hl_pin_host_memory - pins a chunk of host memory
1218  *
1219  * @hdev                : pointer to the habanalabs device structure
1220  * @addr                : the user-space virtual address of the memory area
1221  * @size                : the size of the memory area
1222  * @userptr             : pointer to hl_userptr structure
1223  *
1224  * This function does the following:
1225  * - Pins the physical pages
1226  * - Creates an SG list from those pages
1227  */
1228 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1229                         struct hl_userptr *userptr)
1230 {
1231         u64 start, end;
1232         u32 npages, offset;
1233         int rc;
1234 
1235         if (!size) {
1236                 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
1237                 return -EINVAL;
1238         }
1239 
1240         if (!access_ok((void __user *) (uintptr_t) addr, size)) {
1241                 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
1242                 return -EFAULT;
1243         }
1244 
1245         /*
1246          * If the combination of the address and size requested for this memory
1247          * region causes an integer overflow, return error.
1248          */
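        /*
         * For example (hypothetical values, 4KB pages): addr 0xFFFFFFFFFFFFE000
         * with size 0x1800 does not wrap in addr + size, but PAGE_ALIGN() of
         * the sum wraps around to 0, which the second condition below catches.
         */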
1249         if (((addr + size) < addr) ||
1250                         PAGE_ALIGN(addr + size) < (addr + size)) {
1251                 dev_err(hdev->dev,
1252                         "user pointer 0x%llx + %llu causes integer overflow\n",
1253                         addr, size);
1254                 return -EINVAL;
1255         }
1256 
1257         start = addr & PAGE_MASK;
1258         offset = addr & ~PAGE_MASK;
1259         end = PAGE_ALIGN(addr + size);
1260         npages = (end - start) >> PAGE_SHIFT;
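        /*
         * For example (hypothetical values, 4KB pages): addr 0x401234 with
         * size 0x3000 gives start 0x401000, offset 0x234, end 0x405000 and
         * npages 4.
         */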
1261 
1262         userptr->size = size;
1263         userptr->addr = addr;
1264         userptr->dma_mapped = false;
1265         INIT_LIST_HEAD(&userptr->job_node);
1266 
1267         userptr->vec = frame_vector_create(npages);
1268         if (!userptr->vec) {
1269                 dev_err(hdev->dev, "Failed to create frame vector\n");
1270                 return -ENOMEM;
1271         }
1272 
1273         rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
1274                                 userptr->vec);
1275 
1276         if (rc != npages) {
1277                 dev_err(hdev->dev,
1278                         "Failed to map host memory, user ptr probably wrong\n");
1279                 if (rc < 0)
1280                         goto destroy_framevec;
1281                 rc = -EFAULT;
1282                 goto put_framevec;
1283         }
1284 
1285         if (frame_vector_to_pages(userptr->vec) < 0) {
1286                 dev_err(hdev->dev,
1287                         "Failed to translate frame vector to pages\n");
1288                 rc = -EFAULT;
1289                 goto put_framevec;
1290         }
1291 
1292         userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
1293         if (!userptr->sgt) {
1294                 rc = -ENOMEM;
1295                 goto put_framevec;
1296         }
1297 
1298         rc = sg_alloc_table_from_pages(userptr->sgt,
1299                                         frame_vector_pages(userptr->vec),
1300                                         npages, offset, size, GFP_ATOMIC);
1301         if (rc < 0) {
1302                 dev_err(hdev->dev, "failed to create SG table from pages\n");
1303                 goto free_sgt;
1304         }
1305 
1306         hl_debugfs_add_userptr(hdev, userptr);
1307 
1308         return 0;
1309 
1310 free_sgt:
1311         kfree(userptr->sgt);
1312 put_framevec:
1313         put_vaddr_frames(userptr->vec);
1314 destroy_framevec:
1315         frame_vector_destroy(userptr->vec);
1316         return rc;
1317 }
1318 
1319 /*
1320  * hl_unpin_host_memory - unpins a chunk of host memory
1321  *
1322  * @hdev                : pointer to the habanalabs device structure
1323  * @userptr             : pointer to hl_userptr structure
1324  *
1325  * This function does the following:
1326  * - Unpins the physical pages related to the host memory
1327  * - Frees the SG list
1328  */
1329 int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
1330 {
1331         struct page **pages;
1332 
1333         hl_debugfs_remove_userptr(hdev, userptr);
1334 
1335         if (userptr->dma_mapped)
1336                 hdev->asic_funcs->hl_dma_unmap_sg(hdev,
1337                                 userptr->sgt->sgl,
1338                                 userptr->sgt->nents,
1339                                 userptr->dir);
1340 
1341         pages = frame_vector_pages(userptr->vec);
1342         if (!IS_ERR(pages)) {
1343                 int i;
1344 
1345                 for (i = 0; i < frame_vector_count(userptr->vec); i++)
1346                         set_page_dirty_lock(pages[i]);
1347         }
1348         put_vaddr_frames(userptr->vec);
1349         frame_vector_destroy(userptr->vec);
1350 
1351         list_del(&userptr->job_node);
1352 
1353         sg_free_table(userptr->sgt);
1354         kfree(userptr->sgt);
1355 
1356         return 0;
1357 }
1358 
1359 /*
1360  * hl_userptr_delete_list - clear userptr list
1361  *
1362  * @hdev                : pointer to the habanalabs device structure
1363  * @userptr_list        : pointer to the list to clear
1364  *
1365  * This function does the following:
1366  * - Iterates over the list and unpins the host memory and frees the userptr
1367  *   structure.
1368  */
1369 void hl_userptr_delete_list(struct hl_device *hdev,
1370                                 struct list_head *userptr_list)
1371 {
1372         struct hl_userptr *userptr, *tmp;
1373 
1374         list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
1375                 hl_unpin_host_memory(hdev, userptr);
1376                 kfree(userptr);
1377         }
1378 
1379         INIT_LIST_HEAD(userptr_list);
1380 }
1381 
1382 /*
1383  * hl_userptr_is_pinned - returns whether the given userptr is pinned
1384  *
1385  * @hdev                : pointer to the habanalabs device structure
1386  * @userptr_list        : pointer to the list to search in
1387  * @userptr             : used to return the matching userptr, if found
1388  *
1389  * This function does the following:
1390  * - Iterates over the list and checks if an entry with the given address and
1391  *   size is in it, meaning it is pinned. If so, returns true, else false.
1392  */
1393 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
1394                                 u32 size, struct list_head *userptr_list,
1395                                 struct hl_userptr **userptr)
1396 {
1397         list_for_each_entry((*userptr), userptr_list, job_node) {
1398                 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
1399                         return true;
1400         }
1401 
1402         return false;
1403 }
1404 
1405 /*
1406  * hl_va_range_init - initialize virtual addresses range
1407  *
1408  * @hdev                : pointer to the habanalabs device structure
1409  * @va_range            : pointer to the range to initialize
1410  * @start               : range start address
1411  * @end                 : range end address
1412  *
1413  * This function does the following:
1414  * - Initializes the virtual addresses list of the given range with the given
1415  *   addresses.
1416  */
1417 static int hl_va_range_init(struct hl_device *hdev,
1418                 struct hl_va_range *va_range, u64 start, u64 end)
1419 {
1420         int rc;
1421 
1422         INIT_LIST_HEAD(&va_range->list);
1423 
1424         /* PAGE_SIZE alignment */
1425 
1426         if (start & (PAGE_SIZE - 1)) {
1427                 start &= PAGE_MASK;
1428                 start += PAGE_SIZE;
1429         }
1430 
1431         if (end & (PAGE_SIZE - 1))
1432                 end &= PAGE_MASK;
1433 
1434         if (start >= end) {
1435                 dev_err(hdev->dev, "too small vm range for va list\n");
1436                 return -EFAULT;
1437         }
1438 
1439         rc = add_va_block(hdev, va_range, start, end);
1440 
1441         if (rc) {
1442                 dev_err(hdev->dev, "Failed to init va list\n");
1443                 return rc;
1444         }
1445 
1446         va_range->start_addr = start;
1447         va_range->end_addr = end;
1448 
1449         return 0;
1450 }
1451 
1452 /*
1453  * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
1454  *
1455  * @ctx                 : pointer to the habanalabs context structure
1456  * @host_range_start    : host virtual addresses range start
1457  * @host_range_end      : host virtual addresses range end
1458  * @dram_range_start    : dram virtual addresses range start
1459  * @dram_range_end      : dram virtual addresses range end
1460  *
1461  * This function initializes the following:
1462  * - MMU for context
1463  * - Virtual address to area descriptor hashtable
1464  * - Virtual block list of available virtual memory
1465  */
1466 static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
1467                                 u64 host_range_end, u64 dram_range_start,
1468                                 u64 dram_range_end)
1469 {
1470         struct hl_device *hdev = ctx->hdev;
1471         int rc;
1472 
1473         rc = hl_mmu_ctx_init(ctx);
1474         if (rc) {
1475                 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
1476                 return rc;
1477         }
1478 
1479         mutex_init(&ctx->mem_hash_lock);
1480         hash_init(ctx->mem_hash);
1481 
1482         mutex_init(&ctx->host_va_range.lock);
1483 
1484         rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
1485                         host_range_end);
1486         if (rc) {
1487                 dev_err(hdev->dev, "failed to init host vm range\n");
1488                 goto host_vm_err;
1489         }
1490 
1491         mutex_init(&ctx->dram_va_range.lock);
1492 
1493         rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
1494                         dram_range_end);
1495         if (rc) {
1496                 dev_err(hdev->dev, "failed to init dram vm range\n");
1497                 goto dram_vm_err;
1498         }
1499 
1500         hl_debugfs_add_ctx_mem_hash(hdev, ctx);
1501 
1502         return 0;
1503 
1504 dram_vm_err:
1505         mutex_destroy(&ctx->dram_va_range.lock);
1506 
1507         mutex_lock(&ctx->host_va_range.lock);
1508         clear_va_list_locked(hdev, &ctx->host_va_range.list);
1509         mutex_unlock(&ctx->host_va_range.lock);
1510 host_vm_err:
1511         mutex_destroy(&ctx->host_va_range.lock);
1512         mutex_destroy(&ctx->mem_hash_lock);
1513         hl_mmu_ctx_fini(ctx);
1514 
1515         return rc;
1516 }
1517 
1518 int hl_vm_ctx_init(struct hl_ctx *ctx)
1519 {
1520         struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
1521         u64 host_range_start, host_range_end, dram_range_start,
1522                 dram_range_end;
1523 
1524         atomic64_set(&ctx->dram_phys_mem, 0);
1525 
1526         /*
1527          * - If MMU is enabled, init the ranges as usual.
1528          * - If MMU is disabled, in case of host mapping, the returned address
1529          *   is the given one.
1530          *   In case of DRAM mapping, the returned address is the physical
1531          *   address of the memory related to the given handle.
1532          */
1533         if (ctx->hdev->mmu_enable) {
1534                 dram_range_start = prop->va_space_dram_start_address;
1535                 dram_range_end = prop->va_space_dram_end_address;
1536                 host_range_start = prop->va_space_host_start_address;
1537                 host_range_end = prop->va_space_host_end_address;
1538         } else {
1539                 dram_range_start = prop->dram_user_base_address;
1540                 dram_range_end = prop->dram_end_address;
1541                 host_range_start = prop->dram_user_base_address;
1542                 host_range_end = prop->dram_end_address;
1543         }
1544 
1545         return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
1546                         dram_range_start, dram_range_end);
1547 }
1548 
1549 /*
1550  * hl_va_range_fini     - clear a virtual addresses range
1551  *
1552  * @hdev                : pointer to the habanalabs device structure
1553  * @va_range            : pointer to virtual addresses range
1554  *
1555  * This function does the following:
1556  * - Checks that the list holds a single block covering the whole initial range
1557  * - Frees the virtual addresses block list and its lock
1558  */
1559 static void hl_va_range_fini(struct hl_device *hdev,
1560                 struct hl_va_range *va_range)
1561 {
1562         struct hl_vm_va_block *va_block;
1563 
1564         if (list_empty(&va_range->list)) {
1565                 dev_warn(hdev->dev,
1566                                 "va list should not be empty on cleanup!\n");
1567                 goto out;
1568         }
1569 
1570         if (!list_is_singular(&va_range->list)) {
1571                 dev_warn(hdev->dev,
1572                         "va list should not contain multiple blocks on cleanup!\n");
1573                 goto free_va_list;
1574         }
1575 
1576         va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
1577 
1578         if (va_block->start != va_range->start_addr ||
1579                 va_block->end != va_range->end_addr) {
1580                 dev_warn(hdev->dev,
1581                         "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
1582                                 va_block->start, va_block->end);
1583                 goto free_va_list;
1584         }
1585 
1586 free_va_list:
1587         mutex_lock(&va_range->lock);
1588         clear_va_list_locked(hdev, &va_range->list);
1589         mutex_unlock(&va_range->lock);
1590 
1591 out:
1592         mutex_destroy(&va_range->lock);
1593 }
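
/*
 * Sketch (illustrative only): the invariant checked by hl_va_range_fini()
 * expressed as a predicate - once every mapping has been unmapped and the
 * freed chunks merged, the range must again consist of a single block that
 * covers [start_addr, end_addr]. The helper name is hypothetical.
 */
static inline bool example_va_range_is_whole(struct hl_va_range *va_range)
{
        struct hl_vm_va_block *va_block;

        if (!list_is_singular(&va_range->list))
                return false;

        va_block = list_first_entry(&va_range->list, typeof(*va_block), node);

        return (va_block->start == va_range->start_addr) &&
                (va_block->end == va_range->end_addr);
}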
1594 
1595 /*
1596  * hl_vm_ctx_fini       - virtual memory teardown of context
1597  *
1598  * @ctx                 : pointer to the habanalabs context structure
1599  *
1600  * This function performs teardown of the following:
1601  * - Virtual block list of available virtual memory
1602  * - Virtual address to area descriptor hashtable
1603  * - MMU for context
1604  *
1605  * In addition this function does the following:
1606  * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
1607  *   hashtable should be empty as no valid mappings should exist at this
1608  *   point.
1609  * - Frees any existing physical page list from the idr which relates to the
1610  *   current context asid.
1611  * - This function checks the virtual block list for correctness. At this point
1612  *   the list should contain one element which describes the whole virtual
1613  *   memory range of the context. Otherwise, a warning is printed.
1614  */
1615 void hl_vm_ctx_fini(struct hl_ctx *ctx)
1616 {
1617         struct hl_device *hdev = ctx->hdev;
1618         struct hl_vm *vm = &hdev->vm;
1619         struct hl_vm_phys_pg_pack *phys_pg_list;
1620         struct hl_vm_hash_node *hnode;
1621         struct hlist_node *tmp_node;
1622         int i;
1623 
1624         hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
1625 
1626         if (!hash_empty(ctx->mem_hash))
1627                 dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
1628 
1629         hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
1630                 dev_dbg(hdev->dev,
1631                         "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1632                         hnode->vaddr, ctx->asid);
1633                 unmap_device_va(ctx, hnode->vaddr, true);
1634         }
1635 
1636         spin_lock(&vm->idr_lock);
1637         idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
1638                 if (phys_pg_list->asid == ctx->asid) {
1639                         dev_dbg(hdev->dev,
1640                                 "page list 0x%p of asid %d is still alive\n",
1641                                 phys_pg_list, ctx->asid);
1642                         atomic64_sub(phys_pg_list->total_size,
1643                                         &hdev->dram_used_mem);
1644                         free_phys_pg_pack(hdev, phys_pg_list);
1645                         idr_remove(&vm->phys_pg_pack_handles, i);
1646                 }
1647         spin_unlock(&vm->idr_lock);
1648 
1649         hl_va_range_fini(hdev, &ctx->dram_va_range);
1650         hl_va_range_fini(hdev, &ctx->host_va_range);
1651 
1652         mutex_destroy(&ctx->mem_hash_lock);
1653         hl_mmu_ctx_fini(ctx);
1654 }
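
/*
 * Sketch (illustrative only): the expected pairing over a context lifetime.
 * hl_vm_ctx_init() is called when a context is created and hl_vm_ctx_fini()
 * when it is destroyed; in the driver this is done by the context management
 * code, and the wrapper names below are hypothetical.
 */
static inline int example_ctx_create_vm(struct hl_ctx *ctx)
{
        /* sets up the MMU context, the mem hash and both VA ranges */
        return hl_vm_ctx_init(ctx);
}

static inline void example_ctx_destroy_vm(struct hl_ctx *ctx)
{
        /* unmaps leftovers (if any), frees page packs and the VA ranges */
        hl_vm_ctx_fini(ctx);
}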
1655 
1656 /*
1657  * hl_vm_init           - initialize virtual memory module
1658  *
1659  * @hdev                : pointer to the habanalabs device structure
1660  *
1661  * This function initializes the following:
1662  * - DRAM physical pages pool, with a chunk size of one DRAM page (2MB)
1663  * - Idr for device memory allocation handles
1664  * - DRAM usage counter
1665  */
1666 int hl_vm_init(struct hl_device *hdev)
1667 {
1668         struct asic_fixed_properties *prop = &hdev->asic_prop;
1669         struct hl_vm *vm = &hdev->vm;
1670         int rc;
1671 
1672         vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
1673         if (!vm->dram_pg_pool) {
1674                 dev_err(hdev->dev, "Failed to create dram page pool\n");
1675                 return -ENOMEM;
1676         }
1677 
1678         kref_init(&vm->dram_pg_pool_refcount);
1679 
1680         rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
1681                         prop->dram_end_address - prop->dram_user_base_address,
1682                         -1);
1683 
1684         if (rc) {
1685                 dev_err(hdev->dev,
1686                         "Failed to add memory to dram page pool %d\n", rc);
1687                 goto pool_add_err;
1688         }
1689 
1690         spin_lock_init(&vm->idr_lock);
1691         idr_init(&vm->phys_pg_pack_handles);
1692 
1693         atomic64_set(&hdev->dram_used_mem, 0);
1694 
1695         vm->init_done = true;
1696 
1697         return 0;
1698 
1699 pool_add_err:
1700         gen_pool_destroy(vm->dram_pg_pool);
1701 
1702         return rc;
1703 }
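
/*
 * Sketch (illustrative only): how a DRAM page is taken from and returned to
 * the genalloc pool that hl_vm_init() creates. The real allocation path is
 * alloc_device_memory() in this file; the helpers below only show the bare
 * gen_pool_alloc()/gen_pool_free() calls and their names are hypothetical.
 */
static inline u64 example_dram_page_alloc(struct hl_device *hdev)
{
        /* returns 0 if the pool has no free chunk of this size */
        return (u64) gen_pool_alloc(hdev->vm.dram_pg_pool,
                                hdev->asic_prop.dram_page_size);
}

static inline void example_dram_page_free(struct hl_device *hdev, u64 paddr)
{
        gen_pool_free(hdev->vm.dram_pg_pool, paddr,
                        hdev->asic_prop.dram_page_size);
}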
1704 
1705 /*
1706  * hl_vm_fini           - virtual memory module teardown
1707  *
1708  * @hdev                : pointer to the habanalabs device structure
1709  *
1710  * This function performs teardown of the following:
1711  * - DRAM physical pages pool of 2MB pages (expected to be released here, as
1712  *   all contexts and hence all DRAM allocations should be gone by this point)
1713  * - The module's init flag
1714  */
1715 void hl_vm_fini(struct hl_device *hdev)
1716 {
1717         struct hl_vm *vm = &hdev->vm;
1718 
1719         if (!vm->init_done)
1720                 return;
1721 
1722         /*
1723          * At this point all the contexts should be freed and hence no DRAM
1724          * memory should be in use, so the DRAM pool should be freed here.
1725          */
1726         if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
1727                 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
1728                                 __func__);
1729 
1730         vm->init_done = false;
1731 }
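
/*
 * Sketch (illustrative only): the DRAM pool is protected by a kref so it can
 * stay alive as long as anything still holds pages from it. A holder takes a
 * reference while it owns pages and drops it when they are returned;
 * kref_put() returns 1 only when the last reference is dropped and
 * dram_pg_pool_do_release() has actually run, which is what hl_vm_fini()
 * checks above. The helper names are hypothetical.
 */
static inline void example_dram_pool_get(struct hl_vm *vm)
{
        kref_get(&vm->dram_pg_pool_refcount);
}

static inline int example_dram_pool_put(struct hl_vm *vm)
{
        /* 1 means this was the last reference and the pool was destroyed */
        return kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release);
}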
