drivers/tee/tee_shm.c


DEFINITIONS

This source file includes the following definitions.
  1. tee_shm_release
  2. tee_shm_op_map_dma_buf
  3. tee_shm_op_unmap_dma_buf
  4. tee_shm_op_release
  5. tee_shm_op_map
  6. tee_shm_op_mmap
  7. __tee_shm_alloc
  8. tee_shm_alloc
  9. tee_shm_priv_alloc
  10. tee_shm_register
  11. tee_shm_get_fd
  12. tee_shm_free
  13. tee_shm_va2pa
  14. tee_shm_pa2va
  15. tee_shm_get_va
  16. tee_shm_get_pa
  17. tee_shm_get_from_id
  18. tee_shm_put

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->teedev;

	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	if (shm->ctx)
		list_del(&shm->link);
	mutex_unlock(&teedev->mutex);

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		size_t n;
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d\n", shm, rc);

		for (n = 0; n < shm->num_pages; n++)
			put_page(shm->pages[n]);

		kfree(shm->pages);
	}

	if (shm->ctx)
		teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

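/*
 * The dma-buf ops below exist mainly so that shared memory can be
 * handed to user space as a file descriptor and mmap()ed there.
 * Mapping the buffer into a device via map_dma_buf() is not
 * supported, which is why those callbacks are stubs.
 */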
static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}

static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by an application */
	if (shm->flags & TEE_SHM_REGISTER)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.map = tee_shm_op_map,
	.mmap = tee_shm_op_mmap,
};

static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
				       struct tee_device *teedev,
				       size_t size, u32 flags)
{
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (ctx && ctx->teedev != teedev) {
		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->teedev = teedev;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	if (ctx) {
		teedev_ctx_get(ctx);
		mutex_lock(&teedev->mutex);
		list_add_tail(&shm->link, &ctx->list_shm);
		mutex_unlock(&teedev->mutex);
	}

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 * @flags:	Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF is set, global shared memory will be allocated
 * and associated with a dma-buf handle, else driver private memory.
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
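
/*
 * Example (illustrative sketch only; assumes an already-open
 * struct tee_context *ctx and a driver-defined payload in 'data'):
 *
 *	struct tee_shm *shm;
 *	void *va;
 *
 *	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *
 *	va = tee_shm_get_va(shm, 0);
 *	if (IS_ERR(va)) {
 *		tee_shm_free(shm);
 *		return PTR_ERR(va);
 *	}
 *	// fill the buffer before passing it to the TEE
 *	memcpy(va, data, data_len);
 *
 *	tee_shm_free(shm);
 */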

struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
{
	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);

struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->teedev = teedev;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err:
	if (shm) {
		size_t n;

		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		if (shm->pages) {
			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
			kfree(shm->pages);
		}
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
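
/*
 * Example (illustrative sketch only; 'uaddr' and 'len' are assumed to
 * describe a user space buffer, e.g. from a TEE_IOC_SHM_REGISTER
 * request in the ioctl path):
 *
 *	struct tee_shm *shm;
 *	int fd;
 *
 *	shm = tee_shm_register(ctx, uaddr, len,
 *			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *
 *	fd = tee_shm_get_fd(shm);	// fd for user space, or < 0 on error
 */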

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
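
/*
 * Example (illustrative): a dma-buf backed allocation may outlive
 * tee_shm_free(), since user space can still hold a file descriptor
 * obtained from tee_shm_get_fd():
 *
 *	fd = tee_shm_get_fd(shm);	// dmabuf refcount is now 2
 *	tee_shm_free(shm);		// drops to 1, memory stays mapped
 *	// tee_shm_release() runs when user space closes the last fd
 */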

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);
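
/*
 * Example (illustrative round trip on a TEE_SHM_MAPPED allocation):
 *
 *	phys_addr_t pa;
 *	void *va = tee_shm_get_va(shm, 0);
 *
 *	if (!IS_ERR(va) && !tee_shm_va2pa(shm, va, &pa)) {
 *		void *va2;
 *
 *		if (!tee_shm_pa2va(shm, pa, &va2))
 *			WARN_ON(va2 != va);	// same offset, same address
 *	}
 */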

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
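
/*
 * Example (illustrative; 'msg' and its 'buf_ptr' field are a
 * hypothetical driver-to-TEE message layout, 'offs' is assumed to be
 * within the allocation):
 *
 *	phys_addr_t pa;
 *
 *	if (tee_shm_get_pa(shm, offs, &pa))
 *		return -EINVAL;
 *	msg->buf_ptr = pa;	// tell the TEE where the buffer lives
 */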

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
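
/*
 * Example (illustrative lookup/put pairing, e.g. when handling a
 * request from the TEE that refers to shared memory by id):
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	// ... access the memory via tee_shm_get_va()/tee_shm_get_pa() ...
 *	tee_shm_put(shm);
 */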
