root/drivers/misc/cxl/api.c


DEFINITIONS

This source file includes the following definitions.
  1. cxl_fs_init_fs_context
  2. cxl_release_mapping
  3. cxl_getfile
  4. cxl_dev_context_init
  5. cxl_get_context
  6. cxl_release_context
  7. cxl_find_afu_irq
  8. cxl_set_priv
  9. cxl_get_priv
  10. cxl_allocate_afu_irqs
  11. cxl_free_afu_irqs
  12. cxl_map_afu_irq
  13. cxl_unmap_afu_irq
  14. cxl_start_context
  15. cxl_process_element
  16. cxl_stop_context
  17. cxl_set_master
  18. cxl_fd_open
  19. cxl_fd_release
  20. cxl_fd_ioctl
  21. cxl_fd_mmap
  22. cxl_fd_poll
  23. cxl_fd_read
  24. cxl_get_fd
  25. cxl_fops_get_context
  26. cxl_set_driver_ops
  27. cxl_context_events_pending
  28. cxl_start_work
  29. cxl_psa_map
  30. cxl_psa_unmap
  31. cxl_afu_reset
  32. cxl_perst_reloads_same_image
  33. cxl_read_adapter_vpd

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <linux/irqdomain.h>

#include "cxl.h"
/*
 * Since we want to track memory mappings to be able to force-unmap
 * when the AFU is no longer reachable, we need an inode. For devices
 * opened through the cxl user API, this is not a problem, but a
 * userland process can also get a cxl fd through the cxl_get_fd()
 * API, which is used by the cxlflash driver.
 *
 * Therefore we implement our own simple pseudo-filesystem and inode
 * allocator. We don't use the anonymous inode, as we need the
 * meta-data associated with it (address_space) and it is shared by
 * other drivers/processes, so it could lead to cxl unmapping VMAs
 * from random processes.
 */

#define CXL_PSEUDO_FS_MAGIC     0x1697697f

static int cxl_fs_cnt;
static struct vfsmount *cxl_vfs_mount;

static int cxl_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type cxl_fs_type = {
        .name           = "cxl",
        .owner          = THIS_MODULE,
        .init_fs_context = cxl_fs_init_fs_context,
        .kill_sb        = kill_anon_super,
};


void cxl_release_mapping(struct cxl_context *ctx)
{
        if (ctx->kernelapi && ctx->mapping)
                simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
}

static struct file *cxl_getfile(const char *name,
                                const struct file_operations *fops,
                                void *priv, int flags)
{
        struct file *file;
        struct inode *inode;
        int rc;

        /* strongly inspired by anon_inode_getfile() */

        if (fops->owner && !try_module_get(fops->owner))
                return ERR_PTR(-ENOENT);

        rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
        if (rc < 0) {
                pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
                file = ERR_PTR(rc);
                goto err_module;
        }

        inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                file = ERR_CAST(inode);
                goto err_fs;
        }

        file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
                                 flags & (O_ACCMODE | O_NONBLOCK), fops);
        if (IS_ERR(file))
                goto err_inode;

        file->private_data = priv;

        return file;

err_inode:
        iput(inode);
err_fs:
        simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
err_module:
        module_put(fops->owner);
        return file;
}

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
        struct cxl_afu *afu;
        struct cxl_context *ctx;
        int rc;

        afu = cxl_pci_to_afu(dev);
        if (IS_ERR(afu))
                return ERR_CAST(afu);

        ctx = cxl_context_alloc();
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->kernelapi = true;

        /* Make it a slave context.  We can promote it later? */
        rc = cxl_context_init(ctx, afu, false);
        if (rc)
                goto err_ctx;

        return ctx;

err_ctx:
        kfree(ctx);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
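
/*
 * Example (illustrative sketch, not part of the driver): a kernel user
 * such as cxlflash typically brackets its use of a context with this
 * call and cxl_release_context() below:
 *
 *	ctx = cxl_dev_context_init(pdev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	cxl_release_context(ctx);	// fails with -EBUSY once STARTED
 */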

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
        return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
        if (ctx->status >= STARTED)
                return -EBUSY;

        cxl_context_free(ctx);

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

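/*
 * Translate a per-context AFU interrupt number into a hardware irq
 * number by walking the context's allocated irq ranges. For example
 * (hypothetical values), with ranges {offset = 0x100, range = 1} and
 * {offset = 0x200, range = 4}, num = 2 falls into the second range and
 * maps to hwirq 0x201. Returns 0 if num is out of range.
 */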
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
        __u16 range;
        int r;

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                range = ctx->irqs.range[r];
                if (num < range)
                        return ctx->irqs.offset[r] + num;
                num -= range;
        }
        return 0;
}


int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
        if (!ctx)
                return -EINVAL;

        ctx->priv = priv;

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
        if (!ctx)
                return ERR_PTR(-EINVAL);

        return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
        int res;
        irq_hw_number_t hwirq;

        if (num == 0)
                num = ctx->afu->pp_irqs;
        res = afu_allocate_irqs(ctx, num);
        if (res)
                return res;

        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                /*
                 * In a guest, the PSL interrupt is not multiplexed. It was
                 * allocated above, and we need to set its handler.
                 */
                hwirq = cxl_find_afu_irq(ctx, 0);
                if (hwirq)
                        cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
        }

        if (ctx->status == STARTED) {
                if (cxl_ops->update_ivtes)
                        cxl_ops->update_ivtes(ctx);
                else
                        WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
        }

        return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;

        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                hwirq = cxl_find_afu_irq(ctx, 0);
                if (hwirq) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, ctx);
                }
        }
        afu_irq_name_free(ctx);
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
                    irq_handler_t handler, void *cookie, char *name)
{
        irq_hw_number_t hwirq;

        /*
         * Find the interrupt we are to register.
         */
        hwirq = cxl_find_afu_irq(ctx, num);
        if (!hwirq)
                return -ENOENT;

        return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
        irq_hw_number_t hwirq;
        unsigned int virq;

        hwirq = cxl_find_afu_irq(ctx, num);
        if (!hwirq)
                return;

        virq = irq_find_mapping(NULL, hwirq);
        if (virq)
                cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context.
 * The code here is similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
                      struct task_struct *task)
{
        int rc = 0;
        bool kernel = true;

        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        mutex_lock(&ctx->status_mutex);
        if (ctx->status == STARTED)
                goto out; /* already started */

        /*
         * Increment the mapped context count for adapter. This also checks
         * if adapter_context_lock is taken.
         */
        rc = cxl_adapter_context_get(ctx->afu->adapter);
        if (rc)
                goto out;

        if (task) {
                ctx->pid = get_task_pid(task, PIDTYPE_PID);
                kernel = false;

                /* acquire a reference to the task's mm */
                ctx->mm = get_task_mm(current);

                /* ensure this mm_struct can't be freed */
                cxl_context_mm_count_get(ctx);

                if (ctx->mm) {
                        /* decrement the use count from above */
                        mmput(ctx->mm);
                        /* make TLBIs for this context global */
                        mm_context_add_copro(ctx->mm);
                }
        }

        /*
         * Increment driver use count. Enables global TLBIs for hash
         * and callbacks to handle the segment table.
         */
        cxl_ctx_get();

        /* See the comment in afu_ioctl_start_work() */
        smp_mb();

        rc = cxl_ops->attach_process(ctx, kernel, wed, 0);
        if (rc) {
                put_pid(ctx->pid);
                ctx->pid = NULL;
                cxl_adapter_context_put(ctx->afu->adapter);
                cxl_ctx_put();
                if (task) {
                        cxl_context_mm_count_put(ctx);
                        if (ctx->mm)
                                mm_context_remove_copro(ctx->mm);
                }
                goto out;
        }

        ctx->status = STARTED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);

int cxl_process_element(struct cxl_context *ctx)
{
        return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
        return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
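
/*
 * Example (illustrative sketch, not part of the driver): the usual
 * kernel API lifecycle around the calls above, assuming a hypothetical
 * handler my_irq_handler() and a work element descriptor `wed`:
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 0);	// 0: use afu->pp_irqs
 *	rc = cxl_map_afu_irq(ctx, 1, my_irq_handler, ctx, "my_afu");
 *	rc = cxl_start_context(ctx, wed, NULL);	// NULL task: kernel context
 *	...
 *	cxl_stop_context(ctx);
 *	cxl_unmap_afu_irq(ctx, 1, ctx);
 *	cxl_free_afu_irqs(ctx);
 */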

void cxl_set_master(struct cxl_context *ctx)
{
        ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
        return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
        return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
        return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
        return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
                        loff_t *off)
{
        return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
                        int *fd)
{
        struct file *file;
        int rc, flags, fdtmp;
        char *name = NULL;

        /* only allow one per context */
        if (ctx->mapping)
                return ERR_PTR(-EEXIST);

        flags = O_RDWR | O_CLOEXEC;

        /* This code is similar to anon_inode_getfd() */
        rc = get_unused_fd_flags(flags);
        if (rc < 0)
                return ERR_PTR(rc);
        fdtmp = rc;

        /*
         * Patch the file ops.  Needs to be careful that this is reentrant safe.
         */
        if (fops) {
                PATCH_FOPS(open);
                PATCH_FOPS(poll);
                PATCH_FOPS(read);
                PATCH_FOPS(release);
                PATCH_FOPS(unlocked_ioctl);
                PATCH_FOPS(compat_ioctl);
                PATCH_FOPS(mmap);
        } else {
                /* use default ops */
                fops = (struct file_operations *)&afu_fops;
        }

        name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
        file = cxl_getfile(name, fops, ctx, flags);
        kfree(name);
        if (IS_ERR(file))
                goto err_fd;

        cxl_context_set_mapping(ctx, file->f_mapping);
        *fd = fdtmp;
        return file;

err_fd:
        put_unused_fd(fdtmp);
        return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
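
/*
 * Example (illustrative sketch): the returned file is not yet wired to
 * the fd; a caller such as cxlflash installs it once its own setup has
 * succeeded (my_fops is a hypothetical driver override):
 *
 *	file = cxl_get_fd(ctx, &my_fops, &fd);
 *	...
 *	fd_install(fd, file);	// publish the fd to userspace
 */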

struct cxl_context *cxl_fops_get_context(struct file *file)
{
        return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
                        struct cxl_afu_driver_ops *ops)
{
        WARN_ON(!ops->fetch_event || !ops->event_delivered);
        atomic_set(&ctx->afu_driver_events, 0);
        ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
                                unsigned int new_events)
{
        atomic_add(new_events, &ctx->afu_driver_events);
        wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);
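
/*
 * Example (illustrative sketch, hypothetical driver): an AFU driver
 * queues its own events and wakes any poll()/read() waiters:
 *
 *	static struct cxl_afu_driver_ops my_ops = {
 *		.fetch_event     = my_fetch_event,
 *		.event_delivered = my_event_delivered,
 *	};
 *	...
 *	cxl_set_driver_ops(ctx, &my_ops);
 *	...
 *	cxl_context_events_pending(ctx, 1);	// one new event queued
 */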

int cxl_start_work(struct cxl_context *ctx,
                   struct cxl_ioctl_start_work *work)
{
        int rc;

        /* code taken from afu_ioctl_start_work */
        if (!(work->flags & CXL_START_WORK_NUM_IRQS)) {
                work->num_interrupts = ctx->afu->pp_irqs;
        } else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
                   (work->num_interrupts > ctx->afu->irqs_max)) {
                return -EINVAL;
        }

        rc = afu_register_irqs(ctx, work->num_interrupts);
        if (rc)
                return rc;

        rc = cxl_start_context(ctx, work->work_element_descriptor, current);
        if (rc < 0) {
                afu_release_irqs(ctx, ctx);
                return rc;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
        if (ctx->status != STARTED)
                return NULL;

        pr_devel("%s: psn_phys:%llx size:%llx\n",
                __func__, ctx->psn_phys, ctx->psn_size);
        return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
        iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
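
/*
 * Example (illustrative sketch): MMIO access to the problem state area,
 * only valid while the context is STARTED; MY_REG_OFF is a hypothetical
 * AFU-specific register offset:
 *
 *	void __iomem *psa = cxl_psa_map(ctx);
 *
 *	if (psa) {
 *		out_be64(psa + MY_REG_OFF, val);
 *		cxl_psa_unmap(psa);
 *	}
 */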

int cxl_afu_reset(struct cxl_context *ctx)
{
        struct cxl_afu *afu = ctx->afu;
        int rc;

        rc = cxl_ops->afu_reset(afu);
        if (rc)
                return rc;

        return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
                                  bool perst_reloads_same_image)
{
        afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);

        if (IS_ERR(afu))
                return -ENODEV;

        return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
