drivers/s390/cio/vfio_ccw_drv.c


DEFINITIONS

This source file includes the following definitions:
  1. vfio_ccw_sch_quiesce
  2. vfio_ccw_sch_io_todo
  3. vfio_ccw_sch_irq
  4. vfio_ccw_sch_probe
  5. vfio_ccw_sch_remove
  6. vfio_ccw_sch_shutdown
  7. vfio_ccw_sch_event
  8. vfio_ccw_debug_init
  9. vfio_ccw_debug_exit
  10. vfio_ccw_sch_init
  11. vfio_ccw_sch_exit

// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
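/*
 * Quiesce the subchannel: if it is still enabled, try to disable it.
 * While outstanding I/O keeps the disable busy, repeatedly issue
 * cancel/halt/clear, wait (with a timeout) for the completion signalled
 * from interrupt context, and retry the disable. The subchannel ends up
 * in the NOT_OPER state either way.
 */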
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        spin_lock_irq(sch->lock);
        if (!sch->schib.pmcw.ena)
                goto out_unlock;
        ret = cio_disable_subchannel(sch);
        if (ret != -EBUSY)
                goto out_unlock;

        iretry = 255;
        do {

                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);
out_unlock:
        private->state = VFIO_CCW_STATE_NOT_OPER;
        spin_unlock_irq(sch->lock);
        return ret;
}

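/*
 * Bottom half run on vfio_ccw_work_q: update the channel program from
 * the status in the IRB, copy the IRB into the I/O region for
 * userspace, and signal the I/O eventfd if one is registered.
 */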
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
                        cp_free(&private->cp);
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}

/*
 * Css driver callbacks
 */
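/* Interrupt handler: account the interrupt and feed it into the FSM. */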
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

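/*
 * Probe: allocate the per-subchannel private data, the guest channel
 * program buffer and the I/O and async command regions, enable the
 * subchannel on the VFIO ISC, and register with the mediated device
 * framework (vfio_ccw_mdev_reg) so userspace can create mdevs for it.
 */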
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free;

        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
        mutex_init(&private->io_mutex);

        spin_lock_irq(sch->lock);
        private->state = VFIO_CCW_STATE_NOT_OPER;
        sch->isc = VFIO_CCW_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        spin_unlock_irq(sch->lock);
        if (ret)
                goto out_free;

        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;

        ret = vfio_ccw_mdev_reg(sch);
        if (ret)
                goto out_disable;

        if (dev_get_uevent_suppress(&sch->dev)) {
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_disable:
        cio_disable_subchannel(sch);
out_free:
        dev_set_drvdata(&sch->dev, NULL);
        if (private->cmd_region)
                kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        if (private->io_region)
                kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private->cp.guest_cp);
        kfree(private);
        return ret;
}

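/*
 * Remove: quiesce the subchannel, unregister from the mediated device
 * framework, and release everything allocated in probe.
 */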
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        vfio_ccw_sch_quiesce(sch);

        vfio_ccw_mdev_unreg(sch);

        dev_set_drvdata(&sch->dev, NULL);

        kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private->cp.guest_cp);
        kfree(private);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
                rc = 0;
                goto out_unlock;
        }

        private = dev_get_drvdata(&sch->dev);
        if (private->state == VFIO_CCW_STATE_NOT_OPER) {
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
        rc = 0;

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}

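/* This driver binds to I/O subchannels only. */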
static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
};

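/*
 * Set up the two s390 debug feature areas: a sprintf-formatted message
 * area and a hex/ASCII trace area, both starting at debug level 2.
 */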
static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

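/*
 * Module init: set up debugging, the I/O workqueue and the usercopy
 * slab caches for the I/O and async command regions, register the
 * VFIO ISC, and finally register the css driver. Any failure unwinds
 * whatever was set up before it.
 */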
static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_err;
        }

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_err;
        }

        return ret;

out_err:
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
        kmem_cache_destroy(vfio_ccw_io_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");
