/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
	long (*fn)(struct vfio_group *, unsigned long);
	long ret;

	fn = symbol_get(vfio_external_check_extension);
	if (!fn)
		return false;

	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

	symbol_put(vfio_external_check_extension);

	return ret > 0;
}
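/*
 * The wrappers above resolve the vfio symbols at runtime via
 * symbol_get()/symbol_put() rather than calling them directly, so kvm does
 * not take a hard module dependency on vfio.  If vfio is not available,
 * symbol_get() fails and the wrappers degrade gracefully: group lookup
 * returns ERR_PTR(-EINVAL) and coherency is reported as false.
 */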
/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		kvm_arch_start_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			list_del(&kvg->node);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			ret = 0;
			break;
		}

		kvm_arch_end_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_group_put_external_user(vfio_group);

		kvm_vfio_update_coherency(dev);

		return ret;
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
			return 0;
		}

		break;
	}

	return -ENXIO;
}
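/*
 * Userspace view (illustrative sketch only, not part of this file): the
 * set_attr/has_attr handlers above are reached via KVM_CREATE_DEVICE on a
 * VM fd followed by KVM_HAS_DEVICE_ATTR/KVM_SET_DEVICE_ATTR on the
 * resulting device fd.  The VFIO group path below is hypothetical, error
 * handling is omitted, and vm_fd is assumed to be an open KVM VM fd.
 *
 *	int32_t group_fd = open("/dev/vfio/26", O_RDWR);
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr  = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr  = (__u64)(unsigned long)&group_fd,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		// cd.fd is the device fd
 *	if (!ioctl(cd.fd, KVM_HAS_DEVICE_ATTR, &attr))	// attr supported?
 *		ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	// add the group
 */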
static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}
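/*
 * Note: kvm_vfio_ops_init()/kvm_vfio_ops_exit() are declared in the local
 * "vfio.h" and are intended to be called once from kvm's own module
 * init/exit path, which makes KVM_DEV_TYPE_VFIO available to
 * KVM_CREATE_DEVICE.  Because kvm_vfio_create() walks dev->kvm->devices
 * looking for an existing kvm-vfio device, a second KVM_CREATE_DEVICE of
 * this type on the same VM fails with -EBUSY.
 */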