/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>

struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;
	unsigned int nr_evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;
};

struct user_evtchn {
	struct rb_node node;
	struct per_user_data *user;
	unsigned port;
	bool enabled;
};

static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
{
	evtchn_port_t *ring;
	size_t s = size * sizeof(*ring);

	ring = kmalloc(s, GFP_KERNEL);
	if (!ring)
		ring = vmalloc(s);

	return ring;
}

static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}

static unsigned int evtchn_ring_offset(struct per_user_data *u,
				       unsigned int idx)
{
	return idx & (u->ring_size - 1);
}

static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
					unsigned int idx)
{
	return u->ring + evtchn_ring_offset(u, idx);
}

static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;

	u->nr_evtchns++;

	while (*new) {
		struct user_evtchn *this;

		this = container_of(*new, struct user_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &((*new)->rb_left);
		else if (this->port > evtchn->port)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, &u->evtchns);

	return 0;
}

static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
	u->nr_evtchns--;
	rb_erase(&evtchn->node, &u->evtchns);
	kfree(evtchn);
}

static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
	struct rb_node *node = u->evtchns.rb_node;

	while (node) {
		struct user_evtchn *evtchn;

		evtchn = container_of(node, struct user_evtchn, node);

		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;

	WARN(!evtchn->enabled,
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	disable_irq_nosync(irq);
	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}

static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
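	/*
	 * (Each element is one 32-bit evtchn_port_t; a trailing
	 * partial port would desynchronise the stream, so the
	 * remainder of the caller's byte count is dropped.)
	 */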
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}

static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			enable_irq(irq_from_evtchn(port));
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}

static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events, i.e. one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	new_ring = evtchn_alloc_ring(new_size);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and the new index
	 * pointing into the second half, simply copy the old contents
	 * twice.
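	 * (This works because ring_cons and ring_prod are free-running
	 * indices reduced modulo the ring size: both halves of the new
	 * ring hold the old contents, so an index masked by the larger
	 * size still selects the entry it selected before.)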
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	evtchn_free_ring(old_ring);

	return 0;
}

static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	struct user_evtchn *evtchn;
	struct evtchn_close close;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
				       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port);
	return rc;

err:
	/* bind failed, should close the port now */
	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
	del_evtchn(u, evtchn);
	return rc;
}

static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}

static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
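		/*
		 * Ask Xen to allocate a fresh unbound port in this
		 * domain, which bind.remote_domain may later connect
		 * its end of the channel to.
		 */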
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= xen_evtchn_nr_channels())
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, unbind.port);
		if (!evtchn)
			break;

		disable_irq(irq_from_evtchn(unbind.port));
		evtchn_unbind_from_user(u, evtchn);
		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify notify;
		struct user_evtchn *evtchn;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		rc = -ENOTCONN;
		evtchn = find_evtchn(u, notify.port);
		if (evtchn) {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&u->ring_prod_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&u->ring_prod_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}
	mutex_unlock(&u->bind_mutex);

	return rc;
}

static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}

static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;

	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}

static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL) {
		kfree(u);
		return -ENOMEM;
	}

	init_waitqueue_head(&u->evtchn_wait);

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);
	spin_lock_init(&u->ring_prod_lock);

	filp->private_data = u;

	return nonseekable_open(inode, filp);
}

static int evtchn_release(struct inode *inode, struct file *filp)
{
	struct per_user_data *u = filp->private_data;
	struct rb_node *node;

	while ((node = u->evtchns.rb_node)) {
		struct user_evtchn *evtchn;

		evtchn = rb_entry(node, struct user_evtchn, node);
		disable_irq(irq_from_evtchn(evtchn->port));
		evtchn_unbind_from_user(u, evtchn);
	}

	evtchn_free_ring(u->ring);
	kfree(u->name);
	kfree(u);

	return 0;
}

static const struct file_operations evtchn_fops = {
	.owner   = THIS_MODULE,
	.read    = evtchn_read,
	.write   = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll    = evtchn_poll,
	.fasync  = evtchn_fasync,
	.open    = evtchn_open,
	.release = evtchn_release,
	.llseek  = no_llseek,
};

static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "xen/evtchn",
	.fops  = &evtchn_fops,
};

static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	/* Create '/dev/xen/evtchn'. */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		pr_err("Could not register /dev/xen/evtchn\n");
		return err;
	}

	pr_info("Event-channel device installed\n");

	return 0;
}

static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");
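
/*
 * Userspace usage sketch (illustration only, not part of the driver).
 * It assumes the matching UAPI definitions (IOCTL_EVTCHN_BIND_UNBOUND_PORT,
 * struct ioctl_evtchn_bind_unbound_port, evtchn_port_t) from this kernel's
 * xen/evtchn.h UAPI header; handle_event() is a hypothetical helper.
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
 *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *
 *	for (;;) {
 *		evtchn_port_t pending;
 *
 *		if (read(fd, &pending, sizeof(pending)) != sizeof(pending))
 *			break;
 *		handle_event(pending);
 *		write(fd, &pending, sizeof(pending));
 *	}
 *
 * Each read() returns pending port numbers and leaves those ports masked
 * (see evtchn_interrupt() above); writing a port number back re-enables
 * delivery, mirroring the enable_irq() path in evtchn_write().
 */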