root/drivers/xen/xenbus/xenbus_client.c


DEFINITIONS

This source file includes the following definitions:
  1. xenbus_strstate
  2. xenbus_watch_path
  3. xenbus_watch_pathfmt
  4. __xenbus_switch_state
  5. xenbus_switch_state
  6. xenbus_frontend_closed
  7. xenbus_va_dev_error
  8. xenbus_dev_error
  9. xenbus_dev_fatal
  10. xenbus_switch_fatal
  11. xenbus_grant_ring
  12. xenbus_alloc_evtchn
  13. xenbus_free_evtchn
  14. xenbus_map_ring_valloc
  15. __xenbus_map_ring
  16. xenbus_map_ring_setup_grant_hvm
  17. xenbus_map_ring_valloc_hvm
  18. xenbus_map_ring
  19. xenbus_unmap_ring_vfree
  20. xenbus_map_ring_valloc_pv
  21. xenbus_unmap_ring_vfree_pv
  22. xenbus_unmap_ring_setup_grant_hvm
  23. xenbus_unmap_ring_vfree_hvm
  24. xenbus_unmap_ring
  25. xenbus_read_driver_state
  26. xenbus_ring_ops_init

/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)   (DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES   (XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
        struct list_head next;
        union {
                struct {
                        struct vm_struct *area;
                } pv;
                struct {
                        struct page *pages[XENBUS_MAX_RING_PAGES];
                        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
                        void *addr;
                } hvm;
        };
        grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
        unsigned int   nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
        int (*map)(struct xenbus_device *dev,
                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
                   void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
        static const char *const name[] = {
                [ XenbusStateUnknown      ] = "Unknown",
                [ XenbusStateInitialising ] = "Initialising",
                [ XenbusStateInitWait     ] = "InitWait",
                [ XenbusStateInitialised  ] = "Initialised",
                [ XenbusStateConnected    ] = "Connected",
                [ XenbusStateClosing      ] = "Closing",
                [ XenbusStateClosed       ] = "Closed",
                [XenbusStateReconfiguring] = "Reconfiguring",
                [XenbusStateReconfigured] = "Reconfigured",
        };
        return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
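
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * otherend-changed handler can use xenbus_strstate() to log readable
 * state names.  "frontend_changed" is a hypothetical callback.
 *
 *      static void frontend_changed(struct xenbus_device *dev,
 *                                   enum xenbus_state frontend_state)
 *      {
 *              dev_dbg(&dev->dev, "%s -> %s\n", dev->otherend,
 *                      xenbus_strstate(frontend_state));
 *      }
 */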

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
                      void (*callback)(struct xenbus_watch *,
                                       const char *, const char *))
{
        int err;

        watch->node = path;
        watch->callback = callback;

        err = register_xenbus_watch(watch);

        if (err) {
                watch->node = NULL;
                watch->callback = NULL;
                xenbus_dev_fatal(dev, err, "adding watch on %s", path);
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
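
/*
 * Example (illustrative only): a frontend registering a watch on the
 * other end's directory.  "info" and "backend_changed" are hypothetical
 * driver names; dev->otherend stays valid while the device exists, so
 * no extra lifetime management of the path is needed here.
 *
 *      static void backend_changed(struct xenbus_watch *watch,
 *                                  const char *path, const char *token)
 *      {
 *              pr_info("%s changed\n", path);
 *      }
 *
 *      err = xenbus_watch_path(dev, dev->otherend, &info->watch,
 *                              backend_changed);
 *      if (err)
 *              return err;
 *
 * On failure, xenbus_watch_path() has already reported the error via
 * xenbus_dev_fatal().
 */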

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path constructed from @pathfmt and its arguments,
 * using the given xenbus_watch structure for storage, and the given
 * @callback function as the callback.  Return 0 on success, or -errno on
 * error.  On success, the generated path will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, @watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
                         void (*callback)(struct xenbus_watch *,
                                          const char *, const char *),
                         const char *pathfmt, ...)
{
        int err;
        va_list ap;
        char *path;

        va_start(ap, pathfmt);
        path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
        va_end(ap);

        if (!path) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                return -ENOMEM;
        }
        err = xenbus_watch_path(dev, path, watch, callback);

        if (err)
                kfree(path);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
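
/*
 * Example (illustrative only): the same pattern with a formatted path.
 * On success the generated path is saved in watch->node and is the
 * caller's to kfree() after unregistering.  "info" and
 * "hotplug_status_changed" are hypothetical.
 *
 *      err = xenbus_watch_pathfmt(dev, &info->hotplug_watch,
 *                                 hotplug_status_changed,
 *                                 "%s/%s", dev->nodename, "hotplug-status");
 */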

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
                      enum xenbus_state state, int depth)
{
        /* We check whether the state is currently set to the given value, and
           if not, then the state is set.  We don't want to unconditionally
           write the given state, because we don't want to fire watches
           unnecessarily.  Furthermore, if the node has gone, we don't write
           to it, as the device will be tearing down, and we don't want to
           resurrect that directory.

           Note that, because of this cached value of our state, this
           function will not take a caller's Xenstore transaction
           (something it attempted to do in the past) because dev->state
           would not get reset if the transaction was aborted.
         */

        struct xenbus_transaction xbt;
        int current_state;
        int err, abort;

        if (state == dev->state)
                return 0;

again:
        abort = 1;

        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "starting transaction");
                return 0;
        }

        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        if (err != 1)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "writing new state");
                goto abort;
        }

        abort = 0;
abort:
        err = xenbus_transaction_end(xbt, abort);
        if (err) {
                if (err == -EAGAIN && !abort)
                        goto again;
                xenbus_switch_fatal(dev, depth, err, "ending transaction");
        } else
                dev->state = state;

        return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
        return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
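
/*
 * Example (illustrative only): a typical frontend moves through the
 * handshake by advertising each state in the store.  Errors are already
 * reported internally, so callers usually just propagate the return value.
 *
 *      err = xenbus_switch_state(dev, XenbusStateInitialised);
 *      if (err)
 *              return err;
 */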

int xenbus_frontend_closed(struct xenbus_device *dev)
{
        xenbus_switch_state(dev, XenbusStateClosed);
        complete(&dev->down);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                const char *fmt, va_list ap)
{
        unsigned int len;
        char *printf_buffer;
        char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (!printf_buffer)
                return;

        len = sprintf(printf_buffer, "%i ", -err);
        vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
        if (path_buffer)
                xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

        kfree(printf_buffer);
        kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
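
/*
 * Example (illustrative only): the usual error-handling shape in a
 * xenbus driver's setup path.  xenbus_dev_fatal() logs the failure,
 * records it under the device's error/ node in the store, and schedules
 * an orderly close; "xbt" is a xenbus_transaction from the surrounding
 * code.
 *
 *      err = xenbus_transaction_start(&xbt);
 *      if (err) {
 *              xenbus_dev_fatal(dev, err, "starting transaction");
 *              return err;
 *      }
 */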

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        if (!depth)
                __xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
                      unsigned int nr_pages, grant_ref_t *grefs)
{
        int err;
        int i, j;

        for (i = 0; i < nr_pages; i++) {
                err = gnttab_grant_foreign_access(dev->otherend_id,
                                                  virt_to_gfn(vaddr), 0);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "granting access to ring page");
                        goto fail;
                }
                grefs[i] = err;

                vaddr = vaddr + XEN_PAGE_SIZE;
        }

        return 0;

fail:
        for (j = 0; j < i; j++)
                gnttab_end_foreign_access_ref(grefs[j], 0);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
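
/*
 * Example (illustrative only): sharing a one-page ring with the peer.
 * The page comes from the normal allocator; on failure the error has
 * already been reported via xenbus_dev_fatal() by xenbus_grant_ring().
 *
 *      grant_ref_t gref;
 *      void *ring = (void *)get_zeroed_page(GFP_KERNEL);
 *
 *      if (!ring)
 *              return -ENOMEM;
 *      err = xenbus_grant_ring(dev, ring, 1, &gref);
 *      if (err < 0) {
 *              free_page((unsigned long)ring);
 *              return err;
 *      }
 */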

/**
 * xenbus_alloc_evtchn
 * @dev: xenbus device
 * @port: event channel port to be filled in
 *
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *@port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err)
                xenbus_dev_fatal(dev, err, "allocating event channel");
        else
                *port = alloc_unbound.port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
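
/*
 * Example (illustrative only): allocating an event channel and binding
 * it to an interrupt handler via <xen/events.h>.  "ring_interrupt" and
 * "info" are hypothetical driver names.
 *
 *      int evtchn, irq;
 *
 *      err = xenbus_alloc_evtchn(dev, &evtchn);
 *      if (err)
 *              return err;
 *      irq = bind_evtchn_to_irqhandler(evtchn, ring_interrupt, 0,
 *                                      "mydev-ring", info);
 *      if (irq < 0) {
 *              xenbus_free_evtchn(dev, evtchn);
 *              return irq;
 *      }
 */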

/**
 * xenbus_free_evtchn
 * @dev: xenbus device
 * @port: event channel port to close
 *
 * Free an existing event channel.  Return 0 on success, or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
        struct evtchn_close close;
        int err;

        close.port = port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (err)
                xenbus_dev_error(dev, err, "freeing event channel %d", port);

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
{
        int err;

        err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
        /* Some hypervisors are buggy and can return 1. */
        if (err > 0)
                err = GNTST_general_error;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
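
/*
 * Example (illustrative only): a backend mapping a ring granted by the
 * frontend.  "ring_ref" would have been read from the store (e.g. with
 * xenbus_scanf()); the mapping is torn down again with
 * xenbus_unmap_ring_vfree().
 *
 *      void *ring;
 *
 *      err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *      if (err < 0)
 *              return err;
 *      ...
 *      xenbus_unmap_ring_vfree(dev, ring);
 */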

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
                             grant_ref_t *gnt_refs,
                             unsigned int nr_grefs,
                             grant_handle_t *handles,
                             phys_addr_t *addrs,
                             unsigned int flags,
                             bool *leaked)
{
        struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
        int err = GNTST_okay;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_grefs; i++) {
                memset(&map[i], 0, sizeof(map[i]));
                gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
                                  dev->otherend_id);
                handles[i] = INVALID_GRANT_HANDLE;
        }

        gnttab_batch_map(map, i);

        for (i = 0; i < nr_grefs; i++) {
                if (map[i].status != GNTST_okay) {
                        err = map[i].status;
                        xenbus_dev_fatal(dev, map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                } else {
                        handles[i] = map[i].handle;
                }
        }

        return GNTST_okay;

 fail:
        for (i = j = 0; i < nr_grefs; i++) {
                if (handles[i] != INVALID_GRANT_HANDLE) {
                        memset(&unmap[j], 0, sizeof(unmap[j]));
                        gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
                                            GNTMAP_host_map, handles[i]);
                        j++;
                }
        }

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
                BUG();

        *leaked = false;
        for (i = 0; i < j; i++) {
                if (unmap[i].status != GNTST_okay) {
                        *leaked = true;
                        break;
                }
        }

        return err;
}

struct map_ring_valloc_hvm {
        unsigned int idx;

        /* Why two arrays? See the comment above __xenbus_map_ring. */
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
                                            unsigned int goffset,
                                            unsigned int len,
                                            void *data)
{
        struct map_ring_valloc_hvm *info = data;
        unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

        info->phys_addrs[info->idx] = vaddr;
        info->addrs[info->idx] = vaddr;

        info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
                                      grant_ref_t *gnt_ref,
                                      unsigned int nr_grefs,
                                      void **vaddr)
{
        struct xenbus_map_node *node;
        int err;
        void *addr;
        bool leaked = false;
        struct map_ring_valloc_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;

        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm,
                             &info);

        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
                                info.phys_addrs, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;

        if (err)
                goto out_free_ballooned_pages;

        addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
                    PAGE_KERNEL);
        if (!addr) {
                err = -ENOMEM;
                goto out_xenbus_unmap_ring;
        }

        node->hvm.addr = addr;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = addr;
        return 0;

 out_xenbus_unmap_ring:
        if (!leaked)
                xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
        else
                pr_alert("leaking %p size %u page(s)\n", addr, nr_pages);
 out_free_ballooned_pages:
        if (!leaked)
                free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
        kfree(node);
        return err;
}


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: grant handle array to be filled in
 * @vaddrs: addresses to be mapped to
 * @leaked: set to true if cleanup after a failed map itself failed
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error.  If an error is returned, the device will
 * switch to XenbusStateClosing and the first error message will be saved in
 * XenStore.  Furthermore, if the map fails, the caller should check @leaked:
 * if *@leaked is true, xenbus_map_ring failed to clean up after itself, and
 * the caller must not free the address space behind @vaddrs.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                    unsigned int nr_grefs, grant_handle_t *handles,
                    unsigned long *vaddrs, bool *leaked)
{
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int i;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_grefs; i++)
                phys_addrs[i] = (unsigned long)vaddrs[i];

        return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
                                 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success, or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
        return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
                                     grant_ref_t *gnt_refs,
                                     unsigned int nr_grefs,
                                     void **vaddr)
{
        struct xenbus_map_node *node;
        struct vm_struct *area;
        pte_t *ptes[XENBUS_MAX_RING_GRANTS];
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int err = GNTST_okay;
        int i;
        bool leaked;

        *vaddr = NULL;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
        if (!area) {
                kfree(node);
                return -ENOMEM;
        }

        for (i = 0; i < nr_grefs; i++)
                phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
                                phys_addrs,
                                GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        if (err)
                goto failed;

        node->nr_handles = nr_grefs;
        node->pv.area = area;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = area->addr;
        return 0;

failed:
        if (!leaked)
                free_vm_area(area);
        else
                pr_alert("leaking VM area %p size %u page(s)\n", area, nr_grefs);

        kfree(node);
        return err;
}

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        unsigned int level;
        int i;
        bool leaked = false;
        int err;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                if (node->pv.area->addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        for (i = 0; i < node->nr_handles; i++) {
                unsigned long addr;

                memset(&unmap[i], 0, sizeof(unmap[i]));
                addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
                unmap[i].host_addr = arbitrary_virt_to_machine(
                        lookup_address(addr, &level)).maddr;
                unmap[i].dev_bus_addr = 0;
                unmap[i].handle = node->handles[i];
        }

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                BUG();

        err = GNTST_okay;
        leaked = false;
        for (i = 0; i < node->nr_handles; i++) {
                if (unmap[i].status != GNTST_okay) {
                        leaked = true;
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         node->handles[i], unmap[i].status);
                        err = unmap[i].status;
                        break;
                }
        }

        if (!leaked)
                free_vm_area(node->pv.area);
        else
                pr_alert("leaking VM area %p size %u page(s)\n",
                         node->pv.area, node->nr_handles);

        kfree(node);
        return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
        .map = xenbus_map_ring_valloc_pv,
        .unmap = xenbus_unmap_ring_vfree_pv,
};
#endif

struct unmap_ring_vfree_hvm {
        unsigned int idx;
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
                                              unsigned int goffset,
                                              unsigned int len,
                                              void *data)
{
        struct unmap_ring_vfree_hvm *info = data;

        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

        info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
        int rv;
        struct xenbus_map_node *node;
        void *addr;
        struct unmap_ring_vfree_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                addr = node->hvm.addr;
                if (addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = addr = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        nr_pages = XENBUS_PAGES(node->nr_handles);

        gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
                             xenbus_unmap_ring_setup_grant_hvm,
                             &info);

        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               info.addrs);
        if (!rv) {
                vunmap(vaddr);
                free_xenballooned_pages(nr_pages, node->hvm.pages);
        } else {
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
        }

        kfree(node);
        return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success, or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t *handles, unsigned int nr_handles,
                      unsigned long *vaddrs)
{
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i;
        int err;

        if (nr_handles > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_handles; i++)
                gnttab_set_unmap_op(&unmap[i], vaddrs[i],
                                    GNTMAP_host_map, handles[i]);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                BUG();

        err = GNTST_okay;
        for (i = 0; i < nr_handles; i++) {
                if (unmap[i].status != GNTST_okay) {
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         handles[i], unmap[i].status);
                        err = unmap[i].status;
                        break;
                }
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
        enum xenbus_state result;
        int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);

        if (err)
                result = XenbusStateUnknown;

        return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
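
/*
 * Example (illustrative only): checking whether the peer has connected
 * yet, a common test while waiting during the handshake.
 *
 *      if (xenbus_read_driver_state(dev->otherend) != XenbusStateConnected)
 *              return -EAGAIN;
 */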

static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                ring_ops = &ring_ops_pv;
        else
#endif
                ring_ops = &ring_ops_hvm;
}
