This source file includes the following definitions:
- xenbus_strstate
- xenbus_watch_path
- xenbus_watch_pathfmt
- __xenbus_switch_state
- xenbus_switch_state
- xenbus_frontend_closed
- xenbus_va_dev_error
- xenbus_dev_error
- xenbus_dev_fatal
- xenbus_switch_fatal
- xenbus_grant_ring
- xenbus_alloc_evtchn
- xenbus_free_evtchn
- xenbus_map_ring_valloc
- __xenbus_map_ring
- xenbus_map_ring_setup_grant_hvm
- xenbus_map_ring_valloc_hvm
- xenbus_map_ring
- xenbus_unmap_ring_vfree
- xenbus_map_ring_valloc_pv
- xenbus_unmap_ring_vfree_pv
- xenbus_unmap_ring_setup_grant_hvm
- xenbus_unmap_ring_vfree_hvm
- xenbus_unmap_ring
- xenbus_read_driver_state
- xenbus_ring_ops_init
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
        struct list_head next;
        union {
                struct {
                        struct vm_struct *area;
                } pv;
                struct {
                        struct page *pages[XENBUS_MAX_RING_PAGES];
                        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
                        void *addr;
                } hvm;
        };
        grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
        unsigned int nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
        int (*map)(struct xenbus_device *dev,
                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
                   void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
        static const char *const name[] = {
                [ XenbusStateUnknown      ] = "Unknown",
                [ XenbusStateInitialising ] = "Initialising",
                [ XenbusStateInitWait     ] = "InitWait",
                [ XenbusStateInitialised  ] = "Initialised",
                [ XenbusStateConnected    ] = "Connected",
                [ XenbusStateClosing      ] = "Closing",
                [ XenbusStateClosed       ] = "Closed",
                [XenbusStateReconfiguring] = "Reconfiguring",
                [XenbusStateReconfigured] = "Reconfigured",
        };
        return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a watch on the given @path, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * On success, the given @path will be saved as @watch->node, and remains
 * the caller's to free.  On error, @watch->node and @watch->callback are
 * cleared, the device switches to %XenbusStateClosing via xenbus_dev_fatal(),
 * and -errno is returned.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
                      void (*callback)(struct xenbus_watch *,
                                       const char *, const char *))
{
        int err;

        watch->node = path;
        watch->callback = callback;

        err = register_xenbus_watch(watch);

        if (err) {
                watch->node = NULL;
                watch->callback = NULL;
                xenbus_dev_fatal(dev, err, "adding watch on %s", path);
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
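
/*
 * Illustrative sketch (not part of this file): a driver would typically
 * register a watch on a node under its peer's directory, for example the
 * peer's "state" node, from its probe or connection path.  The names
 * example_otherend_changed and drvdata below are hypothetical.
 *
 *      static void example_otherend_changed(struct xenbus_watch *watch,
 *                                           const char *path,
 *                                           const char *token)
 *      {
 *              // react to the change under the watched node
 *      }
 *
 *      err = xenbus_watch_pathfmt(dev, &drvdata->watch,
 *                                 example_otherend_changed,
 *                                 "%s/%s", dev->otherend, "state");
 *      if (err)
 *              return err;     // xenbus_dev_fatal() was already called
 */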

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built with kvasprintf() from @pathfmt, using
 * the given xenbus_watch structure for storage, and the given @callback
 * function as the callback.  The allocated path is freed again if
 * registration fails.  Returns 0 on success or -errno on error; on error the
 * device switches to %XenbusStateClosing via xenbus_dev_fatal().
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
                         void (*callback)(struct xenbus_watch *,
                                          const char *, const char *),
                         const char *pathfmt, ...)
{
        int err;
        va_list ap;
        char *path;

        va_start(ap, pathfmt);
        path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
        va_end(ap);

        if (!path) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                return -ENOMEM;
        }
        err = xenbus_watch_path(dev, path, watch, callback);

        if (err)
                kfree(path);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
                      enum xenbus_state state, int depth)
{
        /*
         * The state change is written inside a transaction: the current
         * value of the "state" node is read first so that the node is not
         * re-created if it has already been removed, and the transaction is
         * retried if it ends with -EAGAIN.  Failures are reported through
         * xenbus_switch_fatal(), which uses @depth to avoid recursing
         * forever if switching to Closing itself keeps failing.
         */
        struct xenbus_transaction xbt;
        int current_state;
        int err, abort;

        if (state == dev->state)
                return 0;

again:
        abort = 1;

        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "starting transaction");
                return 0;
        }

        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        if (err != 1)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "writing new state");
                goto abort;
        }

        abort = 0;
abort:
        err = xenbus_transaction_end(xbt, abort);
        if (err) {
                if (err == -EAGAIN && !abort)
                        goto again;
                xenbus_switch_fatal(dev, depth, err, "ending transaction");
        } else
                dev->state = state;

        return 0;
}

/**
 * xenbus_switch_state - save the new state of a driver
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given
 * @state.  Always returns 0.  On failure, the error is reported via
 * xenbus_switch_fatal() and the device is moved towards
 * %XenbusStateClosing.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
        return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
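
/*
 * Illustrative sketch (not part of this file): a frontend typically walks
 * the xenbus state machine with xenbus_switch_state() once its rings and
 * event channels have been set up and published, for example:
 *
 *      err = xenbus_switch_state(dev, XenbusStateInitialised);
 *      if (err)
 *              return err;
 *      // ...and later, when the backend reports XenbusStateConnected:
 *      xenbus_switch_state(dev, XenbusStateConnected);
 *
 * The exact sequence of states is driver policy, not something imposed by
 * this helper.
 */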

int xenbus_frontend_closed(struct xenbus_device *dev)
{
        xenbus_switch_state(dev, XenbusStateClosed);
        complete(&dev->down);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                const char *fmt, va_list ap)
{
        unsigned int len;
        char *printf_buffer;
        char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (!printf_buffer)
                return;

        len = sprintf(printf_buffer, "%i ", -err);
        vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
        if (path_buffer)
                xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

        kfree(printf_buffer);
        kfree(path_buffer);
}

/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal - put an error message into the store and then shutdown
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        if (!depth)
                __xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring - grant access to a ring of pages
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
                      unsigned int nr_pages, grant_ref_t *grefs)
{
        int err;
        int i, j;

        for (i = 0; i < nr_pages; i++) {
                err = gnttab_grant_foreign_access(dev->otherend_id,
                                                  virt_to_gfn(vaddr), 0);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "granting access to ring page");
                        goto fail;
                }
                grefs[i] = err;

                vaddr = vaddr + XEN_PAGE_SIZE;
        }

        return 0;

fail:
        for (j = 0; j < i; j++)
                gnttab_end_foreign_access_ref(grefs[j], 0);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
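
/*
 * Illustrative sketch (not part of this file): a frontend might allocate a
 * one-page shared ring, grant it to its backend and publish the grant
 * reference.  The "ring-ref" key name is conventional, not required by
 * xenbus_grant_ring().
 *
 *      grant_ref_t gref;
 *      void *ring = (void *)get_zeroed_page(GFP_KERNEL);
 *      int err;
 *
 *      if (!ring)
 *              return -ENOMEM;
 *      err = xenbus_grant_ring(dev, ring, 1, &gref);
 *      if (err) {
 *              free_page((unsigned long)ring);
 *              return err;     // xenbus_dev_fatal() was already called
 *      }
 *      err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 */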

/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err)
                xenbus_dev_fatal(dev, err, "allocating event channel");
        else
                *port = alloc_unbound.port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);

/*
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
        struct evtchn_close close;
        int err;

        close.port = port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (err)
                xenbus_dev_error(dev, err, "freeing event channel %d", port);

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
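
/*
 * Illustrative sketch (not part of this file): allocating an event channel
 * for the peer and publishing the port; the "event-channel" key name is
 * conventional.  The port would normally be bound to an interrupt handler
 * next, and xenbus_free_evtchn() undoes the allocation on teardown or error.
 *
 *      int port, err;
 *
 *      err = xenbus_alloc_evtchn(dev, &port);
 *      if (err)
 *              return err;
 *      err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel",
 *                          "%d", port);
 *      if (err) {
 *              xenbus_free_evtchn(dev, port);
 *              return err;
 *      }
 */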

/**
 * xenbus_map_ring_valloc - allocate & map pages of VA space
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another domain's
 * grant table.  xenbus_map_ring_valloc allocates @nr_grefs pages of virtual
 * address space, maps the pages to that address, and sets *vaddr to that
 * address.  Returns 0 on success, and a negative errno or GNTST_* status
 * code (see xen/include/interface/grant_table.h) on error.  If an error is
 * returned, the device will switch to XenbusStateClosing and the error
 * message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
{
        int err;

        err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);

        if (err > 0)
                err = GNTST_general_error;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
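
/*
 * Illustrative sketch (not part of this file): a backend might read the
 * grant reference published by its frontend and map it with
 * xenbus_map_ring_valloc(), unmapping it again with
 * xenbus_unmap_ring_vfree() on disconnect.  The "ring-ref" key name is
 * conventional.
 *
 *      grant_ref_t ring_ref;
 *      void *ring;
 *      int err;
 *
 *      err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
 *                         "%u", &ring_ref);
 *      if (err != 1)
 *              return -EINVAL;
 *      err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *      if (err)
 *              return err;
 *      // ... use the shared ring at 'ring' ...
 *      xenbus_unmap_ring_vfree(dev, ring);
 */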

/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function */
static int __xenbus_map_ring(struct xenbus_device *dev,
                             grant_ref_t *gnt_refs,
                             unsigned int nr_grefs,
                             grant_handle_t *handles,
                             phys_addr_t *addrs,
                             unsigned int flags,
                             bool *leaked)
{
        struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
        int err = GNTST_okay;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_grefs; i++) {
                memset(&map[i], 0, sizeof(map[i]));
                gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
                                  dev->otherend_id);
                handles[i] = INVALID_GRANT_HANDLE;
        }

        gnttab_batch_map(map, i);

        for (i = 0; i < nr_grefs; i++) {
                if (map[i].status != GNTST_okay) {
                        err = map[i].status;
                        xenbus_dev_fatal(dev, map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                } else
                        handles[i] = map[i].handle;
        }

        return GNTST_okay;

fail:
        for (i = j = 0; i < nr_grefs; i++) {
                if (handles[i] != INVALID_GRANT_HANDLE) {
                        memset(&unmap[j], 0, sizeof(unmap[j]));
                        gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
                                            GNTMAP_host_map, handles[i]);
                        j++;
                }
        }

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
                BUG();

        *leaked = false;
        for (i = 0; i < j; i++) {
                if (unmap[i].status != GNTST_okay) {
                        *leaked = true;
                        break;
                }
        }

        return err;
}

struct map_ring_valloc_hvm
{
        unsigned int idx;

        /* Why do we need two arrays? See comment of __xenbus_map_ring */
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
                                            unsigned int goffset,
                                            unsigned int len,
                                            void *data)
{
        struct map_ring_valloc_hvm *info = data;
        unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

        info->phys_addrs[info->idx] = vaddr;
        info->addrs[info->idx] = vaddr;

        info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
                                      grant_ref_t *gnt_ref,
                                      unsigned int nr_grefs,
                                      void **vaddr)
{
        struct xenbus_map_node *node;
        int err;
        void *addr;
        bool leaked = false;
        struct map_ring_valloc_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;

        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm,
                             &info);

        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
                                info.phys_addrs, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;

        if (err)
                goto out_free_ballooned_pages;

        addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
                    PAGE_KERNEL);
        if (!addr) {
                err = -ENOMEM;
                goto out_xenbus_unmap_ring;
        }

        node->hvm.addr = addr;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = addr;
        return 0;

out_xenbus_unmap_ring:
        if (!leaked)
                xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
        else
                pr_alert("leaking %p size %u page(s)",
                         addr, nr_pages);
out_free_ballooned_pages:
        if (!leaked)
                free_xenballooned_pages(nr_pages, node->hvm.pages);
out_err:
        kfree(node);
        return err;
}

/**
 * xenbus_map_ring - map pages of memory into this domain
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: pointer to grant handles to be filled
 * @vaddrs: addresses to be mapped to
 * @leaked: set if cleanup of a failed mapping itself failed
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the pages to the specified address.
 * Returns 0 on success, and a negative errno or GNTST_* status code
 * (see xen/include/interface/grant_table.h) on error.
 *
 * If an error is returned, the device will switch to XenbusStateClosing
 * and the first error message will be saved in XenStore.  Furthermore, if
 * the mapping fails, the caller should check @leaked: when it is set, the
 * pages referred to by @vaddrs must not be freed or reused.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                    unsigned int nr_grefs, grant_handle_t *handles,
                    unsigned long *vaddrs, bool *leaked)
{
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int i;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_grefs; i++)
                phys_addrs[i] = (unsigned long)vaddrs[i];

        return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
                                 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

/**
 * xenbus_unmap_ring_vfree - unmap a page of memory from this domain
 * @dev: xenbus device
 * @vaddr: address to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
        return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
                                     grant_ref_t *gnt_refs,
                                     unsigned int nr_grefs,
                                     void **vaddr)
{
        struct xenbus_map_node *node;
        struct vm_struct *area;
        pte_t *ptes[XENBUS_MAX_RING_GRANTS];
        phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int err = GNTST_okay;
        int i;
        bool leaked;

        *vaddr = NULL;

        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
        if (!area) {
                kfree(node);
                return -ENOMEM;
        }

        for (i = 0; i < nr_grefs; i++)
                phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
                                phys_addrs,
                                GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        if (err)
                goto failed;

        node->nr_handles = nr_grefs;
        node->pv.area = area;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = area->addr;
        return 0;

failed:
        if (!leaked)
                free_vm_area(area);
        else
                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

        kfree(node);
        return err;
}

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        unsigned int level;
        int i;
        bool leaked = false;
        int err;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                if (node->pv.area->addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = NULL;
found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        for (i = 0; i < node->nr_handles; i++) {
                unsigned long addr;

                memset(&unmap[i], 0, sizeof(unmap[i]));
                addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
                unmap[i].host_addr = arbitrary_virt_to_machine(
                        lookup_address(addr, &level)).maddr;
                unmap[i].dev_bus_addr = 0;
                unmap[i].handle = node->handles[i];
        }

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                BUG();

        err = GNTST_okay;
        leaked = false;
        for (i = 0; i < node->nr_handles; i++) {
                if (unmap[i].status != GNTST_okay) {
                        leaked = true;
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         node->handles[i], unmap[i].status);
                        err = unmap[i].status;
                        break;
                }
        }

        if (!leaked)
                free_vm_area(node->pv.area);
        else
                pr_alert("leaking VM area %p size %u page(s)",
                         node->pv.area, node->nr_handles);

        kfree(node);
        return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
        .map = xenbus_map_ring_valloc_pv,
        .unmap = xenbus_unmap_ring_vfree_pv,
};
#endif

struct unmap_ring_vfree_hvm
{
        unsigned int idx;
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
                                              unsigned int goffset,
                                              unsigned int len,
                                              void *data)
{
        struct unmap_ring_vfree_hvm *info = data;

        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

        info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
        int rv;
        struct xenbus_map_node *node;
        void *addr;
        struct unmap_ring_vfree_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                addr = node->hvm.addr;
                if (addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = addr = NULL;
found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        nr_pages = XENBUS_PAGES(node->nr_handles);

        gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
                             xenbus_unmap_ring_setup_grant_hvm,
                             &info);

        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               info.addrs);
        if (!rv) {
                vunmap(vaddr);
                free_xenballooned_pages(nr_pages, node->hvm.pages);
        } else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

        kfree(node);
        return rv;
}

/**
 * xenbus_unmap_ring - unmap memory from another domain
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t *handles, unsigned int nr_handles,
                      unsigned long *vaddrs)
{
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i;
        int err;

        if (nr_handles > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_handles; i++)
                gnttab_set_unmap_op(&unmap[i], vaddrs[i],
                                    GNTMAP_host_map, handles[i]);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                BUG();

        err = GNTST_okay;
        for (i = 0; i < nr_handles; i++) {
                if (unmap[i].status != GNTST_okay) {
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         handles[i], unmap[i].status);
                        err = unmap[i].status;
                        break;
                }
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

/**
 * xenbus_read_driver_state - read state from a store path
 * @path: path for driver entry
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
        enum xenbus_state result;
        int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
        if (err)
                result = XenbusStateUnknown;

        return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
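
/*
 * Illustrative sketch (not part of this file): reading the peer's state,
 * e.g. to report why a connection has not completed.  Drivers normally
 * combine this with a watch on the peer's "state" node rather than polling.
 *
 *      enum xenbus_state backend_state;
 *
 *      backend_state = xenbus_read_driver_state(dev->otherend);
 *      if (backend_state != XenbusStateConnected)
 *              dev_info(&dev->dev, "backend state: %s\n",
 *                       xenbus_strstate(backend_state));
 */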

static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                ring_ops = &ring_ops_pv;
        else
#endif
                ring_ops = &ring_ops_hvm;
}