This source file includes the following definitions:
- vio_cmo_num_OF_devs
- vio_cmo_alloc
- vio_cmo_dealloc
- vio_cmo_entitlement_update
- vio_cmo_balance
- vio_dma_iommu_alloc_coherent
- vio_dma_iommu_free_coherent
- vio_dma_iommu_map_page
- vio_dma_iommu_unmap_page
- vio_dma_iommu_map_sg
- vio_dma_iommu_unmap_sg
- vio_cmo_set_dev_desired
- vio_cmo_bus_probe
- vio_cmo_bus_remove
- vio_cmo_set_dma_ops
- vio_cmo_bus_init
- cmo_allocs_failed_show
- cmo_allocs_failed_store
- cmo_desired_store
- cmo_high_show
- cmo_high_store
- vio_cmo_sysfs_init
- vio_cmo_entitlement_update
- vio_cmo_set_dev_desired
- vio_cmo_bus_probe
- vio_cmo_bus_remove
- vio_cmo_set_dma_ops
- vio_cmo_bus_init
- vio_cmo_sysfs_init
- vio_h_cop_sync
- vio_build_iommu_table
- vio_match_device
- vio_bus_probe
- vio_bus_remove
- __vio_register_driver
- vio_unregister_driver
- vio_dev_release
- vio_register_device_node
- vio_bus_scan_register_devices
- vio_bus_init
- vio_device_init
- name_show
- devspec_show
- modalias_show
- vio_unregister_device
- vio_bus_match
- vio_hotplug
- vio_get_attribute
- vio_find_name
- vio_find_node
- vio_enable_interrupts
- vio_disable_interrupts
- vio_init
1
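/*
 * PowerPC pSeries Virtual I/O (VIO) bus support.
 *
 * Discovers virtual devices from the device tree, sets up their IOMMU
 * tables and DMA operations, and, on CMO-capable partitions, manages the
 * partition's I/O memory entitlement.
 */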
13 #include <linux/cpu.h>
14 #include <linux/types.h>
15 #include <linux/delay.h>
16 #include <linux/stat.h>
17 #include <linux/device.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <linux/console.h>
21 #include <linux/export.h>
22 #include <linux/mm.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/kobject.h>
25
26 #include <asm/iommu.h>
27 #include <asm/dma.h>
28 #include <asm/vio.h>
29 #include <asm/prom.h>
30 #include <asm/firmware.h>
31 #include <asm/tce.h>
32 #include <asm/page.h>
33 #include <asm/hvcall.h>
34
35 static struct vio_dev vio_bus_device = {
36 .name = "vio",
37 .type = "",
38 .dev.init_name = "vio",
39 .dev.bus = &vio_bus_type,
40 };
41
42 #ifdef CONFIG_PPC_SMLPAR
43
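/*
 * Cooperative Memory Overcommitment (CMO) support: the platform grants the
 * partition a fixed amount of I/O memory entitlement, split between a
 * reserve pool (per-device guaranteed minimums) and an excess pool shared
 * by all devices.
 *
 * vio_cmo_pool - one pool of I/O memory entitlement
 * @size: total size of the pool in bytes
 * @free: bytes in the pool not currently allocated
 */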
49 struct vio_cmo_pool {
50 size_t size;
51 size_t free;
52 };
53
54
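/* Delay, in jiffies, before a queued entitlement balance operation runs */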
55 #define VIO_CMO_BALANCE_DELAY 100
56
57
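/* Chunk size, in bytes, used when portioning out entitlement during a balance */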
58 #define VIO_CMO_BALANCE_CHUNK 131072
59
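/*
 * vio_cmo_dev_entry - list node linking a vio_dev into vio_cmo.device_list
 * @viodev: the device participating in CMO
 * @list: list head for the bus-wide CMO device list
 */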
66 struct vio_cmo_dev_entry {
67 struct vio_dev *viodev;
68 struct list_head list;
69 };
70
71
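/*
 * vio_cmo - bus-wide CMO state
 * @lock: protects all CMO accounting fields
 * @balance_q: delayed work that rebalances entitlement across devices
 * @device_list: devices registered for CMO entitlement
 * @entitled: total I/O memory entitlement granted to the partition
 * @reserve: pool backing each device's guaranteed entitlement
 * @excess: pool shared by devices whose usage exceeds their reserve
 * @spare: entitlement kept aside (up to VIO_CMO_MIN_ENT) for newly added devices
 * @min: minimum entitlement required for the registered devices plus the spare pool
 * @desired: entitlement desired by all registered devices combined
 * @curr: bytes of entitlement currently allocated
 * @high: high-water mark of @curr since it was last reset
 */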
86 static struct vio_cmo {
87 spinlock_t lock;
88 struct delayed_work balance_q;
89 struct list_head device_list;
90 size_t entitled;
91 struct vio_cmo_pool reserve;
92 struct vio_cmo_pool excess;
93 size_t spare;
94 size_t min;
95 size_t desired;
96 size_t curr;
97 size_t high;
98 } vio_cmo;
99
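/*
 * Count the children of the /vdevice node that carry an "ibm,my-dma-window"
 * property, i.e. the virtual devices that take part in CMO.
 */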
103 static int vio_cmo_num_OF_devs(void)
104 {
105 struct device_node *node_vroot;
106 int count = 0;
107
108
109
110
111
112 node_vroot = of_find_node_by_name(NULL, "vdevice");
113 if (node_vroot) {
114 struct device_node *of_node;
115 struct property *prop;
116
117 for_each_child_of_node(node_vroot, of_node) {
118 prop = of_find_property(of_node, "ibm,my-dma-window",
119 NULL);
120 if (prop)
121 count++;
122 }
123 }
124 of_node_put(node_vroot);
125 return count;
126 }
127
128
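/*
 * vio_cmo_alloc - charge an I/O memory allocation against a device
 * @viodev: device performing the mapping
 * @size: number of bytes to account for
 *
 * The allocation is satisfied first from the device's unallocated reserve
 * entitlement and then from the excess pool (only while the spare pool is
 * fully funded).  Returns 0 on success or -ENOMEM if there is not enough
 * free entitlement.
 */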
142 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
143 {
144 unsigned long flags;
145 size_t reserve_free = 0;
146 size_t excess_free = 0;
147 int ret = -ENOMEM;
148
149 spin_lock_irqsave(&vio_cmo.lock, flags);
150
151
152 if (viodev->cmo.entitled > viodev->cmo.allocated)
153 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
154
155
156 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
157 excess_free = vio_cmo.excess.free;
158
159
160 if ((reserve_free + excess_free) >= size) {
161 vio_cmo.curr += size;
162 if (vio_cmo.curr > vio_cmo.high)
163 vio_cmo.high = vio_cmo.curr;
164 viodev->cmo.allocated += size;
165 size -= min(reserve_free, size);
166 vio_cmo.excess.free -= size;
167 ret = 0;
168 }
169
170 spin_unlock_irqrestore(&vio_cmo.lock, flags);
171 return ret;
172 }
173
174
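/*
 * vio_cmo_dealloc - return I/O memory entitlement freed by a device
 * @viodev: device that released the mapping
 * @size: number of bytes freed
 *
 * Freed entitlement is used first to replenish the spare pool (from the
 * excess pool and, if necessary, from the device's own entitlement above
 * its minimum), then to grow the reserve pool toward the bus-wide desired
 * level; whatever remains is returned to the excess pool.  A balance
 * operation is scheduled if any pool sizes changed.
 */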
186 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
187 {
188 unsigned long flags;
189 size_t spare_needed = 0;
190 size_t excess_freed = 0;
191 size_t reserve_freed = size;
192 size_t tmp;
193 int balance = 0;
194
195 spin_lock_irqsave(&vio_cmo.lock, flags);
196 vio_cmo.curr -= size;
197
198
199 if (viodev->cmo.allocated > viodev->cmo.entitled) {
200 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
201 viodev->cmo.entitled));
202 reserve_freed -= excess_freed;
203 }
204
205
206 viodev->cmo.allocated -= (reserve_freed + excess_freed);
207
208
209 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
210
211
212
213
214
215 if (spare_needed && excess_freed) {
216 tmp = min(excess_freed, spare_needed);
217 vio_cmo.excess.size -= tmp;
218 vio_cmo.reserve.size += tmp;
219 vio_cmo.spare += tmp;
220 excess_freed -= tmp;
221 spare_needed -= tmp;
222 balance = 1;
223 }
224
225
226
227
228
229
230
231 if (spare_needed && reserve_freed) {
232 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
233
234 vio_cmo.spare += tmp;
235 viodev->cmo.entitled -= tmp;
236 reserve_freed -= tmp;
237 spare_needed -= tmp;
238 balance = 1;
239 }
240
241
242
243
244
245
246 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
247 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
248
249 vio_cmo.excess.size -= tmp;
250 vio_cmo.reserve.size += tmp;
251 excess_freed -= tmp;
252 balance = 1;
253 }
254
255
256 if (excess_freed)
257 vio_cmo.excess.free += excess_freed;
258
259 if (balance)
260 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
261 spin_unlock_irqrestore(&vio_cmo.lock, flags);
262 }
263
264
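/*
 * vio_cmo_entitlement_update - set a new I/O memory entitlement for the bus
 * @new_entitlement: new total entitlement in bytes
 *
 * An increase tops up the spare pool first and places the remainder in the
 * excess pool.  A decrease is taken from free excess entitlement and from
 * entitlement that devices hold beyond what they have allocated; if that is
 * not enough, the update fails with -ENOMEM.  A balance operation is
 * scheduled on success.
 */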
275 int vio_cmo_entitlement_update(size_t new_entitlement)
276 {
277 struct vio_dev *viodev;
278 struct vio_cmo_dev_entry *dev_ent;
279 unsigned long flags;
280 size_t avail, delta, tmp;
281
282 spin_lock_irqsave(&vio_cmo.lock, flags);
283
284
285 if (new_entitlement > vio_cmo.entitled) {
286 delta = new_entitlement - vio_cmo.entitled;
287
288
289 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
290 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
291 vio_cmo.spare += tmp;
292 vio_cmo.reserve.size += tmp;
293 delta -= tmp;
294 }
295
296
297 vio_cmo.entitled += delta;
298 vio_cmo.excess.size += delta;
299 vio_cmo.excess.free += delta;
300
301 goto out;
302 }
303
304
305 delta = vio_cmo.entitled - new_entitlement;
306 avail = vio_cmo.excess.free;
307
308
309
310
311
312 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
313 if (avail >= delta)
314 break;
315
316 viodev = dev_ent->viodev;
317 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
318 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
319 avail += viodev->cmo.entitled -
320 max_t(size_t, viodev->cmo.allocated,
321 VIO_CMO_MIN_ENT);
322 }
323
324 if (delta <= avail) {
325 vio_cmo.entitled -= delta;
326
327
328 tmp = min(vio_cmo.excess.free, delta);
329 vio_cmo.excess.size -= tmp;
330 vio_cmo.excess.free -= tmp;
331 delta -= tmp;
332
333
334
335
336
337 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
338 if (!delta)
339 break;
340
341 viodev = dev_ent->viodev;
342 tmp = 0;
343 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
344 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
345 tmp = viodev->cmo.entitled -
346 max_t(size_t, viodev->cmo.allocated,
347 VIO_CMO_MIN_ENT);
348 viodev->cmo.entitled -= min(tmp, delta);
349 delta -= min(tmp, delta);
350 }
351 } else {
352 spin_unlock_irqrestore(&vio_cmo.lock, flags);
353 return -ENOMEM;
354 }
355
356 out:
357 schedule_delayed_work(&vio_cmo.balance_q, 0);
358 spin_unlock_irqrestore(&vio_cmo.lock, flags);
359 return 0;
360 }
361
362
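/*
 * vio_cmo_balance - redistribute I/O memory entitlement across devices
 * @work: work item embedded in vio_cmo.balance_q
 *
 * Every device is first reset to the minimum entitlement, then the
 * remaining entitlement (less the spare pool) is handed out in
 * VIO_CMO_BALANCE_CHUNK-sized steps to devices that desire more, level by
 * level, until either all desires are met or the entitlement is exhausted.
 * Finally the reserve and excess pool sizes are recomputed from the new
 * per-device entitlements.
 */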
383 static void vio_cmo_balance(struct work_struct *work)
384 {
385 struct vio_cmo *cmo;
386 struct vio_dev *viodev;
387 struct vio_cmo_dev_entry *dev_ent;
388 unsigned long flags;
389 size_t avail = 0, level, chunk, need;
390 int devcount = 0, fulfilled;
391
392 cmo = container_of(work, struct vio_cmo, balance_q.work);
393
394 spin_lock_irqsave(&vio_cmo.lock, flags);
395
396
397 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
398 BUG_ON(cmo->min > cmo->entitled);
399 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
400 cmo->min += cmo->spare;
401 cmo->desired = cmo->min;
402
403
404
405
406
407 avail = cmo->entitled - cmo->spare;
408 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
409 viodev = dev_ent->viodev;
410 devcount++;
411 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
412 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
413 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
414 }
415
416
417
418
419
420
421 level = VIO_CMO_MIN_ENT;
422 while (avail) {
423 fulfilled = 0;
424 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
425 viodev = dev_ent->viodev;
426
427 if (viodev->cmo.desired <= level) {
428 fulfilled++;
429 continue;
430 }
431
432
433
434
435
436
437 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
438 chunk = min(chunk, (viodev->cmo.desired -
439 viodev->cmo.entitled));
440 viodev->cmo.entitled += chunk;
441
442
443
444
445
446
447 need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
448 max(viodev->cmo.allocated, level);
449 avail -= need;
450
451 }
452 if (fulfilled == devcount)
453 break;
454 level += VIO_CMO_BALANCE_CHUNK;
455 }
456
457
458 cmo->reserve.size = cmo->min;
459 cmo->excess.free = 0;
460 cmo->excess.size = 0;
461 need = 0;
462 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
463 viodev = dev_ent->viodev;
464
465 if (viodev->cmo.entitled)
466 cmo->reserve.size += (viodev->cmo.entitled -
467 VIO_CMO_MIN_ENT);
468
469 if (viodev->cmo.allocated > viodev->cmo.entitled)
470 need += viodev->cmo.allocated - viodev->cmo.entitled;
471 }
472 cmo->excess.size = cmo->entitled - cmo->reserve.size;
473 cmo->excess.free = cmo->excess.size - need;
474
475 cancel_delayed_work(to_delayed_work(work));
476 spin_unlock_irqrestore(&vio_cmo.lock, flags);
477 }
478
479 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
480 dma_addr_t *dma_handle, gfp_t flag,
481 unsigned long attrs)
482 {
483 struct vio_dev *viodev = to_vio_dev(dev);
484 void *ret;
485
486 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
487 atomic_inc(&viodev->cmo.allocs_failed);
488 return NULL;
489 }
490
491 ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
492 dma_handle, dev->coherent_dma_mask, flag,
493 dev_to_node(dev));
494 if (unlikely(ret == NULL)) {
495 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
496 atomic_inc(&viodev->cmo.allocs_failed);
497 }
498
499 return ret;
500 }
501
502 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
503 void *vaddr, dma_addr_t dma_handle,
504 unsigned long attrs)
505 {
506 struct vio_dev *viodev = to_vio_dev(dev);
507
508 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
509 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
510 }
511
512 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
513 unsigned long offset, size_t size,
514 enum dma_data_direction direction,
515 unsigned long attrs)
516 {
517 struct vio_dev *viodev = to_vio_dev(dev);
518 struct iommu_table *tbl = get_iommu_table_base(dev);
519 dma_addr_t ret = DMA_MAPPING_ERROR;
520
521 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
522 goto out_fail;
523 ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
524 direction, attrs);
525 if (unlikely(ret == DMA_MAPPING_ERROR))
526 goto out_deallocate;
527 return ret;
528
529 out_deallocate:
530 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
531 out_fail:
532 atomic_inc(&viodev->cmo.allocs_failed);
533 return DMA_MAPPING_ERROR;
534 }
535
536 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
537 size_t size,
538 enum dma_data_direction direction,
539 unsigned long attrs)
540 {
541 struct vio_dev *viodev = to_vio_dev(dev);
542 struct iommu_table *tbl = get_iommu_table_base(dev);
543
544 iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
545 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
546 }
547
548 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
549 int nelems, enum dma_data_direction direction,
550 unsigned long attrs)
551 {
552 struct vio_dev *viodev = to_vio_dev(dev);
553 struct iommu_table *tbl = get_iommu_table_base(dev);
554 struct scatterlist *sgl;
555 int ret, count;
556 size_t alloc_size = 0;
557
558 for_each_sg(sglist, sgl, nelems, count)
559 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
560
561 if (vio_cmo_alloc(viodev, alloc_size))
562 goto out_fail;
563 ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
564 direction, attrs);
565 if (unlikely(!ret))
566 goto out_deallocate;
567
568 for_each_sg(sglist, sgl, ret, count)
569 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
570 if (alloc_size)
571 vio_cmo_dealloc(viodev, alloc_size);
572 return ret;
573
574 out_deallocate:
575 vio_cmo_dealloc(viodev, alloc_size);
576 out_fail:
577 atomic_inc(&viodev->cmo.allocs_failed);
578 return 0;
579 }
580
581 static void vio_dma_iommu_unmap_sg(struct device *dev,
582 struct scatterlist *sglist, int nelems,
583 enum dma_data_direction direction,
584 unsigned long attrs)
585 {
586 struct vio_dev *viodev = to_vio_dev(dev);
587 struct iommu_table *tbl = get_iommu_table_base(dev);
588 struct scatterlist *sgl;
589 size_t alloc_size = 0;
590 int count;
591
592 for_each_sg(sglist, sgl, nelems, count)
593 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
594
595 ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
596 vio_cmo_dealloc(viodev, alloc_size);
597 }
598
599 static const struct dma_map_ops vio_dma_mapping_ops = {
600 .alloc = vio_dma_iommu_alloc_coherent,
601 .free = vio_dma_iommu_free_coherent,
602 .map_sg = vio_dma_iommu_map_sg,
603 .unmap_sg = vio_dma_iommu_unmap_sg,
604 .map_page = vio_dma_iommu_map_page,
605 .unmap_page = vio_dma_iommu_unmap_page,
606 .dma_supported = dma_iommu_dma_supported,
607 .get_required_mask = dma_iommu_get_required_mask,
608 .mmap = dma_common_mmap,
609 .get_sgtable = dma_common_get_sgtable,
610 };
611
612
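/*
 * vio_cmo_set_dev_desired - change a device's desired CMO entitlement
 * @viodev: device whose desired value is being changed
 * @desired: new desired entitlement in bytes (clamped to VIO_CMO_MIN_ENT)
 *
 * Updates the bus-wide desired total and, when the desired value drops
 * below the device's current entitlement, immediately moves the surplus
 * from the reserve pool to the excess pool.  A balance operation is then
 * scheduled.  This is a no-op when CMO firmware support is absent or the
 * device is not on the CMO device list.
 */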
622 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
623 {
624 unsigned long flags;
625 struct vio_cmo_dev_entry *dev_ent;
626 int found = 0;
627
628 if (!firmware_has_feature(FW_FEATURE_CMO))
629 return;
630
631 spin_lock_irqsave(&vio_cmo.lock, flags);
632 if (desired < VIO_CMO_MIN_ENT)
633 desired = VIO_CMO_MIN_ENT;
634
635
636
637
638
639
640 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
641 if (viodev == dev_ent->viodev) {
642 found = 1;
643 break;
644 }
645 if (!found) {
646 spin_unlock_irqrestore(&vio_cmo.lock, flags);
647 return;
648 }
649
650
651 if (desired >= viodev->cmo.desired) {
652
653 vio_cmo.desired += desired - viodev->cmo.desired;
654 viodev->cmo.desired = desired;
655 } else {
656
657 vio_cmo.desired -= viodev->cmo.desired - desired;
658 viodev->cmo.desired = desired;
659
660
661
662
663 if (viodev->cmo.entitled > desired) {
664 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
665 vio_cmo.excess.size += viodev->cmo.entitled - desired;
666
667
668
669
670
671 if (viodev->cmo.allocated < viodev->cmo.entitled)
672 vio_cmo.excess.free += viodev->cmo.entitled -
673 max(viodev->cmo.allocated, desired);
674 viodev->cmo.entitled = desired;
675 }
676 }
677 schedule_delayed_work(&vio_cmo.balance_q, 0);
678 spin_unlock_irqrestore(&vio_cmo.lock, flags);
679 }
680
681
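/*
 * vio_cmo_bus_probe - set up CMO accounting when a device is probed
 * @viodev: device being bound to its driver
 *
 * DMA-capable vdevice nodes must provide a get_desired_dma() driver
 * callback and are charged VIO_CMO_MIN_ENT of reserve entitlement, funded
 * from the excess and spare pools if the device was not present at boot.
 * PFO devices perform no DMA and need no entitlement.  Returns 0 on
 * success, -EINVAL if the driver cannot support CMO, or -ENOMEM if there
 * is not enough free entitlement for the device.
 */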
695 static int vio_cmo_bus_probe(struct vio_dev *viodev)
696 {
697 struct vio_cmo_dev_entry *dev_ent;
698 struct device *dev = &viodev->dev;
699 struct iommu_table *tbl;
700 struct vio_driver *viodrv = to_vio_driver(dev->driver);
701 unsigned long flags;
702 size_t size;
703 bool dma_capable = false;
704
705 tbl = get_iommu_table_base(dev);
706
707
708 switch (viodev->family) {
709 case VDEVICE:
710 if (of_get_property(viodev->dev.of_node,
711 "ibm,my-dma-window", NULL))
712 dma_capable = true;
713 break;
714 case PFO:
715 dma_capable = false;
716 break;
717 default:
718 dev_warn(dev, "unknown device family: %d\n", viodev->family);
719 BUG();
720 break;
721 }
722
723
724 if (dma_capable) {
725
726 if (!viodrv->get_desired_dma) {
727 dev_err(dev, "%s: device driver does not support CMO\n",
728 __func__);
729 return -EINVAL;
730 }
731
732 viodev->cmo.desired =
733 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
734 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
735 viodev->cmo.desired = VIO_CMO_MIN_ENT;
736 size = VIO_CMO_MIN_ENT;
737
738 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
739 GFP_KERNEL);
740 if (!dev_ent)
741 return -ENOMEM;
742
743 dev_ent->viodev = viodev;
744 spin_lock_irqsave(&vio_cmo.lock, flags);
745 list_add(&dev_ent->list, &vio_cmo.device_list);
746 } else {
747 viodev->cmo.desired = 0;
748 size = 0;
749 spin_lock_irqsave(&vio_cmo.lock, flags);
750 }
751
752
753
754
755
756
757
758 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
759 VIO_CMO_MIN_ENT)) {
760
761 if (size)
762 vio_cmo.desired += (viodev->cmo.desired -
763 VIO_CMO_MIN_ENT);
764 } else {
765 size_t tmp;
766
767 tmp = vio_cmo.spare + vio_cmo.excess.free;
768 if (tmp < size) {
769 dev_err(dev, "%s: insufficient free "
770 "entitlement to add device. "
771 "Need %lu, have %lu\n", __func__,
772 size, (vio_cmo.spare + tmp));
773 spin_unlock_irqrestore(&vio_cmo.lock, flags);
774 return -ENOMEM;
775 }
776
777
778 tmp = min(size, vio_cmo.excess.free);
779 vio_cmo.excess.free -= tmp;
780 vio_cmo.excess.size -= tmp;
781 vio_cmo.reserve.size += tmp;
782
783
784 vio_cmo.spare -= size - tmp;
785
786
787 vio_cmo.min += size;
788 vio_cmo.desired += viodev->cmo.desired;
789 }
790 spin_unlock_irqrestore(&vio_cmo.lock, flags);
791 return 0;
792 }
793
794
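/*
 * vio_cmo_bus_remove - tear down CMO accounting when a device is removed
 * @viodev: device being removed
 *
 * The device must no longer hold any allocated I/O memory.  Its entry is
 * dropped from the CMO device list, its entitlement above the per-device
 * minimum is used to refill the spare pool, and the remainder is moved
 * from the reserve pool to the excess pool.
 */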
804 static void vio_cmo_bus_remove(struct vio_dev *viodev)
805 {
806 struct vio_cmo_dev_entry *dev_ent;
807 unsigned long flags;
808 size_t tmp;
809
810 spin_lock_irqsave(&vio_cmo.lock, flags);
811 if (viodev->cmo.allocated) {
812 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
813 "allocated after remove operation.\n",
814 __func__, viodev->cmo.allocated);
815 BUG();
816 }
817
818
819
820
821
822 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
823 if (viodev == dev_ent->viodev) {
824 list_del(&dev_ent->list);
825 kfree(dev_ent);
826 break;
827 }
828
829
830
831
832
833
834 if (viodev->cmo.entitled) {
835
836
837
838
839
840 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
841
842
843
844
845
846
847 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
848
849
850 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
851 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
852 vio_cmo.spare));
853 vio_cmo.spare += tmp;
854 viodev->cmo.entitled -= tmp;
855 }
856
857
858 vio_cmo.excess.size += viodev->cmo.entitled;
859 vio_cmo.excess.free += viodev->cmo.entitled;
860 vio_cmo.reserve.size -= viodev->cmo.entitled;
861
862
863
864
865
866
867 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
868 viodev->cmo.desired = VIO_CMO_MIN_ENT;
869 atomic_set(&viodev->cmo.allocs_failed, 0);
870 }
871
872 spin_unlock_irqrestore(&vio_cmo.lock, flags);
873 }
874
875 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
876 {
877 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
878 }
879
880
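/*
 * vio_cmo_bus_init - set up bus-wide CMO state at boot
 *
 * Queries the hypervisor (h_get_mpp) for the partition's I/O memory
 * entitlement and carves it into the spare, reserve and excess pools.
 * Panics if the entitlement cannot cover the minimum reserve needed for
 * the devices present in the device tree.
 */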
887 static void vio_cmo_bus_init(void)
888 {
889 struct hvcall_mpp_data mpp_data;
890 int err;
891
892 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
893 spin_lock_init(&vio_cmo.lock);
894 INIT_LIST_HEAD(&vio_cmo.device_list);
895 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
896
897
898 err = h_get_mpp(&mpp_data);
899
900
901
902
903
904 if (err != H_SUCCESS) {
905 printk(KERN_ERR "%s: unable to determine system IO "
906 "entitlement. (%d)\n", __func__, err);
907 vio_cmo.entitled = 0;
908 } else {
909 vio_cmo.entitled = mpp_data.entitled_mem;
910 }
911
912
913 vio_cmo.spare = VIO_CMO_MIN_ENT;
914 vio_cmo.reserve.size = vio_cmo.spare;
915 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
916 VIO_CMO_MIN_ENT);
917 if (vio_cmo.reserve.size > vio_cmo.entitled) {
918 printk(KERN_ERR "%s: insufficient system entitlement\n",
919 __func__);
920 panic("%s: Insufficient system entitlement", __func__);
921 }
922
923
924 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
925 vio_cmo.excess.free = vio_cmo.excess.size;
926 vio_cmo.min = vio_cmo.reserve.size;
927 vio_cmo.desired = vio_cmo.reserve.size;
928 }
929
930
931
932 #define viodev_cmo_rd_attr(name) \
933 static ssize_t cmo_##name##_show(struct device *dev, \
934 struct device_attribute *attr, \
935 char *buf) \
936 { \
937 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
938 }
939
940 static ssize_t cmo_allocs_failed_show(struct device *dev,
941 struct device_attribute *attr, char *buf)
942 {
943 struct vio_dev *viodev = to_vio_dev(dev);
944 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
945 }
946
947 static ssize_t cmo_allocs_failed_store(struct device *dev,
948 struct device_attribute *attr, const char *buf, size_t count)
949 {
950 struct vio_dev *viodev = to_vio_dev(dev);
951 atomic_set(&viodev->cmo.allocs_failed, 0);
952 return count;
953 }
954
955 static ssize_t cmo_desired_store(struct device *dev,
956 struct device_attribute *attr, const char *buf, size_t count)
957 {
958 struct vio_dev *viodev = to_vio_dev(dev);
959 size_t new_desired;
960 int ret;
961
962 ret = kstrtoul(buf, 10, &new_desired);
963 if (ret)
964 return ret;
965
966 vio_cmo_set_dev_desired(viodev, new_desired);
967 return count;
968 }
969
970 viodev_cmo_rd_attr(desired);
971 viodev_cmo_rd_attr(entitled);
972 viodev_cmo_rd_attr(allocated);
973
974 static ssize_t name_show(struct device *, struct device_attribute *, char *);
975 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
976 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
977 char *buf);
978
979 static struct device_attribute dev_attr_name;
980 static struct device_attribute dev_attr_devspec;
981 static struct device_attribute dev_attr_modalias;
982
983 static DEVICE_ATTR_RO(cmo_entitled);
984 static DEVICE_ATTR_RO(cmo_allocated);
985 static DEVICE_ATTR_RW(cmo_desired);
986 static DEVICE_ATTR_RW(cmo_allocs_failed);
987
988 static struct attribute *vio_cmo_dev_attrs[] = {
989 &dev_attr_name.attr,
990 &dev_attr_devspec.attr,
991 &dev_attr_modalias.attr,
992 &dev_attr_cmo_entitled.attr,
993 &dev_attr_cmo_allocated.attr,
994 &dev_attr_cmo_desired.attr,
995 &dev_attr_cmo_allocs_failed.attr,
996 NULL,
997 };
998 ATTRIBUTE_GROUPS(vio_cmo_dev);
999
1000
1001
1002 #define viobus_cmo_rd_attr(name) \
1003 static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf) \
1004 { \
1005 return sprintf(buf, "%lu\n", vio_cmo.name); \
1006 } \
1007 static struct bus_attribute bus_attr_cmo_bus_##name = \
1008 __ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)
1009
1010 #define viobus_cmo_pool_rd_attr(name, var) \
1011 static ssize_t \
1012 cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \
1013 { \
1014 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
1015 } \
1016 static BUS_ATTR_RO(cmo_##name##_##var)
1017
1018 viobus_cmo_rd_attr(entitled);
1019 viobus_cmo_rd_attr(spare);
1020 viobus_cmo_rd_attr(min);
1021 viobus_cmo_rd_attr(desired);
1022 viobus_cmo_rd_attr(curr);
1023 viobus_cmo_pool_rd_attr(reserve, size);
1024 viobus_cmo_pool_rd_attr(excess, size);
1025 viobus_cmo_pool_rd_attr(excess, free);
1026
1027 static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
1028 {
1029 return sprintf(buf, "%lu\n", vio_cmo.high);
1030 }
1031
1032 static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
1033 size_t count)
1034 {
1035 unsigned long flags;
1036
1037 spin_lock_irqsave(&vio_cmo.lock, flags);
1038 vio_cmo.high = vio_cmo.curr;
1039 spin_unlock_irqrestore(&vio_cmo.lock, flags);
1040
1041 return count;
1042 }
1043 static BUS_ATTR_RW(cmo_high);
1044
1045 static struct attribute *vio_bus_attrs[] = {
1046 &bus_attr_cmo_bus_entitled.attr,
1047 &bus_attr_cmo_bus_spare.attr,
1048 &bus_attr_cmo_bus_min.attr,
1049 &bus_attr_cmo_bus_desired.attr,
1050 &bus_attr_cmo_bus_curr.attr,
1051 &bus_attr_cmo_high.attr,
1052 &bus_attr_cmo_reserve_size.attr,
1053 &bus_attr_cmo_excess_size.attr,
1054 &bus_attr_cmo_excess_free.attr,
1055 NULL,
1056 };
1057 ATTRIBUTE_GROUPS(vio_bus);
1058
1059 static void vio_cmo_sysfs_init(void)
1060 {
1061 vio_bus_type.dev_groups = vio_cmo_dev_groups;
1062 vio_bus_type.bus_groups = vio_bus_groups;
1063 }
1064 #else
1065 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1066 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1067 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1068 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1069 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1070 static void vio_cmo_bus_init(void) {}
1071 static void vio_cmo_sysfs_init(void) { }
1072 #endif
1073 EXPORT_SYMBOL(vio_cmo_entitlement_update);
1074 EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1075
1076
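/*
 * vio_h_cop_sync - perform a synchronous H_COP hypervisor operation
 * @vdev: PFO device the operation is directed at
 * @op: operation parameters, including buffers, flags and an optional timeout
 *
 * The H_COP hcall is retried while the hypervisor reports a busy or
 * resource-constrained condition, until it succeeds, fails permanently or
 * the caller's timeout (in milliseconds) expires.  The raw hcall status is
 * stored in op->hcall_err and translated into the return value: 0 on
 * success, otherwise a negative errno.
 */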
1106 int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
1107 {
1108 struct device *dev = &vdev->dev;
1109 unsigned long deadline = 0;
1110 long hret = 0;
1111 int ret = 0;
1112
1113 if (op->timeout)
1114 deadline = jiffies + msecs_to_jiffies(op->timeout);
1115
1116 while (true) {
1117 hret = plpar_hcall_norets(H_COP, op->flags,
1118 vdev->resource_id,
1119 op->in, op->inlen, op->out,
1120 op->outlen, op->csbcpb);
1121
1122 if (hret == H_SUCCESS ||
1123 (hret != H_NOT_ENOUGH_RESOURCES &&
1124 hret != H_BUSY && hret != H_RESOURCE) ||
1125 (op->timeout && time_after(jiffies, deadline)))
1126 break;
1127
1128 dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
1129 }
1130
1131 switch (hret) {
1132 case H_SUCCESS:
1133 ret = 0;
1134 break;
1135 case H_OP_MODE:
1136 case H_TOO_BIG:
1137 ret = -E2BIG;
1138 break;
1139 case H_RESCINDED:
1140 ret = -EACCES;
1141 break;
1142 case H_HARDWARE:
1143 ret = -EPERM;
1144 break;
1145 case H_NOT_ENOUGH_RESOURCES:
1146 case H_RESOURCE:
1147 case H_BUSY:
1148 ret = -EBUSY;
1149 break;
1150 default:
1151 ret = -EINVAL;
1152 break;
1153 }
1154
1155 if (ret)
1156 dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
1157 __func__, ret, hret);
1158
1159 op->hcall_err = hret;
1160 return ret;
1161 }
1162 EXPORT_SYMBOL(vio_h_cop_sync);
1163
1164 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1165 {
1166 const __be32 *dma_window;
1167 struct iommu_table *tbl;
1168 unsigned long offset, size;
1169
1170 dma_window = of_get_property(dev->dev.of_node,
1171 "ibm,my-dma-window", NULL);
1172 if (!dma_window)
1173 return NULL;
1174
1175 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
1176 if (tbl == NULL)
1177 return NULL;
1178
1179 kref_init(&tbl->it_kref);
1180
1181 of_parse_dma_window(dev->dev.of_node, dma_window,
1182 &tbl->it_index, &offset, &size);
1183
1184
1185 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
1186 tbl->it_size = size >> tbl->it_page_shift;
1187
1188 tbl->it_offset = offset >> tbl->it_page_shift;
1189 tbl->it_busno = 0;
1190 tbl->it_type = TCE_VB;
1191 tbl->it_blocksize = 16;
1192
1193 if (firmware_has_feature(FW_FEATURE_LPAR))
1194 tbl->it_ops = &iommu_table_lpar_multi_ops;
1195 else
1196 tbl->it_ops = &iommu_table_pseries_ops;
1197
1198 return iommu_init_table(tbl, -1, 0, 0);
1199 }
1200
1201
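/*
 * vio_match_device - walk a driver's id table looking for a matching device
 * @ids: table of device ids supported by the driver, terminated by an
 *       entry with an empty type string
 * @dev: device to match against
 *
 * An entry matches when its type string is a prefix of the device's type
 * and the device's OF node is compatible with the entry's compat string.
 * Returns the matching entry or NULL.
 */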
1211 static const struct vio_device_id *vio_match_device(
1212 const struct vio_device_id *ids, const struct vio_dev *dev)
1213 {
1214 while (ids->type[0] != '\0') {
1215 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
1216 of_device_is_compatible(dev->dev.of_node,
1217 ids->compat))
1218 return ids;
1219 ids++;
1220 }
1221 return NULL;
1222 }
1223
1224
1225
1226
1227
1228
1229 static int vio_bus_probe(struct device *dev)
1230 {
1231 struct vio_dev *viodev = to_vio_dev(dev);
1232 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1233 const struct vio_device_id *id;
1234 int error = -ENODEV;
1235
1236 if (!viodrv->probe)
1237 return error;
1238
1239 id = vio_match_device(viodrv->id_table, viodev);
1240 if (id) {
1241 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1242 if (firmware_has_feature(FW_FEATURE_CMO)) {
1243 error = vio_cmo_bus_probe(viodev);
1244 if (error)
1245 return error;
1246 }
1247 error = viodrv->probe(viodev, id);
1248 if (error && firmware_has_feature(FW_FEATURE_CMO))
1249 vio_cmo_bus_remove(viodev);
1250 }
1251
1252 return error;
1253 }
1254
1255
1256 static int vio_bus_remove(struct device *dev)
1257 {
1258 struct vio_dev *viodev = to_vio_dev(dev);
1259 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1260 struct device *devptr;
1261 int ret = 1;
1262
1263
1264
1265
1266
1267 devptr = get_device(dev);
1268
1269 if (viodrv->remove)
1270 ret = viodrv->remove(viodev);
1271
1272 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1273 vio_cmo_bus_remove(viodev);
1274
1275 put_device(devptr);
1276 return ret;
1277 }
1278
1279
1280
1281
1282
1283 int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
1284 const char *mod_name)
1285 {
1286 pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
1287
1288
1289 viodrv->driver.name = viodrv->name;
1290 viodrv->driver.pm = viodrv->pm;
1291 viodrv->driver.bus = &vio_bus_type;
1292 viodrv->driver.owner = owner;
1293 viodrv->driver.mod_name = mod_name;
1294
1295 return driver_register(&viodrv->driver);
1296 }
1297 EXPORT_SYMBOL(__vio_register_driver);
1298
1299
1300
1301
1302
1303 void vio_unregister_driver(struct vio_driver *viodrv)
1304 {
1305 driver_unregister(&viodrv->driver);
1306 }
1307 EXPORT_SYMBOL(vio_unregister_driver);
1308
1309
1310 static void vio_dev_release(struct device *dev)
1311 {
1312 struct iommu_table *tbl = get_iommu_table_base(dev);
1313
1314 if (tbl)
1315 iommu_tce_table_put(tbl);
1316 of_node_put(dev->of_node);
1317 kfree(to_vio_dev(dev));
1318 }
1319
1320
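/*
 * vio_register_device_node - create and register a vio_dev for an OF node
 * @of_node: device tree node describing the virtual device
 *
 * The node must sit under either /vdevice or /ibm,platform-facilities.
 * vdevice children get a unit address taken from their "reg" property, an
 * interrupt mapping and, when they carry an "ibm,my-dma-window" property,
 * an IOMMU table and DMA operations.  Platform-facilities (PFO) children
 * are named after the node and may carry an "ibm,resource-id".  Returns
 * the registered device or NULL on failure.
 */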
1329 struct vio_dev *vio_register_device_node(struct device_node *of_node)
1330 {
1331 struct vio_dev *viodev;
1332 struct device_node *parent_node;
1333 const __be32 *prop;
1334 enum vio_dev_family family;
1335
1336
1337
1338
1339
1340 parent_node = of_get_parent(of_node);
1341 if (parent_node) {
1342 if (of_node_is_type(parent_node, "ibm,platform-facilities"))
1343 family = PFO;
1344 else if (of_node_is_type(parent_node, "vdevice"))
1345 family = VDEVICE;
1346 else {
1347 pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
1348 __func__,
1349 parent_node,
1350 of_node);
1351 of_node_put(parent_node);
1352 return NULL;
1353 }
1354 of_node_put(parent_node);
1355 } else {
1356 pr_warn("%s: could not determine the parent of node %pOFn.\n",
1357 __func__, of_node);
1358 return NULL;
1359 }
1360
1361 if (family == PFO) {
1362 if (of_get_property(of_node, "interrupt-controller", NULL)) {
1363 pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
1364 __func__, of_node);
1365 return NULL;
1366 }
1367 }
1368
1369
1370 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1371 if (viodev == NULL) {
1372 pr_warn("%s: allocation failure for VIO device.\n", __func__);
1373 return NULL;
1374 }
1375
1376
1377 viodev->family = family;
1378 if (viodev->family == VDEVICE) {
1379 unsigned int unit_address;
1380
1381 viodev->type = of_node_get_device_type(of_node);
1382 if (!viodev->type) {
1383 pr_warn("%s: node %pOFn is missing the 'device_type' "
1384 "property.\n", __func__, of_node);
1385 goto out;
1386 }
1387
1388 prop = of_get_property(of_node, "reg", NULL);
1389 if (prop == NULL) {
1390 pr_warn("%s: node %pOFn missing 'reg'\n",
1391 __func__, of_node);
1392 goto out;
1393 }
1394 unit_address = of_read_number(prop, 1);
1395 dev_set_name(&viodev->dev, "%x", unit_address);
1396 viodev->irq = irq_of_parse_and_map(of_node, 0);
1397 viodev->unit_address = unit_address;
1398 } else {
1399
1400
1401
1402 prop = of_get_property(of_node, "ibm,resource-id", NULL);
1403 if (prop != NULL)
1404 viodev->resource_id = of_read_number(prop, 1);
1405
1406 dev_set_name(&viodev->dev, "%pOFn", of_node);
1407 viodev->type = dev_name(&viodev->dev);
1408 viodev->irq = 0;
1409 }
1410
1411 viodev->name = of_node->name;
1412 viodev->dev.of_node = of_node_get(of_node);
1413
1414 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1415
1416
1417 viodev->dev.parent = &vio_bus_device.dev;
1418 viodev->dev.bus = &vio_bus_type;
1419 viodev->dev.release = vio_dev_release;
1420
1421 if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
1422 if (firmware_has_feature(FW_FEATURE_CMO))
1423 vio_cmo_set_dma_ops(viodev);
1424 else
1425 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1426
1427 set_iommu_table_base(&viodev->dev,
1428 vio_build_iommu_table(viodev));
1429
1430
1431
1432 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1433 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
1434 }
1435
1436
1437 if (device_register(&viodev->dev)) {
1438 printk(KERN_ERR "%s: failed to register device %s\n",
1439 __func__, dev_name(&viodev->dev));
1440 put_device(&viodev->dev);
1441 return NULL;
1442 }
1443
1444 return viodev;
1445
1446 out:
1447 kfree(viodev);
1448
1449 return NULL;
1450 }
1451 EXPORT_SYMBOL(vio_register_device_node);
1452
1453
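/*
 * vio_bus_scan_register_devices - register a vio_dev for every child of the
 * device tree node with the given name (e.g. "vdevice" or
 * "ibm,platform-facilities").
 */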
1461 static void vio_bus_scan_register_devices(char *root_name)
1462 {
1463 struct device_node *node_root, *node_child;
1464
1465 if (!root_name)
1466 return;
1467
1468 node_root = of_find_node_by_name(NULL, root_name);
1469 if (node_root) {
1470
1471
1472
1473
1474
1475 node_child = of_get_next_child(node_root, NULL);
1476 while (node_child) {
1477 vio_register_device_node(node_child);
1478 node_child = of_get_next_child(node_root, node_child);
1479 }
1480 of_node_put(node_root);
1481 }
1482 }
1483
1484
1485
1486
1487 static int __init vio_bus_init(void)
1488 {
1489 int err;
1490
1491 if (firmware_has_feature(FW_FEATURE_CMO))
1492 vio_cmo_sysfs_init();
1493
1494 err = bus_register(&vio_bus_type);
1495 if (err) {
1496 printk(KERN_ERR "failed to register VIO bus\n");
1497 return err;
1498 }
1499
1500
1501
1502
1503
1504 err = device_register(&vio_bus_device.dev);
1505 if (err) {
1506 printk(KERN_WARNING "%s: device_register returned %i\n",
1507 __func__, err);
1508 return err;
1509 }
1510
1511 if (firmware_has_feature(FW_FEATURE_CMO))
1512 vio_cmo_bus_init();
1513
1514 return 0;
1515 }
1516 postcore_initcall(vio_bus_init);
1517
1518 static int __init vio_device_init(void)
1519 {
1520 vio_bus_scan_register_devices("vdevice");
1521 vio_bus_scan_register_devices("ibm,platform-facilities");
1522
1523 return 0;
1524 }
1525 device_initcall(vio_device_init);
1526
1527 static ssize_t name_show(struct device *dev,
1528 struct device_attribute *attr, char *buf)
1529 {
1530 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1531 }
1532 static DEVICE_ATTR_RO(name);
1533
1534 static ssize_t devspec_show(struct device *dev,
1535 struct device_attribute *attr, char *buf)
1536 {
1537 struct device_node *of_node = dev->of_node;
1538
1539 return sprintf(buf, "%pOF\n", of_node);
1540 }
1541 static DEVICE_ATTR_RO(devspec);
1542
1543 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1544 char *buf)
1545 {
1546 const struct vio_dev *vio_dev = to_vio_dev(dev);
1547 struct device_node *dn;
1548 const char *cp;
1549
1550 dn = dev->of_node;
1551 if (!dn) {
1552 strcpy(buf, "\n");
1553 return strlen(buf);
1554 }
1555 cp = of_get_property(dn, "compatible", NULL);
1556 if (!cp) {
1557 strcpy(buf, "\n");
1558 return strlen(buf);
1559 }
1560
1561 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1562 }
1563 static DEVICE_ATTR_RO(modalias);
1564
1565 static struct attribute *vio_dev_attrs[] = {
1566 &dev_attr_name.attr,
1567 &dev_attr_devspec.attr,
1568 &dev_attr_modalias.attr,
1569 NULL,
1570 };
1571 ATTRIBUTE_GROUPS(vio_dev);
1572
1573 void vio_unregister_device(struct vio_dev *viodev)
1574 {
1575 device_unregister(&viodev->dev);
1576 if (viodev->family == VDEVICE)
1577 irq_dispose_mapping(viodev->irq);
1578 }
1579 EXPORT_SYMBOL(vio_unregister_device);
1580
1581 static int vio_bus_match(struct device *dev, struct device_driver *drv)
1582 {
1583 const struct vio_dev *vio_dev = to_vio_dev(dev);
1584 struct vio_driver *vio_drv = to_vio_driver(drv);
1585 const struct vio_device_id *ids = vio_drv->id_table;
1586
1587 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
1588 }
1589
1590 static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1591 {
1592 const struct vio_dev *vio_dev = to_vio_dev(dev);
1593 struct device_node *dn;
1594 const char *cp;
1595
1596 dn = dev->of_node;
1597 if (!dn)
1598 return -ENODEV;
1599 cp = of_get_property(dn, "compatible", NULL);
1600 if (!cp)
1601 return -ENODEV;
1602
1603 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1604 return 0;
1605 }
1606
1607 struct bus_type vio_bus_type = {
1608 .name = "vio",
1609 .dev_groups = vio_dev_groups,
1610 .uevent = vio_hotplug,
1611 .match = vio_bus_match,
1612 .probe = vio_bus_probe,
1613 .remove = vio_bus_remove,
1614 };
1615
1616
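/*
 * vio_get_attribute - read a property of the device's device tree node
 * @vdev: device whose node is queried
 * @which: property name
 * @length: if non-NULL, set to the property length
 *
 * Returns a pointer to the property value, or NULL if it does not exist.
 */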
1625 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
1626 {
1627 return of_get_property(vdev->dev.of_node, which, length);
1628 }
1629 EXPORT_SYMBOL(vio_get_attribute);
1630
1631 #ifdef CONFIG_PPC_PSERIES
1632
1633
1634
1635 static struct vio_dev *vio_find_name(const char *name)
1636 {
1637 struct device *found;
1638
1639 found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1640 if (!found)
1641 return NULL;
1642
1643 return to_vio_dev(found);
1644 }
1645
1646
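/*
 * vio_find_node - find the vio_dev registered for a device tree node
 * @vnode: device tree node of the virtual device
 *
 * Rebuilds the kobject name the node would have been registered under
 * (the unit address for vdevice children, the node name for
 * platform-facilities children) and looks it up on the VIO bus.  Returns
 * the vio_dev, or NULL if it is not registered.
 */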
1653 struct vio_dev *vio_find_node(struct device_node *vnode)
1654 {
1655 char kobj_name[20];
1656 struct device_node *vnode_parent;
1657
1658 vnode_parent = of_get_parent(vnode);
1659 if (!vnode_parent)
1660 return NULL;
1661
1662
1663 if (of_node_is_type(vnode_parent, "vdevice")) {
1664 const __be32 *prop;
1665
1666 prop = of_get_property(vnode, "reg", NULL);
1667 if (!prop)
1668 goto out;
1669 snprintf(kobj_name, sizeof(kobj_name), "%x",
1670 (uint32_t)of_read_number(prop, 1));
1671 } else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
1672 snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
1673 else
1674 goto out;
1675
1676 of_node_put(vnode_parent);
1677 return vio_find_name(kobj_name);
1678 out:
1679 of_node_put(vnode_parent);
1680 return NULL;
1681 }
1682 EXPORT_SYMBOL(vio_find_node);
1683
1684 int vio_enable_interrupts(struct vio_dev *dev)
1685 {
1686 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
1687 if (rc != H_SUCCESS)
1688 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
1689 return rc;
1690 }
1691 EXPORT_SYMBOL(vio_enable_interrupts);
1692
1693 int vio_disable_interrupts(struct vio_dev *dev)
1694 {
1695 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
1696 if (rc != H_SUCCESS)
1697 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
1698 return rc;
1699 }
1700 EXPORT_SYMBOL(vio_disable_interrupts);
1701 #endif
1702
1703 static int __init vio_init(void)
1704 {
1705 dma_debug_add_bus(&vio_bus_type);
1706 return 0;
1707 }
1708 fs_initcall(vio_init);