This source file includes the following definitions:
- kvmppc_find_table
- kvmppc_rm_tce_to_ua
- kvmppc_rm_tce_validate
- kvmppc_page_address
- kvmppc_rm_tce_put
- kvmppc_rm_ioba_validate
- iommu_tce_xchg_no_kill_rm
- iommu_tce_kill_rm
- kvmppc_rm_clear_tce
- kvmppc_rm_tce_iommu_mapped_dec
- kvmppc_rm_tce_iommu_do_unmap
- kvmppc_rm_tce_iommu_unmap
- kvmppc_rm_tce_iommu_do_map
- kvmppc_rm_tce_iommu_map
- kvmppc_rm_h_put_tce
- kvmppc_rm_ua_to_hpa
- kvmppc_rm_h_put_tce_indirect
- kvmppc_rm_h_stuff_tce
- kvmppc_h_get_tce
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition) ({                           \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",      \
                                __stringify(condition),         \
                                __func__, __LINE__);            \
                dump_stack();                                   \
        }                                                       \
        unlikely(__ret_warn_once);                              \
})

#else

#define WARN_ON_ONCE_RM(condition) ({                           \
        int __ret_warn_on = !!(condition);                      \
        unlikely(__ret_warn_on);                                \
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and
 * virtual mode on PR KVM.
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Converts the guest physical address from a TCE into a host userspace
 * address and, optionally, a pointer to the rmap entry of the gfn.
 */
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua, unsigned long **prmap)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        if (prmap)
                *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

        return 0;
}

/*
 * Validates a TCE before it is written to the table.
 *
 * This runs in real mode, so the guest page behind the TCE must be covered
 * by memory preregistered for every hardware IOMMU table attached to this
 * TCE table; otherwise H_TOO_HARD is returned and the hypercall is retried
 * in virtual mode.
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* A TCE with neither read nor write permission only clears an entry */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;

        if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
                return H_TOO_HARD;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

/*
 * The TCE shadow table pages live in the kernel linear mapping, so
 * page_address() reduces to simple arithmetic that is safe to use in
 * real mode; the #error below guards against configurations where that
 * would not hold.
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices: stores the guest TCE value
 * in the shadow table. This cannot fail, so kvmppc_rm_tce_validate() must
 * be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

        idx -= stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];

        /*
         * page must not be NULL in real mode; kvmppc_rm_ioba_validate()
         * is expected to have checked this already.
         */
        WARN_ON_ONCE_RM(!page);
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * Validates the IO address range and checks that the shadow table pages
 * backing it are allocated; real mode cannot allocate memory, so a missing
 * page makes the hypercall fall back to virtual mode via H_TOO_HARD.
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long ioba, unsigned long npages, bool clearing)
{
        unsigned long i, idx, sttpage, sttpages;
        unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

        if (ret)
                return ret;

        /* Clearing TCEs does not require the shadow pages to be present */
        if (clearing)
                return H_SUCCESS;

        idx = (ioba >> stt->page_shift) - stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
                        TCES_PER_PAGE;
        for (i = sttpage; i < sttpage + sttpages; ++i)
                if (!stt->pages[i])
                        return H_TOO_HARD;

        return H_SUCCESS;
}

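/*
 * Updates one hardware TCE but does not flush the TCE cache; the caller
 * must invalidate the affected entries with iommu_tce_kill_rm() afterwards.
 */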
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction)
{
        long ret;

        ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                        (*direction == DMA_BIDIRECTIONAL))) {
                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

                /*
                 * The old mapping allowed the device to write, so mark the
                 * previously mapped userspace page dirty before it goes away.
                 */
                if (pua && *pua)
                        mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
        }

        return ret;
}

extern void iommu_tce_kill_rm(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        if (tbl->it_ops->tce_kill)
                tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

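/*
 * Clears a hardware TCE on an error path by exchanging in hpa = 0 with
 * DMA_NONE; any error from the exchange itself is deliberately ignored.
 */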
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
                /*
                 * The exchange can fail in real mode; return H_TOO_HARD so
                 * the hypercall is retried in virtual mode.
                 */
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
                iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa = 0;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);

                /*
                 * The exchange can fail in real mode; return H_TOO_HARD so
                 * the hypercall is retried in virtual mode.
                 */
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

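/*
 * Real mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates any hardware tables attached to this LIOBN and then stores the
 * TCE in the shadow table. H_TOO_HARD is returned whenever the work can
 * only be completed in virtual mode.
 */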
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_rm_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);
        if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
                return H_PARAMETER;

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry, ua, dir);

                iommu_tce_kill_rm(stit->tbl, entry, 1);

                if (ret != H_SUCCESS) {
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                        return ret;
                }
        }

        kvmppc_rm_tce_put(stt, entry, tce);

        return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        /*
         * Called in real mode; translate the userspace address through the
         * task page table saved in vcpu->arch.pgdir.
         */
        ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
        if (!ptep || !pte_present(*ptep))
                return -ENXIO;
        pte = *ptep;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling huge pages in real mode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                        (ua & ~PAGE_MASK);

        return 0;
}

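/*
 * Real mode handler for the H_PUT_TCE_INDIRECT hypercall: translates the
 * guest page holding the TCE list, validates every TCE and then programs
 * the hardware and shadow tables. Any condition that cannot be handled in
 * real mode returns H_TOO_HARD so the hypercall is retried in virtual mode.
 */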
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;
        bool prereg = false;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole table addressed resides in a single 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
        if (ret != H_SUCCESS)
                return ret;

        if (mm_iommu_preregistered(vcpu->kvm->mm)) {
                /*
                 * Fast path: the guest page with the TCE list is covered by
                 * preregistered memory, so it can be translated directly
                 * without taking the rmap lock.
                 */
                struct mm_iommu_table_group_mem_t *mem;

                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
                        return H_TOO_HARD;

                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
                        prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
                                        IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }

        if (!prereg) {
                /*
                 * The TCE list is not in preregistered memory (typically a
                 * guest with emulated devices only), so lock the rmap and
                 * translate the list page via the host page table instead.
                 */
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                        return H_TOO_HARD;

                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
                        return H_TOO_HARD;

                /*
                 * Synchronize with the MMU notifier callbacks in
                 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
                 * While we have the rmap lock, code running on other CPUs
                 * cannot finish unmapping the host real page that backs
                 * this guest real page, so we are OK to access the host
                 * real page.
                 */
                lock_rmap(rmap);
                if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_rm_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ua = 0;
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
                        ret = H_PARAMETER;
                        goto invalidate_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
                                                entry);
                                goto invalidate_exit;
                        }
                }

                kvmppc_rm_tce_put(stt, entry + i, tce);
        }

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
        if (rmap)
                unlock_rmap(rmap);

        return ret;
}

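/*
 * Real mode handler for the H_STUFF_TCE hypercall: writes the same TCE
 * value (which must have no access bits set) to npages consecutive
 * entries, unmapping the hardware tables first and then updating the
 * shadow table.
 */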
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
        if (ret != H_SUCCESS)
                return ret;

        /* Only reject TCE values with access bits set; other bits pass through */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto invalidate_exit;

                        WARN_ON_ONCE_RM(1);
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
        list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
                iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

        return ret;
}

/*
 * Handler for the H_GET_TCE hypercall: reads a TCE from the shadow table
 * and returns it to the guest in GPR4.
 */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        if (!page) {
                /* An unallocated shadow page means the TCE was never set */
                vcpu->arch.regs.gpr[4] = 0;
                return H_SUCCESS;
        }
        tbl = (u64 *)page_address(page);

        vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */