This source file includes the following definitions:
- kvmppc_mmu_book3s_64_reset_msr
- kvmppc_mmu_book3s_64_find_slbe
- kvmppc_slb_sid_shift
- kvmppc_slb_offset_mask
- kvmppc_slb_calc_vpn
- kvmppc_mmu_book3s_64_ea_to_vp
- mmu_pagesize
- kvmppc_mmu_book3s_64_get_pagesize
- kvmppc_mmu_book3s_64_get_page
- kvmppc_mmu_book3s_64_get_pteg
- kvmppc_mmu_book3s_64_get_avpn
- decode_pagesize
- kvmppc_mmu_book3s_64_xlate
- kvmppc_mmu_book3s_64_slbmte
- kvmppc_mmu_book3s_64_slbfee
- kvmppc_mmu_book3s_64_slbmfee
- kvmppc_mmu_book3s_64_slbmfev
- kvmppc_mmu_book3s_64_slbie
- kvmppc_mmu_book3s_64_slbia
- kvmppc_mmu_book3s_64_mtsrin
- kvmppc_mmu_book3s_64_tlbie
- segment_contains_magic_page
- kvmppc_mmu_book3s_64_esid_to_vsid
- kvmppc_mmu_book3s_64_is_dcbz32
- kvmppc_mmu_book3s_64_init
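
These functions implement KVM's emulation of the 64-bit Book3S MMU for PowerPC guests: SLB management and hash page table (HPT) translation.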

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

/* Define DEBUG_MMU to enable verbose MMU tracing via dprintk(). */
#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;
	unsigned long cur_msr = kvmppc_get_msr(vcpu);

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(cur_msr))
		msr |= MSR_TS_S;
	else
		msr |= cur_msr & MSR_TS_MASK;

	kvmppc_set_msr(vcpu, msr);
}

static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk(" %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
	       ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}

static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: vpn=0x%llx sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		vpn, vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* When running a PAPR guest, SDR1 contains a HVA address
	   instead of a GPA */
	if (vcpu->arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

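/*
 * Return the actual page size encoded in the second doubleword of an
 * HPTE, or -1 if the encoding is not valid for the base page size
 * indicated by the SLB entry.
 */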
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}

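/*
 * Translate a guest effective address into a guest real address by
 * walking the guest hash page table: look up the SLB entry, locate the
 * PTEG, scan the primary (and, if needed, secondary) PTEG for a
 * matching HPTE, then derive the real address and access permissions
 * from the PP bits and storage key.
 */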
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
{
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 v, r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (0xfff & eaddr);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;
		gpte->wimg = HPTE_R_M;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		 HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk_ratelimited(KERN_ERR
			"KVM: Can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		u64 pte0 = be64_to_cpu(pteg[i]);
		u64 pte1 = be64_to_cpu(pteg[i + 1]);

		/* Check all relevant fields of 1st dword */
		if ((pte0 & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pte1);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

	v = be64_to_cpu(pteg[i]);
	r = be64_to_cpu(pteg[i+1]);
	pp = (r & HPTE_R_PP) | key;
	if (r & HPTE_R_PP0)
		pp |= 8;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	if (unlikely(vcpu->arch.disable_kernel_nx) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR))
		gpte->may_execute = true;
	gpte->may_read = false;
	gpte->may_write = false;
	gpte->wimg = r & HPTE_R_WIMG;

	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		fallthrough;
	case 3:
	case 5:
	case 7:
	case 10:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/* Update PTE R and C bits, so the guest's swapper knows we used the
	 * page */
	if (gpte->may_read && !(r & HPTE_R_R)) {
		/*
		 * Set the accessed flag.
		 * We have to write this back with a single byte write
		 * because another vcpu may be accessing this on
		 * non-PAPR platforms such as mac99, and this is
		 * what real hardware does.
		 */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_R;
		put_user(r >> 8, addr + 6);
	}
	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
		/* Set the dirty flag, again with a single byte write */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_C;
		put_user(r, addr + 7);
	}

	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

	if (!gpte->may_read || (iswrite && !gpte->may_write))
		return -EPERM;
	return 0;

no_page_found:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}

static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr > vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment right away */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
				       ulong *ret_slb)
{
	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);

	if (slbe) {
		*ret_slb = slbe->origv;
		return 0;
	}
	*ret_slb = 0;
	return -ENOENT;
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (kvmppc_get_msr(vcpu) & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

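	/*
	 * mtsrin loads a 32-bit segment register value into an SLB
	 * entry: the SR number supplies the ESID and the SLB index,
	 * the low 28 bits of the value supply the VSID, and the
	 * remaining flag bits are carried across.  Build rb/rs in
	 * slbmte format and reuse the slbmte path below.
	 */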
	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;
	long i;
	struct kvm_vcpu *v;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6.  POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	/* flush this VA on all vcpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif

static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;
	u64 msr = kvmppc_get_msr(vcpu);

	if (msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using 64k pages,
	 * the host MMU supports 64k pages and the guest segment page
	 * size is >= 64k, but not if this segment contains the magic
	 * page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}