slbe              183 arch/powerpc/include/uapi/asm/kvm.h 					__u64 slbe;
slbe               77 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
slbe               79 arch/powerpc/kvm/book3s_64_mmu.c 	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
slbe               82 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
slbe               84 arch/powerpc/kvm/book3s_64_mmu.c 	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
slbe              118 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
slbe              120 arch/powerpc/kvm/book3s_64_mmu.c 	return mmu_pagesize(slbe->base_page_size);
slbe              123 arch/powerpc/kvm/book3s_64_mmu.c static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
slbe              125 arch/powerpc/kvm/book3s_64_mmu.c 	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
slbe              127 arch/powerpc/kvm/book3s_64_mmu.c 	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
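
The helpers above (kvmppc_slb_sid_shift, kvmppc_slb_offset_mask and kvmppc_mmu_book3s_64_get_page) all hinge on one question: is the SLB entry a 256 MB or a 1 TB segment? Below is a minimal standalone sketch of the same arithmetic, not the kernel code itself; the shift values 28/40 and the log2-page-size parameter page_shift are assumptions taken from the architecture, since the constants do not appear in the listing.

/* Sketch: segment geometry used by the Book3S 64 MMU helpers above.
 * Assumptions: SID_SHIFT = 28 (256 MB segment), SID_SHIFT_1T = 40
 * (1 TB segment), page_shift = log2(base page size), e.g. 12 for 4 KB. */
#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT	28
#define SID_SHIFT_1T	40

struct slb_sketch {
	int tb;			/* 1 TB segment flag */
	int page_shift;		/* log2(base page size) */
};

static int sid_shift(const struct slb_sketch *s)
{
	return s->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static uint64_t offset_mask(const struct slb_sketch *s)
{
	/* effective-address bits that lie inside the segment */
	return (1ull << sid_shift(s)) - 1;
}

static uint64_t page_index(const struct slb_sketch *s, uint64_t eaddr)
{
	/* page number within the segment, as get_page() computes it */
	return (eaddr & offset_mask(s)) >> s->page_shift;
}

int main(void)
{
	struct slb_sketch s = { .tb = 0, .page_shift = 12 };
	printf("page index of 0x%x: %llu\n", 0x123456,
	       (unsigned long long)page_index(&s, 0x123456));
	return 0;
}
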
slbe              131 arch/powerpc/kvm/book3s_64_mmu.c 				struct kvmppc_slb *slbe, gva_t eaddr,
slbe              142 arch/powerpc/kvm/book3s_64_mmu.c 	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
slbe              143 arch/powerpc/kvm/book3s_64_mmu.c 	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
slbe              144 arch/powerpc/kvm/book3s_64_mmu.c 	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
slbe              155 arch/powerpc/kvm/book3s_64_mmu.c 		page, vcpu_book3s->sdr1, pteg, slbe->vsid);
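
The get_pteg fragment above derives a PTEG address from a hash of the VPN plus the guest's SDR1. The sketch below shows one plausible way such an address is formed; the HTAB layout details (HTABSIZE in the low 5 bits of SDR1, 2^(11+HTABSIZE) PTEGs of 128 bytes, the origin mask) are assumptions from the ISA's hashed page table description, not from the listing, and hpt_hash() itself is not reproduced. The secondary-hash case is modelled as the bitwise complement of the primary hash, which is what the 'second' parameter of get_pteg() suggests.

/* Sketch: turning a hash value and a guest SDR1 into a PTEG address.
 * All layout constants below are assumptions, see the note above. */
#include <stdio.h>
#include <stdint.h>

static uint64_t pteg_addr(uint64_t sdr1, uint64_t hash, int second)
{
	uint64_t htabsize  = sdr1 & 0x1f;		/* assumed size field  */
	uint64_t htaborg   = sdr1 & ~0x3ffffull;	/* assumed origin mask */
	uint64_t hash_mask = (1ull << (htabsize + 11)) - 1;

	if (second)
		hash = ~hash;

	return htaborg | ((hash & hash_mask) << 7);	/* 128 bytes per PTEG */
}

int main(void)
{
	uint64_t sdr1 = 0x40000000ull | 3;	/* toy HTAB at 1 GB, 2^14 PTEGs */

	printf("primary   PTEG: 0x%llx\n",
	       (unsigned long long)pteg_addr(sdr1, 0x1234, 0));
	printf("secondary PTEG: 0x%llx\n",
	       (unsigned long long)pteg_addr(sdr1, 0x1234, 1));
	return 0;
}
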
slbe              169 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
slbe              171 arch/powerpc/kvm/book3s_64_mmu.c 	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
slbe              174 arch/powerpc/kvm/book3s_64_mmu.c 	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
slbe              175 arch/powerpc/kvm/book3s_64_mmu.c 	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
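
get_avpn packs the page index and the VSID so the two fields abut, exactly as line 175 of the listing shows: avpn = page_index | (vsid << (sid_shift - p)). A tiny self-contained illustration of that packing follows; SID_SHIFT = 28 and p = 12 (4 KB pages) are assumed values.

/* Sketch: AVPN-style packing of VSID and page index, mirroring
 * get_avpn() above.  SID_SHIFT = 28 and p = 12 are assumptions. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int sid_shift = 28, p = 12;
	uint64_t vsid = 0xabcdef, eaddr = 0x12345678;

	uint64_t pg   = (eaddr & ((1ull << sid_shift) - 1)) >> p;
	uint64_t avpn = pg | (vsid << (sid_shift - p));

	printf("avpn = 0x%llx\n", (unsigned long long)avpn);
	return 0;
}
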
slbe              190 arch/powerpc/kvm/book3s_64_mmu.c static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
slbe              192 arch/powerpc/kvm/book3s_64_mmu.c 	switch (slbe->base_page_size) {
slbe              209 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe;
slbe              240 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
slbe              241 arch/powerpc/kvm/book3s_64_mmu.c 	if (!slbe)
slbe              244 arch/powerpc/kvm/book3s_64_mmu.c 	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
slbe              247 arch/powerpc/kvm/book3s_64_mmu.c 	if (slbe->tb)
slbe              249 arch/powerpc/kvm/book3s_64_mmu.c 	if (slbe->large)
slbe              256 arch/powerpc/kvm/book3s_64_mmu.c 	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
slbe              261 arch/powerpc/kvm/book3s_64_mmu.c 	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
slbe              271 arch/powerpc/kvm/book3s_64_mmu.c 	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
slbe              273 arch/powerpc/kvm/book3s_64_mmu.c 	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
slbe              283 arch/powerpc/kvm/book3s_64_mmu.c 			if (slbe->large &&
slbe              285 arch/powerpc/kvm/book3s_64_mmu.c 				pgsize = decode_pagesize(slbe, pte1);
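
The xlate path above finds the SLB entry for the effective address, tries the primary and then the secondary PTEG, and selects the storage-key bit from Kp or Ks depending on the problem-state bit of MSR (lines 271/273 of the listing). The sketch below reproduces only that skeleton; find_pte() is a stand-in for the real PTEG scan and everything about it is an assumption for illustration.

/* Sketch: key selection and primary/secondary retry, as in the
 * xlate path above.  find_pte() is a placeholder for the PTEG scan. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct slb_sketch { int Ks, Kp; };

static int storage_key(const struct slb_sketch *s, bool problem_state)
{
	/* mirrors: (MSR_PR && slbe->Kp) / (!MSR_PR && slbe->Ks) */
	return problem_state ? s->Kp : s->Ks;
}

/* stand-in for scanning one PTEG for a matching AVPN */
static bool find_pte(uint64_t avpn, bool second, uint64_t *pte_out)
{
	(void)avpn;
	if (second) {			/* pretend only the secondary hits */
		*pte_out = 0x1234;
		return true;
	}
	return false;
}

int main(void)
{
	struct slb_sketch s = { .Ks = 0, .Kp = 1 };
	uint64_t pte = 0;
	bool found = false;

	for (int second = 0; second <= 1 && !found; second++)
		found = find_pte(0xabcd, second, &pte);

	printf("key=%d found=%d pte=0x%llx\n",
	       storage_key(&s, true), found, (unsigned long long)pte);
	return 0;
}
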
slbe              382 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe;
slbe              393 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = &vcpu->arch.slb[slb_nr];
slbe              395 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
slbe              396 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
slbe              397 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->esid  = slbe->tb ? esid_1t : esid;
slbe              398 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
slbe              399 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
slbe              400 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
slbe              401 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
slbe              402 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
slbe              403 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
slbe              405 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->base_page_size = MMU_PAGE_4K;
slbe              406 arch/powerpc/kvm/book3s_64_mmu.c 	if (slbe->large) {
slbe              410 arch/powerpc/kvm/book3s_64_mmu.c 				slbe->base_page_size = MMU_PAGE_16M;
slbe              413 arch/powerpc/kvm/book3s_64_mmu.c 				slbe->base_page_size = MMU_PAGE_64K;
slbe              417 arch/powerpc/kvm/book3s_64_mmu.c 			slbe->base_page_size = MMU_PAGE_16M;
slbe              420 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
slbe              421 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->origv = rs;
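
The slbmte handler above slices the RB/RS register pair into the shadow kvmppc_slb fields. The standalone sketch below performs the same bit slicing; the SLB_* mask values and the ESID extraction are assumptions following the ISA's SLBE layout (flag bits KS/KP/N/L/C in RS, the valid bit and ESID in RB), and only the 256 MB vs 1 TB distinction made in the listing is reproduced.

/* Sketch: decoding an slbmte RB/RS pair into shadow-entry fields.
 * The mask values below are assumptions, see the note above. */
#include <stdio.h>
#include <stdint.h>

#define SLB_VSID_B	0xc000000000000000ull
#define SLB_VSID_B_1T	0x4000000000000000ull
#define SLB_VSID_KS	0x0000000000000800ull
#define SLB_VSID_KP	0x0000000000000400ull
#define SLB_VSID_N	0x0000000000000200ull
#define SLB_VSID_L	0x0000000000000100ull
#define SLB_VSID_C	0x0000000000000080ull
#define SLB_ESID_V	0x0000000008000000ull
#define SID_SHIFT	28
#define SID_SHIFT_1T	40

struct slb_fields {
	uint64_t esid, vsid;
	int valid, tb, large, Ks, Kp, nx, class;
};

static struct slb_fields decode_slbmte(uint64_t rb, uint64_t rs)
{
	struct slb_fields e;
	int shift;

	e.tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	e.large = (rs & SLB_VSID_L)    ? 1 : 0;
	e.valid = (rb & SLB_ESID_V)    ? 1 : 0;
	e.Ks    = (rs & SLB_VSID_KS)   ? 1 : 0;
	e.Kp    = (rs & SLB_VSID_KP)   ? 1 : 0;
	e.nx    = (rs & SLB_VSID_N)    ? 1 : 0;
	e.class = (rs & SLB_VSID_C)    ? 1 : 0;

	shift = e.tb ? SID_SHIFT_1T : SID_SHIFT;
	/* ESID: drop the in-segment offset; VSID: strip B and align,
	 * using the same (sid_shift - 16) shift as the listing. */
	e.esid = rb >> shift;
	e.vsid = (rs & ~SLB_VSID_B) >> (shift - 16);

	return e;
}

int main(void)
{
	struct slb_fields e = decode_slbmte(0xc000000008000000ull,
					    0x0000012345678500ull);
	printf("valid=%d tb=%d large=%d vsid=0x%llx esid=0x%llx\n",
	       e.valid, e.tb, e.large,
	       (unsigned long long)e.vsid, (unsigned long long)e.esid);
	return 0;
}
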
slbe              430 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
slbe              432 arch/powerpc/kvm/book3s_64_mmu.c 	if (slbe) {
slbe              433 arch/powerpc/kvm/book3s_64_mmu.c 		*ret_slb = slbe->origv;
slbe              442 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe;
slbe              447 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = &vcpu->arch.slb[slb_nr];
slbe              449 arch/powerpc/kvm/book3s_64_mmu.c 	return slbe->orige;
slbe              454 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe;
slbe              459 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = &vcpu->arch.slb[slb_nr];
slbe              461 arch/powerpc/kvm/book3s_64_mmu.c 	return slbe->origv;
slbe              466 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe;
slbe              471 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
slbe              473 arch/powerpc/kvm/book3s_64_mmu.c 	if (!slbe)
slbe              476 arch/powerpc/kvm/book3s_64_mmu.c 	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
slbe              478 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->valid = false;
slbe              479 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->orige = 0;
slbe              480 arch/powerpc/kvm/book3s_64_mmu.c 	slbe->origv = 0;
slbe              482 arch/powerpc/kvm/book3s_64_mmu.c 	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
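
slbie looks the entry up by effective address, drops its valid bit and cached RB/RS images, and derives the size of the address range whose shadow mappings must go from the segment size (line 482 of the listing). A compact sketch of that step follows; the SID_SHIFT values are assumptions and the actual shadow flush is only represented by a printf.

/* Sketch: slbie-style invalidation -- clear the shadow entry and
 * compute the effective-address range to flush from the segment size. */
#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT	28
#define SID_SHIFT_1T	40

struct slb_sketch {
	uint64_t esid, orige, origv;
	int valid, tb;
};

static void slbie_sketch(struct slb_sketch *e)
{
	int shift = e->tb ? SID_SHIFT_1T : SID_SHIFT;
	uint64_t seg_size = 1ull << shift;
	uint64_t seg_base = e->esid << shift;

	e->valid = 0;
	e->orige = 0;
	e->origv = 0;

	/* the real code flushes the shadow mappings for this range */
	printf("flush shadow mappings: ea 0x%llx .. 0x%llx\n",
	       (unsigned long long)seg_base,
	       (unsigned long long)(seg_base + seg_size - 1));
}

int main(void)
{
	struct slb_sketch e = { .esid = 0xd, .valid = 1, .tb = 0 };
	slbie_sketch(&e);
	return 0;
}
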
slbe              344 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvmppc_slb *slbe;
slbe              357 arch/powerpc/kvm/book3s_64_mmu_hv.c 		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
slbe              358 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (!slbe)
slbe              360 arch/powerpc/kvm/book3s_64_mmu_hv.c 		slb_v = slbe->origv;
slbe             1545 arch/powerpc/kvm/book3s_hv.c 		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
slbe             1563 arch/powerpc/kvm/book3s_hv.c 		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
slbe             1564 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
slbe             1450 arch/powerpc/kvm/book3s_pr.c 			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
slbe             1482 arch/powerpc/kvm/book3s_pr.c 			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
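
The get/set sregs paths above copy the cached RB/RS images in and out of the sregs SLB array: the PR variant ORs the entry index into the low bits of slbe on the way out (line 1450), and the HV set path keeps only entries with SLB_ESID_V set, compacting them into slb[j] (lines 1563-1564). The sketch below round-trips shadow SLB state through a sregs-style array in the same spirit; the array length of 64 and the SLB_ESID_V mask value are assumptions.

/* Sketch: sregs-style SLB save/restore mirroring the paths above.
 * NR_SLB and SLB_ESID_V are assumed values. */
#include <stdio.h>
#include <stdint.h>

#define SLB_ESID_V	0x0000000008000000ull
#define NR_SLB		64

struct slb_img { uint64_t slbe, slbv; };	/* sregs-style pair  */
struct slb_ent { uint64_t orige, origv; };	/* shadow entry pair */

static void slb_to_sregs(const struct slb_ent *slb, struct slb_img *img)
{
	for (int i = 0; i < NR_SLB; i++) {
		img[i].slbe = slb[i].orige | i;	/* index in low bits */
		img[i].slbv = slb[i].origv;
	}
}

static int sregs_to_slb(const struct slb_img *img, struct slb_ent *slb)
{
	int j = 0;

	for (int i = 0; i < NR_SLB; i++) {
		if (!(img[i].slbe & SLB_ESID_V))
			continue;		/* skip invalid entries */
		slb[j].orige = img[i].slbe;
		slb[j].origv = img[i].slbv;
		j++;
	}
	return j;				/* number of live entries */
}

int main(void)
{
	struct slb_ent slb[NR_SLB] = { [3] = { SLB_ESID_V | 0xd0000000ull,
					       0x12345000ull } };
	struct slb_img img[NR_SLB];
	struct slb_ent back[NR_SLB];

	slb_to_sregs(slb, img);
	printf("restored %d valid entries\n", sregs_to_slb(img, back));
	return 0;
}
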
slbe              135 arch/powerpc/platforms/cell/spu_base.c static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
slbe              140 arch/powerpc/platforms/cell/spu_base.c 			__func__, slbe, slb->vsid, slb->esid);
slbe              142 arch/powerpc/platforms/cell/spu_base.c 	out_be64(&priv2->slb_index_W, slbe);
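
In the Cell SPU code, spu_load_slb writes the target slot number into the SPU's slb_index_W register before programming the entry itself; only that index write appears in the listing. Below is a simulated-MMIO sketch of a "select slot, then write fields" sequence in the same spirit. The esid/vsid register names, the write ordering after the index write, and out_be64() itself are assumptions modelled here by plain memory stores.

/* Sketch: simulated "select SLB slot, then program it" sequence.
 * Everything beyond the slb_index_W write is an assumption. */
#include <stdio.h>
#include <stdint.h>

struct priv2_sim {			/* stand-in for the priv2 MMIO block */
	uint64_t slb_index_W;
	uint64_t slb_esid_RW;
	uint64_t slb_vsid_RW;
};

static void out_be64_sim(uint64_t *reg, uint64_t val)
{
	*reg = val;			/* real code uses big-endian MMIO */
}

static void load_slb_sim(struct priv2_sim *priv2, int slbe,
			 uint64_t esid, uint64_t vsid)
{
	out_be64_sim(&priv2->slb_index_W, slbe);	/* pick the slot      */
	out_be64_sim(&priv2->slb_esid_RW, 0);		/* invalidate it      */
	out_be64_sim(&priv2->slb_vsid_RW, vsid);	/* program the vsid   */
	out_be64_sim(&priv2->slb_esid_RW, esid);	/* esid last -> valid */
}

int main(void)
{
	struct priv2_sim priv2 = { 0 };

	load_slb_sim(&priv2, 2, 0xd8000000ull, 0x12345000ull);
	printf("slot %llu: esid=0x%llx vsid=0x%llx\n",
	       (unsigned long long)priv2.slb_index_W,
	       (unsigned long long)priv2.slb_esid_RW,
	       (unsigned long long)priv2.slb_vsid_RW);
	return 0;
}
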
slbe              183 tools/arch/powerpc/include/uapi/asm/kvm.h 					__u64 slbe;