vsid 26 arch/microblaze/include/asm/mmu.h unsigned long vsid:24; /* Virtual segment identifier */
vsid 54 arch/microblaze/include/asm/mmu.h unsigned long vsid:24; /* Virtual Segment Identifier */
vsid 76 arch/powerpc/include/asm/book3s/32/mmu-hash.h unsigned long vsid:24; /* Virtual segment identifier */
vsid 159 arch/powerpc/include/asm/book3s/64/mmu-hash.h void (*hugepage_invalidate)(unsigned long vsid,
vsid 421 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid, int ssize)
vsid 427 arch/powerpc/include/asm/book3s/64/mmu-hash.h return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
vsid 437 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long hash, vsid;
vsid 446 arch/powerpc/include/asm/book3s/64/mmu-hash.h vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
vsid 447 arch/powerpc/include/asm/book3s/64/mmu-hash.h hash = vsid ^ (vsid << 25) ^
vsid 457 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid, pte_t *ptep, unsigned long trap,
vsid 460 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid, pte_t *ptep, unsigned long trap,
vsid 469 arch/powerpc/include/asm/book3s/64/mmu-hash.h int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
vsid 474 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid, pmd_t *pmdp, unsigned long trap,
vsid 478 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid, pmd_t *pmdp,
vsid 487 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid, unsigned long trap,
vsid 510 arch/powerpc/include/asm/book3s/64/mmu-hash.h u64 vsid;
vsid 745 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long vsid;
vsid 750 arch/powerpc/include/asm/book3s/64/mmu-hash.h vsid = protovsid * vsid_multiplier;
vsid 751 arch/powerpc/include/asm/book3s/64/mmu-hash.h vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
vsid 752 arch/powerpc/include/asm/book3s/64/mmu-hash.h return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
vsid 59 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
vsid 13 arch/powerpc/include/asm/copro.h u64 esid, vsid;
vsid 103 arch/powerpc/include/asm/kvm_book3s.h u64 vsid;
vsid 161 arch/powerpc/include/asm/kvm_book3s_asm.h u64 vsid;
vsid 406 arch/powerpc/include/asm/kvm_host.h int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
vsid 413 arch/powerpc/include/asm/kvm_host.h u64 vsid;
vsid 145 arch/powerpc/include/asm/lppaca.h __be64 vsid;
vsid 240 arch/powerpc/kernel/asm-offsets.c OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
vsid 73 arch/powerpc/kvm/book3s_32_mmu.c u64 *vsid);
vsid 83 arch/powerpc/kvm/book3s_32_mmu.c u64 vsid;
vsid 89 arch/powerpc/kvm/book3s_32_mmu.c kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
vsid 90 arch/powerpc/kvm/book3s_32_mmu.c return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
vsid 161 arch/powerpc/kvm/book3s_32_mmu.c u64 vsid;
vsid 163 arch/powerpc/kvm/book3s_32_mmu.c eaddr >> SID_SHIFT, &vsid);
vsid 164 arch/powerpc/kvm/book3s_32_mmu.c vsid <<= 16;
vsid 165 arch/powerpc/kvm/book3s_32_mmu.c pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
vsid 354 arch/powerpc/kvm/book3s_32_mmu.c u64 *vsid)
vsid 372 arch/powerpc/kvm/book3s_32_mmu.c *vsid = VSID_REAL | esid;
vsid 375 arch/powerpc/kvm/book3s_32_mmu.c *vsid = VSID_REAL_IR | gvsid;
vsid 378 arch/powerpc/kvm/book3s_32_mmu.c *vsid = VSID_REAL_DR | gvsid;
vsid 382 arch/powerpc/kvm/book3s_32_mmu.c *vsid = sr_vsid(sr);
vsid 384 arch/powerpc/kvm/book3s_32_mmu.c *vsid = VSID_BAT | gvsid;
vsid 391 arch/powerpc/kvm/book3s_32_mmu.c *vsid |= VSID_PR;
vsid 106 arch/powerpc/kvm/book3s_32_mmu_host.c static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
vsid 114 arch/powerpc/kvm/book3s_32_mmu_host.c hash = ((vsid ^ page) << 6);
vsid 135 arch/powerpc/kvm/book3s_32_mmu_host.c u64 vsid;
vsid 158 arch/powerpc/kvm/book3s_32_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
vsid 159 arch/powerpc/kvm/book3s_32_mmu_host.c map = find_sid_vsid(vcpu, vsid);
vsid 162 arch/powerpc/kvm/book3s_32_mmu_host.c map = find_sid_vsid(vcpu, vsid);
vsid 166 arch/powerpc/kvm/book3s_32_mmu_host.c vsid = map->host_vsid;
vsid 167 arch/powerpc/kvm/book3s_32_mmu_host.c vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
vsid 176 arch/powerpc/kvm/book3s_32_mmu_host.c pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
vsid 194 arch/powerpc/kvm/book3s_32_mmu_host.c pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
vsid 65 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.slb[i].vsid)
vsid 71 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].vsid);
vsid 92 arch/powerpc/kvm/book3s_64_mmu.c ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
vsid 155 arch/powerpc/kvm/book3s_64_mmu.c page, vcpu_book3s->sdr1, pteg, slbe->vsid);
vsid 175 arch/powerpc/kvm/book3s_64_mmu.c avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
vsid 398 arch/powerpc/kvm/book3s_64_mmu.c slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
vsid 588 arch/powerpc/kvm/book3s_64_mmu.c u64 *vsid)
vsid 600 arch/powerpc/kvm/book3s_64_mmu.c gvsid = slb->vsid;
vsid 646 arch/powerpc/kvm/book3s_64_mmu.c *vsid = gvsid;
vsid 654 arch/powerpc/kvm/book3s_64_mmu.c *vsid = VSID_REAL | esid;
vsid 76 arch/powerpc/kvm/book3s_64_mmu_host.c u64 vsid;
vsid 106 arch/powerpc/kvm/book3s_64_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
vsid 107 arch/powerpc/kvm/book3s_64_mmu_host.c map = find_sid_vsid(vcpu, vsid);
vsid 111 arch/powerpc/kvm/book3s_64_mmu_host.c map = find_sid_vsid(vcpu, vsid);
vsid 115 arch/powerpc/kvm/book3s_64_mmu_host.c vsid, orig_pte->eaddr);
vsid 142 arch/powerpc/kvm/book3s_64_mmu_host.c if (vsid & VSID_64K)
vsid 215 arch/powerpc/kvm/book3s_64_mmu_host.c u64 vsid;
vsid 217 arch/powerpc/kvm/book3s_64_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
vsid 218 arch/powerpc/kvm/book3s_64_mmu_host.c if (vsid & VSID_64K)
vsid 347 arch/powerpc/kvm/book3s_64_mmu_host.c svcpu->slb[slb_index].vsid = slb_vsid;
vsid 1330 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long offset, vsid;
vsid 1334 arch/powerpc/kvm/book3s_64_mmu_hv.c vsid = avpn >> 5;
vsid 1337 arch/powerpc/kvm/book3s_64_mmu_hv.c offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
vsid 1339 arch/powerpc/kvm/book3s_64_mmu_hv.c hash = vsid ^ (offset >> pshift);
vsid 1341 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long offset, vsid;
vsid 1345 arch/powerpc/kvm/book3s_64_mmu_hv.c vsid = avpn >> 17;
vsid 1347 arch/powerpc/kvm/book3s_64_mmu_hv.c offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
vsid 1349 arch/powerpc/kvm/book3s_64_mmu_hv.c hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
vsid 57 arch/powerpc/kvm/book3s_hv_ras.c unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
vsid 1121 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long vsid, hash;
vsid 1138 arch/powerpc/kvm/book3s_hv_rm_mmu.c vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
vsid 1139 arch/powerpc/kvm/book3s_hv_rm_mmu.c vsid ^= vsid << 25;
vsid 1142 arch/powerpc/kvm/book3s_hv_rm_mmu.c vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
vsid 1144 arch/powerpc/kvm/book3s_hv_rm_mmu.c hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
vsid 678 arch/powerpc/kvm/book3s_pr.c u64 vsid;
vsid 709 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
vsid 715 arch/powerpc/kvm/book3s_pr.c pte.vpage |= vsid;
vsid 717 arch/powerpc/kvm/book3s_pr.c if (vsid == -1)
vsid 19 arch/powerpc/mm/book3s64/hash_4k.c int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
vsid 64 arch/powerpc/mm/book3s64/hash_4k.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 115 arch/powerpc/mm/book3s64/hash_4k.c hash_failure_debug(ea, access, vsid, trap, ssize,
vsid 36 arch/powerpc/mm/book3s64/hash_64k.c int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
vsid 88 arch/powerpc/mm/book3s64/hash_64k.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 211 arch/powerpc/mm/book3s64/hash_64k.c hash_failure_debug(ea, access, vsid, trap, ssize,
vsid 224 arch/powerpc/mm/book3s64/hash_64k.c unsigned long vsid, pte_t *ptep, unsigned long trap,
vsid 270 arch/powerpc/mm/book3s64/hash_64k.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 323 arch/powerpc/mm/book3s64/hash_64k.c hash_failure_debug(ea, access, vsid, trap, ssize,
vsid 21 arch/powerpc/mm/book3s64/hash_hugepage.c int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
vsid 79 arch/powerpc/mm/book3s64/hash_hugepage.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 88 arch/powerpc/mm/book3s64/hash_hugepage.c flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
vsid 166 arch/powerpc/mm/book3s64/hash_hugepage.c hash_failure_debug(ea, access, vsid, trap, ssize,
vsid 25 arch/powerpc/mm/book3s64/hash_hugetlbpage.c int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
vsid 38 arch/powerpc/mm/book3s64/hash_hugetlbpage.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 117 arch/powerpc/mm/book3s64/hash_hugetlbpage.c hash_failure_debug(ea, access, vsid, trap, ssize,
vsid 522 arch/powerpc/mm/book3s64/hash_native.c unsigned long vsid;
vsid 526 arch/powerpc/mm/book3s64/hash_native.c vsid = get_kernel_vsid(ea, ssize);
vsid 527 arch/powerpc/mm/book3s64/hash_native.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 553 arch/powerpc/mm/book3s64/hash_native.c unsigned long vsid;
vsid 557 arch/powerpc/mm/book3s64/hash_native.c vsid = get_kernel_vsid(ea, ssize);
vsid 558 arch/powerpc/mm/book3s64/hash_native.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 616 arch/powerpc/mm/book3s64/hash_native.c static void native_hugepage_invalidate(unsigned long vsid,
vsid 641 arch/powerpc/mm/book3s64/hash_native.c vpn = hpt_vpn(addr, vsid, ssize);
vsid 678 arch/powerpc/mm/book3s64/hash_native.c static void native_hugepage_invalidate(unsigned long vsid,
vsid 693 arch/powerpc/mm/book3s64/hash_native.c unsigned long vsid, seg_off;
vsid 722 arch/powerpc/mm/book3s64/hash_native.c vsid = avpn >> 5;
vsid 725 arch/powerpc/mm/book3s64/hash_native.c vpi = (vsid ^ pteg) & htab_hash_mask;
vsid 728 arch/powerpc/mm/book3s64/hash_native.c *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
vsid 733 arch/powerpc/mm/book3s64/hash_native.c vsid = avpn >> 17;
vsid 735 arch/powerpc/mm/book3s64/hash_native.c vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
vsid 738 arch/powerpc/mm/book3s64/hash_native.c *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
vsid 316 arch/powerpc/mm/book3s64/hash_pgtable.c unsigned long vsid;
vsid 331 arch/powerpc/mm/book3s64/hash_pgtable.c vsid = get_user_vsid(&mm->context, addr, ssize);
vsid 332 arch/powerpc/mm/book3s64/hash_pgtable.c WARN_ON(vsid == 0);
vsid 334 arch/powerpc/mm/book3s64/hash_pgtable.c vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
vsid 341 arch/powerpc/mm/book3s64/hash_pgtable.c return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
vsid 46 arch/powerpc/mm/book3s64/hash_tlb.c unsigned long vsid;
vsid 91 arch/powerpc/mm/book3s64/hash_tlb.c vsid = get_user_vsid(&mm->context, addr, ssize);
vsid 93 arch/powerpc/mm/book3s64/hash_tlb.c vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
vsid 96 arch/powerpc/mm/book3s64/hash_tlb.c WARN_ON(vsid == 0);
vsid 97 arch/powerpc/mm/book3s64/hash_tlb.c vpn = hpt_vpn(addr, vsid, ssize);
vsid 263 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid = get_kernel_vsid(vaddr, ssize);
vsid 264 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
vsid 270 arch/powerpc/mm/book3s64/hash_utils.c if (!vsid)
vsid 1209 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid, unsigned long trap,
vsid 1217 arch/powerpc/mm/book3s64/hash_utils.c trap, vsid, ssize, psize, lpsize, pte);
vsid 1250 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid;
vsid 1271 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_user_vsid(&mm->context, ea, ssize);
vsid 1274 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsid 1280 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsid 1292 arch/powerpc/mm/book3s64/hash_utils.c DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
vsid 1295 arch/powerpc/mm/book3s64/hash_utils.c if (!vsid) {
vsid 1347 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
vsid 1351 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_huge(ea, access, vsid, ptep, trap,
vsid 1412 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_64K(ea, access, vsid, ptep, trap,
vsid 1421 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_4K(ea, access, vsid, ptep, trap,
vsid 1430 arch/powerpc/mm/book3s64/hash_utils.c hash_failure_debug(ea, access, vsid, trap, ssize, psize,
vsid 1525 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid;
vsid 1547 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_user_vsid(&mm->context, ea, ssize);
vsid 1548 arch/powerpc/mm/book3s64/hash_utils.c if (!vsid)
vsid 1583 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_64K(ea, access, vsid, ptep, trap,
vsid 1587 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
vsid 1594 arch/powerpc/mm/book3s64/hash_utils.c hash_failure_debug(ea, access, vsid, trap, ssize,
vsid 1744 arch/powerpc/mm/book3s64/hash_utils.c void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
vsid 1765 arch/powerpc/mm/book3s64/hash_utils.c mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
vsid 1786 arch/powerpc/mm/book3s64/hash_utils.c vpn = hpt_vpn(addr, vsid, ssize);
vsid 1874 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
vsid 1875 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
vsid 1882 arch/powerpc/mm/book3s64/hash_utils.c if (!vsid)
vsid 1899 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
vsid 1900 arch/powerpc/mm/book3s64/hash_utils.c unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
vsid 44 arch/powerpc/mm/book3s64/slb.c static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
vsid 47 arch/powerpc/mm/book3s64/slb.c return (vsid << slb_vsid_shift(ssize)) | flags |
vsid 90 arch/powerpc/mm/book3s64/slb.c WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
vsid 129 arch/powerpc/mm/book3s64/slb.c : "r" (be64_to_cpu(p->save_area[index].vsid)),
vsid 178 arch/powerpc/mm/book3s64/slb.c :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
vsid 204 arch/powerpc/mm/book3s64/slb.c slb_ptr->vsid = v;
vsid 223 arch/powerpc/mm/book3s64/slb.c v = slb_ptr->vsid;
vsid 460 arch/powerpc/mm/book3s64/slb.c be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
vsid 643 arch/powerpc/mm/book3s64/slb.c unsigned long vsid;
vsid 647 arch/powerpc/mm/book3s64/slb.c vsid = get_vsid(context, ea, ssize);
vsid 648 arch/powerpc/mm/book3s64/slb.c if (!vsid)
vsid 664 arch/powerpc/mm/book3s64/slb.c vsid_data = __mk_vsid_data(vsid, ssize, flags);
vsid 92 arch/powerpc/mm/copro_fault.c u64 vsid, vsidkey;
vsid 102 arch/powerpc/mm/copro_fault.c vsid = get_user_vsid(&mm->context, ea, ssize);
vsid 109 arch/powerpc/mm/copro_fault.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsid 116 arch/powerpc/mm/copro_fault.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsid 123 arch/powerpc/mm/copro_fault.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsid 131 arch/powerpc/mm/copro_fault.c if (!vsid)
vsid 134 arch/powerpc/mm/copro_fault.c vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;
vsid 136 arch/powerpc/mm/copro_fault.c vsid |= mmu_psize_defs[psize].sllp |
vsid 140 arch/powerpc/mm/copro_fault.c slb->vsid = vsid;
vsid 211 arch/powerpc/mm/ptdump/hashpagetable.c unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v;
vsid 216 arch/powerpc/mm/ptdump/hashpagetable.c vsid = get_kernel_vsid(ea, ssize);
vsid 217 arch/powerpc/mm/ptdump/hashpagetable.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 243 arch/powerpc/mm/ptdump/hashpagetable.c unsigned long vsid, vpn, hash, hpte_group, want_v;
vsid 249 arch/powerpc/mm/ptdump/hashpagetable.c vsid = get_kernel_vsid(ea, ssize);
vsid 250 arch/powerpc/mm/ptdump/hashpagetable.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 140 arch/powerpc/platforms/cell/spu_base.c __func__, slbe, slb->vsid, slb->esid);
vsid 146 arch/powerpc/platforms/cell/spu_base.c out_be64(&priv2->slb_vsid_RW, slb->vsid);
vsid 219 arch/powerpc/platforms/cell/spu_base.c slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
vsid 954 arch/powerpc/platforms/pseries/lpar.c unsigned long lpar_rc, slot, vsid, flags;
vsid 956 arch/powerpc/platforms/pseries/lpar.c vsid = get_kernel_vsid(ea, ssize);
vsid 957 arch/powerpc/platforms/pseries/lpar.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 1183 arch/powerpc/platforms/pseries/lpar.c static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
vsid 1206 arch/powerpc/platforms/pseries/lpar.c vpn = hpt_vpn(addr, vsid, ssize);
vsid 1233 arch/powerpc/platforms/pseries/lpar.c static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
vsid 1246 arch/powerpc/platforms/pseries/lpar.c unsigned long slot, vsid;
vsid 1248 arch/powerpc/platforms/pseries/lpar.c vsid = get_kernel_vsid(ea, ssize);
vsid 1249 arch/powerpc/platforms/pseries/lpar.c vpn = hpt_vpn(ea, vsid, ssize);
vsid 1912 arch/powerpc/platforms/pseries/lpar.c static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
vsid 1933 arch/powerpc/platforms/pseries/lpar.c if (vsid >= vsid_modulus)
vsid 1943 arch/powerpc/platforms/pseries/lpar.c max_mod_inv = 0x7fffffffffffffffull / vsid;
vsid 1945 arch/powerpc/platforms/pseries/lpar.c return (vsid * modinv) % vsid_modulus;
vsid 1950 arch/powerpc/platforms/pseries/lpar.c protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
vsid 1951 arch/powerpc/platforms/pseries/lpar.c protovsid = (protovsid + vsid * modinv) % vsid_modulus;
vsid 2457 arch/powerpc/xmon/xmon.c u64 esid, vsid;
vsid 2463 arch/powerpc/xmon/xmon.c vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid);
vsid 2465 arch/powerpc/xmon/xmon.c if (esid || vsid) {
vsid 2467 arch/powerpc/xmon/xmon.c 22, "slb_shadow", i, esid, vsid);
vsid 3563 arch/powerpc/xmon/xmon.c unsigned long esid,vsid;
vsid 3570 arch/powerpc/xmon/xmon.c asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
vsid 3572 arch/powerpc/xmon/xmon.c if (!esid && !vsid)
vsid 3575 arch/powerpc/xmon/xmon.c printf("%02d %016lx %016lx", i, esid, vsid);
vsid 3582 arch/powerpc/xmon/xmon.c llp = vsid & SLB_VSID_LLP;
vsid 3583 arch/powerpc/xmon/xmon.c if (vsid & SLB_VSID_B_1T) {
vsid 3586 arch/powerpc/xmon/xmon.c (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
vsid 3591 arch/powerpc/xmon/xmon.c (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
vsid 24 drivers/misc/cxl/fault.c return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
vsid 40 drivers/misc/cxl/fault.c if (slb->vsid & SLB_VSID_B_1T)
vsid 75 drivers/misc/cxl/fault.c sste - ctx->sstp, slb->vsid, slb->esid);
vsid 76 drivers/misc/cxl/fault.c trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
vsid 78 drivers/misc/cxl/fault.c sste->vsid_data = cpu_to_be64(slb->vsid);
vsid 299 drivers/misc/cxl/fault.c static u64 next_segment(u64 ea, u64 vsid)
vsid 301 drivers/misc/cxl/fault.c if (vsid & SLB_VSID_B_1T)
vsid 327 drivers/misc/cxl/fault.c ea = next_segment(ea, slb.vsid)) {
vsid 109 drivers/misc/cxl/main.c unsigned long vsid;
vsid 124 drivers/misc/cxl/main.c vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;
vsid 141 drivers/misc/cxl/main.c sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */
vsid 142 drivers/misc/cxl/main.c sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
vsid 147 drivers/misc/cxl/main.c (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);
vsid 145 tools/testing/selftests/powerpc/vphn/asm/lppaca.h __be64 vsid;
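
The mmu-hash.h lines 745-752 listed above show the VSID scrambling step (protovsid multiplied by a prime, then folded modulo 2^vsid_bits - 1). The following standalone sketch reproduces only that arithmetic outside the kernel; the multiplier, bit width, and protovsid value used in main() are illustrative assumptions, not the kernel's VSID_MULTIPLIER_*/VSID_BITS_* configuration.

/*
 * Minimal sketch of the VSID scrambling fold seen in
 * arch/powerpc/include/asm/book3s/64/mmu-hash.h (lines 745-752 above).
 * Values below are hypothetical and chosen only to show the arithmetic.
 */
#include <stdio.h>

static unsigned long vsid_scramble(unsigned long protovsid,
				   unsigned long vsid_multiplier,
				   int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = (1UL << vsid_bits) - 1;

	vsid = protovsid * vsid_multiplier;
	/* Fold the product back into vsid_bits bits (mod 2^vsid_bits - 1). */
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}

int main(void)
{
	/* Hypothetical inputs; the kernel uses a prime multiplier. */
	unsigned long protovsid = 0x12345;
	unsigned long multiplier = 12538073;
	int bits = 37;

	printf("protovsid %#lx -> vsid %#lx\n",
	       protovsid, vsid_scramble(protovsid, multiplier, bits));
	return 0;
}

The vsid_unscramble() lines from arch/powerpc/platforms/pseries/lpar.c in the listing perform the inverse of this fold via a modular inverse of the multiplier.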