/linux-4.4.14/arch/powerpc/mm/ |
D | copro_fault.c |
    103  u64 vsid, vsidkey;  in copro_calculate_slb() local
    111  vsid = get_vsid(mm->context.id, ea, ssize);  in copro_calculate_slb()
    121  vsid = get_kernel_vsid(ea, mmu_kernel_ssize);  in copro_calculate_slb()
    128  vsid = get_kernel_vsid(ea, mmu_kernel_ssize);  in copro_calculate_slb()
    136  vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;  in copro_calculate_slb()
    138  vsid |= mmu_psize_defs[psize].sllp |  in copro_calculate_slb()
    142  slb->vsid = vsid;  in copro_calculate_slb()
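
The copro_fault.c hits above show copro_calculate_slb() assembling the VSID doubleword of an SLB entry: the raw VSID from get_vsid()/get_kernel_vsid() is shifted into its field, OR'ed with the protection-key bits and the page-size (SLLP) encoding, and stored in slb->vsid. A minimal user-space sketch of that packing; the constant values and the sllp_for_psize() helper are assumptions for illustration, not the kernel's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values, after arch/powerpc/include/asm/mmu-hash64.h */
    #define SLB_VSID_SHIFT     12                     /* 256MB segments */
    #define SLB_VSID_SHIFT_1T  24                     /* 1TB segments */
    #define SLB_VSID_KP        0x0000000000000400ULL  /* protection-key bit */

    /* Hypothetical stand-in for mmu_psize_defs[psize].sllp; 0x110 would
     * be the 64K encoding (SLB_VSID_L | SLB_VSID_LP_01). */
    static uint64_t sllp_for_psize(int psize)
    {
        return psize ? 0x110ULL : 0;
    }

    /* Pack a raw VSID the way copro_calculate_slb() does at lines 136-142 */
    static uint64_t mk_slb_vsid(uint64_t vsid, int is_1t, uint64_t key, int psize)
    {
        int shift = is_1t ? SLB_VSID_SHIFT_1T : SLB_VSID_SHIFT;
        uint64_t v = (vsid << shift) | key;   /* VSID field plus key bits */
        v |= sllp_for_psize(psize);           /* page-size (SLLP) encoding */
        return v;
    }

    int main(void)
    {
        printf("slb.vsid = 0x%016llx\n",
               (unsigned long long)mk_slb_vsid(0x12345, 0, SLB_VSID_KP, 0));
        return 0;
    }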
|
D | hash_utils_64.c |
    201  unsigned long vsid = get_kernel_vsid(vaddr, ssize);  in htab_bolt_mapping() local
    202  unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);  in htab_bolt_mapping()
    208  if (!vsid)  in htab_bolt_mapping()
    960  unsigned long vsid, unsigned long trap,  in hash_failure_debug() argument
    968  trap, vsid, ssize, psize, lpsize, pte);  in hash_failure_debug()
    1000  unsigned long vsid;  in hash_page_mm() local
    1022  vsid = get_vsid(mm->context.id, ea, ssize);  in hash_page_mm()
    1025  vsid = get_kernel_vsid(ea, mmu_kernel_ssize);  in hash_page_mm()
    1039  DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);  in hash_page_mm()
    1042  if (!vsid) {  in hash_page_mm()
    [all …]
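
The hash_page_mm() hits (lines 1022/1025 above) are the canonical VSID lookup: user addresses resolve through get_vsid() with the mm's context id, kernel and vmalloc addresses through get_kernel_vsid(), and a zero VSID makes the fault fail. A rough sketch of that dispatch, assuming the 4.4-era region nibbles (user 0x0, kernel 0xc, vmalloc 0xd) and with trivial stand-ins for the real VSID scramble:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed 4.4-era layout: the top nibble of a 64-bit EA selects the
     * region (user = 0x0, kernel linear map = 0xc, vmalloc = 0xd). */
    #define REGION_ID(ea)      ((uint64_t)(ea) >> 60)
    #define USER_REGION_ID     0x0
    #define KERNEL_REGION_ID   0xc
    #define VMALLOC_REGION_ID  0xd

    /* Trivial stand-ins for the real get_vsid()/get_kernel_vsid(), which
     * additionally scramble the (context, esid) pair. */
    static uint64_t get_vsid(uint64_t context, uint64_t ea, int ssize)
    {
        (void)ssize;
        return (context << 16) | ((ea >> 28) & 0xffff);
    }
    static uint64_t get_kernel_vsid(uint64_t ea, int ssize)
    {
        return get_vsid(0, ea, ssize);
    }

    /* The dispatch seen at hash_page_mm() lines 1022/1025 above */
    static uint64_t resolve_vsid(uint64_t context, uint64_t ea, int ssize)
    {
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
            return get_vsid(context, ea, ssize);
        case VMALLOC_REGION_ID:
        case KERNEL_REGION_ID:
            return get_kernel_vsid(ea, ssize);
        default:
            return 0;   /* hash_page_mm() treats vsid == 0 as a bad address */
        }
    }

    int main(void)
    {
        printf("vsid = 0x%llx\n",
               (unsigned long long)resolve_vsid(42, 0x0000000010002000ULL, 0));
        return 0;
    }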
|
D | hash_native_64.c |
    374  unsigned long vsid;  in native_hpte_updateboltedpp() local
    378  vsid = get_kernel_vsid(ea, ssize);  in native_hpte_updateboltedpp()
    379  vpn = hpt_vpn(ea, vsid, ssize);  in native_hpte_updateboltedpp()
    432  static void native_hugepage_invalidate(unsigned long vsid,  in native_hugepage_invalidate() argument
    457  vpn = hpt_vpn(addr, vsid, ssize);  in native_hugepage_invalidate()
    522  unsigned long vsid, seg_off;  in hpte_decode() local
    555  vsid = avpn >> 5;  in hpte_decode()
    558  vpi = (vsid ^ pteg) & htab_hash_mask;  in hpte_decode()
    561  *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;  in hpte_decode()
    566  vsid = avpn >> 17;  in hpte_decode()
    [all …]
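
The hpte_decode() hits exploit the fact that the primary hash is an XOR: for a 256MB segment the PTEG index is (vsid ^ seg_index) & htab_hash_mask, so once the VSID is recovered from the AVPN, the low bits of the segment offset fall out as vpi = (vsid ^ pteg) & htab_hash_mask (line 558 above). A tiny demonstration that the inversion is exact; the parameters are hypothetical:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical parameters: 4K pages, 2^16-entry hash table */
        const uint64_t htab_hash_mask = (1ULL << 16) - 1;
        uint64_t vsid = 0xabcdef12345ULL;
        uint64_t seg_index = 0x4321;      /* page index within the segment */

        /* forward direction: the primary PTEG index is an XOR hash */
        uint64_t pteg = (vsid ^ seg_index) & htab_hash_mask;

        /* inverse direction, as hpte_decode() does: XOR is self-inverse */
        uint64_t vpi = (vsid ^ pteg) & htab_hash_mask;
        assert(vpi == (seg_index & htab_hash_mask));
        return 0;
    }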
|
D | tlb_hash64.c |
    48  unsigned long vsid;  in hpte_need_flush() local
    85  vsid = get_vsid(mm->context.id, addr, ssize);  in hpte_need_flush()
    87  vsid = get_kernel_vsid(addr, mmu_kernel_ssize);  in hpte_need_flush()
    90  WARN_ON(vsid == 0);  in hpte_need_flush()
    91  vpn = hpt_vpn(addr, vsid, ssize);  in hpte_need_flush()
|
D | hugepage-hash64.c |
    21  int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,  in __hash_page_thp() argument
    87  vpn = hpt_vpn(ea, vsid, ssize);  in __hash_page_thp()
    96  flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,  in __hash_page_thp()
    171  hash_failure_debug(ea, access, vsid, trap, ssize,  in __hash_page_thp()
|
D | hugetlbpage-hash64.c |
    21  int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,  in __hash_page_huge() argument
    33  vpn = hpt_vpn(ea, vsid, ssize);  in __hash_page_huge()
    116  hash_failure_debug(ea, access, vsid, trap, ssize,  in __hash_page_huge()
|
D | pgtable_64.c |
    730  unsigned long vsid;  in hpte_do_hugepage_flush() local
    746  vsid = get_vsid(mm->context.id, addr, ssize);  in hpte_do_hugepage_flush()
    747  WARN_ON(vsid == 0);  in hpte_do_hugepage_flush()
    749  vsid = get_kernel_vsid(addr, mmu_kernel_ssize);  in hpte_do_hugepage_flush()
    757  return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);  in hpte_do_hugepage_flush()
|
D | slb.c |
    73  p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));  in slb_shadow_update()
    120  be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);  in __slb_flush_and_rebolt()
|
/linux-4.4.14/arch/powerpc/kvm/ |
D | book3s_32_mmu.c |
    85  u64 *vsid);
    95  u64 vsid;  in kvmppc_mmu_book3s_32_ea_to_vp() local
    101  kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_book3s_32_ea_to_vp()
    102  return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);  in kvmppc_mmu_book3s_32_ea_to_vp()
    173  u64 vsid;  in kvmppc_mmu_book3s_32_xlate_bat() local
    175  eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_book3s_32_xlate_bat()
    176  vsid <<= 16;  in kvmppc_mmu_book3s_32_xlate_bat()
    177  pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;  in kvmppc_mmu_book3s_32_xlate_bat()
    364  u64 *vsid)  in kvmppc_mmu_book3s_32_esid_to_vsid() argument
    382  *vsid = VSID_REAL | esid;  in kvmppc_mmu_book3s_32_esid_to_vsid()
    [all …]
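
Line 102 above is the whole 32-bit EA-to-virtual-page mapping: the low 16 bits of the 4K page index come from the EA, and everything above them is the segment's VSID. As a standalone sketch, with hypothetical input values:

    #include <stdint.h>
    #include <stdio.h>

    /* As at line 102: low 16 bits of the 4K page index from the EA,
     * the VSID shifted in above them. */
    static uint64_t ea_to_vp(uint64_t eaddr, uint64_t vsid)
    {
        return ((eaddr >> 12) & 0xffff) | (vsid << 16);
    }

    int main(void)
    {
        printf("vp = 0x%llx\n", (unsigned long long)ea_to_vp(0x10345000, 0x7b));
        return 0;
    }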
|
D | book3s_32_mmu_host.c |
    118  static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,  in kvmppc_mmu_get_pteg() argument
    126  hash = ((vsid ^ page) << 6);  in kvmppc_mmu_get_pteg()
    147  u64 vsid;  in kvmppc_mmu_map_page() local
    170  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    171  map = find_sid_vsid(vcpu, vsid);  in kvmppc_mmu_map_page()
    174  map = find_sid_vsid(vcpu, vsid);  in kvmppc_mmu_map_page()
    178  vsid = map->host_vsid;  in kvmppc_mmu_map_page()
    179  vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |  in kvmppc_mmu_map_page()
    188  pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);  in kvmppc_mmu_map_page()
    206  pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |  in kvmppc_mmu_map_page()
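
Line 206 above builds the first word of a classic 32-bit hashed PTE: the valid bit, the 24-bit VSID at bits 7-30, the hash-select bit H (left zero for the primary group), and the 6-bit API taken from the top of the 28-bit segment offset. A sketch of just that word, with PTE_V assumed to be the top bit:

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_V 0x80000000u   /* assumed: valid bit of HPTE word 0 */

    /* HPTE word 0 as built at line 206: V, VSID, H(=0), API */
    static uint32_t hpte_word0(uint32_t eaddr, uint32_t vsid)
    {
        return ((eaddr & 0x0fffffff) >> 22)  /* API: top 6 bits of seg offset */
             | (vsid << 7)                   /* 24-bit VSID at bits 7..30 */
             | PTE_V;                        /* valid */
    }

    int main(void)
    {
        printf("pteg0 = 0x%08x\n", hpte_word0(0x0345f000u, 0x123456u));
        return 0;
    }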
|
D | book3s_64_mmu_host.c |
    88  u64 vsid;  in kvmppc_mmu_map_page() local
    118  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    119  map = find_sid_vsid(vcpu, vsid);  in kvmppc_mmu_map_page()
    123  map = find_sid_vsid(vcpu, vsid);  in kvmppc_mmu_map_page()
    127  vsid, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    152  if (vsid & VSID_64K)  in kvmppc_mmu_map_page()
    220  u64 vsid;  in kvmppc_mmu_unmap_page() local
    222  vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_unmap_page()
    223  if (vsid & VSID_64K)  in kvmppc_mmu_unmap_page()
    346  svcpu->slb[slb_index].vsid = slb_vsid;  in kvmppc_mmu_map_segment()
|
D | book3s_64_mmu.c |
    68  if (vcpu->arch.slb[i].vsid)  in kvmppc_mmu_book3s_64_find_slbe()
    74  vcpu->arch.slb[i].vsid);  in kvmppc_mmu_book3s_64_find_slbe()
    95  ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));  in kvmppc_slb_calc_vpn()
    158  page, vcpu_book3s->sdr1, pteg, slbe->vsid);  in kvmppc_mmu_book3s_64_get_pteg()
    178  avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);  in kvmppc_mmu_book3s_64_get_avpn()
    401  slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);  in kvmppc_mmu_book3s_64_slbmte()
    578  u64 *vsid)  in kvmppc_mmu_book3s_64_esid_to_vsid() argument
    590  gvsid = slb->vsid;  in kvmppc_mmu_book3s_64_esid_to_vsid()
    636  *vsid = gvsid;  in kvmppc_mmu_book3s_64_esid_to_vsid()
    644  *vsid = VSID_REAL | esid;  in kvmppc_mmu_book3s_64_esid_to_vsid()
|
D | book3s_hv_rm_mmu.c |
    841  unsigned long vsid, hash;  in kvmppc_hv_find_lock_hpte() local
    858  vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;  in kvmppc_hv_find_lock_hpte()
    859  vsid ^= vsid << 25;  in kvmppc_hv_find_lock_hpte()
    862  vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;  in kvmppc_hv_find_lock_hpte()
    864  hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;  in kvmppc_hv_find_lock_hpte()
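
kvmppc_hv_find_lock_hpte() computes the primary PTEG index straight from the guest SLB value: strip the B (segment-size) field, shift the VSID down, fold it with vsid ^= vsid << 25 for 1TB segments, then XOR with the in-segment page index and mask. A user-space rendering of lines 858-864, with the SLB_VSID_* constants assumed from mmu-hash64.h and a hypothetical HPT mask:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed constants (mmu-hash64.h): B field of the SLB vsid dword
     * and the VSID field positions for 256MB and 1TB segments. */
    #define SLB_VSID_B         0xc000000000000000ULL
    #define SLB_VSID_B_1T      0x4000000000000000ULL
    #define SLB_VSID_SHIFT     12
    #define SLB_VSID_SHIFT_1T  24

    /* Primary PTEG index, following kvmppc_hv_find_lock_hpte() */
    static uint64_t pteg_index(uint64_t slb_v, uint64_t eaddr,
                               unsigned int pshift, uint64_t hpt_mask)
    {
        uint64_t vsid, somask;

        if (slb_v & SLB_VSID_B_1T) {            /* 1TB segment */
            somask = (1ULL << 40) - 1;
            vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
            vsid ^= vsid << 25;                 /* fold the wider VSID */
        } else {                                /* 256MB segment */
            somask = (1ULL << 28) - 1;
            vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        return (vsid ^ ((eaddr & somask) >> pshift)) & hpt_mask;
    }

    int main(void)
    {
        /* hypothetical SLB value and EA: 4K pages, 2^16-entry HPT */
        uint64_t slb_v = 0x12345ULL << SLB_VSID_SHIFT;
        printf("pteg = 0x%llx\n",
               (unsigned long long)pteg_index(slb_v, 0x0345f000, 12, 0xffff));
        return 0;
    }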
|
D | book3s_hv_ras.c |
    55  unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);  in reload_slb()
|
D | book3s_pr.c |
    542  u64 vsid;  in kvmppc_handle_pagefault() local
    572  vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  in kvmppc_handle_pagefault()
    578  pte.vpage |= vsid;  in kvmppc_handle_pagefault()
    580  if (vsid == -1)  in kvmppc_handle_pagefault()
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | mmu-hash64.h |
    289  unsigned long vsid, int ssize)  in hpt_vpn() argument
    295  return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);  in hpt_vpn()
    305  unsigned long hash, vsid;  in hpt_hash() local
    314  vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);  in hpt_hash()
    315  hash = vsid ^ (vsid << 25) ^  in hpt_hash()
    325  unsigned long vsid, pte_t *ptep, unsigned long trap,
    328  unsigned long vsid, pte_t *ptep, unsigned long trap,
    337  int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
    342  unsigned long vsid, pmd_t *pmdp, unsigned long trap,
    346  unsigned long vsid, pmd_t *pmdp,  in __hash_page_thp() argument
    [all …]
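
hpt_vpn() and hpt_hash() above are the primitives most of this listing leans on: hpt_vpn() packs the VSID above the in-segment page index (line 295), and hpt_hash() later peels the VSID back off the top, folding it as vsid ^ (vsid << 25) for 1TB segments (lines 314-315). A round-trip check of the 256MB case, with the shift constants assumed:

    #include <assert.h>
    #include <stdint.h>

    #define VPN_SHIFT 12   /* assumed: 4K VPN granularity */
    #define SID_SHIFT 28   /* assumed: 256MB segment */

    /* hpt_vpn() as shown at line 295 above */
    static uint64_t hpt_vpn(uint64_t ea, uint64_t vsid, int s_shift)
    {
        uint64_t mask = (1ULL << (s_shift - VPN_SHIFT)) - 1;
        return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
    }

    int main(void)
    {
        uint64_t ea = 0xc000000002345000ULL, vsid = 0xabcdeULL;
        uint64_t mask = (1ULL << (SID_SHIFT - VPN_SHIFT)) - 1;
        uint64_t vpn = hpt_vpn(ea, vsid, SID_SHIFT);

        /* round trip: VSID in the high bits, page index in the low bits,
         * exactly the split hpt_hash() relies on */
        assert(vpn >> (SID_SHIFT - VPN_SHIFT) == vsid);
        assert((vpn & mask) == ((ea >> VPN_SHIFT) & mask));
        return 0;
    }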
|
D | copro.h |
    15  u64 esid, vsid;  member
|
D | mmu-hash32.h |
    65  unsigned long vsid:24;  /* Virtual segment identifier */  member
|
D | lppaca.h |
    131  __be64 vsid;  member
|
D | kvm_book3s_asm.h |
    155  u64 vsid;
|
D | tlbflush.h |
    130  extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
|
D | kvm_host.h |
    362  int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
    369  u64 vsid;  member
|
D | machdep.h |
    60  void (*hugepage_invalidate)(unsigned long vsid,
|
D | kvm_book3s.h |
    76  u64 vsid;  member
|
/linux-4.4.14/drivers/misc/cxl/ |
D | fault.c |
    27  return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&  in sste_matches()
    43  if (slb->vsid & SLB_VSID_B_1T)  in find_free_sste()
    78  sste - ctx->sstp, slb->vsid, slb->esid);  in cxl_load_segment()
    79  trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);  in cxl_load_segment()
    81  sste->vsid_data = cpu_to_be64(slb->vsid);  in cxl_load_segment()
    250  static u64 next_segment(u64 ea, u64 vsid)  in next_segment() argument
    252  if (vsid & SLB_VSID_B_1T)  in next_segment()
    283  ea = next_segment(ea, slb.vsid)) {  in cxl_prefault_vma()
|
D | main.c |
    95  unsigned long vsid;  in cxl_alloc_sst() local
    110  vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;  in cxl_alloc_sst()
    127  sstp0 |= vsid >> (50-14);  /* Top 14 bits of VSID */  in cxl_alloc_sst()
    128  sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;  in cxl_alloc_sst()
    133  (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);  in cxl_alloc_sst()
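
Lines 127-128 above split the 50-bit (VSID << 12) quantity across the two CXL segment-table registers: the top 14 bits go into sstp0 and the remaining 36 into the high end of sstp1. The same arithmetic in isolation; ea_mask here is a hypothetical placeholder for the mask the real code derives from the segment-table size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical 50-bit (vsid << 12) value and ea_mask */
        uint64_t vsid    = 0x0003456789abcdefULL & ((1ULL << 50) - 1);
        uint64_t ea_mask = (1ULL << 28) - 1;
        uint64_t sstp0 = 0, sstp1 = 0;

        sstp0 |= vsid >> (50 - 14);                     /* top 14 bits of VSID */
        sstp1 |= (vsid << (64 - (50 - 14))) & ~ea_mask; /* remaining 36 bits */

        printf("sstp0=0x%016llx sstp1=0x%016llx\n",
               (unsigned long long)sstp0, (unsigned long long)sstp1);
        return 0;
    }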
|
/linux-4.4.14/arch/microblaze/include/asm/ |
D | mmu.h |
    26  unsigned long vsid:24;  /* Virtual segment identifier */  member
    54  unsigned long vsid:24;  /* Virtual Segment Identifier */  member
|
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
D | lpar.c |
    366  unsigned long lpar_rc, slot, vsid, flags;  in pSeries_lpar_hpte_updateboltedpp() local
    368  vsid = get_kernel_vsid(ea, ssize);  in pSeries_lpar_hpte_updateboltedpp()
    369  vpn = hpt_vpn(ea, vsid, ssize);  in pSeries_lpar_hpte_updateboltedpp()
    448  static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,  in pSeries_lpar_hugepage_invalidate() argument
    471  vpn = hpt_vpn(addr, vsid, ssize);  in pSeries_lpar_hugepage_invalidate()
    502  unsigned long slot, vsid;  in pSeries_lpar_hpte_removebolted() local
    504  vsid = get_kernel_vsid(ea, ssize);  in pSeries_lpar_hpte_removebolted()
    505  vpn = hpt_vpn(ea, vsid, ssize);  in pSeries_lpar_hpte_removebolted()
|
/linux-4.4.14/arch/powerpc/xmon/ |
D | xmon.c |
    2130  u64 esid, vsid;  in dump_one_paca() local
    2136  vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid);  in dump_one_paca()
    2138  if (esid || vsid) {  in dump_one_paca()
    2140  i, esid, vsid);  in dump_one_paca()
    2804  unsigned long esid, vsid;  in dump_segments() local
    2811  asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));  in dump_segments()
    2812  if (esid || vsid) {  in dump_segments()
    2813  printf("%02d %016lx %016lx", i, esid, vsid);  in dump_segments()
    2815  llp = vsid & SLB_VSID_LLP;  in dump_segments()
    2816  if (vsid & SLB_VSID_B_1T) {  in dump_segments()
    [all …]
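
dump_segments() reads each SLB slot with slbmfee/slbmfev and then decodes the VSID doubleword (lines 2815-2816 above). The privileged reads only work on 64-bit Book3S hardware, but the decode half is plain bit arithmetic; a sketch with the SLB_VSID_* values assumed from my reading of mmu-hash64.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed constants from mmu-hash64.h */
    #define SLB_VSID_B_1T      0x4000000000000000ULL
    #define SLB_VSID_LLP       0x0000000000000130ULL /* L | LP page-size bits */
    #define SLB_VSID_SHIFT     12
    #define SLB_VSID_SHIFT_1T  24

    /* Decode one SLB "vsid" dword roughly as dump_segments() prints it */
    static void decode_slb_vsid(int i, uint64_t vsid)
    {
        uint64_t llp = vsid & SLB_VSID_LLP;

        if (vsid & SLB_VSID_B_1T)
            printf("%02d 1T   vsid=%llx llp=%llx\n", i,
                   (unsigned long long)(vsid >> SLB_VSID_SHIFT_1T),
                   (unsigned long long)llp);
        else
            printf("%02d 256M vsid=%llx llp=%llx\n", i,
                   (unsigned long long)(vsid >> SLB_VSID_SHIFT),
                   (unsigned long long)llp);
    }

    int main(void)
    {
        decode_slb_vsid(0, (0x12345ULL << SLB_VSID_SHIFT) | 0x110);
        return 0;
    }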
|
/linux-4.4.14/arch/powerpc/platforms/cell/ |
D | spu_base.c |
    153  __func__, slbe, slb->vsid, slb->esid);  in spu_load_slb()
    159  out_be64(&priv2->slb_vsid_RW, slb->vsid);  in spu_load_slb()
    230  slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |  in __spu_kernel_slb()
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | mce_power.c |
    108  unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);  in flush_and_reload_slb()
|
D | asm-offsets.c |
    233  offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));  in main()
|
/linux-4.4.14/Documentation/virtual/kvm/ |
D | api.txt |
    2289  be OR'ed into the "vsid" argument of the slbmte instruction.
|