
Searched refs:esid (Results 1 – 20 of 20) sorted by relevance

/linux-4.4.14/arch/powerpc/kvm/
book3s_64_mmu.c
49 u64 esid = GET_ESID(eaddr); in kvmppc_mmu_book3s_64_find_slbe() local
53 u64 cmp_esid = esid; in kvmppc_mmu_book3s_64_find_slbe()
61 if (vcpu->arch.slb[i].esid == cmp_esid) in kvmppc_mmu_book3s_64_find_slbe()
66 eaddr, esid, esid_1t); in kvmppc_mmu_book3s_64_find_slbe()
73 vcpu->arch.slb[i].esid, in kvmppc_mmu_book3s_64_find_slbe()
381 u64 esid, esid_1t; in kvmppc_mmu_book3s_64_slbmte() local
389 esid = GET_ESID(rb); in kvmppc_mmu_book3s_64_slbmte()
400 slbe->esid = slbe->tb ? esid_1t : esid; in kvmppc_mmu_book3s_64_slbmte()
427 kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT); in kvmppc_mmu_book3s_64_slbmte()
466 dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); in kvmppc_mmu_book3s_64_slbie()
[all …]
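For orientation: the hits above are kvmppc_mmu_book3s_64_find_slbe() walking the guest SLB and comparing each entry's esid against the ESID extracted from the faulting effective address, using the 1TB shift when the entry's segment-size bit is set. A minimal standalone sketch of that lookup; the constants and struct layout here are illustrative, not the kernel's definitions (those live in asm/mmu-hash64.h and asm/kvm_book3s.h):

    #include <stdint.h>
    #include <stddef.h>

    #define SID_SHIFT     28   /* 256MB segments; illustrative value */
    #define SID_SHIFT_1T  40   /* 1TB segments; illustrative value   */

    struct slbe { uint64_t esid, vsid; int valid, tb; /* tb: 1TB segment */ };

    /* Sketch of the matching loop in kvmppc_mmu_book3s_64_find_slbe(). */
    static struct slbe *find_slbe(struct slbe *slb, size_t n, uint64_t eaddr)
    {
        uint64_t esid    = eaddr >> SID_SHIFT;
        uint64_t esid_1t = eaddr >> SID_SHIFT_1T;

        for (size_t i = 0; i < n; i++) {
            uint64_t cmp_esid = slb[i].tb ? esid_1t : esid;
            if (slb[i].valid && slb[i].esid == cmp_esid)
                return &slb[i];
        }
        return NULL;
    }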
book3s_64_mmu_host.c
268 static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) in kvmppc_mmu_next_segment() argument
278 if (!(svcpu->slb[i].esid & SLB_ESID_V)) in kvmppc_mmu_next_segment()
280 else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { in kvmppc_mmu_next_segment()
312 u64 esid = eaddr >> SID_SHIFT; in kvmppc_mmu_map_segment() local
322 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
324 svcpu->slb[slb_index].esid = 0; in kvmppc_mmu_map_segment()
333 map->guest_esid = esid; in kvmppc_mmu_map_segment()
345 svcpu->slb[slb_index].esid = slb_esid; in kvmppc_mmu_map_segment()
362 if ((svcpu->slb[i].esid & SLB_ESID_V) && in kvmppc_mmu_flush_segment()
363 (svcpu->slb[i].esid & seg_mask) == ea) { in kvmppc_mmu_flush_segment()
[all …]
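The host-side hits choose a shadow SLB slot for a guest segment: reuse the slot whose ESID already matches, otherwise take an invalid one. A hedged sketch of that selection; the SLB_ESID_V and ESID_MASK values below are assumptions, and esid is taken to be the effective address already masked down to its segment base:

    #include <stdint.h>

    #define SLB_ESID_V  0x0000000008000000ULL  /* valid bit; assumed value       */
    #define ESID_MASK   0xfffffffff0000000ULL  /* 256MB segment base; assumed    */

    /* Sketch of the slot choice in kvmppc_mmu_next_segment(): prefer an entry
     * whose ESID already equals the one being mapped, else the first free slot. */
    static int next_segment(const uint64_t *slb_esid, int n, uint64_t esid)
    {
        int free_slot = -1;

        for (int i = 0; i < n; i++) {
            if (!(slb_esid[i] & SLB_ESID_V)) {
                if (free_slot < 0)
                    free_slot = i;
            } else if ((slb_esid[i] & ESID_MASK) == esid) {
                return i;            /* segment already mapped in this slot */
            }
        }
        return free_slot;            /* -1: no slot available in this sketch */
    }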
book3s_32_mmu_host.c
316 u32 esid = eaddr >> SID_SHIFT; in kvmppc_mmu_map_segment() local
323 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { in kvmppc_mmu_map_segment()
325 svcpu->sr[esid] = SR_INVALID; in kvmppc_mmu_map_segment()
334 map->guest_esid = esid; in kvmppc_mmu_map_segment()
336 svcpu->sr[esid] = sr; in kvmppc_mmu_map_segment()
338 dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr); in kvmppc_mmu_map_segment()
book3s_32_mmu.c
84 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
363 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, in kvmppc_mmu_book3s_32_esid_to_vsid() argument
366 ulong ea = esid << SID_SHIFT; in kvmppc_mmu_book3s_32_esid_to_vsid()
368 u64 gvsid = esid; in kvmppc_mmu_book3s_32_esid_to_vsid()
382 *vsid = VSID_REAL | esid; in kvmppc_mmu_book3s_32_esid_to_vsid()
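The 32-bit esid_to_vsid() hit at line 382 shows the fallback path: when no usable translation exists, the VSID is synthesized directly from the ESID and tagged as a real-mode mapping. A rough sketch of just that fallback; the VSID_REAL value below is hypothetical:

    #include <stdint.h>

    #define VSID_REAL  0x0800000000000000ULL   /* real-mode tag; hypothetical value */

    /* Sketch of the fallback in kvmppc_mmu_book3s_32_esid_to_vsid(): with no
     * segment translation available, fold the ESID itself into the VSID. */
    static int esid_to_vsid_fallback(uint64_t esid, uint64_t *vsid)
    {
        *vsid = VSID_REAL | esid;
        return 0;
    }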
book3s_hv_ras.c
54 unsigned long rb = be64_to_cpu(slb->save_area[i].esid); in reload_slb()
/linux-4.4.14/drivers/misc/cxl/
fault.c
28 (sste->esid_data == cpu_to_be64(slb->esid))); in sste_matches()
44 hash = (slb->esid >> SID_SHIFT_1T) & mask; in find_free_sste()
46 hash = (slb->esid >> SID_SHIFT) & mask; in find_free_sste()
78 sste - ctx->sstp, slb->vsid, slb->esid); in cxl_load_segment()
79 trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid); in cxl_load_segment()
82 sste->esid_data = cpu_to_be64(slb->esid); in cxl_load_segment()
288 if (last_esid == slb.esid) in cxl_prefault_vma()
292 last_esid = slb.esid; in cxl_prefault_vma()
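In the cxl fault path, the segment table entry (SSTE) slot is picked by hashing the ESID, with the 1TB shift used for 1TB segments. A small sketch of that hash, assuming the group size is a power of two so the mask is simply size minus one; the shift values are assumptions:

    #include <stdint.h>

    #define SID_SHIFT     28
    #define SID_SHIFT_1T  40

    /* Sketch of the hash in find_free_sste(): index a group of segment table
     * entries by the low ESID bits, choosing the shift by segment size. */
    static unsigned int sste_hash(uint64_t esid, int is_1t, unsigned int group_size)
    {
        unsigned int mask = group_size - 1;   /* group_size assumed power of two */

        return (unsigned int)(esid >> (is_1t ? SID_SHIFT_1T : SID_SHIFT)) & mask;
    }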
/linux-4.4.14/arch/powerpc/mm/
slb.c
72 p->save_area[index].esid = 0; in slb_shadow_update()
74 p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); in slb_shadow_update()
79 get_slb_shadow()->save_area[index].esid = 0; in slb_shadow_clear()
copro_fault.c
141 slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V; in copro_calculate_slb()
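copro_calculate_slb() at line 141 (and mk_esid_data() used by slb.c above) builds the ESID doubleword that actually goes into the SLB: the effective address masked down to its segment base, plus the valid bit and, in slb.c, the slot index in the low bits. A hedged sketch of that composition; the constant values are assumptions:

    #include <stdint.h>

    #define ESID_MASK     0xfffffffff0000000ULL  /* 256MB segment base; assumed */
    #define ESID_MASK_1T  0xffffff0000000000ULL  /* 1TB segment base; assumed   */
    #define SLB_ESID_V    0x0000000008000000ULL  /* valid bit; assumed          */

    /* Sketch of mk_esid_data()/copro_calculate_slb(): mask the EA to its
     * segment base, set the valid bit, and record the SLB slot index. */
    static uint64_t mk_esid_data(uint64_t ea, int is_1t, unsigned int index)
    {
        uint64_t mask = is_1t ? ESID_MASK_1T : ESID_MASK;

        return (ea & mask) | SLB_ESID_V | index;
    }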
/linux-4.4.14/arch/powerpc/include/asm/
copro.h
15 u64 esid, vsid; member
lppaca.h
130 __be64 esid; member
kvm_book3s_asm.h
154 u64 esid;
kvm_host.h
362 int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
368 u64 esid; member
kvm_book3s.h
75 u64 esid; member
/linux-4.4.14/drivers/s390/cio/
device_id.c
135 cdev->private->flags.esid = 0; in snsid_init()
158 cdev->private->flags.esid = 1; in snsid_check()
io_sch.h
143 unsigned int esid:1; /* Ext. SenseID supported by HW */ member
device_ops.c
433 if (cdev->private->flags.esid == 0) in ccw_device_get_ciw()
/linux-4.4.14/arch/powerpc/xmon/
xmon.c
2130 u64 esid, vsid; in dump_one_paca() local
2135 esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid); in dump_one_paca()
2138 if (esid || vsid) { in dump_one_paca()
2140 i, esid, vsid); in dump_one_paca()
2804 unsigned long esid,vsid; in dump_segments() local
2810 asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i)); in dump_segments()
2812 if (esid || vsid) { in dump_segments()
2813 printf("%02d %016lx %016lx", i, esid, vsid); in dump_segments()
2814 if (esid & SLB_ESID_V) { in dump_segments()
2818 GET_ESID_1T(esid), in dump_segments()
[all …]
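dump_segments() in xmon reads each SLB slot back with slbmfee/slbmfev (shown above) and prints only non-empty slots, decoding the ESID with the shift that matches the segment size. A small sketch of just the print/decode step; the valid-bit and shift values are assumptions:

    #include <inttypes.h>
    #include <stdio.h>

    #define SLB_ESID_V    0x0000000008000000ULL  /* assumed value */
    #define SID_SHIFT     28
    #define SID_SHIFT_1T  40

    /* Sketch of the decode in dump_segments(): skip empty slots; for valid
     * entries, show the ESID using the shift matching the segment size. */
    static void print_slb_slot(int i, uint64_t esid, uint64_t vsid, int is_1t)
    {
        if (!esid && !vsid)
            return;                              /* slot is empty */

        printf("%02d %016" PRIx64 " %016" PRIx64, i, esid, vsid);
        if (esid & SLB_ESID_V)
            printf(" esid=%" PRIx64, esid >> (is_1t ? SID_SHIFT_1T : SID_SHIFT));
        printf("\n");
    }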
/linux-4.4.14/arch/powerpc/platforms/cell/
spu_base.c
153 __func__, slbe, slb->vsid, slb->esid); in spu_load_slb()
161 out_be64(&priv2->slb_esid_RW, slb->esid); in spu_load_slb()
232 slb->esid = (ea & ESID_MASK) | SLB_ESID_V; in __spu_kernel_slb()
246 if (!((slbs[i].esid ^ ea) & ESID_MASK)) in __slb_present()
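__slb_present() at line 246 uses an XOR-and-mask idiom: an effective address lies in the same 256MB segment as a cached entry exactly when their segment-base bits are identical, so XORing them and masking leaves zero. A tiny sketch of that test, with an assumed ESID_MASK value:

    #include <stdint.h>

    #define ESID_MASK  0xfffffffff0000000ULL   /* 256MB segment base; assumed */

    /* Sketch of __slb_present(): ea is covered by a cached SLB entry iff
     * the segment-base bits of ea and the entry's esid agree. */
    static int slb_present(const uint64_t *esids, int n, uint64_t ea)
    {
        for (int i = 0; i < n; i++)
            if (!((esids[i] ^ ea) & ESID_MASK))
                return 1;
        return 0;
    }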
/linux-4.4.14/arch/powerpc/kernel/
mce_power.c
107 unsigned long rb = be64_to_cpu(slb->save_area[i].esid); in flush_and_reload_slb()
asm-offsets.c
235 offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); in main()