Lines matching refs: svcpu
270 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_next_segment() local
277 for (i = 0; i < svcpu->slb_max; i++) { in kvmppc_mmu_next_segment()
278 if (!(svcpu->slb[i].esid & SLB_ESID_V)) in kvmppc_mmu_next_segment()
280 else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { in kvmppc_mmu_next_segment()
298 if ((svcpu->slb_max) == max_slb_size) in kvmppc_mmu_next_segment()
301 r = svcpu->slb_max; in kvmppc_mmu_next_segment()
302 svcpu->slb_max++; in kvmppc_mmu_next_segment()
305 svcpu_put(svcpu); in kvmppc_mmu_next_segment()
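
Read together, the matches above outline the slot-selection loop in kvmppc_mmu_next_segment(): walk the shadow SLB, reuse an entry whose ESID already matches or a previously invalidated slot, and otherwise grow slb_max, flushing everything first if the array is full. The fragment below is a minimal userspace sketch of that logic under stated assumptions, not the kernel code itself: the struct layout, the shadow_slb_next_slot() name, MAX_SLB, and the constant values are illustrative stand-ins, and the svcpu_get()/svcpu_put() reference counting is omitted.

    #include <stdint.h>

    #define SLB_ESID_V  0x0000000008000000ULL   /* assumed "entry valid" bit      */
    #define ESID_MASK   0xfffffffff0000000ULL   /* assumed ESID field mask        */
    #define MAX_SLB     64                      /* assumed shadow-SLB array size  */

    /* Hypothetical stand-in for the slb[] bookkeeping inside
     * struct kvmppc_book3s_shadow_vcpu. */
    struct shadow_slbe {
            uint64_t esid;
            uint64_t vsid;
    };

    struct shadow_slb {
            struct shadow_slbe slb[MAX_SLB];
            int slb_max;                        /* number of slots handed out */
    };

    /* Pick the slot that should hold a mapping for esid: an existing match,
     * a previously invalidated slot, or a freshly allocated one. */
    static int shadow_slb_next_slot(struct shadow_slb *s, uint64_t esid)
    {
            int found_inval = -1;
            int i;

            for (i = 0; i < s->slb_max; i++) {
                    if (!(s->slb[i].esid & SLB_ESID_V))
                            found_inval = i;             /* remember a reusable slot */
                    else if ((s->slb[i].esid & ESID_MASK) == esid)
                            return i;                    /* segment already mapped   */
            }

            if (found_inval >= 0)
                    return found_inval;

            /* Array full: in the kernel this is the point where all shadow
             * segments are flushed before a new slot is handed out. */
            if (s->slb_max == MAX_SLB)
                    s->slb_max = 0;

            return s->slb_max++;
    }
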
311 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_map_segment() local
324 svcpu->slb[slb_index].esid = 0; in kvmppc_mmu_map_segment()
345 svcpu->slb[slb_index].esid = slb_esid; in kvmppc_mmu_map_segment()
346 svcpu->slb[slb_index].vsid = slb_vsid; in kvmppc_mmu_map_segment()
351 svcpu_put(svcpu); in kvmppc_mmu_map_segment()
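
The kvmppc_mmu_map_segment() matches show the entry being invalidated (esid = 0) while the mapping is rebuilt and the final esid/vsid pair being written at the end. A minimal sketch of that pattern, reusing the hypothetical shadow_slb types from the sketch above; slb_esid is assumed to already carry SLB_ESID_V plus the slot encoding, and the guest-VSID translation done by the real function is elided:

    static void shadow_slb_map(struct shadow_slb *s, int slot,
                               uint64_t slb_esid, uint64_t slb_vsid)
    {
            /* Clear the entry while it is being (re)built. */
            s->slb[slot].esid = 0;

            /* ... the kernel resolves the guest VSID to a shadow VSID here ... */

            s->slb[slot].esid = slb_esid;
            s->slb[slot].vsid = slb_vsid;
    }
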
357 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segment() local
361 for (i = 0; i < svcpu->slb_max; i++) { in kvmppc_mmu_flush_segment()
362 if ((svcpu->slb[i].esid & SLB_ESID_V) && in kvmppc_mmu_flush_segment()
363 (svcpu->slb[i].esid & seg_mask) == ea) { in kvmppc_mmu_flush_segment()
365 svcpu->slb[i].esid = 0; in kvmppc_mmu_flush_segment()
369 svcpu_put(svcpu); in kvmppc_mmu_flush_segment()
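
For kvmppc_mmu_flush_segment(), the matches show a scan for a valid entry whose masked ESID equals the given effective address, which is then dropped by zeroing its esid. A sketch of the same walk, again reusing the types and SLB_ESID_V definition from the first sketch; seg_mask is assumed to select the ESID bits appropriate to the segment size:

    /* Invalidate the shadow entry covering effective address ea, if any. */
    static void shadow_slb_flush_one(struct shadow_slb *s,
                                     uint64_t ea, uint64_t seg_mask)
    {
            int i;

            for (i = 0; i < s->slb_max; i++) {
                    if ((s->slb[i].esid & SLB_ESID_V) &&
                        (s->slb[i].esid & seg_mask) == ea) {
                            /* Clearing esid drops the valid bit, so the
                             * slot can be reused by the next-slot search. */
                            s->slb[i].esid = 0;
                    }
            }
    }
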
374 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_mmu_flush_segments() local
375 svcpu->slb_max = 0; in kvmppc_mmu_flush_segments()
376 svcpu->slb[0].esid = 0; in kvmppc_mmu_flush_segments()
377 svcpu_put(svcpu); in kvmppc_mmu_flush_segments()
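
Finally, kvmppc_mmu_flush_segments() drops every shadow segment at once by resetting slb_max and clearing entry 0. A one-function sketch on the same hypothetical model:

    /* Forget all shadow segments: with slb_max back at 0 the walk loops
     * above see no slots, and entry 0 is cleared for good measure. */
    static void shadow_slb_flush_all(struct shadow_slb *s)
    {
            s->slb_max = 0;
            s->slb[0].esid = 0;
    }
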