Lines matching refs:cpu: references to the struct lg_cpu argument in lguest's shadow page-table code (drivers/lguest/page_tables.c)
83 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) in spgd_addr() argument
88 return &cpu->lg->pgdirs[i].pgdir[index]; in spgd_addr()
97 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) in spmd_addr() argument
115 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) in spte_addr() argument
118 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); in spte_addr()
136 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) in gpgd_addr() argument
139 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); in gpgd_addr()
152 static unsigned long gpte_addr(struct lg_cpu *cpu, in gpte_addr() argument
162 static unsigned long gpte_addr(struct lg_cpu *cpu, in gpte_addr() argument
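The five helpers above turn a Guest virtual address into a pointer into either the Host's shadow page tables (spgd_addr, spmd_addr, spte_addr) or the Guest's own tables (gpgd_addr, gpte_addr; two variants because PAE inserts a middle level). The index arithmetic is plain bit-slicing of the virtual address. A minimal standalone sketch, assuming the 32-bit non-PAE layout; the constants and types are simplified stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22			/* non-PAE: 1024 toplevel entries */
#define PTRS_PER_PTE	1024

/* Which PGD entry covers vaddr: the top 10 bits. */
static unsigned int pgd_index(uint32_t vaddr)
{
	return vaddr >> PGDIR_SHIFT;
}

/* Which PTE inside that page table covers vaddr: the next 10 bits. */
static unsigned int pte_index(uint32_t vaddr)
{
	return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	uint32_t vaddr = 0xc0123456;

	printf("vaddr %#x -> pgd[%u], pte[%u]\n",
	       (unsigned)vaddr, pgd_index(vaddr), pte_index(vaddr));
	return 0;
}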
206 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) in gpte_to_spte() argument
219 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; in gpte_to_spte()
229 kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); in gpte_to_spte()
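gpte_to_spte converts a Guest pte into a shadow pte: the Guest's notion of a physical frame is really an offset into the Launcher's memory at cpu->lg->mem_base, so the Host rebases the pfn, pins the underlying page (killing the Guest on failure, line 229), and substitutes the host frame. A hedged sketch of the rebasing; get_host_pfn() is a hypothetical stand-in for the real pinning helper:

#define PAGE_SIZE 4096UL

/*
 * Hypothetical stand-in for the real page-pinning helper: the kernel
 * code pins the page and kills the Guest on failure; here we just
 * return the pfn unchanged.
 */
static unsigned long get_host_pfn(unsigned long pfn)
{
	return pfn;
}

/*
 * A Guest "physical" page number is really an offset into the chunk of
 * Launcher memory at mem_base, so rebase before pinning.
 */
static unsigned long guest_pfn_to_host_pfn(unsigned long guest_pfn,
					   unsigned long mem_base)
{
	unsigned long base = mem_base / PAGE_SIZE;

	return get_host_pfn(base + guest_pfn);
}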
253 static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte) in gpte_in_iomem() argument
259 return (pte_pfn(gpte) >= cpu->lg->pfn_limit in gpte_in_iomem()
260 && pte_pfn(gpte) < cpu->lg->device_limit); in gpte_in_iomem()
263 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte) in check_gpte() argument
266 pte_pfn(gpte) >= cpu->lg->pfn_limit) { in check_gpte()
267 kill_guest(cpu, "bad page table entry"); in check_gpte()
273 static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) in check_gpgd() argument
276 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) { in check_gpgd()
277 kill_guest(cpu, "bad page directory entry"); in check_gpgd()
284 static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) in check_gpmd() argument
287 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) { in check_gpmd()
288 kill_guest(cpu, "bad page middle directory entry"); in check_gpmd()
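gpte_in_iomem and the check_* helpers police Guest-supplied entries: a pfn at or above pfn_limit is normally fatal, except that the window [pfn_limit, device_limit) is treated as MMIO for virtual devices rather than an error. The classification, sketched with simplified types:

enum pfn_class { PFN_RAM, PFN_IOMEM, PFN_BAD };

/* Classify a Guest frame number against the two limits. */
static enum pfn_class classify_pfn(unsigned long pfn,
				   unsigned long pfn_limit,
				   unsigned long device_limit)
{
	if (pfn < pfn_limit)
		return PFN_RAM;		/* ordinary Guest memory */
	if (pfn < device_limit)
		return PFN_IOMEM;	/* virtual-device MMIO window */
	return PFN_BAD;			/* kill_guest() territory */
}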
303 static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate, in find_spte() argument
313 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); in find_spte()
328 kill_guest(cpu, "out of memory allocating pte page"); in find_spte()
344 spmd = spmd_addr(cpu, *spgd, vaddr); in find_spte()
361 kill_guest(cpu, "out of memory allocating pmd page"); in find_spte()
374 return spte_addr(cpu, *spgd, vaddr); in find_spte()
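find_spte is the one routine that walks, and optionally grows, the shadow tables: a missing level is filled with a zeroed page when allocate is true, otherwise the walk returns NULL; allocation failure is fatal for the Guest (lines 328 and 361). A userspace sketch of the two-level walk; struct shadow and its flat arrays are simplifications, not the kernel's pgd_t/pte_t machinery:

#include <stdbool.h>
#include <stdlib.h>

#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024
#define FLAG_PRESENT	0x1

struct shadow {
	unsigned long *pte_pages[PTRS_PER_PGD];	/* one pte page per slot */
	unsigned long flags[PTRS_PER_PGD];	/* per-slot present bit */
};

/*
 * Return a pointer to the shadow PTE slot for vaddr, allocating the
 * missing pte page on demand.  NULL means "not mapped and allocate was
 * false", or allocation failed (which the real code turns into
 * kill_guest("out of memory allocating pte page")).
 */
static unsigned long *find_spte(struct shadow *s, unsigned long vaddr,
				bool allocate)
{
	unsigned int pgd = vaddr >> 22;
	unsigned int pte = (vaddr >> 12) & (PTRS_PER_PTE - 1);

	if (!(s->flags[pgd] & FLAG_PRESENT)) {
		if (!allocate)
			return NULL;
		s->pte_pages[pgd] = calloc(PTRS_PER_PTE, sizeof(unsigned long));
		if (!s->pte_pages[pgd])
			return NULL;
		s->flags[pgd] = FLAG_PRESENT;
	}
	return &s->pte_pages[pgd][pte];
}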
393 bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode, in demand_page() argument
409 if (unlikely(cpu->linear_pages)) { in demand_page()
413 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); in demand_page()
422 if (!check_gpgd(cpu, gpgd)) in demand_page()
430 if (likely(!cpu->linear_pages)) { in demand_page()
431 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); in demand_page()
440 if (!check_gpmd(cpu, gpmd)) in demand_page()
448 gpte_ptr = gpte_addr(cpu, gpmd, vaddr); in demand_page()
454 gpte_ptr = gpte_addr(cpu, gpgd, vaddr); in demand_page()
457 if (unlikely(cpu->linear_pages)) { in demand_page()
462 gpte = lgread(cpu, gpte_ptr, pte_t); in demand_page()
481 if (gpte_in_iomem(cpu, gpte)) { in demand_page()
490 if (!check_gpte(cpu, gpte)) in demand_page()
499 spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd)); in demand_page()
514 *spte = gpte_to_spte(cpu, gpte, 1); in demand_page()
522 set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); in demand_page()
528 if (likely(!cpu->linear_pages)) in demand_page()
529 lgwrite(cpu, gpte_ptr, pte_t, gpte); in demand_page()
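demand_page is the heart of the shadow scheme: on a fault it reads the Guest's pgd (plus pmd under PAE) and pte, validates them, installs a shadow pte via find_spte, and finally writes the accessed/dirty bits back into the Guest pte (line 529). The subtlety at lines 514 and 522: a write fault installs the shadow pte writable, but a read fault installs it write-protected even if the Guest pte allows writes, so the Host sees the first write and can set the dirty bit. A sketch of just that bookkeeping, with simplified flag macros:

#define PTE_ACCESSED	0x20	/* x86 _PAGE_ACCESSED */
#define PTE_DIRTY	0x40	/* x86 _PAGE_DIRTY */

/*
 * Every handled fault marks the Guest pte accessed; only a write fault
 * (x86 error-code bit 1) marks it dirty.  After a read fault the shadow
 * copy stays write-protected, so the first real write faults again and
 * lands here with (errcode & 2) set.
 */
static unsigned long fixup_gpte(unsigned long gpte, int errcode)
{
	gpte |= PTE_ACCESSED;
	if (errcode & 2)
		gpte |= PTE_DIRTY;
	return gpte;
}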
551 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) in page_writable() argument
561 spte = find_spte(cpu, vaddr, false, 0, 0); in page_writable()
578 void pin_page(struct lg_cpu *cpu, unsigned long vaddr) in pin_page() argument
582 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem)) in pin_page()
583 kill_guest(cpu, "bad stack page %#lx", vaddr); in pin_page()
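page_writable and pin_page keep the Guest's stack pages mapped across a switch: if the shadow pte is missing or read-only, pin_page forces one in by calling demand_page with errcode 2, which mimics a write fault (x86 error-code bit 1). A sketch of the writability test, assuming the standard x86 flag bits:

#include <stdbool.h>

#define PTE_P	0x1	/* x86 present bit */
#define PTE_W	0x2	/* x86 writable bit */

/* A stack page is usable iff its shadow pte is present AND writable;
 * testing both bits under one mask mirrors the real page_writable(). */
static bool shadow_pte_writable(const unsigned long *spte)
{
	return spte && (*spte & (PTE_P | PTE_W)) == (PTE_P | PTE_W);
}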
668 void guest_pagetable_flush_user(struct lg_cpu *cpu) in guest_pagetable_flush_user() argument
671 flush_user_mappings(cpu->lg, cpu->cpu_pgd); in guest_pagetable_flush_user()
676 bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr) in __guest_pa() argument
685 if (unlikely(cpu->linear_pages)) { in __guest_pa()
691 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); in __guest_pa()
697 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); in __guest_pa()
700 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); in __guest_pa()
702 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); in __guest_pa()
719 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) in guest_pa() argument
723 if (!__guest_pa(cpu, vaddr, &paddr)) in guest_pa()
724 kill_guest(cpu, "Bad address %#lx", vaddr); in guest_pa()
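__guest_pa walks the Guest's own tables to turn a Guest-virtual address into a Guest-physical one; during the early linear_pages phase the mapping is identity, so vaddr is the answer. Otherwise the result is the classic composition of the pte's frame number with the in-page offset, sketched below:

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Compose the walk result: frame number from the pte, offset bits from
 * the vaddr, exactly as the real code does with pte_pfn(gpte). */
static unsigned long walk_result(unsigned long pfn, unsigned long vaddr)
{
	return pfn * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}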
747 static unsigned int new_pgdir(struct lg_cpu *cpu, in new_pgdir() argument
757 next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs); in new_pgdir()
759 if (!cpu->lg->pgdirs[next].pgdir) { in new_pgdir()
760 cpu->lg->pgdirs[next].pgdir = in new_pgdir()
763 if (!cpu->lg->pgdirs[next].pgdir) in new_pgdir()
764 next = cpu->cpu_pgd; in new_pgdir()
774 cpu->lg->pgdirs[next].gpgdir = gpgdir; in new_pgdir()
776 flush_user_mappings(cpu->lg, next); in new_pgdir()
779 cpu->lg->pgdirs[next].last_host_cpu = -1; in new_pgdir()
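new_pgdir recycles a shadow toplevel when the Guest switches to a cr3 the Host is not yet shadowing: it picks a victim slot at random (line 757), allocates the pgdir page if that slot was never used, and resets its bookkeeping (gpgdir, last_host_cpu). Random replacement keeps the code small; no LRU is attempted. A userspace sketch of the pick-or-reuse step:

#include <stdlib.h>

#define NPGDIRS 4	/* the real array is similarly tiny */

struct pgdir_slot {
	unsigned long gpgdir;	/* Guest toplevel this slot shadows */
	void *pgdir;		/* shadow page; NULL if never allocated */
};

/*
 * Pick a random victim slot for a new Guest toplevel, lazily allocating
 * its shadow page.  rand() stands in for prandom_u32(); the real code
 * falls back to the current pgd when the allocation fails.
 */
static unsigned int new_pgdir(struct pgdir_slot *slots, unsigned long gpgdir)
{
	unsigned int next = rand() % NPGDIRS;

	if (!slots[next].pgdir)
		slots[next].pgdir = calloc(1, 4096);
	slots[next].gpgdir = gpgdir;
	return next;
}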
794 static bool allocate_switcher_mapping(struct lg_cpu *cpu) in allocate_switcher_mapping() argument
799 pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true, in allocate_switcher_mapping()
818 cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true; in allocate_switcher_mapping()
850 void guest_pagetable_clear_all(struct lg_cpu *cpu) in guest_pagetable_clear_all() argument
852 release_all_pagetables(cpu->lg); in guest_pagetable_clear_all()
854 pin_stack_pages(cpu); in guest_pagetable_clear_all()
856 if (!allocate_switcher_mapping(cpu)) in guest_pagetable_clear_all()
857 kill_guest(cpu, "Cannot populate switcher mapping"); in guest_pagetable_clear_all()
867 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) in guest_new_pagetable() argument
875 if (unlikely(cpu->linear_pages)) { in guest_new_pagetable()
876 release_all_pagetables(cpu->lg); in guest_new_pagetable()
877 cpu->linear_pages = false; in guest_new_pagetable()
879 newpgdir = ARRAY_SIZE(cpu->lg->pgdirs); in guest_new_pagetable()
882 newpgdir = find_pgdir(cpu->lg, pgtable); in guest_new_pagetable()
889 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) in guest_new_pagetable()
890 newpgdir = new_pgdir(cpu, pgtable, &repin); in guest_new_pagetable()
892 cpu->cpu_pgd = newpgdir; in guest_new_pagetable()
898 pin_stack_pages(cpu); in guest_new_pagetable()
900 if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) { in guest_new_pagetable()
901 if (!allocate_switcher_mapping(cpu)) in guest_new_pagetable()
902 kill_guest(cpu, "Cannot populate switcher mapping"); in guest_new_pagetable()
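guest_pagetable_clear_all and guest_new_pagetable are the flush and cr3-switch paths: drop everything (or leave linear_pages mode), look up an existing shadow with find_pgdir, fall back to new_pgdir, then repin the stack and the Switcher mapping, either of which kills the Guest on failure. The lookup-or-evict pattern in sketch form, reusing the slot array from the new_pgdir sketch above:

/*
 * Look for an existing shadow of gpgdir, else evict one at random.
 * Returning NPGDIRS as "not found" mirrors ARRAY_SIZE(cpu->lg->pgdirs)
 * in the real find_pgdir()/guest_new_pagetable() pair.
 */
static unsigned int find_or_new_pgdir(struct pgdir_slot *slots,
				      unsigned long gpgdir)
{
	unsigned int i;

	for (i = 0; i < NPGDIRS; i++)
		if (slots[i].pgdir && slots[i].gpgdir == gpgdir)
			return i;
	return new_pgdir(slots, gpgdir);
}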
932 static void __guest_set_pte(struct lg_cpu *cpu, int idx, in __guest_set_pte() argument
936 pgd_t *spgd = spgd_addr(cpu, idx, vaddr); in __guest_set_pte()
944 spmd = spmd_addr(cpu, *spgd, vaddr); in __guest_set_pte()
948 pte_t *spte = spte_addr(cpu, *spgd, vaddr); in __guest_set_pte()
958 && !gpte_in_iomem(cpu, gpte)) { in __guest_set_pte()
959 if (!check_gpte(cpu, gpte)) in __guest_set_pte()
962 gpte_to_spte(cpu, gpte, in __guest_set_pte()
989 void guest_set_pte(struct lg_cpu *cpu, in guest_set_pte() argument
994 kill_guest(cpu, "attempt to set pte into Switcher pages"); in guest_set_pte()
1002 if (vaddr >= cpu->lg->kernel_address) { in guest_set_pte()
1004 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) in guest_set_pte()
1005 if (cpu->lg->pgdirs[i].pgdir) in guest_set_pte()
1006 __guest_set_pte(cpu, i, vaddr, gpte); in guest_set_pte()
1009 int pgdir = find_pgdir(cpu->lg, gpgdir); in guest_set_pte()
1010 if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs)) in guest_set_pte()
1012 __guest_set_pte(cpu, pgdir, vaddr, gpte); in guest_set_pte()
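guest_set_pte handles a Guest pte update: an address inside the Switcher region is fatal; a kernel mapping (vaddr >= kernel_address) is shared by every process, so it is mirrored into every allocated shadow toplevel; a user mapping touches only the shadow (if any) of the toplevel being changed. A sketch of that dispatch; set_shadow_pte() is a hypothetical stand-in for __guest_set_pte():

/* Hypothetical helper: write one pte into shadow toplevel idx. */
static void set_shadow_pte(struct pgdir_slot *slots, unsigned int idx,
			   unsigned long vaddr, unsigned long gpte);

/* Route a Guest pte update to every shadow toplevel it affects. */
static void set_pte_dispatch(struct pgdir_slot *slots, unsigned long gpgdir,
			     unsigned long vaddr, unsigned long gpte,
			     unsigned long kernel_address)
{
	unsigned int i;

	if (vaddr >= kernel_address) {
		/* Kernel half: identical in every shadow, so fix them all. */
		for (i = 0; i < NPGDIRS; i++)
			if (slots[i].pgdir)
				set_shadow_pte(slots, i, vaddr, gpte);
	} else {
		/* User half: only the shadow of this toplevel, if any. */
		for (i = 0; i < NPGDIRS; i++)
			if (slots[i].pgdir && slots[i].gpgdir == gpgdir)
				set_shadow_pte(slots, i, vaddr, gpte);
	}
}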
1075 struct lg_cpu *cpu = &lg->cpus[0]; in init_guest_pagetable() local
1079 cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated); in init_guest_pagetable()
1084 cpu->linear_pages = true; in init_guest_pagetable()
1087 if (!allocate_switcher_mapping(cpu)) { in init_guest_pagetable()
1096 void page_table_guest_data_init(struct lg_cpu *cpu) in page_table_guest_data_init() argument
1106 if (get_user(cpu->lg->kernel_address, in page_table_guest_data_init()
1107 &cpu->lg->lguest_data->kernel_address) in page_table_guest_data_init()
1112 || put_user(top, &cpu->lg->lguest_data->reserve_mem)) { in page_table_guest_data_init()
1113 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); in page_table_guest_data_init()
1122 if (cpu->lg->kernel_address >= switcher_addr) in page_table_guest_data_init()
1123 kill_guest(cpu, "bad kernel address %#lx", in page_table_guest_data_init()
1124 cpu->lg->kernel_address); in page_table_guest_data_init()
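page_table_guest_data_init runs once the Guest has registered its struct lguest_data: get_user/put_user exchange the Guest's kernel_address and the top of usable memory, and a kernel_address at or above switcher_addr is rejected (lines 1122-1124) since the Switcher must sit above everything the Guest can map. The check, trivially sketched:

#include <stdbool.h>

/* The Guest kernel must live strictly below the Switcher region, or the
 * Guest could map over the code we switch on. */
static bool kernel_address_ok(unsigned long kernel_address,
			      unsigned long switcher_addr)
{
	return kernel_address < switcher_addr;
}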
1142 static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i) in remove_switcher_percpu_map() argument
1148 pte = find_spte(cpu, base, false, 0, 0); in remove_switcher_percpu_map()
1152 pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0); in remove_switcher_percpu_map()
1167 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) in map_switcher_in_guest() argument
1172 struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd]; in map_switcher_in_guest()
1191 remove_switcher_percpu_map(cpu, i); in map_switcher_in_guest()
1194 remove_switcher_percpu_map(cpu, pgdir->last_host_cpu); in map_switcher_in_guest()
1208 pte = find_spte(cpu, base, false, 0, 0); in map_switcher_in_guest()
1209 regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT); in map_switcher_in_guest()
1218 pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0); in map_switcher_in_guest()
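map_switcher_in_guest wires the per-host-cpu Switcher pages into the current shadow pgdir just before a switch, after remove_switcher_percpu_map tears down any mapping left over from the last host cpu. The region appears to be laid out as one shared text page followed by two pages per host cpu (the regs page, then read-only state), which is why lines 1148 and 1152 probe base and base + PAGE_SIZE. A sketch of the address arithmetic:

#define PAGE_SIZE 4096UL

/*
 * Base of host cpu i's two-page slot inside the Switcher region:
 * [shared text page][cpu0: regs, ro state][cpu1: regs, ro state]...
 */
static unsigned long switcher_slot(unsigned long switcher_addr,
				   unsigned int host_cpu)
{
	return switcher_addr + PAGE_SIZE + host_cpu * 2 * PAGE_SIZE;
}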