Lines Matching refs:addr

29 unsigned long i, j, addr; in subpage_prot_free() local
38 addr = 0; in subpage_prot_free()
44 for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr; in subpage_prot_free()
45 ++j, addr += PAGE_SIZE) in subpage_prot_free()
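
Lines 38-45 are the teardown walk in subpage_prot_free(): addr starts at 0 and advances one page of coverage per second-level entry until it reaches spt->maxaddr. A minimal sketch of that loop shape follows; the outer loop bound, the struct-field layout, and the free_page() calls are assumptions inferred from the identifier names in the listing, not a verbatim copy of the source.

	/* Sketch only, not the upstream body: free every populated
	 * second-level map page, then the first-level page that held
	 * its pointers. */
	addr = 0;
	for (i = 0; i < ARRAY_SIZE(spt->protptrs) && addr < spt->maxaddr; ++i) {
		u32 **spm = spt->protptrs[i];

		if (!spm) {
			/* nothing allocated under this slot; skip its coverage */
			addr += (unsigned long)SBP_L2_COUNT * PAGE_SIZE;
			continue;
		}
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (spm[j])
				free_page((unsigned long)spm[j]);
		free_page((unsigned long)spm);
	}
	spt->maxaddr = 0;
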
60 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, in hpte_flush_range() argument
69 pgd = pgd_offset(mm, addr); in hpte_flush_range()
72 pud = pud_offset(pgd, addr); in hpte_flush_range()
75 pmd = pmd_offset(pud, addr); in hpte_flush_range()
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in hpte_flush_range()
81 pte_update(mm, addr, pte, 0, 0, 0); in hpte_flush_range()
82 addr += PAGE_SIZE; in hpte_flush_range()
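
Lines 60-82 outline a conventional four-level walk: resolve pgd, pud and pmd for the starting address, map and lock the PTE page, then call pte_update() once per page while addr advances. A sketch of how those pieces plausibly fit together; the *_none() early returns, the unlock at the end, and the name of the page-count parameter are assumptions, since the listing only shows the lines that mention addr.

	/* Sketch around lines 69-82; error handling and locking details assumed. */
	static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
				     int npages)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd))
			return;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			return;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			return;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		for (; npages > 0; --npages) {
			/* flush any hashed-in HPTE for this page */
			pte_update(mm, addr, pte, 0, 0, 0);
			addr += PAGE_SIZE;
			++pte;
		}
		pte_unmap_unlock(pte - 1, ptl);
	}
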
93 static void subpage_prot_clear(unsigned long addr, unsigned long len) in subpage_prot_clear() argument
103 limit = addr + len; in subpage_prot_clear()
106 for (; addr < limit; addr = next) { in subpage_prot_clear()
107 next = pmd_addr_end(addr, limit); in subpage_prot_clear()
108 if (addr < 0x100000000UL) { in subpage_prot_clear()
111 spm = spt->protptrs[addr >> SBP_L3_SHIFT]; in subpage_prot_clear()
115 spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)]; in subpage_prot_clear()
118 spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); in subpage_prot_clear()
120 i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in subpage_prot_clear()
122 if (addr + (nw << PAGE_SHIFT) > next) in subpage_prot_clear()
123 nw = (next - addr) >> PAGE_SHIFT; in subpage_prot_clear()
128 hpte_flush_range(mm, addr, nw); in subpage_prot_clear()
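
Lines 111-123 show how subpage_prot_clear() turns an address into table indices and how much work one loop iteration may do: the top-level slot comes from addr >> SBP_L3_SHIFT, the second-level slot from SBP_L2_SHIFT, the word within a map page from PAGE_SHIFT, and nw is clamped so the iteration never crosses the pmd_addr_end() boundary. A standalone illustration of that arithmetic; the constant values below are assumptions chosen only to make the example runnable (the real ones live in the powerpc headers), and the nw initialisation between lines 120 and 122 is inferred.

	#include <stdio.h>

	#define PAGE_SHIFT	16			/* assumed: 64K base pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PTRS_PER_PTE	(1UL << 12)		/* assumed PTE-page fan-out */
	#define SBP_L1_BITS	(PAGE_SHIFT - 2)	/* u32 protection words per map page */
	#define SBP_L2_BITS	(PAGE_SHIFT - 3)	/* pointers per directory page */
	#define SBP_L1_COUNT	(1UL << SBP_L1_BITS)
	#define SBP_L2_COUNT	(1UL << SBP_L2_BITS)
	#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
	#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)

	int main(void)
	{
		unsigned long addr = 0x123456780000UL;
		unsigned long next = addr + 64 * PAGE_SIZE;	/* stand-in for pmd_addr_end() */
		unsigned long i, nw;

		printf("L3 index: %lu\n", addr >> SBP_L3_SHIFT);
		printf("L2 index: %lu\n", (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1));
		printf("L1 index: %lu\n", (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1));

		/* how many protection words (one per base page) this iteration
		 * may touch before crossing 'next' */
		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;
		printf("words this iteration: %lu\n", nw);
		return 0;
	}
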
134 static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr, in subpage_walk_pmd_entry() argument
138 split_huge_page_pmd(vma, addr, pmd); in subpage_walk_pmd_entry()
142 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, in subpage_mark_vma_nohuge() argument
155 vma = find_vma(mm, addr); in subpage_mark_vma_nohuge()
159 if (vma && ((addr + len) <= vma->vm_start)) in subpage_mark_vma_nohuge()
163 if (vma->vm_start >= (addr + len)) in subpage_mark_vma_nohuge()
171 static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, in subpage_mark_vma_nohuge() argument
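
Lines 134-163 are the transparent-huge-page side of the code: subpage_walk_pmd_entry() splits any huge PMD it visits, and subpage_mark_vma_nohuge() finds the VMAs overlapping [addr, addr + len) with find_vma(), bailing out early if the range is unmapped (line 159) and stopping once a VMA starts beyond the end (line 163); the second definition at line 171 is the stub used when THP support is compiled out. A hedged sketch of how the callback is typically driven, assuming the pre-4.x struct mm_walk / walk_page_range() API; the VM_NOHUGEPAGE marking and the exact field layout are assumptions.

	static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
					    unsigned long len)
	{
		struct vm_area_struct *vma;
		struct mm_walk subpage_proto_walk = {
			.mm = mm,
			.pmd_entry = subpage_walk_pmd_entry,
		};

		/* find_vma() returns the first vma ending above addr; if it
		 * starts at or beyond addr + len, nothing in the range is mapped. */
		vma = find_vma(mm, addr);
		if (vma && ((addr + len) <= vma->vm_start))
			return;

		while (vma) {
			if (vma->vm_start >= (addr + len))
				break;
			vma->vm_flags |= VM_NOHUGEPAGE;	/* assumed: forbid future THP here */
			walk_page_range(vma->vm_start, vma->vm_end,
					&subpage_proto_walk);
			vma = vma->vm_next;
		}
	}
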
188 long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) in sys_subpage_prot() argument
199 if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) || in sys_subpage_prot()
200 addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE) in sys_subpage_prot()
203 if (is_hugepage_only_range(mm, addr, len)) in sys_subpage_prot()
208 subpage_prot_clear(addr, len); in sys_subpage_prot()
216 subpage_mark_vma_nohuge(mm, addr, len); in sys_subpage_prot()
217 for (limit = addr + len; addr < limit; addr = next) { in sys_subpage_prot()
218 next = pmd_addr_end(addr, limit); in sys_subpage_prot()
220 if (addr < 0x100000000UL) { in sys_subpage_prot()
223 spm = spt->protptrs[addr >> SBP_L3_SHIFT]; in sys_subpage_prot()
228 spt->protptrs[addr >> SBP_L3_SHIFT] = spm; in sys_subpage_prot()
231 spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1); in sys_subpage_prot()
239 spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); in sys_subpage_prot()
242 demote_segment_4k(mm, addr); in sys_subpage_prot()
245 i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in sys_subpage_prot()
247 if (addr + (nw << PAGE_SHIFT) > next) in sys_subpage_prot()
248 nw = (next - addr) >> PAGE_SHIFT; in sys_subpage_prot()
258 hpte_flush_range(mm, addr, nw); in sys_subpage_prot()
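
sys_subpage_prot() implements the PowerPC-only subpage_prot system call: lines 199-203 reject unaligned or out-of-range arguments and hugepage-only ranges, line 208 shows that a NULL map simply clears any existing protections, and the loop starting at line 217 walks the range one PMD span at a time, allocating the first- and second-level table pages on demand (lines 223-231), demoting each 64K segment to 4K hash PTEs (line 242), and flushing HPTEs that were already hashed in (line 258). Below is a hedged userspace sketch of calling it; __NR_subpage_prot is taken from the powerpc unistd.h, and the map encoding (one u32 per 64K page, 2 bits per 4K subpage, with 0 meaning full access, 1 write-protected, 2/3 no access) is recalled from the kernel documentation rather than from this listing, so treat it as an assumption.

	/* Hypothetical caller; builds and runs only on a 64K-page powerpc64 kernel. */
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define PAGE_64K	(64 * 1024)

	int main(void)
	{
		/* one 64K page whose 4K subpages we want to restrict */
		void *buf = mmap(NULL, PAGE_64K, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* assumed encoding: bit pair 1 set to 01 write-protects the
		 * second 4K subpage, all others stay fully accessible */
		uint32_t map[1] = { 1u << 2 };

		if (syscall(__NR_subpage_prot, (unsigned long)buf,
			    (unsigned long)PAGE_64K, map) != 0) {
			perror("subpage_prot");
			return 1;
		}

		/* a NULL map clears the protections again (line 208) */
		syscall(__NR_subpage_prot, (unsigned long)buf,
			(unsigned long)PAGE_64K, NULL);

		munmap(buf, PAGE_64K);
		return 0;
	}
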