/*
 *  Lockless get_user_pages_fast for s390
 *
 *  Copyright IBM Corp. 2010
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
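
/*
 * Overview: the walk below is lockless. It relies on the caller having
 * disabled interrupts, which on s390 keeps the page tables from being
 * freed underneath us, and on taking page references speculatively and
 * re-checking the page table entry afterwards, bailing out to the
 * regular slow path on any mismatch.
 */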

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
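		/*
		 * Read the pte once; barrier() keeps gcc from reloading
		 * *ptep, so all the checks below and pte_page() operate
		 * on a single consistent snapshot.
		 */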
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take the slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page, *tail;
	int refs;

	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
	mask = result | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

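	/*
	 * Record and count the subpages first; the references are then
	 * taken on the head page in one page_cache_add_speculative()
	 * call and rolled back if the pmd changed underneath us.
	 */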
	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

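	/*
	 * The upper page table levels may be folded on s390: only a
	 * region-third-table entry points to a separate segment (pmd)
	 * table, otherwise pudp itself already indexes the segment
	 * table.
	 */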
	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		/*
		 * The pmd_trans_splitting() check below explains why
		 * pmdp_splitting_flush() has to serialize with
		 * smp_call_function() against our disabled IRQs, to stop
		 * this gup-fast code from running while we set the
		 * splitting bit in the pmd. Returning zero will take
		 * the slow path that will call wait_split_huge_page()
		 * if the pmd is still in splitting state.
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

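	/*
	 * Same folding as in gup_pmd_range(), one level up: only a
	 * region-second-table entry points to a separate region-third
	 * (pud) table.
	 */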
	pudp = (pud_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

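	/* Reject empty or wrapping ranges and anything above TASK_SIZE. */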
	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end <= start) || (end > TASK_SIZE))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(current, mm, start,
			     nr_pages - nr, write, 0, pages);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
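
/*
 * Usage sketch (illustrative only, not compiled as part of this file;
 * "uaddr" stands for a user-space address the caller already holds):
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(uaddr, 1, 1, &page) == 1) {
 *		... access the page ...
 *		put_page(page);
 *	}
 */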