#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
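
/*
 * Example (illustrative sketch, not part of this header): a writeback
 * completion path records errors with mapping_set_error(), and a later
 * fsync-style consumer retrieves and clears them, mirroring the logic of
 * filemap_check_errors() in mm/filemap.c:
 *
 *	On I/O completion:
 *		if (err)
 *			mapping_set_error(mapping, err);
 *
 *	When later reporting the error to userspace:
 *		ret = 0;
 *		if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *			ret = -ENOSPC;
 *		if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *			ret = -EIO;
 */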

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return 0;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
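
/*
 * Example (illustrative sketch): a filesystem that must not recurse into
 * itself during page cache allocation can drop __GFP_FS while setting up an
 * inode, and callers can further restrict a passed-in mask:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 */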

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
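
/*
 * Worked example: with 4KB pages, PAGE_CACHE_ALIGN() rounds an address up
 * to the next page boundary:
 *
 *	PAGE_CACHE_ALIGN(5000) == (5000 + 4095) & ~4095 == 8192
 */
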
void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern (see the sketch after page_cache_get_speculative()
 * below):
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write; see the sketch after
 * page_unfreeze_refs() below):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
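
/*
 * Example (simplified sketch of the lookup side described above; the real
 * implementation, find_get_entry() in mm/filemap.c, also handles exceptional
 * radix-tree entries):
 *
 *	rcu_read_lock();
 *	repeat:
 *		page = radix_tree_lookup(&mapping->page_tree, offset);
 *		if (page) {
 *			if (!page_cache_get_speculative(page))
 *				goto repeat;	// freed under us: step 2 failed
 *			// step 3: recheck the page is still at this slot
 *			if (page != radix_tree_lookup(&mapping->page_tree,
 *						      offset)) {
 *				page_cache_release(page);
 *				goto repeat;
 *			}
 *		}
 *	rcu_read_unlock();
 */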

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
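
/*
 * Example (simplified sketch of the remove side, steps A-C above; compare
 * __remove_mapping() in mm/vmscan.c, which also re-checks PageDirty and backs
 * out with page_unfreeze_refs() when it cannot free the page):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2))		// step A: cache ref + our ref
 *		goto cannot_free;
 *	__delete_from_page_cache(page, NULL, memcg);	// step B
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...					// step C: caller frees the page
 */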

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
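
/*
 * Example (typical caller pattern): look up a page, use it, then drop the
 * reference taken by the lookup:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		...			// e.g. check PageUptodate(page)
 *		page_cache_release(page);
 *	}
 */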

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
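
/*
 * Example (typical caller pattern): the returned page is locked, so it must
 * be unlocked as well as released when done:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...				// initialise or read the page
 *	unlock_page(page);
 *	page_cache_release(page);
 */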

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugepage should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);
	else
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/*
 * Return the byte offset into the filesystem object for this page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
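
/*
 * Worked example: with 4KB pages, the page at index 3 starts at byte offset
 * 3 << 12 == 12288 in the file.
 */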

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return likely(!test_and_set_bit_lock(PG_locked, &page->flags));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
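
/*
 * Example (typical caller pattern): take the lock, and bail out cleanly if
 * the task is fatally signalled while waiting:
 *
 *	error = lock_page_killable(page);
 *	if (error) {			// -EINTR: back out
 *		page_cache_release(page);
 *		return error;
 *	}
 *	...				// page is now locked
 *	unlock_page(page);
 */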

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased reference count (page->_count) so that
 * the page won't go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
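
/*
 * Example (typical caller pattern): wait for an in-flight read to finish,
 * then check whether it succeeded:
 *
 *	wait_on_page_locked(page);	// reference already held, see above
 *	if (!PageUptodate(page))
 *		return -EIO;
 */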

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
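
/*
 * Example (hedged sketch): a read path that copies to userspace with page
 * faults disabled can prefault the destination first, so the atomic copy is
 * unlikely to fail:
 *
 *	if (fault_in_pages_writeable(buf, count))
 *		return -EFAULT;		// or retry via a slow path
 *	kaddr = kmap_atomic(page);	// pagefaults are disabled here
 *	left = __copy_to_user_inatomic(buf, kaddr + offset, count);
 *	kunmap_atomic(kaddr);
 *	if (left)
 *		...			// fall back to a faulting slow path
 */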

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}
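
/*
 * Example (hedged sketch, mirroring the pattern of generic_perform_write()
 * in mm/filemap.c): prefault the source buffer before taking page locks in
 * the write path, so a later atomic copy-from-user is unlikely to fault
 * while the page is locked:
 *
 *	if (fault_in_multipages_readable(buf, bytes))
 *		return -EFAULT;		// or retry with smaller 'bytes'
 *	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 *				    &page, &fsdata);
 *	...				// copy with pagefaults disabled
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 */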

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow,
				     struct mem_cgroup *memcg);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
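
/*
 * Example (hedged sketch of a typical read-in path; compare
 * do_read_cache_page() in mm/filemap.c):
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return ERR_PTR(-ENOMEM);
 *	err = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (err) {
 *		page_cache_release(page);
 *		return ERR_PTR(err);
 *	}
 *	err = filler(data, page);	// page is locked; filler unlocks it
 */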

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
			       PAGE_CACHE_SHIFT;
}
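
/*
 * Worked example: with 4KB pages, an inode with i_size == 10000 spans
 * (10000 + 4095) >> 12 == 3 directory pages.
 */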

#endif /* _LINUX_PAGEMAP_H */