Lines Matching refs:page

102 #define page_cache_get(page)		get_page(page)	argument
103 #define page_cache_release(page)	put_page(page)	argument
104 void release_pages(struct page **pages, int nr, bool cold);
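
page_cache_get() and page_cache_release() are thin wrappers around get_page()/put_page(), and release_pages() drops references on a whole batch of pages at once. A minimal sketch of the pairing (the inspect_page() helper name is made up for illustration):

#include <linux/pagemap.h>

/* Illustrative only: pin the page while we look at it, then drop the pin. */
static void inspect_page(struct page *page)
{
	page_cache_get(page);		/* equivalent to get_page(page) */
	/* ... examine the page while the extra reference keeps it alive ... */
	page_cache_release(page);	/* equivalent to put_page(page) */
}
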
150 static inline int page_cache_get_speculative(struct page *page) in page_cache_get_speculative() argument
167 VM_BUG_ON_PAGE(page_count(page) == 0, page); in page_cache_get_speculative()
168 atomic_inc(&page->_count); in page_cache_get_speculative()
171 if (unlikely(!get_page_unless_zero(page))) { in page_cache_get_speculative()
180 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_get_speculative()
188 static inline int page_cache_add_speculative(struct page *page, int count) in page_cache_add_speculative() argument
196 VM_BUG_ON_PAGE(page_count(page) == 0, page); in page_cache_add_speculative()
197 atomic_add(count, &page->_count); in page_cache_add_speculative()
200 if (unlikely(!atomic_add_unless(&page->_count, count, 0))) in page_cache_add_speculative()
203 VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); in page_cache_add_speculative()
208 static inline int page_freeze_refs(struct page *page, int count) in page_freeze_refs() argument
210 return likely(atomic_cmpxchg(&page->_count, count, 0) == count); in page_freeze_refs()
213 static inline void page_unfreeze_refs(struct page *page, int count) in page_unfreeze_refs() argument
215 VM_BUG_ON_PAGE(page_count(page) != 0, page); in page_unfreeze_refs()
218 atomic_set(&page->_count, count); in page_unfreeze_refs()
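
page_cache_get_speculative()/page_cache_add_speculative() exist for lockless page cache lookups: the caller takes a reference without holding any lock and must then re-check that the page is still the one installed at that index, while page_freeze_refs()/page_unfreeze_refs() let reclaim and migration atomically drop the count to zero so that concurrent speculative gets fail. A heavily simplified sketch of that lookup pattern, assuming the usual radix-tree and RCU helpers (the lookup_sketch() name is made up; mm/filemap.c's find_get_entry() is the real implementation):

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * Sketch only: take a speculative reference under RCU, then verify the page
 * is still the one at this offset; otherwise drop the reference and retry.
 */
static struct page *lookup_sketch(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	rcu_read_lock();
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && !radix_tree_exceptional_entry(page)) {
		if (!page_cache_get_speculative(page)) {
			rcu_read_unlock();
			goto repeat;	/* page was being freed */
		}
		/* The page may have been truncated and reused meanwhile. */
		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
			page_cache_release(page);
			rcu_read_unlock();
			goto repeat;
		}
	} else {
		page = NULL;		/* ignore shadow/exceptional entries here */
	}
	rcu_read_unlock();
	return page;
}
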
222 extern struct page *__page_cache_alloc(gfp_t gfp);
224 static inline struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
230 static inline struct page *page_cache_alloc(struct address_space *x) in page_cache_alloc()
235 static inline struct page *page_cache_alloc_cold(struct address_space *x) in page_cache_alloc_cold()
240 static inline struct page *page_cache_alloc_readahead(struct address_space *x) in page_cache_alloc_readahead()
246 typedef int filler_t(void *, struct page *);
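
__page_cache_alloc() is the allocation primitive (NUMA-aware when CONFIG_NUMA is set); page_cache_alloc(), page_cache_alloc_cold() and page_cache_alloc_readahead() differ only in the GFP flags they derive from the mapping, and filler_t is the callback type read_cache_page() uses to populate a freshly allocated page. A hedged sketch of what the cold variant boils down to (alloc_cold_sketch() is an illustrative name):

#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Roughly what page_cache_alloc_cold(mapping) expands to. */
static struct page *alloc_cold_sketch(struct address_space *mapping)
{
	return __page_cache_alloc(mapping_gfp_mask(mapping) | __GFP_COLD);
}
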
260 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
273 static inline struct page *find_get_page(struct address_space *mapping, in find_get_page()
279 static inline struct page *find_get_page_flags(struct address_space *mapping, in find_get_page_flags()
299 static inline struct page *find_lock_page(struct address_space *mapping, in find_lock_page()
324 static inline struct page *find_or_create_page(struct address_space *mapping, in find_or_create_page()
345 static inline struct page *grab_cache_page_nowait(struct address_space *mapping, in grab_cache_page_nowait()
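
find_get_page(), find_get_page_flags(), find_lock_page(), find_or_create_page() and grab_cache_page_nowait() are all thin wrappers around pagecache_get_page() with different FGP_* flag combinations; every page they return carries a reference the caller must drop (and, for the *_lock variants, the page lock). A small usage sketch (peek_at_index() is an illustrative name):

#include <linux/pagemap.h>

/*
 * Illustrative lookup: find a cached page at @index, use it, and drop the
 * reference.  find_lock_page() would additionally return the page locked.
 */
static void peek_at_index(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return;		/* not cached (or only a shadow entry) */
	/* ... read from the page ... */
	page_cache_release(page);
}
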
353 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
354 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
356 unsigned int nr_entries, struct page **entries,
359 unsigned int nr_pages, struct page **pages);
361 unsigned int nr_pages, struct page **pages);
363 int tag, unsigned int nr_pages, struct page **pages);
365 struct page *grab_cache_page_write_begin(struct address_space *mapping,
371 static inline struct page *grab_cache_page(struct address_space *mapping, in grab_cache_page()
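
The fragments above are continuation lines of the batch lookups find_get_entries(), find_get_pages(), find_get_pages_contig() and find_get_pages_tag(), which fill a caller-supplied array and take a reference on each page returned; grab_cache_page()/grab_cache_page_write_begin() find-or-create a single locked page for buffered writes. A hedged sketch of a gang-lookup loop (walk_mapping() is an illustrative name; pagevec_lookup()/pagevec_release() wrap the same call):

#include <linux/kernel.h>
#include <linux/pagemap.h>

/* Illustrative: walk the cached pages of a mapping in small batches. */
static void walk_mapping(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned int i, nr;

	while ((nr = find_get_pages(mapping, index, ARRAY_SIZE(pages), pages))) {
		for (i = 0; i < nr; i++) {
			/* ... examine pages[i] ... */
			index = pages[i]->index + 1;
			page_cache_release(pages[i]);
		}
	}
}
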
377 extern struct page * read_cache_page(struct address_space *mapping,
379 extern struct page * read_cache_page_gfp(struct address_space *mapping,
384 static inline struct page *read_mapping_page(struct address_space *mapping, in read_mapping_page()
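
read_cache_page() finds or creates the page at the given index and, if it is not yet up to date, runs the supplied filler_t to populate it, returning a referenced page or an ERR_PTR; read_mapping_page() is the convenience form that uses the mapping's ->readpage as the filler. A usage sketch (read_one_page() is an illustrative name):

#include <linux/err.h>
#include <linux/pagemap.h>

/* Illustrative: read one page through the mapping and drop the reference. */
static int read_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... page is up to date here; map/copy its contents as needed ... */
	page_cache_release(page);
	return 0;
}
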
395 static inline pgoff_t page_to_pgoff(struct page *page) in page_to_pgoff() argument
397 if (unlikely(PageHeadHuge(page))) in page_to_pgoff()
398 return page->index << compound_order(page); in page_to_pgoff()
400 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); in page_to_pgoff()
406 static inline loff_t page_offset(struct page *page) in page_offset() argument
408 return ((loff_t)page->index) << PAGE_CACHE_SHIFT; in page_offset()
411 static inline loff_t page_file_offset(struct page *page) in page_file_offset() argument
413 return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; in page_file_offset()
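
These helpers convert a page to byte offsets: page_offset() shifts page->index by PAGE_CACHE_SHIFT, page_file_offset() goes through page_file_index() so it is also valid for swap cache pages, and page_to_pgoff() yields the offset in PAGE_SIZE units with a special case for huge pages. For example (page_byte_range() is an illustrative name):

#include <linux/pagemap.h>

/*
 * Illustrative: the byte range in the file covered by a page cache page.
 * With 4 KiB pages, a page at index 3 covers bytes 12288..16383.
 */
static void page_byte_range(struct page *page, loff_t *start, loff_t *end)
{
	*start = page_offset(page);
	*end   = *start + PAGE_CACHE_SIZE - 1;
}
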
430 extern void __lock_page(struct page *page);
431 extern int __lock_page_killable(struct page *page);
432 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
434 extern void unlock_page(struct page *page);
436 static inline void __set_page_locked(struct page *page) in __set_page_locked() argument
438 __set_bit(PG_locked, &page->flags); in __set_page_locked()
441 static inline void __clear_page_locked(struct page *page) in __clear_page_locked() argument
443 __clear_bit(PG_locked, &page->flags); in __clear_page_locked()
446 static inline int trylock_page(struct page *page) in trylock_page() argument
448 return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); in trylock_page()
454 static inline void lock_page(struct page *page) in lock_page() argument
457 if (!trylock_page(page)) in lock_page()
458 __lock_page(page); in lock_page()
466 static inline int lock_page_killable(struct page *page) in lock_page_killable() argument
469 if (!trylock_page(page)) in lock_page_killable()
470 return __lock_page_killable(page); in lock_page_killable()
481 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, in lock_page_or_retry() argument
485 return trylock_page(page) || __lock_page_or_retry(page, mm, flags); in lock_page_or_retry()
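
lock_page() is the sleeping acquire of PG_locked, built from the trylock_page() fast path plus the __lock_page() slow path; lock_page_killable() returns an error instead of sleeping through a fatal signal, lock_page_or_retry() is the page-fault variant that may drop mmap_sem, and the __set_page_locked()/__clear_page_locked() forms are non-atomic and only safe on pages nobody else can see yet. A sketch of the usual pattern (touch_page() is an illustrative name, assuming set_page_dirty() from <linux/mm.h>):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative: lock a page before modifying it, bailing out on a fatal signal. */
static int touch_page(struct page *page)
{
	int err = lock_page_killable(page);

	if (err)
		return err;		/* interrupted by a fatal signal */
	/* ... modify page contents ... */
	set_page_dirty(page);
	unlock_page(page);
	return 0;
}
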
492 extern void wait_on_page_bit(struct page *page, int bit_nr);
494 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
495 extern int wait_on_page_bit_killable_timeout(struct page *page,
498 static inline int wait_on_page_locked_killable(struct page *page) in wait_on_page_locked_killable() argument
500 if (PageLocked(page)) in wait_on_page_locked_killable()
501 return wait_on_page_bit_killable(page, PG_locked); in wait_on_page_locked_killable()
505 extern wait_queue_head_t *page_waitqueue(struct page *page);
506 static inline void wake_up_page(struct page *page, int bit) in wake_up_page() argument
508 __wake_up_bit(page_waitqueue(page), &page->flags, bit); in wake_up_page()
518 static inline void wait_on_page_locked(struct page *page) in wait_on_page_locked() argument
520 if (PageLocked(page)) in wait_on_page_locked()
521 wait_on_page_bit(page, PG_locked); in wait_on_page_locked()
527 static inline void wait_on_page_writeback(struct page *page) in wait_on_page_writeback() argument
529 if (PageWriteback(page)) in wait_on_page_writeback()
530 wait_on_page_bit(page, PG_writeback); in wait_on_page_writeback()
533 extern void end_page_writeback(struct page *page);
534 void wait_for_stable_page(struct page *page);
536 void page_endio(struct page *page, int rw, int err);
541 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
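
The wait helpers sleep on page flag bits via the hashed wait queues returned by page_waitqueue(): wait_on_page_locked() and wait_on_page_writeback() cover the common cases, end_page_writeback() is what I/O completion calls to clear PG_writeback and wake waiters, and wait_for_stable_page() additionally waits when the backing device needs pages to stay unmodified during writeback. A sketch of waiting out in-flight writeback before modifying a page (settle_page() is an illustrative name):

#include <linux/pagemap.h>

/*
 * Illustrative: make sure any writeback in flight for this page has finished
 * before the caller redirties it (roughly the decision wait_for_stable_page()
 * makes automatically based on the backing device).
 */
static void settle_page(struct page *page)
{
	lock_page(page);
	wait_on_page_writeback(page);	/* sleeps on PG_writeback if set */
	/* ... safe to modify the page's contents now ... */
	unlock_page(page);
}
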
656 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
658 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
660 extern void delete_from_page_cache(struct page *page);
661 extern void __delete_from_page_cache(struct page *page, void *shadow,
663 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
669 static inline int add_to_page_cache(struct page *page, in add_to_page_cache() argument
674 __set_page_locked(page); in add_to_page_cache()
675 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); in add_to_page_cache()
677 __clear_page_locked(page); in add_to_page_cache()
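
add_to_page_cache() shows the required ordering for inserting a new page: mark it locked with the non-atomic __set_page_locked() while it is still invisible to others, insert it with add_to_page_cache_locked(), and clear the bit again on failure; most callers want add_to_page_cache_lru(), which also puts the page on the LRU. A hedged sketch of allocating and inserting a fresh page (insert_new_page() is an illustrative name):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/*
 * Illustrative: allocate a fresh page and insert it into the page cache at
 * @index.  On success the page is returned locked and referenced, much as
 * grab_cache_page()/pagecache_get_page() would hand it back.
 */
static struct page *insert_new_page(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);

	err = add_to_page_cache_lru(page, mapping, index, gfp);
	if (err) {
		page_cache_release(page);
		return ERR_PTR(err);	/* e.g. -EEXIST if someone beat us to it */
	}
	return page;
}
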