Searched refs: page (results 1 - 200 of 4788), sorted by relevance


/linux-4.1.27/include/linux/
page_owner.h
8 extern void __reset_page_owner(struct page *page, unsigned int order);
9 extern void __set_page_owner(struct page *page,
12 static inline void reset_page_owner(struct page *page, unsigned int order) reset_page_owner() argument
17 __reset_page_owner(page, order); reset_page_owner()
20 static inline void set_page_owner(struct page *page, set_page_owner() argument
26 __set_page_owner(page, order, gfp_mask); set_page_owner()
29 static inline void reset_page_owner(struct page *page, unsigned int order) reset_page_owner() argument
32 static inline void set_page_owner(struct page *page, set_page_owner() argument
mm_inline.h
8 * page_is_file_cache - should the page be on a file LRU or anon LRU?
9 * @page: the page to test
11 * Returns 1 if @page is a page cache page backed by a regular filesystem,
12 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
13 * Used by functions that manipulate the LRU lists, to sort a page
16 * We would like to get this info without a page flag, but the state
17 * needs to survive until the page is last deleted from the LRU, which
20 static inline int page_is_file_cache(struct page *page) page_is_file_cache() argument
22 return !PageSwapBacked(page); page_is_file_cache()
25 static __always_inline void add_page_to_lru_list(struct page *page, add_page_to_lru_list() argument
28 int nr_pages = hpage_nr_pages(page); add_page_to_lru_list()
30 list_add(&page->lru, &lruvec->lists[lru]); add_page_to_lru_list()
34 static __always_inline void del_page_from_lru_list(struct page *page, del_page_from_lru_list() argument
37 int nr_pages = hpage_nr_pages(page); del_page_from_lru_list()
39 list_del(&page->lru); del_page_from_lru_list()
44 * page_lru_base_type - which LRU list type should a page be on?
45 * @page: the page to test
49 * Returns the base LRU type - file or anon - @page should be on.
51 static inline enum lru_list page_lru_base_type(struct page *page) page_lru_base_type() argument
53 if (page_is_file_cache(page)) page_lru_base_type()
59 * page_off_lru - which LRU list was page on? clearing its lru flags.
60 * @page: the page to test
62 * Returns the LRU list a page was on, as an index into the array of LRU
65 static __always_inline enum lru_list page_off_lru(struct page *page) page_off_lru() argument
69 if (PageUnevictable(page)) { page_off_lru()
70 __ClearPageUnevictable(page); page_off_lru()
73 lru = page_lru_base_type(page); page_off_lru()
74 if (PageActive(page)) { page_off_lru()
75 __ClearPageActive(page); page_off_lru()
83 * page_lru - which LRU list should a page be on?
84 * @page: the page to test
86 * Returns the LRU list a page should be on, as an index
89 static __always_inline enum lru_list page_lru(struct page *page) page_lru() argument
93 if (PageUnevictable(page)) page_lru()
96 lru = page_lru_base_type(page); page_lru()
97 if (PageActive(page)) page_lru()
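
A minimal usage sketch of the helpers above, loosely modelled on what mm/swap.c does when it activates a page; activate_on_lru() is an invented name, and the caller is assumed to hold the appropriate LRU lock, as the real callers of these helpers do.

    /* Hypothetical: move @page from its inactive list to the matching
     * active list, using only the helpers declared above. */
    static void activate_on_lru(struct page *page, struct lruvec *lruvec)
    {
        /* remove from whichever list page_lru() currently reports */
        del_page_from_lru_list(page, lruvec, page_lru(page));
        SetPageActive(page);
        /* page_lru() now reports the corresponding *_ACTIVE list */
        add_page_to_lru_list(page, lruvec, page_lru(page));
    }
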
balloon_compaction.h
10 * As the page isolation scanning step a compaction thread does is a lockless
11 * procedure (from a page standpoint), it might bring some racy situations while
12 * performing balloon page compaction. In order to sort out these racy scenarios
13 * and safely perform balloon's page compaction and migration we must, always,
16 * i. when updating a balloon's page ->mapping element, strictly do it under
19 * +-page_lock(page);
21 * ... page->mapping updates here ...
23 * ii. before isolating or dequeueing a balloon page from the balloon device
24 * pages list, the page reference counter must be raised by one and the
25 * extra refcount must be dropped when the page is enqueued back into
26 * the balloon device page list, thus a balloon page keeps its reference
29 iii. after the lockless scan step has selected a potential balloon page for
31 * under the proper page lock, to ensure isolating a valid balloon page
35 * page lock together with removing page from balloon device page list.
38 * the aforementioned balloon page corner case, as well as to ensure the simple
47 #include <linux/page-flags.h>
56 * have to cope for page compaction / migration, as well as it serves the
57 * balloon driver as a page book-keeper for its registered balloon devices.
63 int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
64 struct page *page, enum migrate_mode mode);
67 extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
68 extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
79 extern bool balloon_page_isolate(struct page *page);
80 extern void balloon_page_putback(struct page *page);
81 extern int balloon_page_migrate(struct page *newpage,
82 struct page *page, enum migrate_mode mode);
85 * __is_movable_balloon_page - helper to perform @page PageBalloon tests
87 static inline bool __is_movable_balloon_page(struct page *page) __is_movable_balloon_page() argument
89 return PageBalloon(page); __is_movable_balloon_page()
94 * and PagePrivate to check that the page is not
97 * As we might return false positives in the case of a balloon page being just
98 * released under us, this needs to be re-tested later, under the page lock.
100 static inline bool balloon_page_movable(struct page *page) balloon_page_movable() argument
102 return PageBalloon(page) && PagePrivate(page); balloon_page_movable()
106 * isolated_balloon_page - identify an isolated balloon page on private
107 * compaction/migration page lists.
109 static inline bool isolated_balloon_page(struct page *page) isolated_balloon_page() argument
111 return PageBalloon(page); isolated_balloon_page()
115 * balloon_page_insert - insert a page into the balloon's page list and make
116 * the page->private assignment accordingly.
118 * @page : page to be assigned as a 'balloon page'
120 * Caller must ensure the page is locked and the spin_lock protecting balloon
121 * pages list is held before inserting a page into the balloon device.
124 struct page *page) balloon_page_insert()
126 __SetPageBalloon(page); balloon_page_insert()
127 SetPagePrivate(page); balloon_page_insert()
128 set_page_private(page, (unsigned long)balloon); balloon_page_insert()
129 list_add(&page->lru, &balloon->pages); balloon_page_insert()
133 * balloon_page_delete - delete a page from balloon's page list and clear
134 * the page->private assignment accordingly.
135 * @page : page to be released from balloon's page list
137 * Caller must ensure the page is locked and the spin_lock protecting balloon
138 * pages list is held before deleting a page from the balloon device.
140 static inline void balloon_page_delete(struct page *page) balloon_page_delete() argument
142 __ClearPageBalloon(page); balloon_page_delete()
143 set_page_private(page, 0); balloon_page_delete()
144 if (PagePrivate(page)) { balloon_page_delete()
145 ClearPagePrivate(page); balloon_page_delete()
146 list_del(&page->lru); balloon_page_delete()
152 * that enqueues the given page.
154 static inline struct balloon_dev_info *balloon_page_device(struct page *page) balloon_page_device() argument
156 return (struct balloon_dev_info *)page_private(page); balloon_page_device()
167 struct page *page) balloon_page_insert()
169 __SetPageBalloon(page); balloon_page_insert()
170 list_add(&page->lru, &balloon->pages); balloon_page_insert()
173 static inline void balloon_page_delete(struct page *page) balloon_page_delete() argument
175 __ClearPageBalloon(page); balloon_page_delete()
176 list_del(&page->lru); balloon_page_delete()
179 static inline bool __is_movable_balloon_page(struct page *page) __is_movable_balloon_page() argument
184 static inline bool balloon_page_movable(struct page *page) balloon_page_movable() argument
189 static inline bool isolated_balloon_page(struct page *page) isolated_balloon_page() argument
194 static inline bool balloon_page_isolate(struct page *page) balloon_page_isolate() argument
199 static inline void balloon_page_putback(struct page *page) balloon_page_putback() argument
204 static inline int balloon_page_migrate(struct page *newpage, balloon_page_migrate() argument
205 struct page *page, enum migrate_mode mode) balloon_page_migrate()
123 balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) balloon_page_insert() argument
166 balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) balloon_page_insert() argument
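
A minimal sketch of rule i above as a balloon driver might apply it when attaching a freshly allocated (and therefore already referenced) page; the helper name is invented, the pages_lock field name is assumed from this kernel's struct balloon_dev_info, and real drivers normally just call balloon_page_enqueue(), which does this internally.

    static void my_balloon_attach_page(struct balloon_dev_info *b_dev_info,
                                       struct page *page)
    {
        unsigned long flags;

        lock_page(page);                        /* rule i: page lock first */
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        /* sets PageBalloon, PagePrivate, page->private and links ->lru */
        balloon_page_insert(b_dev_info, page);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        unlock_page(page);
    }
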
migrate.h
8 typedef struct page *new_page_t(struct page *page, unsigned long private,
10 typedef void free_page_t(struct page *page, unsigned long private);
14 * - negative errno on page migration failure;
15 * - zero on page migration success;
33 struct page *, struct page *, enum migrate_mode);
39 extern void migrate_page_copy(struct page *newpage, struct page *page);
41 struct page *newpage, struct page *page);
43 struct page *newpage, struct page *page,
57 static inline void migrate_page_copy(struct page *newpage, migrate_page_copy() argument
58 struct page *page) {}
61 struct page *newpage, struct page *page) migrate_huge_page_move_mapping()
70 extern int migrate_misplaced_page(struct page *page,
77 static inline int migrate_misplaced_page(struct page *page, migrate_misplaced_page() argument
89 struct page *page, int node);
95 struct page *page, int node) migrate_misplaced_transhuge_page()
60 migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page) migrate_huge_page_move_mapping() argument
91 migrate_misplaced_transhuge_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, pmd_t entry, unsigned long address, struct page *page, int node) migrate_misplaced_transhuge_page() argument
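
A minimal sketch of the new_page_t/free_page_t callback pair declared above, in the shape migrate_pages() expects; the callback names, the GFP mask and the use of the private argument as a target node id are illustrative assumptions, not anything this header prescribes.

    static struct page *my_alloc_dst(struct page *page, unsigned long private,
                                     int **result)
    {
        int nid = (int)private;        /* caller encodes a target node */

        return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
    }

    static void my_free_dst(struct page *page, unsigned long private)
    {
        __free_page(page);             /* destination page went unused */
    }

    /* err = migrate_pages(&pagelist, my_alloc_dst, my_free_dst,
     *                     (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL); */
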
page-flags.h
2 * Macros for manipulating and testing page->flags
17 * Various page->flags bits:
23 * specific data (which is normally at page->private). It can be used by
30 * PG_locked also pins a page in pagecache, and blocks truncation of the file
33 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
36 * PG_uptodate tells whether the page's contents are valid. When a read
37 * completes, the page becomes uptodate, unless a disk I/O error happened.
39 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
42 * PG_error is set to indicate that an I/O error occurred on this page.
44 * PG_arch_1 is an architecture specific page state bit. The generic code
45 * guarantees that this bit is cleared for a page when it first is entered into
46 * the page cache.
50 * struct page (these bits with information) are always mapped into kernel
53 * PG_hwpoison indicates that a page got corrupted in hardware and contains
60 * locked- and dirty-page accounting.
62 * The page flags field is split into two parts, the main flags area
90 PG_head, /* A head page */
91 PG_tail, /* A tail page */
93 PG_compound, /* A compound page */
95 PG_swapcache, /* Swap page: swp_entry_t in private */
107 PG_hwpoison, /* hardware poisoned page. Don't touch */
117 /* Two page bits are conscripted by FS-Cache to maintain local caching
121 PG_fscache = PG_private_2, /* page backed by cache */
124 /* Pinned in Xen as a read-only pagetable page. */
128 /* Has a grant mapping of another (foreign) domain's page. */
138 * Macros to create function definitions for page flags
141 static inline int Page##uname(const struct page *page) \
142 { return test_bit(PG_##lname, &page->flags); }
145 static inline void SetPage##uname(struct page *page) \
146 { set_bit(PG_##lname, &page->flags); }
149 static inline void ClearPage##uname(struct page *page) \
150 { clear_bit(PG_##lname, &page->flags); }
153 static inline void __SetPage##uname(struct page *page) \
154 { __set_bit(PG_##lname, &page->flags); }
157 static inline void __ClearPage##uname(struct page *page) \
158 { __clear_bit(PG_##lname, &page->flags); }
161 static inline int TestSetPage##uname(struct page *page) \
162 { return test_and_set_bit(PG_##lname, &page->flags); }
165 static inline int TestClearPage##uname(struct page *page) \
166 { return test_and_clear_bit(PG_##lname, &page->flags); }
169 static inline int __TestClearPage##uname(struct page *page) \
170 { return __test_and_clear_bit(PG_##lname, &page->flags); }
182 static inline int Page##uname(const struct page *page) { return 0; }
185 static inline void SetPage##uname(struct page *page) { }
188 static inline void ClearPage##uname(struct page *page) { }
191 static inline void __ClearPage##uname(struct page *page) { }
194 static inline int TestSetPage##uname(struct page *page) { return 0; }
197 static inline int TestClearPage##uname(struct page *page) { return 0; }
200 static inline int __TestClearPage##uname(struct page *page) { return 0; }
208 struct page; /* forward declaration */
230 * Private page markings that may be used by the filesystem that owns the page
241 * risky: they bypass page accounting.
293 * On an anonymous page mapped into a user virtual memory area,
294 * page->mapping points to its anon_vma, not to a struct address_space;
297 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
299 * and then page->mapping points, not to an anon_vma, but to a private
300 * structure which KSM associates with that merged page. See ksm.h.
305 * address_space which maps the page from disk; whereas "page_mapped"
306 * refers to user virtual address space into which the page is mapped.
312 static inline int PageAnon(struct page *page) PageAnon() argument
314 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; PageAnon()
319 * A KSM page is one of those write-protected "shared pages" or "merged pages"
320 * which KSM maps into multiple mms, wherever identical anonymous page content
321 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
322 * anon_vma, but to that page's node of the stable tree.
324 static inline int PageKsm(struct page *page) PageKsm() argument
326 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == PageKsm()
333 u64 stable_page_flags(struct page *page);
335 static inline int PageUptodate(struct page *page) PageUptodate() argument
337 int ret = test_bit(PG_uptodate, &(page)->flags); PageUptodate()
340 * Must ensure that the data we read out of the page is loaded PageUptodate()
341 * _after_ we've loaded page->flags to check for PageUptodate. PageUptodate()
342 * We can skip the barrier if the page is not uptodate, because PageUptodate()
353 static inline void __SetPageUptodate(struct page *page) __SetPageUptodate() argument
356 __set_bit(PG_uptodate, &(page)->flags); __SetPageUptodate()
359 static inline void SetPageUptodate(struct page *page) SetPageUptodate() argument
363 * so that all previous stores issued in order to bring the page SetPageUptodate()
367 set_bit(PG_uptodate, &(page)->flags); SetPageUptodate()
372 int test_clear_page_writeback(struct page *page);
373 int __test_set_page_writeback(struct page *page, bool keep_write);
375 #define test_set_page_writeback(page) \
376 __test_set_page_writeback(page, false)
377 #define test_set_page_writeback_keepwrite(page) \
378 __test_set_page_writeback(page, true)
380 static inline void set_page_writeback(struct page *page) set_page_writeback() argument
382 test_set_page_writeback(page); set_page_writeback()
385 static inline void set_page_writeback_keepwrite(struct page *page) set_page_writeback_keepwrite() argument
387 test_set_page_writeback_keepwrite(page); set_page_writeback_keepwrite()
392 * System with lots of page flags available. This allows separate
402 static inline int PageCompound(struct page *page) PageCompound() argument
404 return page->flags & ((1L << PG_head) | (1L << PG_tail)); PageCompound()
408 static inline void ClearPageCompound(struct page *page) ClearPageCompound() argument
410 BUG_ON(!PageHead(page)); ClearPageCompound()
411 ClearPageHead(page); ClearPageCompound()
419 * Reduce page flag use as much as possible by overlapping
420 * compound page flags with the flags used for page cache pages. Possible
429 * head and tail of a compound page. This saves one page flag
430 * but makes it impossible to use compound pages for the page cache.
432 * if compound pages enter the page cache.
434 * PG_compound & PG_reclaim => Tail page
435 * PG_compound & ~PG_reclaim => Head page
440 static inline int PageHead(struct page *page) PageHead() argument
442 return ((page->flags & PG_head_tail_mask) == PG_head_mask); PageHead()
445 static inline int PageTail(struct page *page) PageTail() argument
447 return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); PageTail()
450 static inline void __SetPageTail(struct page *page) __SetPageTail() argument
452 page->flags |= PG_head_tail_mask; __SetPageTail()
455 static inline void __ClearPageTail(struct page *page) __ClearPageTail() argument
457 page->flags &= ~PG_head_tail_mask; __ClearPageTail()
461 static inline void ClearPageCompound(struct page *page) ClearPageCompound() argument
463 BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); ClearPageCompound()
464 clear_bit(PG_compound, &page->flags); ClearPageCompound()
471 int PageHuge(struct page *page);
472 int PageHeadHuge(struct page *page);
473 bool page_huge_active(struct page *page);
478 static inline bool page_huge_active(struct page *page) page_huge_active() argument
494 static inline int PageTransHuge(struct page *page) PageTransHuge() argument
496 VM_BUG_ON_PAGE(PageTail(page), page); PageTransHuge() local
497 return PageHead(page); PageTransHuge()
505 static inline int PageTransCompound(struct page *page) PageTransCompound() argument
507 return PageCompound(page); PageTransCompound()
515 static inline int PageTransTail(struct page *page) PageTransTail() argument
517 return PageTail(page); PageTransTail()
522 static inline int PageTransHuge(struct page *page) PageTransHuge() argument
527 static inline int PageTransCompound(struct page *page) PageTransCompound() argument
532 static inline int PageTransTail(struct page *page) PageTransTail() argument
539 * PageBuddy() indicate that the page is free and in the buddy system
549 static inline int PageBuddy(struct page *page) PageBuddy() argument
551 return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; PageBuddy()
554 static inline void __SetPageBuddy(struct page *page) __SetPageBuddy() argument
556 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); __SetPageBuddy()
557 atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); __SetPageBuddy()
560 static inline void __ClearPageBuddy(struct page *page) __ClearPageBuddy() argument
562 VM_BUG_ON_PAGE(!PageBuddy(page), page); __ClearPageBuddy()
563 atomic_set(&page->_mapcount, -1); __ClearPageBuddy()
568 static inline int PageBalloon(struct page *page) PageBalloon() argument
570 return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; PageBalloon()
573 static inline void __SetPageBalloon(struct page *page) __SetPageBalloon() argument
575 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); __SetPageBalloon()
576 atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); __SetPageBalloon()
579 static inline void __ClearPageBalloon(struct page *page) __ClearPageBalloon() argument
581 VM_BUG_ON_PAGE(!PageBalloon(page), page); __ClearPageBalloon()
582 atomic_set(&page->_mapcount, -1); __ClearPageBalloon()
589 static inline int PageSlabPfmemalloc(struct page *page) PageSlabPfmemalloc() argument
591 VM_BUG_ON_PAGE(!PageSlab(page), page); PageSlabPfmemalloc()
592 return PageActive(page); PageSlabPfmemalloc()
595 static inline void SetPageSlabPfmemalloc(struct page *page) SetPageSlabPfmemalloc() argument
597 VM_BUG_ON_PAGE(!PageSlab(page), page); SetPageSlabPfmemalloc()
598 SetPageActive(page); SetPageSlabPfmemalloc()
601 static inline void __ClearPageSlabPfmemalloc(struct page *page) __ClearPageSlabPfmemalloc() argument
603 VM_BUG_ON_PAGE(!PageSlab(page), page); __ClearPageSlabPfmemalloc()
604 __ClearPageActive(page); __ClearPageSlabPfmemalloc()
607 static inline void ClearPageSlabPfmemalloc(struct page *page) ClearPageSlabPfmemalloc() argument
609 VM_BUG_ON_PAGE(!PageSlab(page), page); ClearPageSlabPfmemalloc()
610 ClearPageActive(page); ClearPageSlabPfmemalloc()
626 * Flags checked when a page is freed. Pages being freed should not have
638 * Flags checked when a page is prepped for return by the page allocator.
640 * there has been a kernel bug or struct page corruption.
642 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
643 * alloc-free cycle to prevent from reusing the page.
651 * page_has_private - Determine if page has private stuff
652 * @page: The page to be checked
654 * Determine if a page has private stuff, indicating that release routines
657 static inline int page_has_private(struct page *page) page_has_private() argument
659 return !!(page->flags & PAGE_FLAGS_PRIVATE); page_has_private()
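
For reference, roughly what one invocation of the flag-generating macros above expands to; PG_foo and the Foo accessors are hypothetical placeholders, the real flags being the PG_* values listed near the top of this file.

    /* PAGEFLAG(Foo, foo) == TESTPAGEFLAG + SETPAGEFLAG + CLEARPAGEFLAG */
    static inline int PageFoo(const struct page *page)
    { return test_bit(PG_foo, &page->flags); }

    static inline void SetPageFoo(struct page *page)
    { set_bit(PG_foo, &page->flags); }

    static inline void ClearPageFoo(struct page *page)
    { clear_bit(PG_foo, &page->flags); }
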
hugetlb_cgroup.h
22 * Minimum page order trackable by hugetlb cgroup.
29 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) hugetlb_cgroup_from_page() argument
31 VM_BUG_ON_PAGE(!PageHuge(page), page); hugetlb_cgroup_from_page()
33 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) hugetlb_cgroup_from_page()
35 return (struct hugetlb_cgroup *)page[2].lru.next; hugetlb_cgroup_from_page()
39 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) set_hugetlb_cgroup() argument
41 VM_BUG_ON_PAGE(!PageHuge(page), page); set_hugetlb_cgroup()
43 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) set_hugetlb_cgroup()
45 page[2].lru.next = (void *)h_cg; set_hugetlb_cgroup()
60 struct page *page);
62 struct page *page);
66 extern void hugetlb_cgroup_migrate(struct page *oldhpage,
67 struct page *newhpage);
70 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) hugetlb_cgroup_from_page() argument
76 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) set_hugetlb_cgroup() argument
96 struct page *page) hugetlb_cgroup_commit_charge()
102 hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) hugetlb_cgroup_uncharge_page() argument
118 static inline void hugetlb_cgroup_migrate(struct page *oldhpage, hugetlb_cgroup_migrate()
119 struct page *newhpage) hugetlb_cgroup_migrate()
94 hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page) hugetlb_cgroup_commit_charge() argument
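
A small sketch of the round trip the two inline accessors above implement; the helper name is invented, and the assumption that a non-zero return from set_hugetlb_cgroup() means "compound order too small to track" follows the order check visible in the snippet. The cgroup pointer is parked in page[2].lru.next of the compound page, which is why pages below HUGETLB_CGROUP_MIN_ORDER are simply not tracked.

    static void example_tag_hpage(struct page *hpage, struct hugetlb_cgroup *h_cg)
    {
        if (set_hugetlb_cgroup(hpage, h_cg))
            return;        /* compound order too small to be tracked */

        /* reads page[2].lru.next back, so this should always hold */
        WARN_ON(hugetlb_cgroup_from_page(hpage) != h_cg);
    }
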
highmem.h
14 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
20 static inline void flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
42 struct page *kmap_to_page(void *addr);
48 static inline struct page *kmap_to_page(void *addr) kmap_to_page()
56 static inline void *kmap(struct page *page) kmap() argument
59 return page_address(page); kmap()
62 static inline void kunmap(struct page *page) kunmap() argument
66 static inline void *kmap_atomic(struct page *page) kmap_atomic() argument
69 return page_address(page); kmap_atomic()
71 #define kmap_atomic_prot(page, prot) kmap_atomic(page)
121 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
125 BUILD_BUG_ON(__same_type((addr), struct page *)); \
132 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
134 void *addr = kmap_atomic(page);
135 clear_user_page(addr, vaddr, page);
142 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
144 * @vma: The VMA the page is to be allocated for
145 * @vaddr: The virtual address the page will be inserted into
147 * This function will allocate a page for a VMA but the caller is expected
148 * to specify via movableflags whether the page will be movable in the
155 static inline struct page *
160 struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
163 if (page)
164 clear_user_highpage(page, vaddr);
166 return page;
171 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
172 * @vma: The VMA the page is to be allocated for
173 * @vaddr: The virtual address the page will be inserted into
175 * This function will allocate a page for a VMA that the caller knows will
178 static inline struct page *
185 static inline void clear_highpage(struct page *page)
187 void *kaddr = kmap_atomic(page);
192 static inline void zero_user_segments(struct page *page,
196 void *kaddr = kmap_atomic(page);
207 flush_dcache_page(page);
210 static inline void zero_user_segment(struct page *page, zero_user_segment() argument
213 zero_user_segments(page, start, end, 0, 0); zero_user_segment()
216 static inline void zero_user(struct page *page, zero_user() argument
219 zero_user_segments(page, start, start + size, 0, 0); zero_user()
224 static inline void copy_user_highpage(struct page *to, struct page *from, copy_user_highpage()
238 static inline void copy_highpage(struct page *to, struct page *from) copy_highpage()
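
A minimal sketch of the kmap_atomic()/kunmap_atomic() pairing that clear_highpage() and zero_user_segments() above are built on; fill_page() and the memset pattern are purely illustrative.

    static void fill_page(struct page *page, int pattern)
    {
        void *kaddr = kmap_atomic(page);   /* temporary kernel mapping */

        memset(kaddr, pattern, PAGE_SIZE);
        flush_dcache_page(page);           /* keep the D-cache coherent */
        kunmap_atomic(kaddr);              /* takes the address, not the page */
    }
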
pagemap.h
19 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
83 * The page cache can be done in larger chunks than
84 * one page, because it allows for more efficient
95 #define page_cache_get(page) get_page(page)
96 #define page_cache_release(page) put_page(page)
97 void release_pages(struct page **pages, int nr, bool cold);
100 * speculatively take a reference to a page.
101 * If the page is free (_count == 0), then _count is untouched, and 0
105 * been used to lookup the page in the pagecache radix-tree (or page table):
111 * page has been finished with, no matter what it is subsequently allocated
118 * 1. find page in radix tree
120 * 3. check the page is still in pagecache (if no, goto 1)
125 * B. remove page from pagecache
126 * C. free the page
131 * subsequently, B will complete and 1 will find no page, causing the
134 * It is possible that between 1 and 2, the page is removed then the exact same
135 * page is inserted into the same position in pagecache. That's OK: the
140 * will find the page or it will not. Likewise, the old find_get_page could run
143 static inline int page_cache_get_speculative(struct page *page) page_cache_get_speculative() argument
156 * found a page in the radix tree here, we have pinned its refcount by page_cache_get_speculative()
160 VM_BUG_ON_PAGE(page_count(page) == 0, page); page_cache_get_speculative() local
161 atomic_inc(&page->_count); page_cache_get_speculative()
164 if (unlikely(!get_page_unless_zero(page))) { page_cache_get_speculative()
166 * Either the page has been freed, or will be freed. page_cache_get_speculative()
173 VM_BUG_ON_PAGE(PageTail(page), page); page_cache_get_speculative() local
181 static inline int page_cache_add_speculative(struct page *page, int count) page_cache_add_speculative() argument
189 VM_BUG_ON_PAGE(page_count(page) == 0, page); page_cache_add_speculative() local
190 atomic_add(count, &page->_count); page_cache_add_speculative()
193 if (unlikely(!atomic_add_unless(&page->_count, count, 0))) page_cache_add_speculative()
196 VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); page_cache_add_speculative() local
201 static inline int page_freeze_refs(struct page *page, int count) page_freeze_refs() argument
203 return likely(atomic_cmpxchg(&page->_count, count, 0) == count); page_freeze_refs()
206 static inline void page_unfreeze_refs(struct page *page, int count) page_unfreeze_refs() argument
208 VM_BUG_ON_PAGE(page_count(page) != 0, page); page_unfreeze_refs() local
211 atomic_set(&page->_count, count); page_unfreeze_refs()
215 extern struct page *__page_cache_alloc(gfp_t gfp);
217 static inline struct page *__page_cache_alloc(gfp_t gfp) __page_cache_alloc()
223 static inline struct page *page_cache_alloc(struct address_space *x) page_cache_alloc()
228 static inline struct page *page_cache_alloc_cold(struct address_space *x) page_cache_alloc_cold()
233 static inline struct page *page_cache_alloc_readahead(struct address_space *x) page_cache_alloc_readahead()
239 typedef int filler_t(void *, struct page *);
253 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
257 * find_get_page - find and get a page reference
259 * @offset: the page index
261 * Looks up the page cache slot at @mapping & @offset. If there is a
262 * page cache page, it is returned with an increased refcount.
266 static inline struct page *find_get_page(struct address_space *mapping, find_get_page()
272 static inline struct page *find_get_page_flags(struct address_space *mapping, find_get_page_flags()
279 * find_lock_page - locate, pin and lock a pagecache page
280 * pagecache_get_page - find and get a page reference
282 * @offset: the page index
284 * Looks up the page cache slot at @mapping & @offset. If there is a
285 * page cache page, it is returned locked and with an increased
292 static inline struct page *find_lock_page(struct address_space *mapping, find_lock_page()
299 * find_or_create_page - locate or add a pagecache page
300 * @mapping: the page's address_space
301 * @index: the page's index into the mapping
302 * @gfp_mask: page allocation mode
304 * Looks up the page cache slot at @mapping & @offset. If there is a
305 * page cache page, it is returned locked and with an increased
308 * If the page is not present, a new page is allocated using @gfp_mask
309 * and added to the page cache and the VM's LRU list. The page is
317 static inline struct page *find_or_create_page(struct address_space *mapping, find_or_create_page()
326 * grab_cache_page_nowait - returns locked page at given index in given cache
328 * @index: the page index
330 * Same as grab_cache_page(), but do not wait if the page is unavailable.
332 * be regenerated if the page couldn't be grabbed. This routine should
333 * be safe to call while holding the lock for another page.
335 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
336 * and deadlock against the caller's locked page.
338 static inline struct page *grab_cache_page_nowait(struct address_space *mapping, grab_cache_page_nowait()
346 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
347 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
349 unsigned int nr_entries, struct page **entries,
352 unsigned int nr_pages, struct page **pages);
354 unsigned int nr_pages, struct page **pages);
356 int tag, unsigned int nr_pages, struct page **pages);
358 struct page *grab_cache_page_write_begin(struct address_space *mapping,
362 * Returns locked page at given index in given cache, creating it if needed.
364 static inline struct page *grab_cache_page(struct address_space *mapping, grab_cache_page()
370 extern struct page * read_cache_page(struct address_space *mapping,
372 extern struct page * read_cache_page_gfp(struct address_space *mapping,
377 static inline struct page *read_mapping_page(struct address_space *mapping, read_mapping_page()
388 static inline pgoff_t page_to_pgoff(struct page *page) page_to_pgoff() argument
390 if (unlikely(PageHeadHuge(page))) page_to_pgoff()
391 return page->index << compound_order(page); page_to_pgoff()
393 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); page_to_pgoff()
397 * Return byte-offset into filesystem object for page.
399 static inline loff_t page_offset(struct page *page) page_offset() argument
401 return ((loff_t)page->index) << PAGE_CACHE_SHIFT; page_offset()
404 static inline loff_t page_file_offset(struct page *page) page_file_offset() argument
406 return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; page_file_offset()
423 extern void __lock_page(struct page *page);
424 extern int __lock_page_killable(struct page *page);
425 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
427 extern void unlock_page(struct page *page);
429 static inline void __set_page_locked(struct page *page) __set_page_locked() argument
431 __set_bit(PG_locked, &page->flags); __set_page_locked()
434 static inline void __clear_page_locked(struct page *page) __clear_page_locked() argument
436 __clear_bit(PG_locked, &page->flags); __clear_page_locked()
439 static inline int trylock_page(struct page *page) trylock_page() argument
441 return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); trylock_page()
445 * lock_page may only be called if we have the page's inode pinned.
447 static inline void lock_page(struct page *page) lock_page() argument
450 if (!trylock_page(page)) lock_page()
451 __lock_page(page); lock_page()
456 * signals. It returns 0 if it locked the page and -EINTR if it was
459 static inline int lock_page_killable(struct page *page) lock_page_killable() argument
462 if (!trylock_page(page)) lock_page_killable()
463 return __lock_page_killable(page); lock_page_killable()
468 * lock_page_or_retry - Lock the page, unless this would block and the
474 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, lock_page_or_retry() argument
478 return trylock_page(page) || __lock_page_or_retry(page, mm, flags); lock_page_or_retry()
485 extern void wait_on_page_bit(struct page *page, int bit_nr);
487 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
488 extern int wait_on_page_bit_killable_timeout(struct page *page,
491 static inline int wait_on_page_locked_killable(struct page *page) wait_on_page_locked_killable() argument
493 if (PageLocked(page)) wait_on_page_locked_killable()
494 return wait_on_page_bit_killable(page, PG_locked); wait_on_page_locked_killable()
498 extern wait_queue_head_t *page_waitqueue(struct page *page); wake_up_page()
499 static inline void wake_up_page(struct page *page, int bit) wake_up_page() argument
501 __wake_up_bit(page_waitqueue(page), &page->flags, bit); wake_up_page()
505 * Wait for a page to be unlocked.
507 * This must be called with the caller "holding" the page,
508 * ie with increased "page->count" so that the page won't
511 static inline void wait_on_page_locked(struct page *page) wait_on_page_locked() argument
513 if (PageLocked(page)) wait_on_page_locked()
514 wait_on_page_bit(page, PG_locked); wait_on_page_locked()
518 * Wait for a page to complete writeback
520 static inline void wait_on_page_writeback(struct page *page) wait_on_page_writeback() argument
522 if (PageWriteback(page)) wait_on_page_writeback()
523 wait_on_page_bit(page, PG_writeback); wait_on_page_writeback()
526 extern void end_page_writeback(struct page *page);
527 void wait_for_stable_page(struct page *page);
529 void page_endio(struct page *page, int rw, int err);
532 * Add an arbitrary waiter to a page's wait queue
534 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
537 * Fault a userspace page into pagetables. Return non-zero on a fault.
558 * If the page was already mapped, this will get a cache miss fault_in_pages_writeable()
614 /* Check whether the range spilled into the next page. */ fault_in_multipages_writeable()
639 /* Check whether the range spilled into the next page. */ fault_in_multipages_readable()
649 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
651 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
653 extern void delete_from_page_cache(struct page *page);
654 extern void __delete_from_page_cache(struct page *page, void *shadow);
655 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
659 * the page is new, so we can just run __set_page_locked() against it.
661 static inline int add_to_page_cache(struct page *page, add_to_page_cache() argument
666 __set_page_locked(page); add_to_page_cache()
667 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); add_to_page_cache()
669 __clear_page_locked(page); add_to_page_cache()
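
A simplified sketch of the lookup protocol that the page_cache_get_speculative() comment above describes (steps 1-3): find the slot under RCU, take a speculative reference, then confirm the slot still points at the same page, retrying otherwise. The real find_get_entry() in mm/filemap.c additionally distinguishes shadow and retry radix-tree entries, which this sketch simply treats as a miss.

    static struct page *lookup_get_page(struct address_space *mapping,
                                        pgoff_t offset)
    {
        struct page *page;
        void **slot;

        rcu_read_lock();
    repeat:
        page = NULL;
        slot = radix_tree_lookup_slot(&mapping->page_tree, offset);
        if (slot) {
            page = radix_tree_deref_slot(slot);        /* step 1 */
            if (!page)
                goto out;
            if (radix_tree_exception(page)) {
                page = NULL;    /* shadow entry; real code also handles
                                 * the deref_retry marker by retrying */
                goto out;
            }
            if (!page_cache_get_speculative(page))     /* step 2 */
                goto repeat;
            /* step 3: was the page removed or replaced meanwhile? */
            if (unlikely(page != *slot)) {
                page_cache_release(page);
                goto repeat;
            }
        }
    out:
        rcu_read_unlock();
        return page;
    }
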
page-isolation.h
9 static inline bool is_migrate_isolate_page(struct page *page) is_migrate_isolate_page() argument
11 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; is_migrate_isolate_page()
22 static inline bool is_migrate_isolate_page(struct page *page) is_migrate_isolate_page() argument
32 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
34 void set_pageblock_migratetype(struct page *page, int migratetype);
35 int move_freepages_block(struct zone *zone, struct page *page,
38 struct page *start_page, struct page *end_page,
71 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
72 void unset_migratetype_isolate(struct page *page, unsigned migratetype);
73 struct page *alloc_migrate_target(struct page *page, unsigned long private,
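
A hedged sketch of how the two isolation primitives above pair up; real users (memory hotplug, CMA) drive this through start_isolate_page_range()/undo_isolate_page_range() over whole PFN ranges, and both the helper name and the migratetype restored here are assumptions.

    static int isolate_block_and_release(struct page *page)
    {
        int ret;

        ret = set_migratetype_isolate(page, false); /* don't skip hwpoison */
        if (ret)
            return ret;        /* pageblock holds unmovable pages */

        /* ... drain or migrate the pages of this pageblock here ... */

        unset_migratetype_isolate(page, MIGRATE_MOVABLE);
        return 0;
    }
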
ksm.h
38 static inline struct stable_node *page_stable_node(struct page *page) page_stable_node() argument
40 return PageKsm(page) ? page_rmapping(page) : NULL; page_stable_node()
43 static inline void set_page_stable_node(struct page *page, set_page_stable_node() argument
46 page->mapping = (void *)stable_node + set_page_stable_node()
51 * When do_swap_page() first faults in from swap what used to be a KSM page,
55 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
59 * but what if the vma was unmerged while the page was swapped out?
61 struct page *ksm_might_need_to_copy(struct page *page,
64 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
65 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
85 static inline struct page *ksm_might_need_to_copy(struct page *page, ksm_might_need_to_copy() argument
88 return page; ksm_might_need_to_copy()
91 static inline int page_referenced_ksm(struct page *page, page_referenced_ksm() argument
97 static inline int rmap_walk_ksm(struct page *page, rmap_walk_ksm() argument
103 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) ksm_migrate_page()
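
A sketch of the do_swap_page()-style call site the comment above describes; the wrapper name is invented, and the real caller keeps the original swapcache page around and drops its reference later, which this glosses over.

    static struct page *swapin_fixup_ksm(struct page *page,
                                         struct vm_area_struct *vma,
                                         unsigned long address)
    {
        struct page *new_page;

        new_page = ksm_might_need_to_copy(page, vma, address);
        if (!new_page)
            return NULL;       /* a copy was needed but allocation failed */

        /* if new_page != page, a private copy was made and it, not the
         * old swapcache page, is what gets mapped into this vma */
        return new_page;
    }
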
pageblock-flags.h
46 /* Huge page sizes are variable */
66 struct page;
68 unsigned long get_pfnblock_flags_mask(struct page *page,
73 void set_pfnblock_flags_mask(struct page *page,
80 #define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
81 get_pfnblock_flags_mask(page, page_to_pfn(page), \
84 #define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
85 set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
90 #define get_pageblock_skip(page) \
91 get_pageblock_flags_group(page, PB_migrate_skip, \
93 #define clear_pageblock_skip(page) \
94 set_pageblock_flags_group(page, 0, PB_migrate_skip, \
96 #define set_pageblock_skip(page) \
97 set_pageblock_flags_group(page, 1, PB_migrate_skip, \
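
A small sketch of how a compaction-style scanner might consult the per-pageblock "skip" bit through the wrapper macros above; the function names are invented.

    static bool worth_scanning_block(struct page *first_page_in_block)
    {
        /* skip blocks a previous pass already marked as useless */
        return !get_pageblock_skip(first_page_in_block);
    }

    static void give_up_on_block(struct page *first_page_in_block)
    {
        set_pageblock_skip(first_page_in_block);
    }
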
rmap.h
15 * an anonymous page pointing to this anon_vma needs to be unmapped:
19 * in mprotect), the mapping field of an anonymous page cannot point
24 * the anon_vma object itself: we're guaranteed no page can be
32 * guarantee that the vma of page tables will exist for
91 TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
145 struct anon_vma *page_get_anon_vma(struct page *page);
148 * rmap interfaces called when adding or removing pte of page
150 void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
151 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
152 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
154 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
155 void page_add_file_rmap(struct page *);
156 void page_remove_rmap(struct page *);
158 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
160 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
163 static inline void page_dup_rmap(struct page *page) page_dup_rmap() argument
165 atomic_inc(&page->_mapcount); page_dup_rmap()
171 int page_referenced(struct page *, int is_locked,
176 int try_to_unmap(struct page *, enum ttu_flags flags);
179 * Used by uprobes to replace a userspace page safely
181 pte_t *__page_check_address(struct page *, struct mm_struct *,
184 static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, page_check_address() argument
190 __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, page_check_address()
196 * Used by swapoff to help locate where page is expected in vma.
198 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
206 int page_mkclean(struct page *);
210 * the page mlocked.
212 int try_to_munlock(struct page *);
217 struct anon_vma *page_lock_anon_vma_read(struct page *page);
219 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
225 * rmap_one: executed on each vma where page is mapped
232 int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
234 int (*done)(struct page *page);
235 struct anon_vma *(*anon_lock)(struct page *page);
239 int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
247 static inline int page_referenced(struct page *page, int is_locked, page_referenced() argument
255 #define try_to_unmap(page, refs) SWAP_FAIL
257 static inline int page_mkclean(struct page *page) page_mkclean() argument
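
A sketch of filling a struct rmap_walk_control and handing it to rmap_walk(), in the style of page_referenced()/try_to_unmap() in mm/rmap.c; the callback bodies, the counter passed through ->arg and the function names are illustrative.

    /* Hypothetical reverse-map walk: count how many vmas map @page.
     * Returning SWAP_AGAIN tells rmap_walk() to keep iterating. */
    static int count_one_mapping(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address, void *arg)
    {
        int *count = arg;

        (*count)++;
        return SWAP_AGAIN;             /* keep walking the other vmas */
    }

    static int count_mappings(struct page *page)
    {
        int count = 0;
        struct rmap_walk_control rwc = {
            .rmap_one = count_one_mapping,
            .arg = &count,
        };

        rmap_walk(page, &rwc);
        return count;
    }
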
quicklist.h
17 void *page; member in struct:quicklist
31 * the first cacheline of the page itself. There is minimal overhead involved.
39 p = q->page; quicklist_alloc()
41 q->page = p[0]; quicklist_alloc()
56 struct page *page) __quicklist_free()
61 *(void **)p = q->page; __quicklist_free()
62 q->page = p; __quicklist_free()
73 struct page *page) quicklist_free_page()
75 __quicklist_free(nr, dtor, page_address(page), page); quicklist_free_page()
55 __quicklist_free(int nr, void (*dtor)(void *), void *p, struct page *page) __quicklist_free() argument
72 quicklist_free_page(int nr, void (*dtor)(void *), struct page *page) quicklist_free_page() argument
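
The allocator above threads its free list through the free pages themselves: quicklist->page points at the first free page's address, and the first word of each free page stores the next one (p[0] in quicklist_alloc(), *(void **)p in __quicklist_free()). Below is a minimal sketch of that intrusive free list, with plain globals standing in for the per-cpu struct quicklist.

    static void *freelist_head;            /* plays the role of q->page */

    static void freelist_push(void *p)
    {
        *(void **)p = freelist_head;       /* what __quicklist_free() does */
        freelist_head = p;
    }

    static void *freelist_pop(void)
    {
        void **p = freelist_head;          /* what quicklist_alloc() does */

        if (p)
            freelist_head = p[0];          /* next free object */
        return p;
    }
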
frontswap.h
10 int (*store)(unsigned, pgoff_t, struct page *);
11 int (*load)(unsigned, pgoff_t, struct page *);
27 extern int __frontswap_store(struct page *page);
28 extern int __frontswap_load(struct page *page);
71 static inline int frontswap_store(struct page *page) frontswap_store() argument
76 ret = __frontswap_store(page); frontswap_store()
80 static inline int frontswap_load(struct page *page) frontswap_load() argument
85 ret = __frontswap_load(page); frontswap_load()
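
A hedged skeleton of a backend filling in the two hooks whose prototypes appear above; the hook bodies are placeholders (a real backend such as zswap copies the page to and from its own store), and registration through frontswap_register_ops() at init time is assumed.

    static int my_store(unsigned type, pgoff_t offset, struct page *page)
    {
        /* copy the page's contents into the backend keyed by (type, offset);
         * return 0 so swap I/O is skipped, nonzero to fall back to swap */
        return -1;      /* placeholder: always fall back to real swap */
    }

    static int my_load(unsigned type, pgoff_t offset, struct page *page)
    {
        /* fill @page from the backend; nonzero means "not present" */
        return -1;
    }

    static struct frontswap_ops my_frontswap_ops = {
        .store = my_store,
        .load  = my_load,
        /* real backends also provide .init and the .invalidate_* hooks */
    };
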
mm.h
52 #include <asm/page.h> set_max_mapnr()
62 * a zero page mapping on a read fault. set_max_mapnr()
65 * related to the physical page in case of virtualization. set_max_mapnr()
83 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) set_max_mapnr()
85 /* to align the pointer to the (next) page boundary */ set_max_mapnr()
126 #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ set_max_mapnr()
151 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ set_max_mapnr()
203 * low four bits) to a page protection mask.. set_max_mapnr()
224 pgoff_t pgoff; /* Logical page offset based on vma */ set_max_mapnr()
227 struct page *cow_page; /* Handler may choose to COW */ set_max_mapnr()
228 struct page *page; /* ->fault handlers should return a set_max_mapnr() member in struct:vm_fault
229 * page here, unless VM_FAULT_NOPAGE set_max_mapnr()
242 * to the functions called when a no-page or a wp-page exception occurs.
250 /* notification that a previously read-only page is about to become
293 * page for @addr. This is useful if the default behavior
294 * (using pte_page()) would not find the correct page.
296 struct page *(*find_special_page)(struct vm_area_struct *vma,
303 #define page_private(page) ((page)->private)
304 #define set_page_private(page, v) ((page)->private = (v))
306 /* It's valid only if the page is free path or free_list */ set_freepage_migratetype()
307 static inline void set_freepage_migratetype(struct page *page, int migratetype) set_freepage_migratetype() argument
309 page->index = migratetype; set_freepage_migratetype()
312 /* It's valid only if the page is free path or free_list */ get_freepage_migratetype()
313 static inline int get_freepage_migratetype(struct page *page) get_freepage_migratetype() argument
315 return page->index; get_freepage_migratetype()
319 * FIXME: take this include out, include page-flags.h in
322 #include <linux/page-flags.h>
326 * Methods to modify the page usage count.
328 * What counts for a page usage:
329 * - cache mapping (page->mapping)
330 * - private data (page->private)
331 * - page mapped in a task's page tables, each mapping
334 * Also, many kernel routines increase the page count before a critical
335 * routine so they can be sure the page doesn't go away from under them.
339 * Drop a ref, return true if the refcount fell to zero (the page has no users)
341 static inline int put_page_testzero(struct page *page) put_page_testzero() argument
343 VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); put_page_testzero()
344 return atomic_dec_and_test(&page->_count); put_page_testzero()
348 * Try to grab a ref unless the page has a refcount of zero, return false if
353 static inline int get_page_unless_zero(struct page *page) get_page_unless_zero() argument
355 return atomic_inc_not_zero(&page->_count); get_page_unless_zero()
359 * Try to drop a ref unless the page has a refcount of one, return false if
365 static inline int put_page_unless_one(struct page *page) put_page_unless_one() argument
367 return atomic_add_unless(&page->_count, -1, 1); put_page_unless_one()
374 struct page *vmalloc_to_page(const void *addr);
404 static inline void compound_lock(struct page *page) compound_lock() argument
407 VM_BUG_ON_PAGE(PageSlab(page), page); compound_lock() local
408 bit_spin_lock(PG_compound_lock, &page->flags); compound_lock()
412 static inline void compound_unlock(struct page *page) compound_unlock() argument
415 VM_BUG_ON_PAGE(PageSlab(page), page); compound_unlock() local
416 bit_spin_unlock(PG_compound_lock, &page->flags); compound_unlock()
420 static inline unsigned long compound_lock_irqsave(struct page *page) compound_lock_irqsave() argument
425 compound_lock(page); compound_lock_irqsave()
430 static inline void compound_unlock_irqrestore(struct page *page, compound_unlock_irqrestore() argument
434 compound_unlock(page); compound_unlock_irqrestore()
439 static inline struct page *compound_head_by_tail(struct page *tail) compound_head_by_tail()
441 struct page *head = tail->first_page; compound_head_by_tail()
444 * page->first_page may be a dangling pointer to an old compound_head_by_tail()
445 * compound page, so recheck that it is still a tail compound_head_by_tail()
446 * page before returning. compound_head_by_tail()
455 * Since either compound page could be dismantled asynchronously in THP
456 * or we access asynchronously arbitrary positioned struct page, there
460 static inline struct page *compound_head(struct page *page) compound_head() argument
462 if (unlikely(PageTail(page))) compound_head()
463 return compound_head_by_tail(page); compound_head()
464 return page; compound_head()
468 * If we access compound page synchronously such as access to
469 * allocated page, there is no need to handle tail flag race, so we can
472 static inline struct page *compound_head_fast(struct page *page) compound_head_fast() argument
474 if (unlikely(PageTail(page))) compound_head_fast()
475 return page->first_page; compound_head_fast()
476 return page; compound_head_fast()
480 * The atomic page->_mapcount, starts from -1: so that transitions
484 static inline void page_mapcount_reset(struct page *page) page_mapcount_reset() argument
486 atomic_set(&(page)->_mapcount, -1); page_mapcount_reset()
489 static inline int page_mapcount(struct page *page) page_mapcount() argument
491 VM_BUG_ON_PAGE(PageSlab(page), page); page_mapcount() local
492 return atomic_read(&page->_mapcount) + 1; page_mapcount()
495 static inline int page_count(struct page *page) page_count() argument
497 return atomic_read(&compound_head(page)->_count); page_count()
500 static inline bool __compound_tail_refcounted(struct page *page) __compound_tail_refcounted() argument
502 return !PageSlab(page) && !PageHeadHuge(page); __compound_tail_refcounted()
506 * This takes a head page as parameter and tells if the
507 * tail page reference counting can be skipped.
510 * any given page where they return true here, until all tail pins
513 static inline bool compound_tail_refcounted(struct page *page) compound_tail_refcounted() argument
515 VM_BUG_ON_PAGE(!PageHead(page), page); compound_tail_refcounted()
516 return __compound_tail_refcounted(page); compound_tail_refcounted()
519 static inline void get_huge_page_tail(struct page *page) get_huge_page_tail() argument
524 VM_BUG_ON_PAGE(!PageTail(page), page); get_huge_page_tail()
525 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); get_huge_page_tail()
526 VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); get_huge_page_tail()
527 if (compound_tail_refcounted(page->first_page)) get_huge_page_tail()
528 atomic_inc(&page->_mapcount); get_huge_page_tail()
531 extern bool __get_page_tail(struct page *page);
533 static inline void get_page(struct page *page) get_page() argument
535 if (unlikely(PageTail(page))) get_page()
536 if (likely(__get_page_tail(page))) get_page()
539 * Getting a normal page or the head of a compound page get_page()
540 * requires to already have an elevated page->_count. get_page()
542 VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); get_page()
543 atomic_inc(&page->_count); get_page()
546 static inline struct page *virt_to_head_page(const void *x) virt_to_head_page()
548 struct page *page = virt_to_page(x); virt_to_head_page() local
553 * already allocated page and this page won't be freed until virt_to_head_page()
556 return compound_head_fast(page); virt_to_head_page()
560 * Setup the page count before being freed into the page allocator for
563 static inline void init_page_count(struct page *page) init_page_count() argument
565 atomic_set(&page->_count, 1); init_page_count()
568 void put_page(struct page *page);
571 void split_page(struct page *page, unsigned int order);
572 int split_free_page(struct page *page);
577 * These are _only_ valid on the head of a PG_compound page.
580 static inline void set_compound_page_dtor(struct page *page, set_compound_page_dtor() argument
583 page[1].compound_dtor = dtor; set_compound_page_dtor()
586 static inline compound_page_dtor *get_compound_page_dtor(struct page *page) get_compound_page_dtor() argument
588 return page[1].compound_dtor; get_compound_page_dtor()
591 static inline unsigned int compound_order(struct page *page) compound_order() argument
593 if (!PageHead(page)) compound_order()
595 return page[1].compound_order; compound_order()
598 static inline void set_compound_order(struct page *page, unsigned long order) set_compound_order() argument
600 page[1].compound_order = order; set_compound_order()
618 struct page *page, pte_t *pte, bool write, bool anon);
622 * Multiple processes may "see" the same page. E.g. for untouched
623 * mappings of /dev/null, all processes see the same page full of
627 * For the non-reserved pages, page_count(page) denotes a reference count.
628 * page_count() == 0 means the page is free. page->lru is then used for
630 * page_count() > 0 means the page has been allocated.
634 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
638 * A page may be used by anyone else who does a __get_free_page().
640 * be used through the normal accessor functions. The top bits of page->flags
641 * and page->virtual store page management information, but all other fields
643 * page is the responsibility of the one who allocated it, and those who have
650 * A pagecache page contains an opaque `private' member, which belongs to the
651 * page's address_space. Usually, this is the address of a circular list of
652 * the page's disk buffers. PG_private must be set to tell the VM to call
655 * A page may belong to an inode's memory mapping. In this case, page->mapping
656 * is the pointer to the inode, and page->index is the file offset of the page,
661 * case PG_swapcache is set, and page->private is an offset into the swapcache.
664 * reference to the page. Setting PG_private should also increment the
665 * refcount. The each user mapping also has a reference to the page.
725 static inline enum zone_type page_zonenum(const struct page *page) page_zonenum() argument
727 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; page_zonenum()
738 * node id available in page flags.
742 static inline int page_zone_id(struct page *page) page_zone_id() argument
744 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; page_zone_id()
757 extern int page_to_nid(const struct page *page);
759 static inline int page_to_nid(const struct page *page) page_to_nid() argument
761 return (page->flags >> NODES_PGSHIFT) & NODES_MASK; page_to_nid()
803 static inline int page_cpupid_xchg_last(struct page *page, int cpupid) page_cpupid_xchg_last() argument
805 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); page_cpupid_xchg_last()
808 static inline int page_cpupid_last(struct page *page) page_cpupid_last() argument
810 return page->_last_cpupid; page_cpupid_last()
812 static inline void page_cpupid_reset_last(struct page *page) page_cpupid_reset_last() argument
814 page->_last_cpupid = -1 & LAST_CPUPID_MASK; page_cpupid_reset_last()
817 static inline int page_cpupid_last(struct page *page) page_cpupid_last() argument
819 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; page_cpupid_last()
822 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
824 static inline void page_cpupid_reset_last(struct page *page) page_cpupid_reset_last() argument
828 page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); page_cpupid_reset_last()
829 page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; page_cpupid_reset_last()
833 static inline int page_cpupid_xchg_last(struct page *page, int cpupid) page_cpupid_xchg_last() argument
835 return page_to_nid(page); /* XXX */ page_cpupid_xchg_last()
838 static inline int page_cpupid_last(struct page *page) page_cpupid_last() argument
840 return page_to_nid(page); /* XXX */ page_cpupid_last()
868 static inline void page_cpupid_reset_last(struct page *page) page_cpupid_reset_last() argument
878 static inline struct zone *page_zone(const struct page *page) page_zone() argument
880 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; page_zone()
884 static inline void set_page_section(struct page *page, unsigned long section) set_page_section() argument
886 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); set_page_section()
887 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; set_page_section()
890 static inline unsigned long page_to_section(const struct page *page) page_to_section() argument
892 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; page_to_section()
896 static inline void set_page_zone(struct page *page, enum zone_type zone) set_page_zone() argument
898 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); set_page_zone()
899 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; set_page_zone()
902 static inline void set_page_node(struct page *page, unsigned long node) set_page_node() argument
904 page->flags &= ~(NODES_MASK << NODES_PGSHIFT); set_page_node()
905 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; set_page_node()
908 static inline void set_page_links(struct page *page, enum zone_type zone, set_page_links() argument
911 set_page_zone(page, zone); set_page_links()
912 set_page_node(page, node); set_page_links()
914 set_page_section(page, pfn_to_section_nr(pfn)); set_page_links()
923 static __always_inline void *lowmem_page_address(const struct page *page) lowmem_page_address() argument
925 return __va(PFN_PHYS(page_to_pfn(page))); lowmem_page_address()
933 static inline void *page_address(const struct page *page) page_address() argument
935 return page->virtual; page_address()
937 static inline void set_page_address(struct page *page, void *address) set_page_address() argument
939 page->virtual = address; set_page_address()
945 void *page_address(const struct page *page);
946 void set_page_address(struct page *page, void *virtual);
951 #define page_address(page) lowmem_page_address(page)
952 #define set_page_address(page, address) do { } while(0)
956 extern void *page_rmapping(struct page *page);
957 extern struct anon_vma *page_anon_vma(struct page *page);
958 extern struct address_space *page_mapping(struct page *page);
960 extern struct address_space *__page_file_mapping(struct page *);
963 struct address_space *page_file_mapping(struct page *page) page_file_mapping() argument
965 if (unlikely(PageSwapCache(page))) page_file_mapping()
966 return __page_file_mapping(page); page_file_mapping()
968 return page->mapping; page_file_mapping()
972 * Return the pagecache index of the passed page. Regular pagecache pages
975 static inline pgoff_t page_index(struct page *page) page_index() argument
977 if (unlikely(PageSwapCache(page))) page_index()
978 return page_private(page); page_index()
979 return page->index; page_index()
982 extern pgoff_t __page_file_index(struct page *page);
985 * Return the file index of the page. Regular pagecache pages use ->index
988 static inline pgoff_t page_file_index(struct page *page) page_file_index() argument
990 if (unlikely(PageSwapCache(page))) page_file_index()
991 return __page_file_index(page); page_file_index()
993 return page->index; page_file_index()
997 * Return true if this page is mapped into pagetables.
999 static inline int page_mapped(struct page *page) page_mapped() argument
1001 return atomic_read(&(page)->_mapcount) >= 0; page_mapped()
1005 * Return true only if the page has been allocated with
1009 static inline bool page_is_pfmemalloc(struct page *page) page_is_pfmemalloc() argument
1013 * a pfmemalloc page. page_is_pfmemalloc()
1015 return page->index == -1UL; page_is_pfmemalloc()
1019 * Only to be called by the page allocator on a freshly allocated
1020 * page.
1022 static inline void set_page_pfmemalloc(struct page *page) set_page_pfmemalloc() argument
1024 page->index = -1UL; set_page_pfmemalloc()
1027 static inline void clear_page_pfmemalloc(struct page *page) clear_page_pfmemalloc() argument
1029 page->index = 0; clear_page_pfmemalloc()
1044 #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
1045 #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
1048 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
1049 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
1051 #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
1059 /* Encode hstate index for a hwpoisoned large page */
1097 struct address_space *check_mapping; /* Check page->mapping if set */
1098 pgoff_t first_index; /* Lowest page->index to unmap */
1099 pgoff_t last_index; /* Highest page->index to unmap */
1102 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1123 * value means "do page table walk over the current vma,"
1124 * and a negative one means "abort current page table walk
1126 * @mm: mm_struct representing the target process of page table walk
1175 int truncate_inode_page(struct address_space *mapping, struct page *page);
1176 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1177 int invalidate_inode_page(struct page *page);
1209 unsigned int foll_flags, struct page **pages,
1213 int write, int force, struct page **pages,
1217 int write, int force, struct page **pages,
1221 int write, int force, struct page **pages,
1225 int write, int force, struct page **pages);
1227 struct page **pages);
1230 struct page **pages);
1231 int get_kernel_page(unsigned long start, int write, struct page **pages);
1232 struct page *get_dump_page(unsigned long addr);
1234 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1235 extern void do_invalidatepage(struct page *page, unsigned int offset,
1238 int __set_page_dirty_nobuffers(struct page *page);
1239 int __set_page_dirty_no_writeback(struct page *page);
1241 struct page *page);
1242 void account_page_dirtied(struct page *page, struct address_space *mapping);
1243 void account_page_cleaned(struct page *page, struct address_space *mapping);
1244 int set_page_dirty(struct page *page);
1245 int set_page_dirty_lock(struct page *page);
1246 int clear_page_dirty_for_io(struct page *page);
1296 struct page **pages);
1470 extern bool ptlock_alloc(struct page *page);
1471 extern void ptlock_free(struct page *page);
1473 static inline spinlock_t *ptlock_ptr(struct page *page) ptlock_ptr() argument
1475 return page->ptl; ptlock_ptr()
1482 static inline bool ptlock_alloc(struct page *page) ptlock_alloc() argument
1487 static inline void ptlock_free(struct page *page) ptlock_free() argument
1491 static inline spinlock_t *ptlock_ptr(struct page *page) ptlock_ptr() argument
1493 return &page->ptl; ptlock_ptr()
1502 static inline bool ptlock_init(struct page *page) ptlock_init() argument
1505 * prep_new_page() initializes page->private (and therefore page->ptl) ptlock_init()
1508 * It can happen if an arch tries to use slab for page table allocation: ptlock_init()
1509 * slab code uses page->slab_cache and page->first_page (for tail ptlock_init()
1510 * pages), which share storage with page->ptl. ptlock_init()
1512 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); ptlock_init()
1513 if (!ptlock_alloc(page)) ptlock_init()
1515 spin_lock_init(ptlock_ptr(page)); ptlock_init()
1519 /* Reset page->mapping so free_pages_check won't complain. */ pte_lock_deinit()
1520 static inline void pte_lock_deinit(struct page *page) pte_lock_deinit() argument
1522 page->mapping = NULL; pte_lock_deinit()
1523 ptlock_free(page); pte_lock_deinit()
1535 static inline bool ptlock_init(struct page *page) { return true; } pte_lock_deinit() argument
1536 static inline void pte_lock_deinit(struct page *page) {} pte_lock_deinit() argument
1545 static inline bool pgtable_page_ctor(struct page *page) pgtable_page_ctor() argument
1547 inc_zone_page_state(page, NR_PAGETABLE); pgtable_page_ctor()
1548 return ptlock_init(page); pgtable_page_ctor()
1551 static inline void pgtable_page_dtor(struct page *page) pgtable_page_dtor() argument
1553 pte_lock_deinit(page); pgtable_page_dtor()
1554 dec_zone_page_state(page, NR_PAGETABLE); pgtable_page_dtor()
1587 static struct page *pmd_to_page(pmd_t *pmd) pmd_to_page()
1598 static inline bool pgtable_pmd_page_ctor(struct page *page) pgtable_pmd_page_ctor() argument
1601 page->pmd_huge_pte = NULL; pgtable_pmd_page_ctor()
1603 return ptlock_init(page); pgtable_pmd_page_ctor()
1606 static inline void pgtable_pmd_page_dtor(struct page *page) pgtable_pmd_page_dtor() argument
1609 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); pgtable_pmd_page_dtor()
1611 ptlock_free(page); pgtable_pmd_page_dtor()
1623 static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } pgtable_pmd_page_dtor() argument
1624 static inline void pgtable_pmd_page_dtor(struct page *page) {} pgtable_pmd_page_dtor() argument
1653 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1656 extern void free_highmem_page(struct page *page);
1659 extern void adjust_managed_page_count(struct page *page, long count);
1662 /* Free the reserved page into the buddy system, so it gets managed. */ __free_reserved_page()
1663 static inline void __free_reserved_page(struct page *page) __free_reserved_page() argument
1665 ClearPageReserved(page); __free_reserved_page()
1666 init_page_count(page); __free_reserved_page()
1667 __free_page(page); __free_reserved_page()
1670 static inline void free_reserved_page(struct page *page) free_reserved_page() argument
1672 __free_reserved_page(page); free_reserved_page()
1673 adjust_managed_page_count(page, 1); free_reserved_page()
1676 static inline void mark_page_reserved(struct page *page) mark_page_reserved() argument
1678 SetPageReserved(page); mark_page_reserved()
1679 adjust_managed_page_count(page, -1); mark_page_reserved()
1715 * An architecture is expected to register range of page frames backed by
1727 * registered physical page range. Similarly
1870 unsigned long flags, struct page **pages);
1942 /* mm/page-writeback.c */
1943 int write_one_page(struct page *page, int wait);
1948 #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
1962 struct page *pg,
2035 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2043 struct page *follow_page_mask(struct vm_area_struct *vma,
2047 static inline struct page *follow_page(struct vm_area_struct *vma, follow_page()
2055 #define FOLL_TOUCH 0x02 /* mark page accessed */
2056 #define FOLL_GET 0x04 /* do get_page on page */
2061 #define FOLL_POPULATE 0x40 /* fault in page */
2063 #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
2064 #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
2065 #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
2085 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2093 kernel_map_pages(struct page *page, int numpages, int enable) kernel_map_pages() argument
2098 __kernel_map_pages(page, numpages, enable); kernel_map_pages()
2101 extern bool kernel_page_present(struct page *page);
2105 kernel_map_pages(struct page *page, int numpages, int enable) {} kernel_map_pages() argument
2107 static inline bool kernel_page_present(struct page *page) { return true; } kernel_page_present() argument
2145 void sparse_mem_maps_populate_node(struct page **map_map,
2151 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2166 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2180 extern void shake_page(struct page *p, int access);
2182 extern int soft_offline_page(struct page *page, int flags);
2185 extern void clear_huge_page(struct page *page,
2188 extern void copy_user_huge_page(struct page *dst, struct page *src,
2210 static inline bool page_is_guard(struct page *page) page_is_guard() argument
2217 page_ext = lookup_page_ext(page); page_is_guard()
2223 static inline bool page_is_guard(struct page *page) { return false; } page_is_guard() argument
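The set_page_zone()/set_page_node()/set_page_section() helpers above all follow one pattern: each field has a mask and a shift into page->flags, and an update first clears the field's bits and then ORs in the new value, while the readers shift down and mask. The standalone C sketch below only illustrates that packing idiom; the field names and widths are invented and are not the kernel's real ZONES/NODES/SECTIONS layout.

#include <stdio.h>

/* Invented field widths -- not the kernel's real ZONES/NODES/SECTIONS layout. */
#define ZONES_WIDTH     2
#define NODES_WIDTH     6
#define ZONES_PGSHIFT   0
#define NODES_PGSHIFT   (ZONES_PGSHIFT + ZONES_WIDTH)
#define ZONES_MASK      ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK      ((1UL << NODES_WIDTH) - 1)

struct fake_page {
        unsigned long flags;
};

/* Same shape as set_page_zone(): clear the field's bits, then OR in the value. */
static void set_zone(struct fake_page *p, unsigned long zone)
{
        p->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
        p->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static void set_node(struct fake_page *p, unsigned long node)
{
        p->flags &= ~(NODES_MASK << NODES_PGSHIFT);
        p->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

/* Same shape as the readers (page_to_section() etc.): shift down, then mask. */
static unsigned long get_zone(const struct fake_page *p)
{
        return (p->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

static unsigned long get_node(const struct fake_page *p)
{
        return (p->flags >> NODES_PGSHIFT) & NODES_MASK;
}

int main(void)
{
        struct fake_page pg = { .flags = 0 };

        set_zone(&pg, 2);
        set_node(&pg, 5);
        printf("zone=%lu node=%lu flags=%#lx\n",
               get_zone(&pg), get_node(&pg), pg.flags);
        return 0;
}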
H A Dcleancache.h15 * cleancache requires every file with a page in cleancache to have a
32 pgoff_t, struct page *);
34 pgoff_t, struct page *);
43 extern int __cleancache_get_page(struct page *);
44 extern void __cleancache_put_page(struct page *);
45 extern void __cleancache_invalidate_page(struct address_space *, struct page *);
51 static inline bool cleancache_fs_enabled(struct page *page) cleancache_fs_enabled() argument
53 return page->mapping->host->i_sb->cleancache_poolid >= 0; cleancache_fs_enabled()
90 static inline int cleancache_get_page(struct page *page) cleancache_get_page() argument
94 if (cleancache_enabled && cleancache_fs_enabled(page)) cleancache_get_page()
95 ret = __cleancache_get_page(page); cleancache_get_page()
99 static inline void cleancache_put_page(struct page *page) cleancache_put_page() argument
101 if (cleancache_enabled && cleancache_fs_enabled(page)) cleancache_put_page()
102 __cleancache_put_page(page); cleancache_put_page()
106 struct page *page) cleancache_invalidate_page()
108 /* careful... page->mapping is NULL sometimes when this is called */ cleancache_invalidate_page()
110 __cleancache_invalidate_page(mapping, page); cleancache_invalidate_page()
105 cleancache_invalidate_page(struct address_space *mapping, struct page *page) cleancache_invalidate_page() argument
H A Dkmemcheck.h11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
23 bool kmemcheck_page_is_tracked(struct page *p);
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) kmemcheck_alloc_shadow() argument
98 kmemcheck_free_shadow(struct page *page, int order) kmemcheck_free_shadow() argument
113 static inline void kmemcheck_pagealloc_alloc(struct page *p, kmemcheck_pagealloc_alloc()
118 static inline bool kmemcheck_page_is_tracked(struct page *p) kmemcheck_page_is_tracked()
139 static inline void kmemcheck_mark_unallocated_pages(struct page *p, kmemcheck_mark_unallocated_pages()
144 static inline void kmemcheck_mark_uninitialized_pages(struct page *p, kmemcheck_mark_uninitialized_pages()
149 static inline void kmemcheck_mark_initialized_pages(struct page *p, kmemcheck_mark_initialized_pages()
H A Dkasan.h7 struct page;
38 void kasan_alloc_pages(struct page *page, unsigned int order);
39 void kasan_free_pages(struct page *page, unsigned int order);
41 void kasan_poison_slab(struct page *page);
64 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} kasan_free_pages() argument
65 static inline void kasan_free_pages(struct page *page, unsigned int order) {} kasan_free_pages() argument
67 static inline void kasan_poison_slab(struct page *page) {} kasan_unpoison_object_data() argument
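kasan.h above (like kmemcheck.h and cleancache.h before it) shows the usual compile-out idiom: when the feature is built in, the functions are real externs; otherwise they collapse into empty static inlines so call sites need no #ifdef. A minimal standalone sketch of that idiom follows, with an invented HAVE_TRACKER switch (build with or without -DHAVE_TRACKER to see both variants).

#include <stdio.h>

#ifdef HAVE_TRACKER
static int tracked;

static void tracker_account(int n)
{
        tracked += n;
}

static int tracker_read(void)
{
        return tracked;
}
#else
/* Feature compiled out: empty static inline stubs keep call sites unconditional. */
static inline void tracker_account(int n) { (void)n; }
static inline int tracker_read(void) { return 0; }
#endif

int main(void)
{
        tracker_account(3);             /* no #ifdef needed here */
        printf("tracked = %d\n", tracker_read());
        return 0;
}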
H A Dscatterlist.h22 * scatterlist struct. We use that to place the page pointer AND encode
38 * We overload the LSB of the page pointer to indicate whether it's
48 * sg_assign_page - Assign a given page to an SG entry
50 * @page: The page
53 * Assign page to sg entry. Also see sg_set_page(), the most commonly used
57 static inline void sg_assign_page(struct scatterlist *sg, struct page *page) sg_assign_page() argument
65 BUG_ON((unsigned long) page & 0x03); sg_assign_page()
70 sg->page_link = page_link | (unsigned long) page; sg_assign_page()
74 * sg_set_page - Set sg entry to point at given page
76 * @page: The page
78 * @offset: Offset into page
81 * Use this function to set an sg entry pointing at a page, never assign
82 * the page directly. We encode sg table information in the lower bits
83 * of the page pointer. See sg_page() for looking up the page belonging
87 static inline void sg_set_page(struct scatterlist *sg, struct page *page, sg_set_page() argument
90 sg_assign_page(sg, page); sg_set_page()
95 static inline struct page *sg_page(struct scatterlist *sg) sg_page()
101 return (struct page *)((sg)->page_link & ~0x3); sg_page()
198 * This calls page_to_phys() on the page in this sg entry, and adds the
200 * on the sg page.
213 * This calls page_address() on the page in this sg entry, and adds the
214 * sg offset. The caller must know that the sg page has a valid virtual
238 struct page **pages, unsigned int n_pages,
259 * sg page iterator
261 * Iterates over sg entries page-by-page. On each successful iteration,
263 * to get the current page and its dma address. @piter->sg will point to the
264 * sg holding this page and @piter->sg_pgoffset to the page's page offset
269 struct scatterlist *sg; /* sg holding the page */
270 unsigned int sg_pgoffset; /* page offset within the sg */
283 * sg_page_iter_page - get the current page held by the page iterator
284 * @piter: page iterator holding the page
286 static inline struct page *sg_page_iter_page(struct sg_page_iter *piter) sg_page_iter_page()
292 * sg_page_iter_dma_address - get the dma address of the current page held by
293 * the page iterator.
294 * @piter: page iterator holding the page
304 * @piter: page iterator to hold current page, sg, sg_pgoffset
306 * @pgoffset: starting page offset
315 * Iterates over sg entries mapping page-by-page. On each successful
316 * iteration, @miter->page points to the mapped page and
334 struct page *page; /* currently mapped page */ member in struct:sg_mapping_iter
338 struct sg_page_iter piter; /* page iterator */
341 unsigned int __offset; /* offset within page */
342 unsigned int __remaining; /* remaining bytes on page */
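sg_assign_page() and sg_page() above depend on struct page pointers being at least 4-byte aligned, which leaves the two low bits of page_link free to carry the chain/end markers. The sketch below shows that low-bit pointer-tagging trick in plain C; the TAG_* names and the pack/unpack helpers are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_CHAIN       0x01UL          /* invented: "this entry chains" */
#define TAG_LAST        0x02UL          /* invented: "this entry is last" */
#define TAG_MASK        0x03UL

/* Store a pointer plus two flag bits in one word, like sg->page_link. */
static uintptr_t pack(void *ptr, unsigned long tags)
{
        uintptr_t v = (uintptr_t)ptr;

        assert((v & TAG_MASK) == 0);    /* needs at least 4-byte alignment */
        return v | (tags & TAG_MASK);
}

static void *unpack_ptr(uintptr_t link)
{
        return (void *)(link & ~TAG_MASK);
}

static unsigned long unpack_tags(uintptr_t link)
{
        return link & TAG_MASK;
}

int main(void)
{
        int *data = malloc(sizeof(*data));      /* malloc gives aligned memory */
        uintptr_t link;

        if (!data)
                return 1;
        *data = 42;
        link = pack(data, TAG_LAST);
        printf("value=%d last=%d chain=%d\n",
               *(int *)unpack_ptr(link),
               (int)!!(unpack_tags(link) & TAG_LAST),
               (int)!!(unpack_tags(link) & TAG_CHAIN));
        free(data);
        return 0;
}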
H A Dhuge_mm.h18 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
54 extern pmd_t *page_check_address_pmd(struct page *page,
96 extern int split_huge_page_to_list(struct page *page, struct list_head *list); split_huge_page()
97 static inline int split_huge_page(struct page *page) split_huge_page() argument
99 return split_huge_page_to_list(page, NULL); split_huge_page()
150 static inline int hpage_nr_pages(struct page *page) hpage_nr_pages() argument
152 if (unlikely(PageTransHuge(page))) hpage_nr_pages()
160 extern struct page *huge_zero_page;
162 static inline bool is_huge_zero_page(struct page *page) is_huge_zero_page() argument
164 return ACCESS_ONCE(huge_zero_page) == page; is_huge_zero_page()
178 split_huge_page_to_list(struct page *page, struct list_head *list) split_huge_page_to_list() argument
182 static inline int split_huge_page(struct page *page) split_huge_page() argument
216 static inline bool is_huge_zero_page(struct page *page) is_huge_zero_page() argument
H A Dmemcontrol.h28 struct page;
58 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
59 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
76 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
78 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
80 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
81 void mem_cgroup_uncharge(struct page *page);
84 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
88 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
94 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
162 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
192 void mem_cgroup_split_huge_fixup(struct page *head);
210 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, mem_cgroup_try_charge() argument
218 static inline void mem_cgroup_commit_charge(struct page *page, mem_cgroup_commit_charge() argument
224 static inline void mem_cgroup_cancel_charge(struct page *page, mem_cgroup_cancel_charge() argument
229 static inline void mem_cgroup_uncharge(struct page *page) mem_cgroup_uncharge() argument
237 static inline void mem_cgroup_migrate(struct page *oldpage, mem_cgroup_migrate()
238 struct page *newpage, mem_cgroup_migrate()
249 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, mem_cgroup_page_lruvec() argument
255 static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) try_get_mem_cgroup_from_page() argument
324 static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page) mem_cgroup_begin_page_stat() argument
369 static inline void mem_cgroup_split_huge_fixup(struct page *head) mem_cgroup_split_huge_fixup()
433 void __memcg_kmem_commit_charge(struct page *page,
435 void __memcg_kmem_uncharge_pages(struct page *page, int order);
488 * @page: pointer to struct page being freed
492 memcg_kmem_uncharge_pages(struct page *page, int order) memcg_kmem_uncharge_pages() argument
495 __memcg_kmem_uncharge_pages(page, order); memcg_kmem_uncharge_pages()
499 * memcg_kmem_commit_charge: embeds correct memcg in a page
500 * @page: pointer to struct page recently allocated
505 * failure of the allocation. if @page is NULL, this function will revert the
506 * charges. Otherwise, it will commit @page to @memcg.
509 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) memcg_kmem_commit_charge() argument
512 __memcg_kmem_commit_charge(page, memcg, order); memcg_kmem_commit_charge()
571 static inline void memcg_kmem_uncharge_pages(struct page *page, int order) memcg_kmem_uncharge_pages() argument
576 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) memcg_kmem_commit_charge() argument
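mem_cgroup_try_charge()/commit_charge()/cancel_charge() above form a two-phase protocol: reserve the accounting up front, then commit it once the surrounding operation has succeeded, or cancel it on failure so nothing leaks. A standalone sketch of that try/commit/cancel shape against a plain counter; the LIMIT and function names are invented.

#include <stdio.h>

#define LIMIT 8

static int charged;     /* pages currently accounted to the "group" */

/* Phase 1: reserve room; fails with no side effects if over the limit. */
static int try_charge(int nr)
{
        if (charged + nr > LIMIT)
                return -1;
        charged += nr;
        return 0;
}

/* Phase 2a: the caller's operation succeeded, keep the reservation. */
static void commit_charge(int nr)
{
        (void)nr;       /* nothing more to do in this toy model */
}

/* Phase 2b: the caller's operation failed, give the reservation back. */
static void cancel_charge(int nr)
{
        charged -= nr;
}

/* A fallible operation that the charge is protecting (always fails here). */
static int insert_into_cache(int ok)
{
        return ok ? 0 : -1;
}

int main(void)
{
        if (try_charge(1) == 0) {
                if (insert_into_cache(0) == 0)
                        commit_charge(1);
                else
                        cancel_charge(1);       /* roll back, nothing leaked */
        }
        printf("charged after failed insert: %d\n", charged);
        return 0;
}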
H A Dswap.h13 #include <linux/page-flags.h>
14 #include <asm/page.h>
25 #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
84 * areas somewhat tricky on machines that support multiple page sizes.
152 SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
171 #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
230 unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
252 void *workingset_eviction(struct address_space *mapping, struct page *page);
254 void workingset_activation(struct page *page);
299 extern void lru_cache_add(struct page *);
300 extern void lru_cache_add_anon(struct page *page);
301 extern void lru_cache_add_file(struct page *page);
302 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
304 extern void activate_page(struct page *);
305 extern void mark_page_accessed(struct page *);
309 extern void rotate_reclaimable_page(struct page *page);
310 extern void deactivate_file_page(struct page *page);
313 extern void add_page_to_unevictable_list(struct page *page);
315 extern void lru_cache_add_active_or_unevictable(struct page *page,
321 extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
332 extern int remove_mapping(struct address_space *mapping, struct page *page);
348 extern int page_evictable(struct page *page);
349 extern void check_move_unevictable_pages(struct page **, int nr_pages);
362 extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
365 static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry) mem_cgroup_swapout() argument
374 extern int swap_readpage(struct page *);
375 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
377 extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
379 extern int swap_set_page_dirty(struct page *page);
392 extern int add_to_swap(struct page *, struct list_head *list);
393 extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
394 extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
395 extern void __delete_from_swap_cache(struct page *);
396 extern void delete_from_swap_cache(struct page *);
397 extern void free_page_and_swap_cache(struct page *);
398 extern void free_pages_and_swap_cache(struct page **, int);
399 extern struct page *lookup_swap_cache(swp_entry_t);
400 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
402 extern struct page *swapin_readahead(swp_entry_t, gfp_t,
432 extern sector_t map_swap_page(struct page *, struct block_device **);
434 extern int page_swapcount(struct page *);
435 extern struct swap_info_struct *page_swap_info(struct page *);
436 extern int reuse_swap_page(struct page *);
437 extern int try_to_free_swap(struct page *);
452 #define free_page_and_swap_cache(page) \
453 page_cache_release(page)
486 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, swapin_readahead()
492 static inline int swap_writepage(struct page *p, struct writeback_control *wbc) swap_writepage()
497 static inline struct page *lookup_swap_cache(swp_entry_t swp) lookup_swap_cache()
502 static inline int add_to_swap(struct page *page, struct list_head *list) add_to_swap() argument
507 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, add_to_swap_cache() argument
513 static inline void __delete_from_swap_cache(struct page *page) __delete_from_swap_cache() argument
517 static inline void delete_from_swap_cache(struct page *page) delete_from_swap_cache() argument
521 static inline int page_swapcount(struct page *page) page_swapcount() argument
526 #define reuse_swap_page(page) (page_mapcount(page) == 1)
528 static inline int try_to_free_swap(struct page *page) try_to_free_swap() argument
/linux-4.1.27/include/trace/events/
H A Dpagemap.h18 #define trace_pagemap_flags(page) ( \
19 (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
20 (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \
21 (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \
22 (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \
23 (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
24 (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \
30 struct page *page,
34 TP_ARGS(page, lru),
37 __field(struct page *, page )
44 __entry->page = page;
45 __entry->pfn = page_to_pfn(page);
47 __entry->flags = trace_pagemap_flags(page);
50 /* Flag format is based on page-types.c formatting for pagemap */
51 TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s",
52 __entry->page,
65 TP_PROTO(struct page *page),
67 TP_ARGS(page),
70 __field(struct page *, page )
75 __entry->page = page;
76 __entry->pfn = page_to_pfn(page);
79 /* Flag format is based on page-types.c formatting for pagemap */
80 TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
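trace_pagemap_flags() above folds several page predicates into one small bitmask, and the TP_printk format expands it back into single-letter markers. The standalone sketch below reproduces that encode/decode shape; the F_* flag names and the fake_page fields are invented.

#include <stdio.h>

#define F_ANON          0x01u
#define F_MAPPED        0x02u
#define F_DIRTY         0x04u

struct fake_page {
        int anon, mapped, dirty;
};

/* Fold boolean predicates into one bitmask, like trace_pagemap_flags(). */
static unsigned int page_trace_flags(const struct fake_page *p)
{
        return (p->anon   ? F_ANON   : 0) |
               (p->mapped ? F_MAPPED : 0) |
               (p->dirty  ? F_DIRTY  : 0);
}

int main(void)
{
        struct fake_page pg = { .anon = 1, .mapped = 0, .dirty = 1 };
        unsigned int flags = page_trace_flags(&pg);

        /* Expand the mask back into letters, as the TP_printk "%s%s%s" does. */
        printf("flags=%s%s%s\n",
               flags & F_ANON   ? "a" : "",
               flags & F_MAPPED ? "m" : "",
               flags & F_DIRTY  ? "d" : "");
        return 0;
}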
H A Dcma.h12 TP_PROTO(unsigned long pfn, const struct page *page,
15 TP_ARGS(pfn, page, count, align),
19 __field(const struct page *, page)
26 __entry->page = page;
31 TP_printk("pfn=%lx page=%p count=%u align=%u",
33 __entry->page,
40 TP_PROTO(unsigned long pfn, const struct page *page,
43 TP_ARGS(pfn, page, count),
47 __field(const struct page *, page)
53 __entry->page = page;
57 TP_printk("pfn=%lx page=%p count=%u",
59 __entry->page,
H A Dfilemap.h16 TP_PROTO(struct page *page),
18 TP_ARGS(page),
28 __entry->pfn = page_to_pfn(page);
29 __entry->i_ino = page->mapping->host->i_ino;
30 __entry->index = page->index;
31 if (page->mapping->host->i_sb)
32 __entry->s_dev = page->mapping->host->i_sb->s_dev;
34 __entry->s_dev = page->mapping->host->i_rdev;
37 TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
46 TP_PROTO(struct page *page),
47 TP_ARGS(page)
51 TP_PROTO(struct page *page),
52 TP_ARGS(page)
H A Dkmem.h163 TP_PROTO(struct page *page, unsigned int order),
165 TP_ARGS(page, order),
185 __entry->pfn = page_to_pfn(page);
189 TP_printk("page=%p pfn=%lu order=%d",
197 TP_PROTO(struct page *page, int cold),
199 TP_ARGS(page, cold),
207 __entry->pfn = page_to_pfn(page);
211 TP_printk("page=%p pfn=%lu order=0 cold=%d",
219 TP_PROTO(struct page *page, unsigned int order,
222 TP_ARGS(page, order, gfp_flags, migratetype),
232 __entry->pfn = page ? page_to_pfn(page) : -1UL;
238 TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
248 TP_PROTO(struct page *page, unsigned int order, int migratetype),
250 TP_ARGS(page, order, migratetype),
259 __entry->pfn = page ? page_to_pfn(page) : -1UL;
264 TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
274 TP_PROTO(struct page *page, unsigned int order, int migratetype),
276 TP_ARGS(page, order, migratetype)
281 TP_PROTO(struct page *page, unsigned int order, int migratetype),
283 TP_ARGS(page, order, migratetype),
303 __entry->pfn = page ? page_to_pfn(page) : -1UL;
308 TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
315 TP_PROTO(struct page *page,
319 TP_ARGS(page,
333 __entry->pfn = page_to_pfn(page);
339 get_pageblock_migratetype(page));
342 TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
/linux-4.1.27/fs/9p/
H A Dcache.h44 extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
45 extern void __v9fs_fscache_invalidate_page(struct page *page);
47 struct page *page);
52 extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
54 struct page *page);
56 static inline int v9fs_fscache_release_page(struct page *page, v9fs_fscache_release_page() argument
59 return __v9fs_fscache_release_page(page, gfp); v9fs_fscache_release_page()
62 static inline void v9fs_fscache_invalidate_page(struct page *page) v9fs_fscache_invalidate_page() argument
64 __v9fs_fscache_invalidate_page(page); v9fs_fscache_invalidate_page()
68 struct page *page) v9fs_readpage_from_fscache()
70 return __v9fs_readpage_from_fscache(inode, page); v9fs_readpage_from_fscache()
83 struct page *page) v9fs_readpage_to_fscache()
85 if (PageFsCache(page)) v9fs_readpage_to_fscache()
86 __v9fs_readpage_to_fscache(inode, page); v9fs_readpage_to_fscache()
89 static inline void v9fs_uncache_page(struct inode *inode, struct page *page) v9fs_uncache_page() argument
92 fscache_uncache_page(v9inode->fscache, page); v9fs_uncache_page()
93 BUG_ON(PageFsCache(page)); v9fs_uncache_page()
97 struct page *page) v9fs_fscache_wait_on_page_write()
99 return __v9fs_fscache_wait_on_page_write(inode, page); v9fs_fscache_wait_on_page_write()
116 static inline int v9fs_fscache_release_page(struct page *page, v9fs_fscache_release_page() argument
121 static inline void v9fs_fscache_invalidate_page(struct page *page) {} v9fs_fscache_invalidate_page() argument
124 struct page *page) v9fs_readpage_from_fscache()
138 struct page *page) v9fs_readpage_to_fscache()
141 static inline void v9fs_uncache_page(struct inode *inode, struct page *page) v9fs_uncache_page() argument
145 struct page *page) v9fs_fscache_wait_on_page_write()
67 v9fs_readpage_from_fscache(struct inode *inode, struct page *page) v9fs_readpage_from_fscache() argument
82 v9fs_readpage_to_fscache(struct inode *inode, struct page *page) v9fs_readpage_to_fscache() argument
96 v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page) v9fs_fscache_wait_on_page_write() argument
123 v9fs_readpage_from_fscache(struct inode *inode, struct page *page) v9fs_readpage_from_fscache() argument
137 v9fs_readpage_to_fscache(struct inode *inode, struct page *page) v9fs_readpage_to_fscache() argument
144 v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page) v9fs_fscache_wait_on_page_write() argument
H A Dvfs_addr.c46 * v9fs_fid_readpage - read an entire page in from 9P
49 * @page: structure to page
52 static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) v9fs_fid_readpage() argument
54 struct inode *inode = page->mapping->host; v9fs_fid_readpage()
55 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; v9fs_fid_readpage()
61 BUG_ON(!PageLocked(page)); v9fs_fid_readpage()
63 retval = v9fs_readpage_from_fscache(inode, page); v9fs_fid_readpage()
69 retval = p9_client_read(fid, page_offset(page), &to, &err); v9fs_fid_readpage()
71 v9fs_uncache_page(inode, page); v9fs_fid_readpage()
76 zero_user(page, retval, PAGE_SIZE - retval); v9fs_fid_readpage()
77 flush_dcache_page(page); v9fs_fid_readpage()
78 SetPageUptodate(page); v9fs_fid_readpage()
80 v9fs_readpage_to_fscache(inode, page); v9fs_fid_readpage()
84 unlock_page(page); v9fs_fid_readpage()
89 * v9fs_vfs_readpage - read an entire page in from 9P
92 * @page: structure to page
96 static int v9fs_vfs_readpage(struct file *filp, struct page *page) v9fs_vfs_readpage() argument
98 return v9fs_fid_readpage(filp->private_data, page); v9fs_vfs_readpage()
130 * v9fs_release_page - release the private state associated with a page
132 * Returns 1 if the page can be released, 0 otherwise.
135 static int v9fs_release_page(struct page *page, gfp_t gfp) v9fs_release_page() argument
137 if (PagePrivate(page)) v9fs_release_page()
139 return v9fs_fscache_release_page(page, gfp); v9fs_release_page()
143 * v9fs_invalidate_page - Invalidate a page completely or partially
145 * @page: structure to page
146 * @offset: offset in the page
149 static void v9fs_invalidate_page(struct page *page, unsigned int offset, v9fs_invalidate_page() argument
154 * the private state associated with the page v9fs_invalidate_page()
157 v9fs_fscache_invalidate_page(page); v9fs_invalidate_page()
160 static int v9fs_vfs_writepage_locked(struct page *page) v9fs_vfs_writepage_locked() argument
162 struct inode *inode = page->mapping->host; v9fs_vfs_writepage_locked()
169 if (page->index == size >> PAGE_CACHE_SHIFT) v9fs_vfs_writepage_locked()
174 bvec.bv_page = page; v9fs_vfs_writepage_locked()
182 set_page_writeback(page); v9fs_vfs_writepage_locked()
184 p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err); v9fs_vfs_writepage_locked()
186 end_page_writeback(page); v9fs_vfs_writepage_locked()
190 static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc) v9fs_vfs_writepage() argument
194 p9_debug(P9_DEBUG_VFS, "page %p\n", page); v9fs_vfs_writepage()
196 retval = v9fs_vfs_writepage_locked(page); v9fs_vfs_writepage()
199 redirty_page_for_writepage(wbc, page); v9fs_vfs_writepage()
202 SetPageError(page); v9fs_vfs_writepage()
203 mapping_set_error(page->mapping, retval); v9fs_vfs_writepage()
208 unlock_page(page); v9fs_vfs_writepage()
213 * v9fs_launder_page - Writeback a dirty page
217 static int v9fs_launder_page(struct page *page) v9fs_launder_page() argument
220 struct inode *inode = page->mapping->host; v9fs_launder_page()
222 v9fs_fscache_wait_on_page_write(inode, page); v9fs_launder_page()
223 if (clear_page_dirty_for_io(page)) { v9fs_launder_page()
224 retval = v9fs_vfs_writepage_locked(page); v9fs_launder_page()
269 struct page **pagep, void **fsdata) v9fs_write_begin()
272 struct page *page; v9fs_write_begin() local
282 page = grab_cache_page_write_begin(mapping, index, flags); v9fs_write_begin()
283 if (!page) { v9fs_write_begin()
288 if (PageUptodate(page)) v9fs_write_begin()
294 retval = v9fs_fid_readpage(v9inode->writeback_fid, page); v9fs_write_begin()
295 page_cache_release(page); v9fs_write_begin()
299 *pagep = page; v9fs_write_begin()
305 struct page *page, void *fsdata) v9fs_write_end()
308 struct inode *inode = page->mapping->host; v9fs_write_end()
318 zero_user(page, from + copied, len - copied); v9fs_write_end()
319 flush_dcache_page(page); v9fs_write_end()
322 if (!PageUptodate(page)) v9fs_write_end()
323 SetPageUptodate(page); v9fs_write_end()
332 set_page_dirty(page); v9fs_write_end()
333 unlock_page(page); v9fs_write_end()
334 page_cache_release(page); v9fs_write_end()
303 v9fs_write_end(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) v9fs_write_end() argument
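v9fs_fid_readpage() above follows the common readpage shape: try the cache first, otherwise read from the backing store, zero whatever part of the page the read did not cover, and only then mark it uptodate. The sketch below shows just the read-then-zero-the-tail step against an ordinary file; the 4096-byte buffer and helper names are invented stand-ins, not the 9p code.

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* Fill 'buf' from 'fp' at byte offset 'off'; zero what the read did not cover. */
static int read_page(FILE *fp, long off, unsigned char *buf)
{
        size_t got;

        if (fseek(fp, off, SEEK_SET) != 0)
                return -1;
        got = fread(buf, 1, PAGE_SZ, fp);
        if (got < PAGE_SZ)
                memset(buf + got, 0, PAGE_SZ - got);    /* like zero_user() past EOF */
        return (int)got;
}

int main(int argc, char **argv)
{
        unsigned char buf[PAGE_SZ];
        FILE *fp;
        int got;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        fp = fopen(argv[1], "rb");
        if (!fp) {
                perror("fopen");
                return 1;
        }
        got = read_page(fp, 0, buf);
        printf("read %d byte(s); rest of the %d-byte buffer zeroed\n", got, PAGE_SZ);
        fclose(fp);
        return 0;
}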
/linux-4.1.27/drivers/staging/android/ion/
H A Dion_page_pool.c29 struct page *page = alloc_pages(pool->gfp_mask, pool->order); ion_page_pool_alloc_pages() local
31 if (!page) ion_page_pool_alloc_pages()
33 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, ion_page_pool_alloc_pages()
35 return page; ion_page_pool_alloc_pages()
39 struct page *page) ion_page_pool_free_pages()
41 __free_pages(page, pool->order); ion_page_pool_free_pages()
44 static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) ion_page_pool_add() argument
47 if (PageHighMem(page)) { ion_page_pool_add()
48 list_add_tail(&page->lru, &pool->high_items); ion_page_pool_add()
51 list_add_tail(&page->lru, &pool->low_items); ion_page_pool_add()
58 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) ion_page_pool_remove()
60 struct page *page; ion_page_pool_remove() local
64 page = list_first_entry(&pool->high_items, struct page, lru); ion_page_pool_remove()
68 page = list_first_entry(&pool->low_items, struct page, lru); ion_page_pool_remove()
72 list_del(&page->lru); ion_page_pool_remove()
73 return page; ion_page_pool_remove()
76 struct page *ion_page_pool_alloc(struct ion_page_pool *pool) ion_page_pool_alloc()
78 struct page *page = NULL; ion_page_pool_alloc() local
84 page = ion_page_pool_remove(pool, true); ion_page_pool_alloc()
86 page = ion_page_pool_remove(pool, false); ion_page_pool_alloc()
89 if (!page) ion_page_pool_alloc()
90 page = ion_page_pool_alloc_pages(pool); ion_page_pool_alloc()
92 return page; ion_page_pool_alloc()
95 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) ion_page_pool_free() argument
99 BUG_ON(pool->order != compound_order(page)); ion_page_pool_free()
101 ret = ion_page_pool_add(pool, page); ion_page_pool_free()
103 ion_page_pool_free_pages(pool, page); ion_page_pool_free()
131 struct page *page; ion_page_pool_shrink() local
135 page = ion_page_pool_remove(pool, false); ion_page_pool_shrink()
137 page = ion_page_pool_remove(pool, true); ion_page_pool_shrink()
143 ion_page_pool_free_pages(pool, page); ion_page_pool_shrink()
38 ion_page_pool_free_pages(struct ion_page_pool *pool, struct page *page) ion_page_pool_free_pages() argument
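ion_page_pool_alloc()/ion_page_pool_free() above implement a classic object pool: allocation pops a cached page off a list and only falls back to the allocator when the pool is empty, free pushes the page back for reuse, and a shrinker returns cached pages to the system. The standalone sketch below mirrors that shape with malloc/free in place of alloc_pages/__free_pages; all names are invented.

#include <stdio.h>
#include <stdlib.h>

struct pool_item {
        struct pool_item *next;
};

struct buf_pool {
        struct pool_item *free_list;
        size_t item_size;
        unsigned int cached;
};

/* Pop a cached buffer if we have one, otherwise fall back to the allocator. */
static void *pool_alloc(struct buf_pool *pool)
{
        struct pool_item *item = pool->free_list;

        if (item) {
                pool->free_list = item->next;
                pool->cached--;
                return item;
        }
        return malloc(pool->item_size);
}

/* Return a buffer to the pool so the next allocation can reuse it. */
static void pool_free(struct buf_pool *pool, void *ptr)
{
        struct pool_item *item = ptr;

        item->next = pool->free_list;
        pool->free_list = item;
        pool->cached++;
}

/* Hand cached buffers back to the system, like ion_page_pool_shrink(). */
static void pool_shrink(struct buf_pool *pool)
{
        while (pool->free_list) {
                struct pool_item *item = pool->free_list;

                pool->free_list = item->next;
                pool->cached--;
                free(item);
        }
}

int main(void)
{
        struct buf_pool pool = { .free_list = NULL, .item_size = 4096, .cached = 0 };
        void *a = pool_alloc(&pool);
        void *b = pool_alloc(&pool);

        pool_free(&pool, a);
        printf("cached after one free: %u\n", pool.cached);
        a = pool_alloc(&pool);          /* reuses the cached buffer */
        printf("cached after realloc: %u\n", pool.cached);
        pool_free(&pool, a);
        pool_free(&pool, b);
        pool_shrink(&pool);
        return 0;
}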
H A Dion_system_heap.c17 #include <asm/page.h>
55 static struct page *alloc_buffer_page(struct ion_system_heap *heap, alloc_buffer_page()
61 struct page *page; alloc_buffer_page() local
64 page = ion_page_pool_alloc(pool); alloc_buffer_page()
70 page = alloc_pages(gfp_flags | __GFP_COMP, order); alloc_buffer_page()
71 if (!page) alloc_buffer_page()
73 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, alloc_buffer_page()
77 return page; alloc_buffer_page()
81 struct ion_buffer *buffer, struct page *page) free_buffer_page()
83 unsigned int order = compound_order(page); free_buffer_page()
89 ion_page_pool_free(pool, page); free_buffer_page()
91 __free_pages(page, order); free_buffer_page()
96 static struct page *alloc_largest_available(struct ion_system_heap *heap, alloc_largest_available()
101 struct page *page; alloc_largest_available() local
110 page = alloc_buffer_page(heap, buffer, orders[i]); alloc_largest_available()
111 if (!page) alloc_largest_available()
114 return page; alloc_largest_available()
131 struct page *page, *tmp_page; ion_system_heap_allocate() local
144 page = alloc_largest_available(sys_heap, buffer, size_remaining, ion_system_heap_allocate()
146 if (!page) ion_system_heap_allocate()
148 list_add_tail(&page->lru, &pages); ion_system_heap_allocate()
149 size_remaining -= PAGE_SIZE << compound_order(page); ion_system_heap_allocate()
150 max_order = compound_order(page); ion_system_heap_allocate()
161 list_for_each_entry_safe(page, tmp_page, &pages, lru) { ion_system_heap_allocate()
162 sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0); ion_system_heap_allocate()
164 list_del(&page->lru); ion_system_heap_allocate()
173 list_for_each_entry_safe(page, tmp_page, &pages, lru) ion_system_heap_allocate()
174 free_buffer_page(sys_heap, buffer, page); ion_system_heap_allocate()
188 /* uncached pages come from the page pools, zero them before returning ion_system_heap_free()
316 struct page *page; ion_system_contig_heap_allocate() local
324 page = alloc_pages(low_order_gfp_flags, order); ion_system_contig_heap_allocate()
325 if (!page) ion_system_contig_heap_allocate()
328 split_page(page, order); ion_system_contig_heap_allocate()
332 __free_page(page + i); ion_system_contig_heap_allocate()
344 sg_set_page(table->sgl, page, len, 0); ion_system_contig_heap_allocate()
348 ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); ion_system_contig_heap_allocate()
356 __free_page(page + i); ion_system_contig_heap_allocate()
364 struct page *page = sg_page(table->sgl); ion_system_contig_heap_free() local
369 __free_page(page + i); ion_system_contig_heap_free()
379 struct page *page = sg_page(table->sgl); ion_system_contig_heap_phys() local
380 *addr = page_to_phys(page); ion_system_contig_heap_phys()
80 free_buffer_page(struct ion_system_heap *heap, struct ion_buffer *buffer, struct page *page) free_buffer_page() argument
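alloc_largest_available() above walks a descending list of orders and takes the biggest chunk that both fits the remaining size and can actually be allocated, so a buffer is assembled from as few pieces as possible. A standalone sketch of that strategy follows; the chunk sizes and names are invented.

#include <stdio.h>
#include <stdlib.h>

static const size_t chunk_sizes[] = { 1 << 20, 1 << 16, 1 << 12 };     /* 1M, 64K, 4K */
#define NUM_CHUNKS (sizeof(chunk_sizes) / sizeof(chunk_sizes[0]))

/* Return the largest chunk not bigger than 'remaining' that we can allocate. */
static void *alloc_largest(size_t remaining, size_t *out_size)
{
        size_t i;

        for (i = 0; i < NUM_CHUNKS; i++) {
                void *p;

                if (chunk_sizes[i] > remaining)
                        continue;
                p = malloc(chunk_sizes[i]);
                if (!p)
                        continue;       /* try the next smaller size */
                *out_size = chunk_sizes[i];
                return p;
        }
        return NULL;
}

int main(void)
{
        size_t remaining = (1 << 20) + (1 << 12);       /* 1M + 4K */

        while (remaining) {
                size_t got = 0;
                void *p = alloc_largest(remaining, &got);

                if (!p) {
                        fprintf(stderr, "allocation failed\n");
                        return 1;
                }
                printf("allocated chunk of %zu bytes\n", got);
                remaining -= got;
                free(p);        /* a real heap would keep these on a list */
        }
        return 0;
}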
/linux-4.1.27/fs/sysv/
H A Ddir.c30 static inline void dir_put_page(struct page *page) dir_put_page() argument
32 kunmap(page); dir_put_page()
33 page_cache_release(page); dir_put_page()
41 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) dir_commit_chunk() argument
43 struct address_space *mapping = page->mapping; dir_commit_chunk()
47 block_write_end(NULL, mapping, pos, len, len, page, NULL); dir_commit_chunk()
53 err = write_one_page(page, 1); dir_commit_chunk()
55 unlock_page(page); dir_commit_chunk()
59 static struct page * dir_get_page(struct inode *dir, unsigned long n) dir_get_page()
62 struct page *page = read_mapping_page(mapping, n, NULL); dir_get_page() local
63 if (!IS_ERR(page)) dir_get_page()
64 kmap(page); dir_get_page()
65 return page; dir_get_page()
87 struct page *page = dir_get_page(inode, n); sysv_readdir() local
89 if (IS_ERR(page)) sysv_readdir()
91 kaddr = (char *)page_address(page); sysv_readdir()
103 dir_put_page(page); sysv_readdir()
107 dir_put_page(page); sysv_readdir()
131 struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page) sysv_find_entry()
138 struct page *page = NULL; sysv_find_entry() local
150 page = dir_get_page(dir, n); sysv_find_entry()
151 if (!IS_ERR(page)) { sysv_find_entry()
152 kaddr = (char*)page_address(page); sysv_find_entry()
162 dir_put_page(page); sysv_find_entry()
173 *res_page = page; sysv_find_entry()
182 struct page *page = NULL; sysv_add_link() local
192 page = dir_get_page(dir, n); sysv_add_link()
193 err = PTR_ERR(page); sysv_add_link()
194 if (IS_ERR(page)) sysv_add_link()
196 kaddr = (char*)page_address(page); sysv_add_link()
207 dir_put_page(page); sysv_add_link()
213 pos = page_offset(page) + sysv_add_link()
214 (char*)de - (char*)page_address(page); sysv_add_link()
215 lock_page(page); sysv_add_link()
216 err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); sysv_add_link()
222 err = dir_commit_chunk(page, pos, SYSV_DIRSIZE); sysv_add_link()
226 dir_put_page(page); sysv_add_link()
230 unlock_page(page); sysv_add_link()
234 int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page) sysv_delete_entry() argument
236 struct inode *inode = page->mapping->host; sysv_delete_entry()
237 char *kaddr = (char*)page_address(page); sysv_delete_entry()
238 loff_t pos = page_offset(page) + (char *)de - kaddr; sysv_delete_entry()
241 lock_page(page); sysv_delete_entry()
242 err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); sysv_delete_entry()
245 err = dir_commit_chunk(page, pos, SYSV_DIRSIZE); sysv_delete_entry()
246 dir_put_page(page); sysv_delete_entry()
254 struct page *page = grab_cache_page(inode->i_mapping, 0); sysv_make_empty() local
259 if (!page) sysv_make_empty()
261 err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE); sysv_make_empty()
263 unlock_page(page); sysv_make_empty()
266 kmap(page); sysv_make_empty()
268 base = (char*)page_address(page); sysv_make_empty()
278 kunmap(page); sysv_make_empty()
279 err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); sysv_make_empty()
281 page_cache_release(page); sysv_make_empty()
291 struct page *page = NULL; sysv_empty_dir() local
297 page = dir_get_page(inode, i); sysv_empty_dir()
299 if (IS_ERR(page)) sysv_empty_dir()
302 kaddr = (char *)page_address(page); sysv_empty_dir()
321 dir_put_page(page); sysv_empty_dir()
326 dir_put_page(page); sysv_empty_dir()
330 /* Releases the page */ sysv_set_link()
331 void sysv_set_link(struct sysv_dir_entry *de, struct page *page, sysv_set_link() argument
334 struct inode *dir = page->mapping->host; sysv_set_link()
335 loff_t pos = page_offset(page) + sysv_set_link()
336 (char *)de-(char*)page_address(page); sysv_set_link()
339 lock_page(page); sysv_set_link()
340 err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); sysv_set_link()
343 err = dir_commit_chunk(page, pos, SYSV_DIRSIZE); sysv_set_link()
344 dir_put_page(page); sysv_set_link()
349 struct sysv_dir_entry * sysv_dotdot (struct inode *dir, struct page **p) sysv_dotdot()
351 struct page *page = dir_get_page(dir, 0); sysv_dotdot() local
354 if (!IS_ERR(page)) { sysv_dotdot()
355 de = (struct sysv_dir_entry*) page_address(page) + 1; sysv_dotdot()
356 *p = page; sysv_dotdot()
363 struct page *page; sysv_inode_by_name() local
364 struct sysv_dir_entry *de = sysv_find_entry (dentry, &page); sysv_inode_by_name()
369 dir_put_page(page); sysv_inode_by_name()
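sysv_find_entry() above scans the directory a page at a time, treating each page as an array of fixed-size sysv_dir_entry records and comparing names in place. The sketch below does the same fixed-record scan over an in-memory array; the record layout is only loosely modelled on the 16-byte SysV entry and the names are invented.

#include <stdio.h>
#include <string.h>

#define NAME_LEN 14

struct dir_entry {
        unsigned short inode;           /* 0 means the slot is free */
        char name[NAME_LEN];
};

/* Scan a block of fixed-size entries for 'name'; return its inode or 0. */
static unsigned short find_entry(const struct dir_entry *entries, size_t count,
                                 const char *name)
{
        size_t i;

        for (i = 0; i < count; i++) {
                if (!entries[i].inode)
                        continue;       /* skip deleted/empty slots */
                if (strncmp(entries[i].name, name, NAME_LEN) == 0)
                        return entries[i].inode;
        }
        return 0;
}

int main(void)
{
        struct dir_entry dir[4] = {
                { 2, "." }, { 2, ".." }, { 0, "" }, { 11, "notes.txt" },
        };

        printf("notes.txt -> inode %d\n", find_entry(dir, 4, "notes.txt"));
        printf("missing   -> inode %d\n", find_entry(dir, 4, "missing"));
        return 0;
}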
/linux-4.1.27/mm/
H A Dballoon_compaction.c14 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
15 * page list.
16 * @b_dev_info: balloon device descriptor where we will insert a new page to
18 * Driver must call it to properly allocate a new enlisted balloon page
20 * This function returns the page address for the recently enqueued page or
21 * NULL in the case we fail to allocate a new page this turn.
23 struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) balloon_page_enqueue()
26 struct page *page = alloc_page(balloon_mapping_gfp_mask() | balloon_page_enqueue() local
28 if (!page) balloon_page_enqueue()
32 * Block others from accessing the 'page' when we get around to balloon_page_enqueue()
34 * holding a reference to the 'page' at this point. balloon_page_enqueue()
36 BUG_ON(!trylock_page(page)); balloon_page_enqueue()
38 balloon_page_insert(b_dev_info, page); balloon_page_enqueue()
41 unlock_page(page); balloon_page_enqueue()
42 return page; balloon_page_enqueue()
47 * balloon_page_dequeue - removes a page from balloon's page list and returns
48 * its address to allow the driver to release the page.
49 * @b_dev_info: balloon device descriptor where we will grab a page from.
51 * Driver must call it to properly de-allocate a previously enlisted balloon page
53 * This function returns the page address for the recently dequeued page or
54 * NULL in the case we find balloon's page list temporarily empty due to
57 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) balloon_page_dequeue()
59 struct page *page, *tmp; balloon_page_dequeue() local
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { balloon_page_dequeue()
67 * Block others from accessing the 'page' while we get around balloon_page_dequeue()
68 * establishing additional references and preparing the 'page' balloon_page_dequeue()
71 if (trylock_page(page)) { balloon_page_dequeue()
73 if (!PagePrivate(page)) { balloon_page_dequeue()
75 unlock_page(page); balloon_page_dequeue()
79 balloon_page_delete(page); balloon_page_dequeue()
81 unlock_page(page); balloon_page_dequeue()
90 * If we are unable to dequeue a balloon page because the page balloon_page_dequeue()
101 page = NULL; balloon_page_dequeue()
103 return page; balloon_page_dequeue()
109 static inline void __isolate_balloon_page(struct page *page) __isolate_balloon_page() argument
111 struct balloon_dev_info *b_dev_info = balloon_page_device(page); __isolate_balloon_page()
115 ClearPagePrivate(page); __isolate_balloon_page()
116 list_del(&page->lru); __isolate_balloon_page()
121 static inline void __putback_balloon_page(struct page *page) __putback_balloon_page() argument
123 struct balloon_dev_info *b_dev_info = balloon_page_device(page); __putback_balloon_page()
127 SetPagePrivate(page); __putback_balloon_page()
128 list_add(&page->lru, &b_dev_info->pages); __putback_balloon_page()
133 /* __isolate_lru_page() counterpart for a ballooned page */ balloon_page_isolate()
134 bool balloon_page_isolate(struct page *page) balloon_page_isolate() argument
140 * In case we 'win' a race for a balloon page being freed under us and balloon_page_isolate()
143 * release this page, thus avoiding a nasty leakage. balloon_page_isolate()
145 if (likely(get_page_unless_zero(page))) { balloon_page_isolate()
148 * compaction threads can race against page migration functions balloon_page_isolate()
149 * as well as race against the balloon driver releasing a page. balloon_page_isolate()
151 * In order to avoid having an already isolated balloon page balloon_page_isolate()
154 * the balloon driver, lets be sure we have the page lock balloon_page_isolate()
155 * before proceeding with the balloon page isolation steps. balloon_page_isolate()
157 if (likely(trylock_page(page))) { balloon_page_isolate()
159 * A ballooned page, by default, has PagePrivate set. balloon_page_isolate()
161 * an already isolated balloon page by clearing it. balloon_page_isolate()
163 if (balloon_page_movable(page)) { balloon_page_isolate()
164 __isolate_balloon_page(page); balloon_page_isolate()
165 unlock_page(page); balloon_page_isolate()
168 unlock_page(page); balloon_page_isolate()
170 put_page(page); balloon_page_isolate()
175 /* putback_lru_page() counterpart for a ballooned page */ balloon_page_putback()
176 void balloon_page_putback(struct page *page) balloon_page_putback() argument
179 * 'lock_page()' stabilizes the page and prevents races against balloon_page_putback()
182 lock_page(page); balloon_page_putback()
184 if (__is_movable_balloon_page(page)) { balloon_page_putback()
185 __putback_balloon_page(page); balloon_page_putback()
186 /* drop the extra ref count taken for page isolation */ balloon_page_putback()
187 put_page(page); balloon_page_putback()
190 dump_page(page, "not movable balloon page"); balloon_page_putback()
192 unlock_page(page); balloon_page_putback()
195 /* move_to_new_page() counterpart for a ballooned page */ balloon_page_migrate()
196 int balloon_page_migrate(struct page *newpage, balloon_page_migrate() argument
197 struct page *page, enum migrate_mode mode) balloon_page_migrate()
199 struct balloon_dev_info *balloon = balloon_page_device(page); balloon_page_migrate()
209 if (WARN_ON(!__is_movable_balloon_page(page))) { balloon_page_migrate()
210 dump_page(page, "not movable balloon page"); balloon_page_migrate()
216 rc = balloon->migratepage(balloon, newpage, page, mode); balloon_page_migrate()
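balloon_page_dequeue() and balloon_page_isolate() above only touch a page when trylock_page() succeeds, skipping entries someone else is already isolating or migrating instead of blocking on them. The sketch below shows that try-lock-and-skip loop with a C11 atomic flag standing in for the page lock; the item structure and names are invented. Build with -std=c11.

#include <stdatomic.h>
#include <stdio.h>

#define NR_ITEMS 4

struct item {
        atomic_flag locked;     /* stands in for the page lock bit */
        int value;
};

/* Non-blocking "trylock": returns 1 on success, 0 if already held. */
static int item_trylock(struct item *it)
{
        return !atomic_flag_test_and_set(&it->locked);
}

static void item_unlock(struct item *it)
{
        atomic_flag_clear(&it->locked);
}

/* Take the first item we can lock without waiting; skip contended ones. */
static struct item *dequeue_unlocked(struct item *items, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (item_trylock(&items[i])) {
                        /* got the lock: claim the item, then drop the lock */
                        item_unlock(&items[i]);
                        return &items[i];
                }
                /* contended: leave it alone, like the !trylock_page() case */
        }
        return NULL;
}

int main(void)
{
        struct item items[NR_ITEMS] = {
                { ATOMIC_FLAG_INIT, 0 }, { ATOMIC_FLAG_INIT, 1 },
                { ATOMIC_FLAG_INIT, 2 }, { ATOMIC_FLAG_INIT, 3 },
        };
        struct item *it;

        /* pretend item 0 is currently being isolated/migrated elsewhere */
        item_trylock(&items[0]);

        it = dequeue_unlocked(items, NR_ITEMS);
        printf("dequeued item with value %d\n", it ? it->value : -1);
        return 0;
}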
H A Dswap.c41 /* How many pages do we try to swap or page in/out together? */
52 static void __page_cache_release(struct page *page) __page_cache_release() argument
54 if (PageLRU(page)) { __page_cache_release()
55 struct zone *zone = page_zone(page); __page_cache_release()
60 lruvec = mem_cgroup_page_lruvec(page, zone); __page_cache_release()
61 VM_BUG_ON_PAGE(!PageLRU(page), page); __page_cache_release()
62 __ClearPageLRU(page); __page_cache_release()
63 del_page_from_lru_list(page, lruvec, page_off_lru(page)); __page_cache_release()
66 mem_cgroup_uncharge(page); __page_cache_release()
69 static void __put_single_page(struct page *page) __put_single_page() argument
71 __page_cache_release(page); __put_single_page()
72 free_hot_cold_page(page, false); __put_single_page()
75 static void __put_compound_page(struct page *page) __put_compound_page() argument
81 * hugetlb. This is because a hugetlb page never has PageLRU set __put_compound_page()
85 if (!PageHuge(page)) __put_compound_page()
86 __page_cache_release(page); __put_compound_page()
87 dtor = get_compound_page_dtor(page); __put_compound_page()
88 (*dtor)(page); __put_compound_page()
95 * 1. Hugetlbfs page:
97 * PageHeadHuge will remain true until the compound page
101 * So if we see PageHeadHuge set, and we have the tail page pin,
102 * then we could safely put head page.
104 * 2. Slab THP page:
106 * PG_slab is cleared before the slab frees the head page, and
107 * tail pin cannot be the last reference left on the head page,
108 * because the slab code is free to reuse the compound page
114 * So if we see PageSlab set, and we have the tail page pin,
115 * then we could safely put head page.
118 void put_unrefcounted_compound_page(struct page *page_head, struct page *page) put_unrefcounted_compound_page() argument
121 * If @page is a THP tail, we must read the tail page put_unrefcounted_compound_page()
122 * flags after the head page flags. The put_unrefcounted_compound_page()
124 * between clearing PageTail and before the head page put_unrefcounted_compound_page()
128 if (likely(PageTail(page))) { put_unrefcounted_compound_page()
134 VM_BUG_ON_PAGE(page_mapcount(page) != 0, page); put_unrefcounted_compound_page() local
137 * If this is the tail of a slab THP page, put_unrefcounted_compound_page()
139 * held on the page, because the PG_slab cannot put_unrefcounted_compound_page()
144 * If this is the tail of a hugetlbfs page, put_unrefcounted_compound_page()
146 * the page instead, because PageHeadHuge will put_unrefcounted_compound_page()
147 * not go away until the compound page enters put_unrefcounted_compound_page()
156 * @page was a THP tail. The split @page_head put_unrefcounted_compound_page()
158 * hugetlbfs page of smaller order (only put_unrefcounted_compound_page()
161 if (put_page_testzero(page)) put_unrefcounted_compound_page()
162 __put_single_page(page); put_unrefcounted_compound_page()
166 void put_refcounted_compound_page(struct page *page_head, struct page *page) put_refcounted_compound_page() argument
168 if (likely(page != page_head && get_page_unless_zero(page_head))) { put_refcounted_compound_page()
173 * be a head page anymore by the time we obtain the put_refcounted_compound_page()
178 if (unlikely(!PageTail(page))) { put_refcounted_compound_page()
184 * and reallocated as a compound page put_refcounted_compound_page()
187 * cannot have become: a THP page, a put_refcounted_compound_page()
188 * compound page of higher order, a put_refcounted_compound_page()
189 * tail page. That is because we put_refcounted_compound_page()
200 if (put_page_testzero(page)) put_refcounted_compound_page()
201 __put_single_page(page); put_refcounted_compound_page()
204 VM_BUG_ON_PAGE(page_head != page->first_page, page); put_refcounted_compound_page()
214 VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page); put_refcounted_compound_page() local
215 atomic_dec(&page->_mapcount); put_refcounted_compound_page()
217 VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); put_refcounted_compound_page()
228 VM_BUG_ON_PAGE(PageTail(page), page); put_refcounted_compound_page() local
233 static void put_compound_page(struct page *page) put_compound_page() argument
235 struct page *page_head; put_compound_page()
238 * We see the PageCompound set and PageTail not set, so @page maybe: put_compound_page()
239 * 1. hugetlbfs head page, or put_compound_page()
240 * 2. THP head page. put_compound_page()
242 if (likely(!PageTail(page))) { put_compound_page()
243 if (put_page_testzero(page)) { put_compound_page()
248 if (PageHead(page)) put_compound_page()
249 __put_compound_page(page); put_compound_page()
251 __put_single_page(page); put_compound_page()
257 * We see the PageCompound set and PageTail set, so @page maybe: put_compound_page()
258 * 1. a tail hugetlbfs page, or put_compound_page()
259 * 2. a tail THP page, or put_compound_page()
260 * 3. a split THP page. put_compound_page()
263 * __split_huge_page_refcount tearing down a THP page. put_compound_page()
265 page_head = compound_head_by_tail(page); put_compound_page()
267 put_unrefcounted_compound_page(page_head, page); put_compound_page()
269 put_refcounted_compound_page(page_head, page); put_compound_page()
272 void put_page(struct page *page) put_page() argument
274 if (unlikely(PageCompound(page))) put_page()
275 put_compound_page(page); put_page()
276 else if (put_page_testzero(page)) put_page()
277 __put_single_page(page); put_page()
285 bool __get_page_tail(struct page *page) __get_page_tail() argument
288 * This takes care of get_page() if run on a tail page __get_page_tail()
297 struct page *page_head = compound_head(page); __get_page_tail()
302 if (likely(PageTail(page))) { __get_page_tail()
304 * This is a hugetlbfs page or a slab __get_page_tail()
305 * page. __split_huge_page_refcount __get_page_tail()
309 __get_page_tail_foll(page, true); __get_page_tail()
314 * before us, "page" was a THP __get_page_tail()
317 * hugetlbfs page of smaller order __get_page_tail()
326 if (likely(page != page_head && get_page_unless_zero(page_head))) { __get_page_tail()
329 * may not be a head page anymore by the time __get_page_tail()
335 if (likely(PageTail(page))) { __get_page_tail()
336 __get_page_tail_foll(page, false); __get_page_tail()
349 * @pages: list of pages threaded on page->lru
351 * Release a list of pages which are strung together on page.lru. Currently
357 struct page *victim; put_pages_list()
359 victim = list_entry(pages->prev, struct page, lru); put_pages_list()
376 * were pinned, returns -errno. Each page returned must be released
380 struct page **pages) get_kernel_pages()
397 * get_kernel_page() - pin a kernel page in memory
400 * @pages: array that receives pointer to the page pinned.
403 * Returns 1 if page is pinned. If the page was not pinned, returns
404 * -errno. The page returned must be released with a put_page() call
407 int get_kernel_page(unsigned long start, int write, struct page **pages) get_kernel_page()
419 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), pagevec_lru_move_fn()
428 struct page *page = pvec->pages[i]; pagevec_lru_move_fn() local
429 struct zone *pagezone = page_zone(page); pagevec_lru_move_fn()
438 lruvec = mem_cgroup_page_lruvec(page, zone); pagevec_lru_move_fn()
439 (*move_fn)(page, lruvec, arg); pagevec_lru_move_fn()
447 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, pagevec_move_tail_fn() argument
452 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { pagevec_move_tail_fn()
453 enum lru_list lru = page_lru_base_type(page); pagevec_move_tail_fn()
454 list_move_tail(&page->lru, &lruvec->lists[lru]); pagevec_move_tail_fn()
472 * Writeback is about to end against a page which has been marked for immediate
476 void rotate_reclaimable_page(struct page *page) rotate_reclaimable_page() argument
478 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && rotate_reclaimable_page()
479 !PageUnevictable(page) && PageLRU(page)) { rotate_reclaimable_page()
483 page_cache_get(page); rotate_reclaimable_page()
486 if (!pagevec_add(pvec, page)) rotate_reclaimable_page()
502 static void __activate_page(struct page *page, struct lruvec *lruvec, __activate_page() argument
505 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { __activate_page()
506 int file = page_is_file_cache(page); __activate_page()
507 int lru = page_lru_base_type(page); __activate_page()
509 del_page_from_lru_list(page, lruvec, lru); __activate_page()
510 SetPageActive(page); __activate_page()
512 add_page_to_lru_list(page, lruvec, lru); __activate_page()
513 trace_mm_lru_activate(page); __activate_page()
536 void activate_page(struct page *page) activate_page() argument
538 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { activate_page()
541 page_cache_get(page); activate_page()
542 if (!pagevec_add(pvec, page)) activate_page()
558 void activate_page(struct page *page) activate_page() argument
560 struct zone *zone = page_zone(page); activate_page()
563 __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL); activate_page()
568 static void __lru_cache_activate_page(struct page *page) __lru_cache_activate_page() argument
574 * Search backwards on the optimistic assumption that the page being __lru_cache_activate_page()
576 * the local pagevec is examined as a !PageLRU page could be in the __lru_cache_activate_page()
579 * a remote pagevec's page PageActive potentially hits a race where __lru_cache_activate_page()
580 * a page is marked PageActive just after it is added to the inactive __lru_cache_activate_page()
584 struct page *pagevec_page = pvec->pages[i]; __lru_cache_activate_page()
586 if (pagevec_page == page) { __lru_cache_activate_page()
587 SetPageActive(page); __lru_cache_activate_page()
596 * Mark a page as having seen activity.
602 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
603 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
605 void mark_page_accessed(struct page *page) mark_page_accessed() argument
607 if (!PageActive(page) && !PageUnevictable(page) && mark_page_accessed()
608 PageReferenced(page)) { mark_page_accessed()
611 * If the page is on the LRU, queue it for activation via mark_page_accessed()
612 * activate_page_pvecs. Otherwise, assume the page is on a mark_page_accessed()
616 if (PageLRU(page)) mark_page_accessed()
617 activate_page(page); mark_page_accessed()
619 __lru_cache_activate_page(page); mark_page_accessed()
620 ClearPageReferenced(page); mark_page_accessed()
621 if (page_is_file_cache(page)) mark_page_accessed()
622 workingset_activation(page); mark_page_accessed()
623 } else if (!PageReferenced(page)) { mark_page_accessed()
624 SetPageReferenced(page); mark_page_accessed()
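The comment above describes the two-touch promotion scheme: the first mark_page_accessed() sets PG_referenced, the second moves the page to the active list (and, for brand-new pages not yet visible to anyone, the non-atomic __SetPageReferenced() may be substituted). A hedged sketch of the read-side caller shape; the helper name is illustrative.

#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch: look up a cached page and record the access for LRU aging. */
static struct page *lookup_and_touch(struct address_space *mapping,
                                     pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (page)
                mark_page_accessed(page);  /* second touch promotes to active */
        return page;                       /* caller drops the reference */
}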
629 static void __lru_cache_add(struct page *page) __lru_cache_add() argument
633 page_cache_get(page); __lru_cache_add()
636 pagevec_add(pvec, page); __lru_cache_add()
641 * lru_cache_add: add a page to the page lists
642 * @page: the page to add
644 void lru_cache_add_anon(struct page *page) lru_cache_add_anon() argument
646 if (PageActive(page)) lru_cache_add_anon()
647 ClearPageActive(page); lru_cache_add_anon()
648 __lru_cache_add(page); lru_cache_add_anon()
651 void lru_cache_add_file(struct page *page) lru_cache_add_file() argument
653 if (PageActive(page)) lru_cache_add_file()
654 ClearPageActive(page); lru_cache_add_file()
655 __lru_cache_add(page); lru_cache_add_file()
660 * lru_cache_add - add a page to a page list
661 * @page: the page to be added to the LRU.
663 * Queue the page for addition to the LRU via pagevec. The decision on whether
664 * to add the page to the [in]active [file|anon] list is deferred until the
666 * have the page added to the active list using mark_page_accessed().
668 void lru_cache_add(struct page *page) lru_cache_add() argument
670 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); lru_cache_add()
671 VM_BUG_ON_PAGE(PageLRU(page), page); lru_cache_add() local
672 __lru_cache_add(page); lru_cache_add()
676 * add_page_to_unevictable_list - add a page to the unevictable list
677 * @page: the page to be added to the unevictable list
679 * Add page directly to its zone's unevictable list. To avoid races with
680 * tasks that might be making the page evictable, through eg. munlock,
681 * munmap or exit, while it's not on the lru, we want to add the page
685 void add_page_to_unevictable_list(struct page *page) add_page_to_unevictable_list() argument
687 struct zone *zone = page_zone(page); add_page_to_unevictable_list()
691 lruvec = mem_cgroup_page_lruvec(page, zone); add_page_to_unevictable_list()
692 ClearPageActive(page); add_page_to_unevictable_list()
693 SetPageUnevictable(page); add_page_to_unevictable_list()
694 SetPageLRU(page); add_page_to_unevictable_list()
695 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE); add_page_to_unevictable_list()
701 * @page: the page to be added to LRU
702 * @vma: vma in which page is mapped for determining reclaimability
704 * Place @page on the active or unevictable LRU list, depending on its
705 * evictability. Note that if the page is not evictable, it goes
709 void lru_cache_add_active_or_unevictable(struct page *page, lru_cache_add_active_or_unevictable() argument
712 VM_BUG_ON_PAGE(PageLRU(page), page); lru_cache_add_active_or_unevictable() local
715 SetPageActive(page); lru_cache_add_active_or_unevictable()
716 lru_cache_add(page); lru_cache_add_active_or_unevictable()
720 if (!TestSetPageMlocked(page)) { lru_cache_add_active_or_unevictable()
726 __mod_zone_page_state(page_zone(page), NR_MLOCK, lru_cache_add_active_or_unevictable()
727 hpage_nr_pages(page)); lru_cache_add_active_or_unevictable()
730 add_page_to_unevictable_list(page); lru_cache_add_active_or_unevictable()
734 * If the page can not be invalidated, it is moved to the
738 * effective than the single-page writeout from reclaim.
740 * If the page isn't page_mapped and dirty/writeback, the page
743 * 1. active, mapped page -> none
744 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
745 * 3. inactive, mapped page -> none
746 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
750 * In case 4, the page is moved to the head of the inactive list because the VM lru_deactivate_file_fn()
752 * expects flusher threads to write it out, which is more effective than the single-page writeout from reclaim. lru_deactivate_file_fn()

754 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, lru_deactivate_file_fn() argument
760 if (!PageLRU(page)) lru_deactivate_file_fn()
763 if (PageUnevictable(page)) lru_deactivate_file_fn()
766 /* Some processes are using the page */ lru_deactivate_file_fn()
767 if (page_mapped(page)) lru_deactivate_file_fn()
770 active = PageActive(page); lru_deactivate_file_fn()
771 file = page_is_file_cache(page); lru_deactivate_file_fn()
772 lru = page_lru_base_type(page); lru_deactivate_file_fn()
774 del_page_from_lru_list(page, lruvec, lru + active); lru_deactivate_file_fn()
775 ClearPageActive(page); lru_deactivate_file_fn()
776 ClearPageReferenced(page); lru_deactivate_file_fn()
777 add_page_to_lru_list(page, lruvec, lru); lru_deactivate_file_fn()
779 if (PageWriteback(page) || PageDirty(page)) { lru_deactivate_file_fn()
785 SetPageReclaim(page); lru_deactivate_file_fn()
788 * The page's writeback has already completed by the time the pagevec is drained, lru_deactivate_file_fn()
789 * so move the page to the tail of the inactive list. lru_deactivate_file_fn()
791 list_move_tail(&page->lru, &lruvec->lists[lru]); lru_deactivate_file_fn()
830 * deactivate_file_page - forcefully deactivate a file page
831 * @page: page to deactivate
833 * This function hints the VM that @page is a good reclaim candidate,
834 * for example if its invalidation fails due to the page being dirty
837 void deactivate_file_page(struct page *page) deactivate_file_page() argument
840 * In a workload with many unevictable pages (such as one using mprotect), deactivate_file_page()
841 * deactivating unevictable pages to accelerate reclaim is pointless. deactivate_file_page()
843 if (PageUnevictable(page)) deactivate_file_page()
846 if (likely(get_page_unless_zero(page))) { deactivate_file_page()
849 if (!pagevec_add(pvec, page)) deactivate_file_page()
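deactivate_file_page() is the hint used by the invalidation path when a page cannot be dropped immediately; roughly, invalidate_mapping_pages() falls back to it for pages that are still dirty, mapped or under writeback. A simplified sketch of that per-page decision (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch: try to drop a page cache page; if that fails, hint reclaim. */
static void drop_or_deactivate(struct page *page)
{
        if (!trylock_page(page))
                return;
        if (invalidate_inode_page(page)) {
                unlock_page(page);
                return;                   /* dropped from the page cache */
        }
        unlock_page(page);
        deactivate_file_page(page);       /* dirty/mapped/writeback: hint reclaim */
}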
905 * fell to zero, remove the page from the LRU and free it.
907 void release_pages(struct page **pages, int nr, bool cold) release_pages()
917 struct page *page = pages[i]; release_pages() local
919 if (unlikely(PageCompound(page))) { release_pages()
924 put_compound_page(page); release_pages()
938 if (!put_page_testzero(page)) release_pages()
941 if (PageLRU(page)) { release_pages()
942 struct zone *pagezone = page_zone(page); release_pages()
953 lruvec = mem_cgroup_page_lruvec(page, zone); release_pages()
954 VM_BUG_ON_PAGE(!PageLRU(page), page); release_pages()
955 __ClearPageLRU(page); release_pages()
956 del_page_from_lru_list(page, lruvec, page_off_lru(page)); release_pages()
960 __ClearPageActive(page); release_pages()
962 list_add(&page->lru, &pages_to_free); release_pages()
976 * cache-warm and we want to give them back to the page allocator ASAP.
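release_pages() drops one reference on each page in an array and frees those that hit zero, batching LRU removal under a single zone lock. A common pattern (hedged sketch; the pinning side is outside this listing, and the helper name is illustrative) is to release a pinned batch in one call rather than looping over put_page():

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch: pin a user buffer, touch it, then release the whole batch. */
static int with_pinned_user_pages(unsigned long uaddr, int nr_pages,
                                  struct page **pages)
{
        int pinned = get_user_pages_fast(uaddr, nr_pages, 0 /* read */, pages);

        if (pinned <= 0)
                return pinned ? pinned : -EFAULT;

        /* ... access pages[0..pinned-1] ... */

        /* One call instead of a put_page() loop; 'false' == cache-hot pages. */
        release_pages(pages, pinned, false);
        return 0;
}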
992 void lru_add_page_tail(struct page *page, struct page *page_tail, lru_add_page_tail() argument
997 VM_BUG_ON_PAGE(!PageHead(page), page); lru_add_page_tail()
998 VM_BUG_ON_PAGE(PageCompound(page_tail), page); lru_add_page_tail() local
999 VM_BUG_ON_PAGE(PageLRU(page_tail), page); lru_add_page_tail() local
1006 if (likely(PageLRU(page))) lru_add_page_tail()
1007 list_add_tail(&page_tail->lru, &page->lru); lru_add_page_tail()
1009 /* page reclaim is reclaiming a huge page */ lru_add_page_tail()
1015 * Head page has not yet been counted, as an hpage, lru_add_page_tail()
1026 if (!PageUnevictable(page)) lru_add_page_tail()
1031 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, __pagevec_lru_add_fn() argument
1034 int file = page_is_file_cache(page); __pagevec_lru_add_fn()
1035 int active = PageActive(page); __pagevec_lru_add_fn()
1036 enum lru_list lru = page_lru(page); __pagevec_lru_add_fn()
1038 VM_BUG_ON_PAGE(PageLRU(page), page); __pagevec_lru_add_fn() local
1040 SetPageLRU(page); __pagevec_lru_add_fn()
1041 add_page_to_lru_list(page, lruvec, lru); __pagevec_lru_add_fn()
1043 trace_mm_lru_insertion(page, lru); __pagevec_lru_add_fn()
1093 * passed on to page-only pagevec operations.
1100 struct page *page = pvec->pages[i]; pagevec_remove_exceptionals() local
1101 if (!radix_tree_exceptional_entry(page)) pagevec_remove_exceptionals()
1102 pvec->pages[j++] = page; pagevec_remove_exceptionals()
1111 * @start: The starting page index
418 pagevec_lru_move_fn(struct pagevec *pvec, void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), void *arg) pagevec_lru_move_fn() argument
H A Ddebug-pagealloc.c32 static inline void set_page_poison(struct page *page) set_page_poison() argument
36 page_ext = lookup_page_ext(page); set_page_poison()
40 static inline void clear_page_poison(struct page *page) clear_page_poison() argument
44 page_ext = lookup_page_ext(page); clear_page_poison()
48 static inline bool page_poison(struct page *page) page_poison() argument
52 page_ext = lookup_page_ext(page); page_poison()
56 static void poison_page(struct page *page) poison_page() argument
58 void *addr = kmap_atomic(page); poison_page()
60 set_page_poison(page); poison_page()
65 static void poison_pages(struct page *page, int n) poison_pages() argument
70 poison_page(page + i); poison_pages()
107 static void unpoison_page(struct page *page) unpoison_page() argument
111 if (!page_poison(page)) unpoison_page()
114 addr = kmap_atomic(page); unpoison_page()
116 clear_page_poison(page); unpoison_page()
120 static void unpoison_pages(struct page *page, int n) unpoison_pages() argument
125 unpoison_page(page + i); unpoison_pages()
128 void __kernel_map_pages(struct page *page, int numpages, int enable) __kernel_map_pages() argument
134 unpoison_pages(page, numpages); __kernel_map_pages()
136 poison_pages(page, numpages); __kernel_map_pages()
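The poison/unpoison helpers above are shown without their memset/verify bodies. As a hedged reconstruction of the idea only (the poison byte and both helper names below are assumptions, not taken from this listing), poisoning fills a free page with a known pattern through a temporary mapping, and the check verifies the pattern survived while the page was free:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

#define MY_PAGE_POISON 0xaa     /* assumed pattern; the kernel defines its own */

/* Sketch: fill a free page with poison so a use-after-free becomes visible. */
static void poison_one_page(struct page *page)
{
        void *addr = kmap_atomic(page);         /* temporary kernel mapping */

        memset(addr, MY_PAGE_POISON, PAGE_SIZE);
        kunmap_atomic(addr);
}

/* Sketch: check that nobody wrote to the page while it was free. */
static bool page_poison_intact(struct page *page)
{
        void *addr = kmap_atomic(page);
        bool ok;

        /* memchr_inv() returns NULL when every byte matches the pattern */
        ok = memchr_inv(addr, MY_PAGE_POISON, PAGE_SIZE) == NULL;
        kunmap_atomic(addr);
        return ok;
}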
H A Dpage_isolation.c6 #include <linux/page-isolation.h>
12 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages) set_migratetype_isolate() argument
20 zone = page_zone(page); set_migratetype_isolate()
24 pfn = page_to_pfn(page); set_migratetype_isolate()
48 if (!has_unmovable_pages(zone, page, arg.pages_found, set_migratetype_isolate()
60 int migratetype = get_pageblock_migratetype(page); set_migratetype_isolate()
62 set_pageblock_migratetype(page, MIGRATE_ISOLATE); set_migratetype_isolate()
64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); set_migratetype_isolate()
75 void unset_migratetype_isolate(struct page *page, unsigned migratetype) unset_migratetype_isolate() argument
79 struct page *isolated_page = NULL; unset_migratetype_isolate()
82 struct page *buddy; unset_migratetype_isolate()
84 zone = page_zone(page); unset_migratetype_isolate()
86 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) unset_migratetype_isolate()
92 * it is possible that there is free buddy page. unset_migratetype_isolate()
97 if (PageBuddy(page)) { unset_migratetype_isolate()
98 order = page_order(page); unset_migratetype_isolate()
100 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); unset_migratetype_isolate()
102 buddy = page + (buddy_idx - page_idx); unset_migratetype_isolate()
106 __isolate_free_page(page, order); unset_migratetype_isolate()
107 kernel_map_pages(page, (1 << order), 1); unset_migratetype_isolate()
108 set_page_refcounted(page); unset_migratetype_isolate()
109 isolated_page = page; unset_migratetype_isolate()
120 nr_pages = move_freepages_block(zone, page, migratetype); unset_migratetype_isolate()
123 set_pageblock_migratetype(page, migratetype); unset_migratetype_isolate()
131 static inline struct page * __first_valid_page()
144 * start_isolate_page_range() -- make page-allocation-type of range of pages
150 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
162 struct page *page; start_isolate_page_range() local
170 page = __first_valid_page(pfn, pageblock_nr_pages); start_isolate_page_range()
171 if (page && start_isolate_page_range()
172 set_migratetype_isolate(page, skip_hwpoisoned_pages)) { start_isolate_page_range()
194 struct page *page; undo_isolate_page_range() local
200 page = __first_valid_page(pfn, pageblock_nr_pages); undo_isolate_page_range()
201 if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE) undo_isolate_page_range()
203 unset_migratetype_isolate(page, migratetype); undo_isolate_page_range()
218 struct page *page; __test_page_isolated_in_pageblock() local
225 page = pfn_to_page(pfn); __test_page_isolated_in_pageblock()
226 if (PageBuddy(page)) { __test_page_isolated_in_pageblock()
230 * although the pageblock's migration type of the page __test_page_isolated_in_pageblock()
231 * is MIGRATE_ISOLATE. Catch it and move the page into __test_page_isolated_in_pageblock()
234 if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) { __test_page_isolated_in_pageblock()
235 struct page *end_page; __test_page_isolated_in_pageblock()
237 end_page = page + (1 << page_order(page)) - 1; __test_page_isolated_in_pageblock()
238 move_freepages(page_zone(page), page, end_page, __test_page_isolated_in_pageblock() local
241 pfn += 1 << page_order(page); __test_page_isolated_in_pageblock()
243 else if (page_count(page) == 0 && __test_page_isolated_in_pageblock()
244 get_freepage_migratetype(page) == MIGRATE_ISOLATE) __test_page_isolated_in_pageblock()
246 else if (skip_hwpoisoned_pages && PageHWPoison(page)) { __test_page_isolated_in_pageblock()
248 * The HWPoisoned page may be not in buddy __test_page_isolated_in_pageblock()
266 struct page *page; test_pages_isolated() local
276 page = __first_valid_page(pfn, pageblock_nr_pages); test_pages_isolated()
277 if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE) test_pages_isolated()
280 page = __first_valid_page(start_pfn, end_pfn - start_pfn); test_pages_isolated()
281 if ((pfn < end_pfn) || !page) test_pages_isolated()
284 zone = page_zone(page); test_pages_isolated()
292 struct page *alloc_migrate_target(struct page *page, unsigned long private, alloc_migrate_target() argument
302 if (PageHuge(page)) { alloc_migrate_target()
303 nodemask_t src = nodemask_of_node(page_to_nid(page)); alloc_migrate_target()
306 return alloc_huge_page_node(page_hstate(compound_head(page)), alloc_migrate_target()
307 next_node(page_to_nid(page), dst)); alloc_migrate_target()
310 if (PageHighMem(page)) alloc_migrate_target()
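The entry points excerpted above (set/unset of the isolate migratetype, the isolation test and the migration-target allocator) are normally driven as a sequence by callers such as alloc_contig_range() or memory hot-remove. A hedged sketch of that sequence over a pageblock-aligned PFN range, with migration of in-use pages and most error handling trimmed; the wrapper name is hypothetical.

#include <linux/mm.h>
#include <linux/page-isolation.h>

/* Sketch: isolate a PFN range, check it is really free, then undo. */
static int isolate_then_release(unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;

        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, false);
        if (ret)
                return ret;     /* some pageblock could not be isolated */

        /* ... migrate any in-use pages out of the range here ... */

        ret = test_pages_isolated(start_pfn, end_pfn, false);

        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;             /* 0 if the whole range was free and isolated */
}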
H A Dpage_io.c9 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
28 struct page *page, bio_end_io_t end_io) get_swap_bio()
34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); get_swap_bio()
36 bio->bi_io_vec[0].bv_page = page; get_swap_bio()
49 struct page *page = bio->bi_io_vec[0].bv_page; end_swap_bio_write() local
52 SetPageError(page); end_swap_bio_write()
54 * We failed to write the page out to swap-space. end_swap_bio_write()
55 * Re-dirty the page in order to avoid it being reclaimed. end_swap_bio_write()
61 set_page_dirty(page); end_swap_bio_write()
66 ClearPageReclaim(page); end_swap_bio_write()
68 end_page_writeback(page); end_swap_bio_write()
75 struct page *page = bio->bi_io_vec[0].bv_page; end_swap_bio_read() local
78 SetPageError(page); end_swap_bio_read()
79 ClearPageUptodate(page); end_swap_bio_read()
87 SetPageUptodate(page); end_swap_bio_read()
90 * There is no guarantee that the page is in swap cache - the software end_swap_bio_read()
92 * swapcache page. So we must check PG_swapcache before proceeding with end_swap_bio_read()
95 if (likely(PageSwapCache(page))) { end_swap_bio_read()
98 sis = page_swap_info(page); end_swap_bio_read()
102 * expecting that the page will be swapped out again. end_swap_bio_read()
103 * So we can avoid an unnecessary write if the page end_swap_bio_read()
112 * and make the VM-owned decompressed page *dirty*, end_swap_bio_read()
113 * so the page should be swapped out somewhere again if end_swap_bio_read()
121 entry.val = page_private(page); end_swap_bio_read()
124 SetPageDirty(page); end_swap_bio_read()
132 unlock_page(page); end_swap_bio_read()
194 if (page_no) { /* exclude the header page */ generic_swapfile_activate()
232 int swap_writepage(struct page *page, struct writeback_control *wbc) swap_writepage() argument
236 if (try_to_free_swap(page)) { swap_writepage()
237 unlock_page(page); swap_writepage()
240 if (frontswap_store(page) == 0) { swap_writepage()
241 set_page_writeback(page); swap_writepage()
242 unlock_page(page); swap_writepage()
243 end_page_writeback(page); swap_writepage()
246 ret = __swap_writepage(page, wbc, end_swap_bio_write); swap_writepage()
251 static sector_t swap_page_sector(struct page *page) swap_page_sector() argument
253 return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9); swap_page_sector()
256 int __swap_writepage(struct page *page, struct writeback_control *wbc, __swap_writepage() argument
261 struct swap_info_struct *sis = page_swap_info(page); __swap_writepage()
268 .bv_page = page, __swap_writepage()
276 kiocb.ki_pos = page_file_offset(page); __swap_writepage()
278 set_page_writeback(page); __swap_writepage()
279 unlock_page(page); __swap_writepage()
289 * Mark the page dirty and avoid __swap_writepage()
295 set_page_dirty(page); __swap_writepage()
296 ClearPageReclaim(page); __swap_writepage()
298 page_file_offset(page)); __swap_writepage()
300 end_page_writeback(page); __swap_writepage()
304 ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc); __swap_writepage()
311 bio = get_swap_bio(GFP_NOIO, page, end_write_func); __swap_writepage()
313 set_page_dirty(page); __swap_writepage()
314 unlock_page(page); __swap_writepage()
321 set_page_writeback(page); __swap_writepage()
322 unlock_page(page); __swap_writepage()
328 int swap_readpage(struct page *page) swap_readpage() argument
332 struct swap_info_struct *sis = page_swap_info(page); swap_readpage()
334 VM_BUG_ON_PAGE(!PageLocked(page), page); swap_readpage()
335 VM_BUG_ON_PAGE(PageUptodate(page), page); swap_readpage() local
336 if (frontswap_load(page) == 0) { swap_readpage()
337 SetPageUptodate(page); swap_readpage()
338 unlock_page(page); swap_readpage()
346 ret = mapping->a_ops->readpage(swap_file, page); swap_readpage()
352 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); swap_readpage()
359 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); swap_readpage()
361 unlock_page(page); swap_readpage()
371 int swap_set_page_dirty(struct page *page) swap_set_page_dirty() argument
373 struct swap_info_struct *sis = page_swap_info(page); swap_set_page_dirty()
377 return mapping->a_ops->set_page_dirty(page); swap_set_page_dirty()
379 return __set_page_dirty_no_writeback(page); swap_set_page_dirty()
27 get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io) get_swap_bio() argument
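swap_readpage() above expects a locked, not-yet-uptodate page that is already in the swap cache, and either completes it synchronously (frontswap) or submits a bio whose completion handler (end_swap_bio_read(), shown above) unlocks it. A rough caller-side sketch of a synchronous swap-in step; the helper name is illustrative and real callers overlap the I/O instead of waiting.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch: read one page back from swap and wait for the I/O to finish. */
static int swap_in_one_page(struct page *page)
{
        int err;

        BUG_ON(!PageLocked(page));      /* contract checked by swap_readpage() */

        err = swap_readpage(page);
        if (err)
                return err;

        wait_on_page_locked(page);      /* completion handler unlocks the page */
        return PageUptodate(page) ? 0 : -EIO;
}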
H A Dkmemcheck.c8 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) kmemcheck_alloc_shadow() argument
10 struct page *shadow; kmemcheck_alloc_shadow()
29 page[i].shadow = page_address(&shadow[i]); kmemcheck_alloc_shadow()
33 * this memory will trigger a page fault and let us analyze kmemcheck_alloc_shadow()
36 kmemcheck_hide_pages(page, pages); kmemcheck_alloc_shadow()
39 void kmemcheck_free_shadow(struct page *page, int order) kmemcheck_free_shadow() argument
41 struct page *shadow; kmemcheck_free_shadow()
45 if (!kmemcheck_page_is_tracked(page)) kmemcheck_free_shadow()
50 kmemcheck_show_pages(page, pages); kmemcheck_free_shadow()
52 shadow = virt_to_page(page[0].shadow); kmemcheck_free_shadow()
55 page[i].shadow = NULL; kmemcheck_free_shadow()
78 * will still get page faults on access, they just kmemcheck_slab_alloc()
79 * won't ever be flagged as uninitialized. If page kmemcheck_slab_alloc()
100 void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, kmemcheck_pagealloc_alloc() argument
117 kmemcheck_alloc_shadow(page, order, gfpflags, -1); kmemcheck_pagealloc_alloc()
120 kmemcheck_mark_initialized_pages(page, pages); kmemcheck_pagealloc_alloc()
122 kmemcheck_mark_uninitialized_pages(page, pages); kmemcheck_pagealloc_alloc()
H A Dfilemap.c55 * finished 'unifying' the page and buffer cache and SMP-threaded the
56 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
112 struct page *page, void *shadow) page_cache_tree_delete()
120 VM_BUG_ON(!PageLocked(page)); page_cache_tree_delete()
122 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); page_cache_tree_delete()
143 /* Clear tree tags for the removed page */ page_cache_tree_delete()
144 index = page->index; page_cache_tree_delete()
151 /* Delete page, swap shadow entry */ page_cache_tree_delete()
175 * Delete a page from the page cache and free it. Caller has to make
176 * sure the page is locked and that nobody else uses it - or that usage
179 void __delete_from_page_cache(struct page *page, void *shadow) __delete_from_page_cache() argument
181 struct address_space *mapping = page->mapping; __delete_from_page_cache()
183 trace_mm_filemap_delete_from_page_cache(page); __delete_from_page_cache()
187 * stale data around in the cleancache once our page is gone __delete_from_page_cache()
189 if (PageUptodate(page) && PageMappedToDisk(page)) __delete_from_page_cache()
190 cleancache_put_page(page); __delete_from_page_cache()
192 cleancache_invalidate_page(mapping, page); __delete_from_page_cache()
194 page_cache_tree_delete(mapping, page, shadow); __delete_from_page_cache()
196 page->mapping = NULL; __delete_from_page_cache()
197 /* Leave page->index set: truncation lookup relies upon it */ __delete_from_page_cache()
199 __dec_zone_page_state(page, NR_FILE_PAGES); __delete_from_page_cache()
200 if (PageSwapBacked(page)) __delete_from_page_cache()
201 __dec_zone_page_state(page, NR_SHMEM); __delete_from_page_cache()
202 BUG_ON(page_mapped(page)); __delete_from_page_cache()
205 * At this point page must be either written or cleaned by truncate. __delete_from_page_cache()
206 * Dirty page here signals a bug and loss of unwritten data. __delete_from_page_cache()
208 * This fixes dirty accounting after removing the page entirely but __delete_from_page_cache()
209 * leaves PageDirty set: it has no effect for truncated page and __delete_from_page_cache()
210 * anyway will be cleared before returning page into buddy allocator. __delete_from_page_cache()
212 if (WARN_ON_ONCE(PageDirty(page))) __delete_from_page_cache()
213 account_page_cleaned(page, mapping); __delete_from_page_cache()
217 * delete_from_page_cache - delete page from page cache
218 * @page: the page which the kernel is trying to remove from page cache
220 * This must be called only on pages that have been verified to be in the page
221 * cache and locked. It will never put the page into the free list, the caller
222 * has a reference on the page.
224 void delete_from_page_cache(struct page *page) delete_from_page_cache() argument
226 struct address_space *mapping = page->mapping; delete_from_page_cache()
227 void (*freepage)(struct page *); delete_from_page_cache()
229 BUG_ON(!PageLocked(page)); delete_from_page_cache()
233 __delete_from_page_cache(page, NULL); delete_from_page_cache()
237 freepage(page); delete_from_page_cache()
238 page_cache_release(page); delete_from_page_cache()
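delete_from_page_cache() requires the page to be locked and still attached to the mapping, so truncation-style callers recheck page->mapping under the page lock before removing it. A minimal hedged sketch of that shape (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch: remove one page from its page cache mapping, tolerating the
 * race where someone else truncated it first.
 */
static void remove_one_page(struct address_space *mapping, struct page *page)
{
        lock_page(page);
        if (page->mapping == mapping) {
                /* drops the page cache reference; ours is still held */
                delete_from_page_cache(page);
        }
        unlock_page(page);
        page_cache_release(page);       /* drop our own reference */
}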
267 * these two operations is that if a dirty page/buffer is encountered, it must
349 struct page *page = pvec.pages[i]; filemap_fdatawait_range() local
352 if (page->index > end) filemap_fdatawait_range()
355 wait_on_page_writeback(page); filemap_fdatawait_range()
356 if (TestClearPageError(page)) filemap_fdatawait_range()
447 * replace_page_cache_page - replace a pagecache page with a new one
448 * @old: page to be replaced
449 * @new: page to replace with
452 * This function replaces a page in the pagecache with a new one. On
453 * success it acquires the pagecache reference for the new page and
454 * drops it for the old page. Both the old and new pages must be
455 * locked. This function does not add the new page to the LRU, the
461 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) replace_page_cache_page()
472 void (*freepage)(struct page *); replace_page_cache_page()
502 struct page *page, void **shadowp) page_cache_tree_insert()
508 error = __radix_tree_create(&mapping->page_tree, page->index, page_cache_tree_insert()
524 radix_tree_replace_slot(slot, page); page_cache_tree_insert()
543 static int __add_to_page_cache_locked(struct page *page, __add_to_page_cache_locked() argument
548 int huge = PageHuge(page); __add_to_page_cache_locked()
552 VM_BUG_ON_PAGE(!PageLocked(page), page); __add_to_page_cache_locked()
553 VM_BUG_ON_PAGE(PageSwapBacked(page), page); __add_to_page_cache_locked() local
556 error = mem_cgroup_try_charge(page, current->mm, __add_to_page_cache_locked()
565 mem_cgroup_cancel_charge(page, memcg); __add_to_page_cache_locked()
569 page_cache_get(page); __add_to_page_cache_locked()
570 page->mapping = mapping; __add_to_page_cache_locked()
571 page->index = offset; __add_to_page_cache_locked()
574 error = page_cache_tree_insert(mapping, page, shadowp); __add_to_page_cache_locked()
578 __inc_zone_page_state(page, NR_FILE_PAGES); __add_to_page_cache_locked()
581 mem_cgroup_commit_charge(page, memcg, false); __add_to_page_cache_locked()
582 trace_mm_filemap_add_to_page_cache(page); __add_to_page_cache_locked()
585 page->mapping = NULL; __add_to_page_cache_locked()
586 /* Leave page->index set: truncation relies upon it */ __add_to_page_cache_locked()
589 mem_cgroup_cancel_charge(page, memcg); __add_to_page_cache_locked()
590 page_cache_release(page); __add_to_page_cache_locked()
595 * add_to_page_cache_locked - add a locked page to the pagecache
596 * @page: page to add
597 * @mapping: the page's address_space
598 * @offset: page index
599 * @gfp_mask: page allocation mode
601 * This function is used to add a page to the pagecache. It must be locked.
602 * This function does not add the page to the LRU. The caller must do that.
604 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, add_to_page_cache_locked() argument
607 return __add_to_page_cache_locked(page, mapping, offset, add_to_page_cache_locked()
612 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, add_to_page_cache_lru() argument
618 __set_page_locked(page); add_to_page_cache_lru()
619 ret = __add_to_page_cache_locked(page, mapping, offset, add_to_page_cache_lru()
622 __clear_page_locked(page); add_to_page_cache_lru()
625 * The page might have been evicted from cache only add_to_page_cache_lru()
627 * any other repeatedly accessed page. add_to_page_cache_lru()
630 SetPageActive(page); add_to_page_cache_lru()
631 workingset_activation(page); add_to_page_cache_lru()
633 ClearPageActive(page); add_to_page_cache_lru()
634 lru_cache_add(page); add_to_page_cache_lru()
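add_to_page_cache_lru() is the usual way a new page enters both the radix tree and the LRU; the read and readahead paths pair it with a fresh allocation. A hedged sketch of that pairing (helper name illustrative); note that on success the page comes back locked, and -EEXIST means another thread won the race for that index.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Sketch: allocate a page and insert it at @index, or report why not. */
static struct page *add_new_cache_page(struct address_space *mapping,
                                       pgoff_t index)
{
        struct page *page = page_cache_alloc_cold(mapping);
        int err;

        if (!page)
                return ERR_PTR(-ENOMEM);

        err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
        if (err) {
                /* -EEXIST: someone else added a page at this index first */
                page_cache_release(page);
                return ERR_PTR(err);
        }
        return page;    /* returned locked; caller unlocks after filling it */
}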
641 struct page *__page_cache_alloc(gfp_t gfp) __page_cache_alloc()
644 struct page *page; __page_cache_alloc() local
651 page = alloc_pages_exact_node(n, gfp, 0); __page_cache_alloc()
652 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); __page_cache_alloc()
654 return page; __page_cache_alloc()
667 * sure the appropriate page became available, this saves space
671 wait_queue_head_t *page_waitqueue(struct page *page) page_waitqueue() argument
673 const struct zone *zone = page_zone(page); page_waitqueue()
675 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; page_waitqueue()
679 void wait_on_page_bit(struct page *page, int bit_nr) wait_on_page_bit() argument
681 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); wait_on_page_bit()
683 if (test_bit(bit_nr, &page->flags)) wait_on_page_bit()
684 __wait_on_bit(page_waitqueue(page), &wait, bit_wait_io, wait_on_page_bit()
689 int wait_on_page_bit_killable(struct page *page, int bit_nr) wait_on_page_bit_killable() argument
691 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); wait_on_page_bit_killable()
693 if (!test_bit(bit_nr, &page->flags)) wait_on_page_bit_killable()
696 return __wait_on_bit(page_waitqueue(page), &wait, wait_on_page_bit_killable()
700 int wait_on_page_bit_killable_timeout(struct page *page, wait_on_page_bit_killable_timeout() argument
703 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); wait_on_page_bit_killable_timeout()
706 if (!test_bit(bit_nr, &page->flags)) wait_on_page_bit_killable_timeout()
708 return __wait_on_bit(page_waitqueue(page), &wait, wait_on_page_bit_killable_timeout()
714 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
715 * @page: Page defining the wait queue of interest
718 * Add an arbitrary @waiter to the wait queue for the nominated @page.
720 void add_page_wait_queue(struct page *page, wait_queue_t *waiter) add_page_wait_queue() argument
722 wait_queue_head_t *q = page_waitqueue(page); add_page_wait_queue()
732 * unlock_page - unlock a locked page
733 * @page: the page
735 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
743 void unlock_page(struct page *page) unlock_page() argument
745 VM_BUG_ON_PAGE(!PageLocked(page), page); unlock_page()
746 clear_bit_unlock(PG_locked, &page->flags); unlock_page()
748 wake_up_page(page, PG_locked); unlock_page()
753 * end_page_writeback - end writeback against a page
754 * @page: the page
756 void end_page_writeback(struct page *page) end_page_writeback() argument
761 * shuffle a page marked for immediate reclaim is too mild to end_page_writeback()
763 * every page writeback. end_page_writeback()
765 if (PageReclaim(page)) { end_page_writeback()
766 ClearPageReclaim(page); end_page_writeback()
767 rotate_reclaimable_page(page); end_page_writeback()
770 if (!test_clear_page_writeback(page)) end_page_writeback()
774 wake_up_page(page, PG_writeback); end_page_writeback()
779 * After completing I/O on a page, call this routine to update the page
782 void page_endio(struct page *page, int rw, int err) page_endio() argument
786 SetPageUptodate(page); page_endio()
788 ClearPageUptodate(page); page_endio()
789 SetPageError(page); page_endio()
791 unlock_page(page); page_endio()
794 SetPageError(page); page_endio()
795 if (page->mapping) page_endio()
796 mapping_set_error(page->mapping, err); page_endio()
798 end_page_writeback(page); page_endio()
804 * __lock_page - get a lock on the page, assuming we need to sleep to get it
805 * @page: the page to lock
807 void __lock_page(struct page *page) __lock_page() argument
809 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __lock_page()
811 __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io, __lock_page()
816 int __lock_page_killable(struct page *page) __lock_page_killable() argument
818 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __lock_page_killable()
820 return __wait_on_bit_lock(page_waitqueue(page), &wait, __lock_page_killable()
827 * 1 - page is locked; mmap_sem is still held.
828 * 0 - page is not locked.
834 * with the page locked and the mmap_sem unperturbed.
836 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, __lock_page_or_retry() argument
849 wait_on_page_locked_killable(page); __lock_page_or_retry()
851 wait_on_page_locked(page); __lock_page_or_retry()
857 ret = __lock_page_killable(page); __lock_page_or_retry()
863 __lock_page(page); __lock_page_or_retry()
895 struct page *page; page_cache_next_hole() local
897 page = radix_tree_lookup(&mapping->page_tree, index); page_cache_next_hole()
898 if (!page || radix_tree_exceptional_entry(page)) page_cache_next_hole()
936 struct page *page; page_cache_prev_hole() local
938 page = radix_tree_lookup(&mapping->page_tree, index); page_cache_prev_hole()
939 if (!page || radix_tree_exceptional_entry(page)) page_cache_prev_hole()
951 * find_get_entry - find and get a page cache entry
953 * @offset: the page cache index
955 * Looks up the page cache slot at @mapping & @offset. If there is a
956 * page cache page, it is returned with an increased refcount.
958 * If the slot holds a shadow entry of a previously evicted page, or a
963 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) find_get_entry()
966 struct page *page; find_get_entry() local
970 page = NULL; find_get_entry()
973 page = radix_tree_deref_slot(pagep); find_get_entry()
974 if (unlikely(!page)) find_get_entry()
976 if (radix_tree_exception(page)) { find_get_entry()
977 if (radix_tree_deref_retry(page)) find_get_entry()
980 * A shadow entry of a recently evicted page, find_get_entry()
982 * it without attempting to raise page count. find_get_entry()
986 if (!page_cache_get_speculative(page)) find_get_entry()
990 * Has the page moved? find_get_entry()
994 if (unlikely(page != *pagep)) { find_get_entry()
995 page_cache_release(page); find_get_entry()
1002 return page; find_get_entry()
1007 * find_lock_entry - locate, pin and lock a page cache entry
1009 * @offset: the page cache index
1011 * Looks up the page cache slot at @mapping & @offset. If there is a
1012 * page cache page, it is returned locked and with an increased
1015 * If the slot holds a shadow entry of a previously evicted page, or a
1022 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) find_lock_entry()
1024 struct page *page; find_lock_entry() local
1027 page = find_get_entry(mapping, offset); find_lock_entry()
1028 if (page && !radix_tree_exception(page)) { find_lock_entry()
1029 lock_page(page); find_lock_entry()
1030 /* Has the page been truncated? */ find_lock_entry()
1031 if (unlikely(page->mapping != mapping)) { find_lock_entry()
1032 unlock_page(page); find_lock_entry()
1033 page_cache_release(page); find_lock_entry()
1036 VM_BUG_ON_PAGE(page->index != offset, page); find_lock_entry()
1038 return page; find_lock_entry()
1043 * pagecache_get_page - find and get a page reference
1045 * @offset: the page index
1047 * @gfp_mask: gfp mask to use for the page cache data page allocation
1049 * Looks up the page cache slot at @mapping & @offset.
1051 * PCG flags modify how the page is returned.
1053 * FGP_ACCESSED: the page will be marked accessed
1055 * FGP_CREAT: If page is not present then a new page is allocated using
1056 * @gfp_mask and added to the page cache and the VM's LRU
1057 * list. The page is returned locked and with an increased
1063 * If there is a page cache page, it is returned with an increased refcount.
1065 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, pagecache_get_page()
1068 struct page *page; pagecache_get_page() local
1071 page = find_get_entry(mapping, offset); pagecache_get_page()
1072 if (radix_tree_exceptional_entry(page)) pagecache_get_page()
1073 page = NULL; pagecache_get_page()
1074 if (!page) pagecache_get_page()
1079 if (!trylock_page(page)) { pagecache_get_page()
1080 page_cache_release(page); pagecache_get_page()
1084 lock_page(page); pagecache_get_page()
1087 /* Has the page been truncated? */ pagecache_get_page()
1088 if (unlikely(page->mapping != mapping)) { pagecache_get_page()
1089 unlock_page(page); pagecache_get_page()
1090 page_cache_release(page); pagecache_get_page()
1093 VM_BUG_ON_PAGE(page->index != offset, page); pagecache_get_page()
1096 if (page && (fgp_flags & FGP_ACCESSED)) pagecache_get_page()
1097 mark_page_accessed(page); pagecache_get_page()
1100 if (!page && (fgp_flags & FGP_CREAT)) { pagecache_get_page()
1107 page = __page_cache_alloc(gfp_mask); pagecache_get_page()
1108 if (!page) pagecache_get_page()
1116 __SetPageReferenced(page); pagecache_get_page()
1118 err = add_to_page_cache_lru(page, mapping, offset, pagecache_get_page()
1121 page_cache_release(page); pagecache_get_page()
1122 page = NULL; pagecache_get_page()
1128 return page; pagecache_get_page()
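The FGP_* flags above compose; the classic find_or_create_page() behaviour, for instance, is FGP_LOCK | FGP_ACCESSED | FGP_CREAT. A hedged sketch of a caller that wants a locked, LRU-resident page at a given index (the wrapper name is illustrative):

#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Sketch: find or create a locked page cache page at @index. */
static struct page *get_locked_page(struct address_space *mapping,
                                    pgoff_t index)
{
        struct page *page;

        page = pagecache_get_page(mapping, index,
                                  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                  mapping_gfp_mask(mapping));
        if (!page)
                return NULL;    /* allocation or insertion failed */

        /* page is locked and holds an elevated refcount here */
        return page;
}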
1135 * @start: The starting page cache index
1145 * The search returns a group of mapping-contiguous page cache entries
1157 struct page **entries, pgoff_t *indices) find_get_entries()
1169 struct page *page; find_get_entries() local
1171 page = radix_tree_deref_slot(slot); find_get_entries()
1172 if (unlikely(!page)) find_get_entries()
1174 if (radix_tree_exception(page)) { find_get_entries()
1175 if (radix_tree_deref_retry(page)) find_get_entries()
1178 * A shadow entry of a recently evicted page, find_get_entries()
1180 * it without attempting to raise page count. find_get_entries()
1184 if (!page_cache_get_speculative(page)) find_get_entries()
1187 /* Has the page moved? */ find_get_entries()
1188 if (unlikely(page != *slot)) { find_get_entries()
1189 page_cache_release(page); find_get_entries()
1194 entries[ret] = page; find_get_entries()
1205 * @start: The starting page index
1219 unsigned int nr_pages, struct page **pages) find_get_pages()
1231 struct page *page; find_get_pages() local
1233 page = radix_tree_deref_slot(slot); find_get_pages()
1234 if (unlikely(!page)) find_get_pages()
1237 if (radix_tree_exception(page)) { find_get_pages()
1238 if (radix_tree_deref_retry(page)) { find_get_pages()
1248 * A shadow entry of a recently evicted page, find_get_pages()
1255 if (!page_cache_get_speculative(page)) find_get_pages()
1258 /* Has the page moved? */ find_get_pages()
1259 if (unlikely(page != *slot)) { find_get_pages()
1260 page_cache_release(page); find_get_pages()
1264 pages[ret] = page; find_get_pages()
1276 * @index: The starting page index
1286 unsigned int nr_pages, struct page **pages) find_get_pages_contig()
1298 struct page *page; find_get_pages_contig() local
1300 page = radix_tree_deref_slot(slot); find_get_pages_contig()
1302 if (unlikely(!page)) find_get_pages_contig()
1305 if (radix_tree_exception(page)) { find_get_pages_contig()
1306 if (radix_tree_deref_retry(page)) { find_get_pages_contig()
1315 * A shadow entry of a recently evicted page, find_get_pages_contig()
1322 if (!page_cache_get_speculative(page)) find_get_pages_contig()
1325 /* Has the page moved? */ find_get_pages_contig()
1326 if (unlikely(page != *slot)) { find_get_pages_contig()
1327 page_cache_release(page); find_get_pages_contig()
1336 if (page->mapping == NULL || page->index != iter.index) { find_get_pages_contig()
1337 page_cache_release(page); find_get_pages_contig()
1341 pages[ret] = page; find_get_pages_contig()
1353 * @index: the starting page index
1359 * @tag. We update @index to index the next page for the traversal.
1362 int tag, unsigned int nr_pages, struct page **pages) find_get_pages_tag()
1375 struct page *page; find_get_pages_tag() local
1377 page = radix_tree_deref_slot(slot); find_get_pages_tag()
1378 if (unlikely(!page)) find_get_pages_tag()
1381 if (radix_tree_exception(page)) { find_get_pages_tag()
1382 if (radix_tree_deref_retry(page)) { find_get_pages_tag()
1391 * A shadow entry of a recently evicted page. find_get_pages_tag()
1396 * time, so there is a sizable window for page find_get_pages_tag()
1397 * reclaim to evict a page we saw tagged. find_get_pages_tag()
1404 if (!page_cache_get_speculative(page)) find_get_pages_tag()
1407 /* Has the page moved? */ find_get_pages_tag()
1408 if (unlikely(page != *slot)) { find_get_pages_tag()
1409 page_cache_release(page); find_get_pages_tag()
1413 pages[ret] = page; find_get_pages_tag()
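find_get_pages_tag() (usually reached through pagevec_lookup_tag()) is how writeback visits only the dirty pages of a mapping instead of scanning every index. A hedged sketch of that loop shape, with the per-page work elided and the function name invented for illustration:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Sketch: visit every dirty page of @mapping in index order. */
static void for_each_dirty_page(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned nr;
        int i;

        pagevec_init(&pvec, 0);
        while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
                                        PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
                for (i = 0; i < nr; i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        /* ... recheck mapping/dirty state and write the page ... */
                        unlock_page(page);
                }
                pagevec_release(&pvec);  /* drops the references taken above */
                cond_resched();
        }
}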
1470 unsigned long offset; /* offset into pagecache page */ do_generic_file_read()
1481 struct page *page; do_generic_file_read() local
1488 page = find_get_page(mapping, index); do_generic_file_read()
1489 if (!page) { do_generic_file_read()
1493 page = find_get_page(mapping, index); do_generic_file_read()
1494 if (unlikely(page == NULL)) do_generic_file_read()
1497 if (PageReadahead(page)) { do_generic_file_read()
1499 ra, filp, page, do_generic_file_read()
1502 if (!PageUptodate(page)) { do_generic_file_read()
1506 if (!trylock_page(page)) do_generic_file_read()
1509 if (!page->mapping) do_generic_file_read()
1511 if (!mapping->a_ops->is_partially_uptodate(page, do_generic_file_read()
1514 unlock_page(page); do_generic_file_read()
1518 * i_size must be checked after we know the page is Uptodate. do_generic_file_read()
1522 * part of the page is not copied back to userspace (unless do_generic_file_read()
1529 page_cache_release(page); do_generic_file_read()
1533 /* nr is the maximum number of bytes to copy from this page */ do_generic_file_read()
1538 page_cache_release(page); do_generic_file_read()
1544 /* If users can be writing to this page using arbitrary do_generic_file_read()
1546 * before reading the page on the kernel side. do_generic_file_read()
1549 flush_dcache_page(page); do_generic_file_read()
1552 * When a sequential read accesses a page several times, do_generic_file_read()
1556 mark_page_accessed(page); do_generic_file_read()
1560 * Ok, we have the page, and it's up-to-date, so do_generic_file_read()
1564 ret = copy_page_to_iter(page, offset, nr, iter); do_generic_file_read()
1570 page_cache_release(page); do_generic_file_read()
1581 /* Get exclusive access to the page ... */ do_generic_file_read()
1582 error = lock_page_killable(page); do_generic_file_read()
1588 if (!page->mapping) { do_generic_file_read()
1589 unlock_page(page); do_generic_file_read()
1590 page_cache_release(page); do_generic_file_read()
1595 if (PageUptodate(page)) { do_generic_file_read()
1596 unlock_page(page); do_generic_file_read()
1606 ClearPageError(page); do_generic_file_read()
1607 /* Start the actual read. The read will unlock the page. */ do_generic_file_read()
1608 error = mapping->a_ops->readpage(filp, page); do_generic_file_read()
1612 page_cache_release(page); do_generic_file_read()
1619 if (!PageUptodate(page)) { do_generic_file_read()
1620 error = lock_page_killable(page); do_generic_file_read()
1623 if (!PageUptodate(page)) { do_generic_file_read()
1624 if (page->mapping == NULL) { do_generic_file_read()
1628 unlock_page(page); do_generic_file_read()
1629 page_cache_release(page); do_generic_file_read()
1632 unlock_page(page); do_generic_file_read()
1637 unlock_page(page); do_generic_file_read()
1644 page_cache_release(page); do_generic_file_read()
1650 * page.. do_generic_file_read()
1652 page = page_cache_alloc_cold(mapping); do_generic_file_read()
1653 if (!page) { do_generic_file_read()
1657 error = add_to_page_cache_lru(page, mapping, do_generic_file_read()
1660 page_cache_release(page); do_generic_file_read()
1686 * that can use the page cache directly.
1741 * page_cache_read - adds requested page to the page cache if not already there
1743 * @offset: page index
1745 * This adds the requested page to the page cache if it isn't already there,
1751 struct page *page; page_cache_read() local
1755 page = page_cache_alloc_cold(mapping); page_cache_read()
1756 if (!page) page_cache_read()
1759 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); page_cache_read()
1761 ret = mapping->a_ops->readpage(file, page); page_cache_read()
1765 page_cache_release(page); page_cache_read()
1776 * a page in the page cache at all.
1820 * Asynchronous readahead happens when we find the page and PG_readahead,
1826 struct page *page, do_async_mmap_readahead()
1836 if (PageReadahead(page)) do_async_mmap_readahead()
1838 page, offset, ra->ra_pages); do_async_mmap_readahead()
1842 * filemap_fault - read in file data for page fault handling
1847 * mapped memory region to read in file data during a page fault.
1850 * it in the page cache, and handles the special cases reasonably without
1873 struct page *page; filemap_fault() local
1882 * Do we have something in the page cache already? filemap_fault()
1884 page = find_get_page(mapping, offset); filemap_fault()
1885 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { filemap_fault()
1887 * We found the page, so try async readahead before filemap_fault()
1890 do_async_mmap_readahead(vma, ra, file, page, offset); filemap_fault()
1891 } else if (!page) { filemap_fault()
1892 /* No page in the page cache at all */ filemap_fault()
1898 page = find_get_page(mapping, offset); filemap_fault()
1899 if (!page) filemap_fault()
1903 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { filemap_fault()
1904 page_cache_release(page); filemap_fault()
1909 if (unlikely(page->mapping != mapping)) { filemap_fault()
1910 unlock_page(page); filemap_fault()
1911 put_page(page); filemap_fault()
1914 VM_BUG_ON_PAGE(page->index != offset, page); filemap_fault()
1917 * We have a locked page in the page cache, now we need to check filemap_fault()
1920 if (unlikely(!PageUptodate(page))) filemap_fault()
1924 * Found the page and have a reference on it. filemap_fault()
1925 * We must recheck i_size under page lock. filemap_fault()
1929 unlock_page(page); filemap_fault()
1930 page_cache_release(page); filemap_fault()
1934 vmf->page = page; filemap_fault()
1945 * The page we want has now been added to the page cache. filemap_fault()
1963 * Umm, take care of errors if the page isn't up-to-date. filemap_fault()
1968 ClearPageError(page); filemap_fault()
1969 error = mapping->a_ops->readpage(file, page); filemap_fault()
1971 wait_on_page_locked(page); filemap_fault()
1972 if (!PageUptodate(page)) filemap_fault()
1975 page_cache_release(page); filemap_fault()
1993 struct page *page; filemap_map_pages() local
2003 page = radix_tree_deref_slot(slot); filemap_map_pages()
2004 if (unlikely(!page)) filemap_map_pages()
2006 if (radix_tree_exception(page)) { filemap_map_pages()
2007 if (radix_tree_deref_retry(page)) filemap_map_pages()
2013 if (!page_cache_get_speculative(page)) filemap_map_pages()
2016 /* Has the page moved? */ filemap_map_pages()
2017 if (unlikely(page != *slot)) { filemap_map_pages()
2018 page_cache_release(page); filemap_map_pages()
2022 if (!PageUptodate(page) || filemap_map_pages()
2023 PageReadahead(page) || filemap_map_pages()
2024 PageHWPoison(page)) filemap_map_pages()
2026 if (!trylock_page(page)) filemap_map_pages()
2029 if (page->mapping != mapping || !PageUptodate(page)) filemap_map_pages()
2033 if (page->index >= size >> PAGE_CACHE_SHIFT) filemap_map_pages()
2036 pte = vmf->pte + page->index - vmf->pgoff; filemap_map_pages()
2042 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; filemap_map_pages()
2043 do_set_pte(vma, addr, page, pte, false, false); filemap_map_pages()
2044 unlock_page(page); filemap_map_pages()
2047 unlock_page(page); filemap_map_pages()
2049 page_cache_release(page); filemap_map_pages()
2060 struct page *page = vmf->page; filemap_page_mkwrite() local
2066 lock_page(page); filemap_page_mkwrite()
2067 if (page->mapping != inode->i_mapping) { filemap_page_mkwrite()
2068 unlock_page(page); filemap_page_mkwrite()
2073 * We mark the page dirty already here so that when freeze is in filemap_page_mkwrite()
2075 * see the dirty page and writeprotect it again. filemap_page_mkwrite()
2077 set_page_dirty(page); filemap_page_mkwrite()
2078 wait_for_stable_page(page); filemap_page_mkwrite()
2127 static struct page *wait_on_page_read(struct page *page) wait_on_page_read() argument
2129 if (!IS_ERR(page)) { wait_on_page_read()
2130 wait_on_page_locked(page); wait_on_page_read()
2131 if (!PageUptodate(page)) { wait_on_page_read()
2132 page_cache_release(page); wait_on_page_read()
2133 page = ERR_PTR(-EIO); wait_on_page_read()
2136 return page; wait_on_page_read()
2139 static struct page *__read_cache_page(struct address_space *mapping, __read_cache_page()
2141 int (*filler)(void *, struct page *), __read_cache_page()
2145 struct page *page; __read_cache_page() local
2148 page = find_get_page(mapping, index); __read_cache_page()
2149 if (!page) { __read_cache_page()
2150 page = __page_cache_alloc(gfp | __GFP_COLD); __read_cache_page()
2151 if (!page) __read_cache_page()
2153 err = add_to_page_cache_lru(page, mapping, index, gfp); __read_cache_page()
2155 page_cache_release(page); __read_cache_page()
2161 err = filler(data, page); __read_cache_page()
2163 page_cache_release(page); __read_cache_page()
2164 page = ERR_PTR(err); __read_cache_page()
2166 page = wait_on_page_read(page); __read_cache_page()
2169 return page; __read_cache_page()
2172 static struct page *do_read_cache_page(struct address_space *mapping, do_read_cache_page()
2174 int (*filler)(void *, struct page *), do_read_cache_page()
2179 struct page *page; do_read_cache_page() local
2183 page = __read_cache_page(mapping, index, filler, data, gfp); do_read_cache_page()
2184 if (IS_ERR(page)) do_read_cache_page()
2185 return page; do_read_cache_page()
2186 if (PageUptodate(page)) do_read_cache_page()
2189 lock_page(page); do_read_cache_page()
2190 if (!page->mapping) { do_read_cache_page()
2191 unlock_page(page); do_read_cache_page()
2192 page_cache_release(page); do_read_cache_page()
2195 if (PageUptodate(page)) { do_read_cache_page()
2196 unlock_page(page); do_read_cache_page()
2199 err = filler(data, page); do_read_cache_page()
2201 page_cache_release(page); do_read_cache_page()
2204 page = wait_on_page_read(page); do_read_cache_page()
2205 if (IS_ERR(page)) do_read_cache_page()
2206 return page; do_read_cache_page()
2209 mark_page_accessed(page); do_read_cache_page()
2210 return page; do_read_cache_page()
2214 * read_cache_page - read into page cache, fill it if needed
2215 * @mapping: the page's address_space
2216 * @index: the page index
2218 * @data: first arg to filler(data, page) function, often left as NULL
2220 * Read into the page cache. If a page already exists, and PageUptodate() is
2221 * not set, try to fill the page and wait for it to become unlocked.
2223 * If the page does not get brought uptodate, return -EIO.
2225 struct page *read_cache_page(struct address_space *mapping, read_cache_page()
2227 int (*filler)(void *, struct page *), read_cache_page()
2235 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2236 * @mapping: the page's address_space
2237 * @index: the page index
2238 * @gfp: the page allocator flags to use if allocating
2241 * any new page allocations done using the specified allocation flags.
2243 * If the page does not get brought uptodate, return -EIO.
2245 struct page *read_cache_page_gfp(struct address_space *mapping, read_cache_page_gfp()
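read_cache_page() takes a filler callback that is handed the (locked) page to populate; filesystems usually pass their readpage method via the read_mapping_page() wrapper. A hedged sketch with a trivial custom filler; both function names below are illustrative, and the filler is responsible for unlocking the page once it is uptodate or in error.

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Sketch: a filler that just zeroes the page and marks it uptodate. */
static int zero_filler(void *data, struct page *page)
{
        clear_highpage(page);
        SetPageUptodate(page);
        unlock_page(page);      /* the filler must unlock the page */
        return 0;
}

/* Sketch: read (or synthesize) page @index and return it uptodate. */
static struct page *get_uptodate_page(struct address_space *mapping,
                                      pgoff_t index)
{
        struct page *page = read_cache_page(mapping, index, zero_filler, NULL);

        if (IS_ERR(page))
                return page;    /* -EIO etc. from the filler or the wait */
        /* page comes back with a reference held and PageUptodate set */
        return page;
}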
2313 struct page **pagep, void **fsdata) pagecache_write_begin()
2324 struct page *page, void *fsdata) pagecache_write_end()
2328 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); pagecache_write_end()
2352 * the new data. We invalidate clean cached page from the region we're generic_file_direct_write()
2360 * If a page can not be invalidated, return 0 to fall back generic_file_direct_write()
2401 * Find or create a page at the given pagecache position. Return the locked
2402 * page. This function is specifically for buffered writes.
2404 struct page *grab_cache_page_write_begin(struct address_space *mapping, grab_cache_page_write_begin()
2407 struct page *page; grab_cache_page_write_begin() local
2413 page = pagecache_get_page(mapping, index, fgp_flags, grab_cache_page_write_begin()
2415 if (page) grab_cache_page_write_begin()
2416 wait_for_stable_page(page); grab_cache_page_write_begin()
2418 return page; grab_cache_page_write_begin()
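grab_cache_page_write_begin() is what simple filesystems use in their ->write_begin to obtain a locked, write-stable page; simple_write_begin() in fs/libfs.c is essentially this pattern. A hedged sketch (the zero-fill of a partially written, not-uptodate page is simplified, and the function name is illustrative):

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Sketch: a minimal ->write_begin built on grab_cache_page_write_begin(). */
static int sketch_write_begin(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned flags,
                              struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;          /* returned locked; ->write_end unlocks it */

        if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
                unsigned from = pos & (PAGE_CACHE_SIZE - 1);

                /* partial write into a page with no backing data yet */
                zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
        }
        return 0;
}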
2438 struct page *page; generic_perform_write() local
2439 unsigned long offset; /* Offset into pagecache page */ generic_perform_write()
2440 unsigned long bytes; /* Bytes to write to page */ generic_perform_write()
2450 * Bring in the user page that we will copy from _first_. generic_perform_write()
2452 * same page as we're writing to, without it being marked generic_perform_write()
2470 &page, &fsdata); generic_perform_write()
2475 flush_dcache_page(page); generic_perform_write()
2477 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); generic_perform_write()
2478 flush_dcache_page(page); generic_perform_write()
2481 page, fsdata); generic_perform_write()
2538 /* We can write back this queue in page reclaim */ __generic_file_write_iter()
2557 * page-cache pages correctly). __generic_file_write_iter()
2575 * We need to ensure that the page cache pages are written to __generic_file_write_iter()
2637 * try_to_release_page() - release old fs-specific metadata on a page
2639 * @page: the page which the kernel is trying to free
2642 * The address_space is to try to release any data against the page
2643 * (presumably at page->private). If the release was successful, return `1'.
2646 * This may also be called if PG_fscache is set on a page, indicating that the
2647 * page is known to the local caching routines.
2650 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
2653 int try_to_release_page(struct page *page, gfp_t gfp_mask) try_to_release_page() argument
2655 struct address_space * const mapping = page->mapping; try_to_release_page()
2657 BUG_ON(!PageLocked(page)); try_to_release_page()
2658 if (PageWriteback(page)) try_to_release_page()
2662 return mapping->a_ops->releasepage(page, gfp_mask); try_to_release_page()
2663 return try_to_free_buffers(page); try_to_release_page()
111 page_cache_tree_delete(struct address_space *mapping, struct page *page, void *shadow) page_cache_tree_delete() argument
501 page_cache_tree_insert(struct address_space *mapping, struct page *page, void **shadowp) page_cache_tree_insert() argument
1823 do_async_mmap_readahead(struct vm_area_struct *vma, struct file_ra_state *ra, struct file *file, struct page *page, pgoff_t offset) do_async_mmap_readahead() argument
2322 pagecache_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) pagecache_write_end() argument
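try_to_release_page() above is the last gate before invalidation can drop a page that still has fs-private data attached; callers check page_has_private() first and treat a 0 return as "cannot free right now". A hedged sketch of that decision (the helper name is hypothetical, and the page is assumed locked as the BUG_ON above requires):

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch: decide whether a locked page cache page can have its private
 * data (buffers, fscache marks, ...) stripped right now.
 */
static bool can_strip_private(struct page *page, gfp_t gfp)
{
        if (!page_has_private(page))
                return true;            /* nothing to release */
        if (PageWriteback(page))
                return false;           /* try_to_release_page() would refuse */
        return try_to_release_page(page, gfp) != 0;
}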
H A Drmap.c10 * Provides methods for unmapping each kind of mapped page:
25 * page->flags PG_locked (lock_page)
250 * page is mapped.
431 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
435 * have been relevant to this page.
437 * The page might have been remapped to a different anon_vma or the anon_vma
442 * ensure that any anon_vma obtained from the page will still be valid for as
446 * chain and verify that the page in question is indeed mapped in it
450 * that the anon_vma pointer from page->mapping is valid if there is a
453 struct anon_vma *page_get_anon_vma(struct page *page) page_get_anon_vma() argument
459 anon_mapping = (unsigned long)READ_ONCE(page->mapping); page_get_anon_vma()
462 if (!page_mapped(page)) page_get_anon_vma()
472 * If this page is still mapped, then its anon_vma cannot have been page_get_anon_vma()
478 if (!page_mapped(page)) { page_get_anon_vma()
496 struct anon_vma *page_lock_anon_vma_read(struct page *page) page_lock_anon_vma_read() argument
503 anon_mapping = (unsigned long)READ_ONCE(page->mapping); page_lock_anon_vma_read()
506 if (!page_mapped(page)) page_lock_anon_vma_read()
513 * If the page is still mapped, then this anon_vma is still page_lock_anon_vma_read()
517 if (!page_mapped(page)) { page_lock_anon_vma_read()
530 if (!page_mapped(page)) { page_lock_anon_vma_read()
564 * At what user virtual address is page expected in @vma?
567 __vma_address(struct page *page, struct vm_area_struct *vma) __vma_address() argument
569 pgoff_t pgoff = page_to_pgoff(page); __vma_address()
574 vma_address(struct page *page, struct vm_area_struct *vma) vma_address() argument
576 unsigned long address = __vma_address(page, vma); vma_address()
578 /* page should be within @vma mapping range */ vma_address()
585 * At what user virtual address is page expected in vma?
586 * Caller should check the page is actually part of the vma.
588 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) page_address_in_vma() argument
591 if (PageAnon(page)) { page_address_in_vma()
592 struct anon_vma *page__anon_vma = page_anon_vma(page); page_address_in_vma()
600 } else if (page->mapping) { page_address_in_vma()
601 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) page_address_in_vma()
605 address = __vma_address(page, vma); page_address_in_vma()
641 * Check that @page is mapped at @address into @mm.
644 * the page table lock when the pte is not present (helpful when reclaiming
649 pte_t *__page_check_address(struct page *page, struct mm_struct *mm, __page_check_address() argument
656 if (unlikely(PageHuge(page))) { __page_check_address()
662 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); __page_check_address()
680 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { __page_check_address()
689 * page_mapped_in_vma - check whether a page is really mapped in a VMA
690 * @page: the page to test
693 * Returns 1 if the page is mapped into the page tables of the VMA, 0
694 * if the page is not mapped into the page tables of this VMA. Only
697 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) page_mapped_in_vma() argument
703 address = __vma_address(page, vma); page_mapped_in_vma()
706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); page_mapped_in_vma()
707 if (!pte) /* the page is not in this mm */ page_mapped_in_vma()
723 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, page_referenced_one() argument
731 if (unlikely(PageTransHuge(page))) { page_referenced_one()
738 pmd = page_check_address_pmd(page, mm, address, page_referenced_one()
760 pte = page_check_address(page, mm, address, &ptl, 0); page_referenced_one()
773 * mapping as such. If the page has been used in page_referenced_one()
776 * set PG_referenced or activated the page. page_referenced_one()
808 * page_referenced - test if the page was referenced
809 * @page: the page to test
810 * @is_locked: caller holds lock on the page
812 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
814 * Quick test_and_clear_referenced for all mappings to a page,
815 * returns the number of ptes which referenced the page.
817 int page_referenced(struct page *page, page_referenced() argument
825 .mapcount = page_mapcount(page), page_referenced()
835 if (!page_mapped(page)) page_referenced()
838 if (!page_rmapping(page)) page_referenced()
841 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { page_referenced()
842 we_locked = trylock_page(page); page_referenced()
856 ret = rmap_walk(page, &rwc); page_referenced()
860 unlock_page(page); page_referenced()
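page_referenced() above drives rmap_walk() with a control block whose per-mapping callback does the work and whose done predicate allows an early exit once every known mapper has been seen. A user-space analogue of that shape, with made-up names and a trivial "referenced" counter standing in for the real accounting:

#include <stdio.h>

/* Analogue of a walk-control block: one callback per mapping,
 * plus an optional "done" predicate for early exit. */
struct walk_control {
	int (*visit_one)(int mapping_id, void *arg);
	int (*done)(void *arg);
	void *arg;
};

static int walk_mappings(int nr_mappings, struct walk_control *wc)
{
	int ret = 0;

	for (int i = 0; i < nr_mappings; i++) {
		ret = wc->visit_one(i, wc->arg);
		if (wc->done && wc->done(wc->arg))
			break;		/* all known mappers accounted for */
	}
	return ret;
}

/* Example: count "referenced" mappings, stop once two have been seen. */
static int count_one(int mapping_id, void *arg) { (void)mapping_id; (*(int *)arg)++; return 0; }
static int seen_enough(void *arg) { return *(int *)arg >= 2; }

int main(void)
{
	int referenced = 0;
	struct walk_control wc = { count_one, seen_enough, &referenced };

	walk_mappings(5, &wc);
	printf("referenced=%d\n", referenced);	/* 2 */
	return 0;
}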
865 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, page_mkclean_one() argument
874 pte = page_check_address(page, mm, address, &ptl, 1); page_mkclean_one()
907 int page_mkclean(struct page *page) page_mkclean() argument
917 BUG_ON(!PageLocked(page)); page_mkclean()
919 if (!page_mapped(page)) page_mkclean()
922 mapping = page_mapping(page); page_mkclean()
926 rmap_walk(page, &rwc); page_mkclean()
933 * page_move_anon_rmap - move a page to our anon_vma
934 * @page: the page to move to our anon_vma
935 * @vma: the vma the page belongs to
938 * When a page belongs exclusively to one process after a COW event,
939 * that page can be moved into the anon_vma that belongs to just that
943 void page_move_anon_rmap(struct page *page, page_move_anon_rmap() argument
948 VM_BUG_ON_PAGE(!PageLocked(page), page); page_move_anon_rmap()
950 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); page_move_anon_rmap()
953 page->mapping = (struct address_space *) anon_vma; page_move_anon_rmap()
958 * @page: Page to add to rmap
959 * @vma: VM area to add page to.
961 * @exclusive: the page is exclusively owned by the current process
963 static void __page_set_anon_rmap(struct page *page, __page_set_anon_rmap() argument
970 if (PageAnon(page)) __page_set_anon_rmap()
974 * If the page isn't exclusively mapped into this vma, __page_set_anon_rmap()
976 * page mapping! __page_set_anon_rmap()
982 page->mapping = (struct address_space *) anon_vma; __page_set_anon_rmap()
983 page->index = linear_page_index(vma, address); __page_set_anon_rmap()
988 * @page: the page to add the mapping to
992 static void __page_check_anon_rmap(struct page *page, __page_check_anon_rmap() argument
997 * The page's anon-rmap details (mapping and index) are guaranteed to __page_check_anon_rmap()
1001 * always holds the page locked, except if called from page_dup_rmap, __page_check_anon_rmap()
1002 * in which case the page is already known to be setup. __page_check_anon_rmap()
1008 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); __page_check_anon_rmap()
1009 BUG_ON(page->index != linear_page_index(vma, address)); __page_check_anon_rmap()
1014 * page_add_anon_rmap - add pte mapping to an anonymous page
1015 * @page: the page to add the mapping to
1019 * The caller needs to hold the pte lock, and the page must be locked in
1024 void page_add_anon_rmap(struct page *page, page_add_anon_rmap() argument
1027 do_page_add_anon_rmap(page, vma, address, 0); page_add_anon_rmap()
1035 void do_page_add_anon_rmap(struct page *page, do_page_add_anon_rmap() argument
1038 int first = atomic_inc_and_test(&page->_mapcount); do_page_add_anon_rmap()
1046 if (PageTransHuge(page)) do_page_add_anon_rmap()
1047 __inc_zone_page_state(page, do_page_add_anon_rmap()
1049 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, do_page_add_anon_rmap()
1050 hpage_nr_pages(page)); do_page_add_anon_rmap()
1052 if (unlikely(PageKsm(page))) do_page_add_anon_rmap()
1055 VM_BUG_ON_PAGE(!PageLocked(page), page); do_page_add_anon_rmap()
1058 __page_set_anon_rmap(page, vma, address, exclusive); do_page_add_anon_rmap()
1060 __page_check_anon_rmap(page, vma, address); do_page_add_anon_rmap()
1064 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1065 * @page: the page to add the mapping to
1073 void page_add_new_anon_rmap(struct page *page, page_add_new_anon_rmap() argument
1077 SetPageSwapBacked(page); page_add_new_anon_rmap()
1078 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ page_add_new_anon_rmap()
1079 if (PageTransHuge(page)) page_add_new_anon_rmap()
1080 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); page_add_new_anon_rmap()
1081 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, page_add_new_anon_rmap()
1082 hpage_nr_pages(page)); page_add_new_anon_rmap()
1083 __page_set_anon_rmap(page, vma, address, 1); page_add_new_anon_rmap()
1087 * page_add_file_rmap - add pte mapping to a file page
1088 * @page: the page to add the mapping to
1092 void page_add_file_rmap(struct page *page) page_add_file_rmap() argument
1096 memcg = mem_cgroup_begin_page_stat(page); page_add_file_rmap()
1097 if (atomic_inc_and_test(&page->_mapcount)) { page_add_file_rmap()
1098 __inc_zone_page_state(page, NR_FILE_MAPPED); page_add_file_rmap()
1104 static void page_remove_file_rmap(struct page *page) page_remove_file_rmap() argument
1108 memcg = mem_cgroup_begin_page_stat(page); page_remove_file_rmap()
1110 /* page still mapped by someone else? */ page_remove_file_rmap()
1111 if (!atomic_add_negative(-1, &page->_mapcount)) page_remove_file_rmap()
1115 if (unlikely(PageHuge(page))) page_remove_file_rmap()
1123 __dec_zone_page_state(page, NR_FILE_MAPPED); page_remove_file_rmap()
1126 if (unlikely(PageMlocked(page))) page_remove_file_rmap()
1127 clear_page_mlock(page); page_remove_file_rmap()
1133 * page_remove_rmap - take down pte mapping from a page
1134 * @page: page to remove mapping from
1138 void page_remove_rmap(struct page *page) page_remove_rmap() argument
1140 if (!PageAnon(page)) { page_remove_rmap()
1141 page_remove_file_rmap(page); page_remove_rmap()
1145 /* page still mapped by someone else? */ page_remove_rmap()
1146 if (!atomic_add_negative(-1, &page->_mapcount)) page_remove_rmap()
1150 if (unlikely(PageHuge(page))) page_remove_rmap()
1158 if (PageTransHuge(page)) page_remove_rmap()
1159 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); page_remove_rmap()
1161 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, page_remove_rmap()
1162 -hpage_nr_pages(page)); page_remove_rmap()
1164 if (unlikely(PageMlocked(page))) page_remove_rmap()
1165 clear_page_mlock(page); page_remove_rmap()
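The _mapcount convention visible in these helpers (the count starts at -1, so the first increment lands on 0 and the drop below 0 marks the last unmap) is what makes atomic_inc_and_test() and atomic_add_negative(-1, ...) sufficient to gate the statistics updates. A minimal user-space rendering with C11 atomics; the function names are stand-ins:

#include <stdatomic.h>
#include <stdio.h>

/* Convention: -1 means "no pte maps this page". */
static atomic_int mapcount = ATOMIC_VAR_INIT(-1);

static int map_one(void)	/* returns 1 only for the first mapping */
{
	return atomic_fetch_add(&mapcount, 1) + 1 == 0;	/* inc-and-test */
}

static int unmap_one(void)	/* returns 1 only when the last mapping goes */
{
	return atomic_fetch_sub(&mapcount, 1) - 1 < 0;	/* add_negative(-1) */
}

int main(void)
{
	printf("first map: %d\n", map_one());	/* 1: first mapper updates stats */
	printf("first map: %d\n", map_one());	/* 0: page already mapped elsewhere */
	printf("last unmap: %d\n", unmap_one());/* 0: still mapped by someone */
	printf("last unmap: %d\n", unmap_one());/* 1: last mapper updates stats */
	return 0;
}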
1181 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, try_to_unmap_one() argument
1191 pte = page_check_address(page, mm, address, &ptl, 0); try_to_unmap_one()
1196 * If the page is mlock()d, we cannot swap it out. try_to_unmap_one()
1214 /* Nuke the page table entry. */ try_to_unmap_one()
1215 flush_cache_page(vma, address, page_to_pfn(page)); try_to_unmap_one()
1218 /* Move the dirty bit to the physical page now the pte is gone. */ try_to_unmap_one()
1220 set_page_dirty(page); try_to_unmap_one()
1225 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { try_to_unmap_one()
1226 if (!PageHuge(page)) { try_to_unmap_one()
1227 if (PageAnon(page)) try_to_unmap_one()
1233 swp_entry_to_pte(make_hwpoison_entry(page))); try_to_unmap_one()
1236 * The guest indicated that the page content is of no try_to_unmap_one()
1240 if (PageAnon(page)) try_to_unmap_one()
1244 } else if (PageAnon(page)) { try_to_unmap_one()
1245 swp_entry_t entry = { .val = page_private(page) }; try_to_unmap_one()
1248 if (PageSwapCache(page)) { try_to_unmap_one()
1268 * Store the pfn of the page in a special migration try_to_unmap_one()
1273 entry = make_migration_entry(page, pte_write(pteval)); try_to_unmap_one()
1281 /* Establish migration entry for a file page */ try_to_unmap_one()
1283 entry = make_migration_entry(page, pte_write(pteval)); try_to_unmap_one()
1288 page_remove_rmap(page); try_to_unmap_one()
1289 page_cache_release(page); try_to_unmap_one()
1306 * if trylock failed, the page remains in the evictable lru and later try_to_unmap_one()
1307 * vmscan could retry to move the page to unevictable lru if the try_to_unmap_one()
1308 * page is actually mlocked. try_to_unmap_one()
1312 mlock_vma_page(page); try_to_unmap_one()
1339 static int page_not_mapped(struct page *page) page_not_mapped() argument
1341 return !page_mapped(page); page_not_mapped()
1345 * try_to_unmap - try to remove all page table mappings to a page
1346 * @page: the page to get unmapped
1349 * Tries to remove all the page table entries which are mapping this
1350 * page, used in the pageout path. Caller must hold the page lock.
1355 * SWAP_FAIL - the page is unswappable
1356 * SWAP_MLOCK - page is mlocked.
1358 int try_to_unmap(struct page *page, enum ttu_flags flags) try_to_unmap() argument
1368 VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page); try_to_unmap()
1373 * page tables leading to a race where migration cannot try_to_unmap()
1378 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page)) try_to_unmap()
1381 ret = rmap_walk(page, &rwc); try_to_unmap()
1383 if (ret != SWAP_MLOCK && !page_mapped(page)) try_to_unmap()
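The SWAP_* results documented for try_to_unmap() roughly dictate what a pageout-style caller does next: an mlocked page goes to the unevictable list, an unswappable page is kept, and a still-mapped page is retried. A hedged user-space sketch of that branching; the enum and the strings are illustrative stand-ins, not the kernel's vmscan logic:

#include <stdio.h>

/* Stand-in result codes mirroring the semantics documented above. */
enum unmap_result { R_SUCCESS, R_AGAIN, R_FAIL, R_MLOCK };

/* How a reclaim-like caller might react (illustrative only). */
static const char *react(enum unmap_result r, int still_mapped)
{
	switch (r) {
	case R_MLOCK:	return "move to the unevictable list";
	case R_FAIL:	return "keep the page; it is unswappable";
	case R_AGAIN:	return still_mapped ? "retry later" : "proceed to pageout";
	case R_SUCCESS:	return "proceed to pageout";
	}
	return "unreachable";
}

int main(void)
{
	printf("%s\n", react(R_MLOCK, 1));
	printf("%s\n", react(R_AGAIN, 0));
	return 0;
}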
1389 * try_to_munlock - try to munlock a page
1390 * @page: the page to be munlocked
1392 * Called from munlock code. Checks all of the VMAs mapping the page
1393 * to make sure nobody else has this page mlocked. The page will be
1398 * SWAP_AGAIN - no vma is holding page mlocked, or,
1399 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
1400 * SWAP_FAIL - page cannot be located at present
1401 * SWAP_MLOCK - page is now mlocked.
1403 int try_to_munlock(struct page *page) try_to_munlock() argument
1414 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); try_to_munlock()
1416 ret = rmap_walk(page, &rwc); try_to_munlock()
1429 static struct anon_vma *rmap_walk_anon_lock(struct page *page, rmap_walk_anon_lock() argument
1435 return rwc->anon_lock(page); rmap_walk_anon_lock()
1443 anon_vma = page_anon_vma(page); rmap_walk_anon_lock()
1452 * rmap_walk_anon - do something to anonymous page using the object-based
1454 * @page: the page to be handled
1457 * Find all the mappings of a page using the mapping pointer and the vma chains
1461 * where the page was found will be held for write. So, we won't recheck
1465 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) rmap_walk_anon() argument
1472 anon_vma = rmap_walk_anon_lock(page, rwc); rmap_walk_anon()
1476 pgoff = page_to_pgoff(page); rmap_walk_anon()
1479 unsigned long address = vma_address(page, vma); rmap_walk_anon()
1484 ret = rwc->rmap_one(page, vma, address, rwc->arg); rmap_walk_anon()
1487 if (rwc->done && rwc->done(page)) rmap_walk_anon()
1495 * rmap_walk_file - do something to file page using the object-based rmap method
1496 * @page: the page to be handled
1499 * Find all the mappings of a page using the mapping pointer and the vma chains
1503 * where the page was found will be held for write. So, we won't recheck
1507 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) rmap_walk_file() argument
1509 struct address_space *mapping = page->mapping; rmap_walk_file()
1515 * The page lock not only makes sure that page->mapping cannot rmap_walk_file()
1520 VM_BUG_ON_PAGE(!PageLocked(page), page); rmap_walk_file()
1525 pgoff = page_to_pgoff(page); rmap_walk_file()
1528 unsigned long address = vma_address(page, vma); rmap_walk_file()
1533 ret = rwc->rmap_one(page, vma, address, rwc->arg); rmap_walk_file()
1536 if (rwc->done && rwc->done(page)) rmap_walk_file()
1545 int rmap_walk(struct page *page, struct rmap_walk_control *rwc) rmap_walk() argument
1547 if (unlikely(PageKsm(page))) rmap_walk()
1548 return rmap_walk_ksm(page, rwc); rmap_walk()
1549 else if (PageAnon(page)) rmap_walk()
1550 return rmap_walk_anon(page, rwc); rmap_walk()
1552 return rmap_walk_file(page, rwc); rmap_walk()
1561 static void __hugepage_set_anon_rmap(struct page *page, __hugepage_set_anon_rmap() argument
1568 if (PageAnon(page)) __hugepage_set_anon_rmap()
1574 page->mapping = (struct address_space *) anon_vma; __hugepage_set_anon_rmap()
1575 page->index = linear_page_index(vma, address); __hugepage_set_anon_rmap()
1578 void hugepage_add_anon_rmap(struct page *page, hugepage_add_anon_rmap() argument
1584 BUG_ON(!PageLocked(page)); hugepage_add_anon_rmap()
1587 first = atomic_inc_and_test(&page->_mapcount); hugepage_add_anon_rmap()
1589 __hugepage_set_anon_rmap(page, vma, address, 0); hugepage_add_anon_rmap()
1592 void hugepage_add_new_anon_rmap(struct page *page, hugepage_add_new_anon_rmap() argument
1596 atomic_set(&page->_mapcount, 0); hugepage_add_new_anon_rmap()
1597 __hugepage_set_anon_rmap(page, vma, address, 1); hugepage_add_new_anon_rmap()
H A Dmlock.c42 * An mlocked page [PageMlocked(page)] is unevictable. As such, it will
49 * may have mlocked a page that is being munlocked. So lazy mlock must take
57 void clear_page_mlock(struct page *page) clear_page_mlock() argument
59 if (!TestClearPageMlocked(page)) clear_page_mlock()
62 mod_zone_page_state(page_zone(page), NR_MLOCK, clear_page_mlock()
63 -hpage_nr_pages(page)); clear_page_mlock()
65 if (!isolate_lru_page(page)) { clear_page_mlock()
66 putback_lru_page(page); clear_page_mlock()
69 * We lost the race. The page already moved to the evictable list. clear_page_mlock()
71 if (PageUnevictable(page)) clear_page_mlock()
77 * Mark page as mlocked if not already.
78 * If page on LRU, isolate and putback to move to unevictable list.
80 void mlock_vma_page(struct page *page) mlock_vma_page() argument
82 /* Serialize with page migration */ mlock_vma_page()
83 BUG_ON(!PageLocked(page)); mlock_vma_page()
85 if (!TestSetPageMlocked(page)) { mlock_vma_page()
86 mod_zone_page_state(page_zone(page), NR_MLOCK, mlock_vma_page()
87 hpage_nr_pages(page)); mlock_vma_page()
89 if (!isolate_lru_page(page)) mlock_vma_page()
90 putback_lru_page(page); mlock_vma_page()
95 * Isolate a page from LRU with optional get_page() pin.
96 * Assumes lru_lock already held and page already pinned.
98 static bool __munlock_isolate_lru_page(struct page *page, bool getpage) __munlock_isolate_lru_page() argument
100 if (PageLRU(page)) { __munlock_isolate_lru_page()
103 lruvec = mem_cgroup_page_lruvec(page, page_zone(page)); __munlock_isolate_lru_page()
105 get_page(page); __munlock_isolate_lru_page()
106 ClearPageLRU(page); __munlock_isolate_lru_page()
107 del_page_from_lru_list(page, lruvec, page_lru(page)); __munlock_isolate_lru_page()
115 * Finish munlock after successful page isolation
120 static void __munlock_isolated_page(struct page *page) __munlock_isolated_page() argument
125 * Optimization: if the page was mapped just once, that's our mapping __munlock_isolated_page()
128 if (page_mapcount(page) > 1) __munlock_isolated_page()
129 ret = try_to_munlock(page); __munlock_isolated_page()
135 putback_lru_page(page); __munlock_isolated_page()
139 * Accounting for page isolation fail during munlock
141 * Performs accounting when page isolation fails in munlock. There is nothing
142 * else to do because it means some other task has already removed the page
143 * from the LRU. putback_lru_page() will take care of removing the page from
145 * the page back to the unevictable list if some other vma has it mlocked.
147 static void __munlock_isolation_failed(struct page *page) __munlock_isolation_failed() argument
149 if (PageUnevictable(page)) __munlock_isolation_failed()
156 * munlock_vma_page - munlock a vma page
157 * @page - page to be unlocked, either a normal page or THP page head
159 * returns the size of the page as a page mask (0 for normal page,
160 * HPAGE_PMD_NR - 1 for THP head page)
162 * called from munlock()/munmap() path with page supposedly on the LRU.
163 * When we munlock a page, because the vma where we found the page is being
165 * page locked so that we can leave it on the unevictable lru list and not
166 * bother vmscan with it. However, to walk the page's rmap list in
167 * try_to_munlock() we must isolate the page from the LRU. If some other
168 * task has removed the page from the LRU, we won't be able to do that.
170 * can't isolate the page, we leave it for putback_lru_page() and vmscan
173 unsigned int munlock_vma_page(struct page *page) munlock_vma_page() argument
176 struct zone *zone = page_zone(page); munlock_vma_page()
178 /* For try_to_munlock() and to serialize with page migration */ munlock_vma_page()
179 BUG_ON(!PageLocked(page)); munlock_vma_page()
184 * we clear it in the head page. It also stabilizes hpage_nr_pages(). munlock_vma_page()
188 nr_pages = hpage_nr_pages(page); munlock_vma_page()
189 if (!TestClearPageMlocked(page)) munlock_vma_page()
194 if (__munlock_isolate_lru_page(page, true)) { munlock_vma_page()
196 __munlock_isolated_page(page); munlock_vma_page()
199 __munlock_isolation_failed(page); munlock_vma_page()
221 * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
227 * avoid leaving evictable page in unevictable list.
229 * In case of success, @page is added to @pvec and @pgrescued is incremented
230 * in case that the page was previously unevictable. @page is also unlocked.
232 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, __putback_lru_fast_prepare() argument
235 VM_BUG_ON_PAGE(PageLRU(page), page); __putback_lru_fast_prepare() local
236 VM_BUG_ON_PAGE(!PageLocked(page), page); __putback_lru_fast_prepare()
238 if (page_mapcount(page) <= 1 && page_evictable(page)) { __putback_lru_fast_prepare()
239 pagevec_add(pvec, page); __putback_lru_fast_prepare()
240 if (TestClearPageUnevictable(page)) __putback_lru_fast_prepare()
242 unlock_page(page); __putback_lru_fast_prepare()
286 /* Phase 1: page isolation */ __munlock_pagevec()
289 struct page *page = pvec->pages[i]; __munlock_pagevec() local
291 if (TestClearPageMlocked(page)) { __munlock_pagevec()
296 if (__munlock_isolate_lru_page(page, false)) __munlock_pagevec()
299 __munlock_isolation_failed(page); __munlock_pagevec()
303 * We won't be munlocking this page in the next phase __munlock_pagevec()
318 /* Phase 2: page munlock */ __munlock_pagevec()
320 struct page *page = pvec->pages[i]; __munlock_pagevec() local
322 if (page) { __munlock_pagevec()
323 lock_page(page); __munlock_pagevec()
324 if (!__putback_lru_fast_prepare(page, &pvec_putback, __munlock_pagevec()
330 get_page(page); /* for putback_lru_page() */ __munlock_pagevec()
331 __munlock_isolated_page(page); __munlock_pagevec()
332 unlock_page(page); __munlock_pagevec()
333 put_page(page); /* from follow_page_mask() */ __munlock_pagevec()
339 * Phase 3: page putback for pages that qualified for the fast path __munlock_pagevec()
349 * The function expects that the struct page corresponding to @start address is
350 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone. __munlock_pagevec_fill()
356 * Returns the address of the next page that should be scanned. This equals
357 * @start + PAGE_SIZE when no page could be added by the pte walk.
367 * Initialize pte walk starting at the already pinned page where we __munlock_pagevec_fill()
372 /* Make sure we do not cross the page table boundary */ __munlock_pagevec_fill()
377 /* The page next to the pinned page is the first we will try to get */ __munlock_pagevec_fill()
380 struct page *page = NULL; __munlock_pagevec_fill() local
383 page = vm_normal_page(vma, start, *pte); __munlock_pagevec_fill()
385 * Break if page could not be obtained or the page's node+zone does not __munlock_pagevec_fill()
388 if (!page || page_zone_id(page) != zoneid) __munlock_pagevec_fill()
391 get_page(page); __munlock_pagevec_fill()
394 * eventual break due to pvec becoming full by adding the page __munlock_pagevec_fill()
397 if (pagevec_add(pvec, page) == 0) __munlock_pagevec_fill()
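__munlock_pagevec_fill() above embodies a simple batching pattern: walk consecutive ptes after an already pinned page and stop at the first zone mismatch, at a boundary, or when the pagevec fills up. A stand-alone C sketch of that loop, with a plain array standing in for the pagevec and a per-page zone id standing in for page_zone_id():

#include <stdio.h>

#define BATCH 14	/* stand-in for PAGEVEC_SIZE */

/*
 * Illustrative batch fill: starting just after an already-collected page,
 * keep taking consecutive pages while they share the start page's zone,
 * stopping at the boundary (nr_pages) or when the batch is full.
 * Returns how many additional pages were collected.
 */
static int fill_batch(const int *zone, int nr_pages, int start, int *batch)
{
	int n = 0;

	for (int i = start + 1; i < nr_pages && n < BATCH; i++) {
		if (zone[i] != zone[start])
			break;		/* zone changed: stop the walk */
		batch[n++] = i;
	}
	return n;
}

int main(void)
{
	int zone[8] = { 0, 0, 0, 1, 1, 1, 1, 1 };	/* zone id per page */
	int batch[BATCH];

	printf("collected %d pages\n", fill_batch(zone, 8, 0, batch));	/* 2 */
	return 0;
}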
428 struct page *page = NULL; munlock_vma_pages_range() local
440 * suits munlock very well (and if somehow an abnormal page munlock_vma_pages_range()
443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, munlock_vma_pages_range()
446 if (page && !IS_ERR(page)) { munlock_vma_pages_range()
447 if (PageTransHuge(page)) { munlock_vma_pages_range()
448 lock_page(page); munlock_vma_pages_range()
450 * Any THP page found by follow_page_mask() may munlock_vma_pages_range()
455 page_mask = munlock_vma_page(page); munlock_vma_pages_range()
456 unlock_page(page); munlock_vma_pages_range()
457 put_page(page); /* follow_page_mask() */ munlock_vma_pages_range()
464 pagevec_add(&pvec, page); munlock_vma_pages_range()
465 zone = page_zone(page); munlock_vma_pages_range()
466 zoneid = page_zone_id(page); munlock_vma_pages_range()
471 * the next page to process. Then munlock the munlock_vma_pages_range()
480 /* It's a bug to munlock in the middle of a THP page */ munlock_vma_pages_range()
542 * It's okay if try_to_unmap_one unmaps a page just after we mlock_fixup()
H A Dswap_state.c7 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
79 int __add_to_swap_cache(struct page *page, swp_entry_t entry) __add_to_swap_cache() argument
84 VM_BUG_ON_PAGE(!PageLocked(page), page); __add_to_swap_cache()
85 VM_BUG_ON_PAGE(PageSwapCache(page), page); __add_to_swap_cache() local
86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); __add_to_swap_cache()
88 page_cache_get(page); __add_to_swap_cache()
89 SetPageSwapCache(page); __add_to_swap_cache()
90 set_page_private(page, entry.val); __add_to_swap_cache()
95 entry.val, page); __add_to_swap_cache()
98 __inc_zone_page_state(page, NR_FILE_PAGES); __add_to_swap_cache()
110 set_page_private(page, 0UL); __add_to_swap_cache()
111 ClearPageSwapCache(page); __add_to_swap_cache()
112 page_cache_release(page); __add_to_swap_cache()
119 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) add_to_swap_cache() argument
125 error = __add_to_swap_cache(page, entry); add_to_swap_cache()
135 void __delete_from_swap_cache(struct page *page) __delete_from_swap_cache() argument
140 VM_BUG_ON_PAGE(!PageLocked(page), page); __delete_from_swap_cache()
141 VM_BUG_ON_PAGE(!PageSwapCache(page), page); __delete_from_swap_cache()
142 VM_BUG_ON_PAGE(PageWriteback(page), page); __delete_from_swap_cache() local
144 entry.val = page_private(page); __delete_from_swap_cache()
146 radix_tree_delete(&address_space->page_tree, page_private(page)); __delete_from_swap_cache()
147 set_page_private(page, 0); __delete_from_swap_cache()
148 ClearPageSwapCache(page); __delete_from_swap_cache()
150 __dec_zone_page_state(page, NR_FILE_PAGES); __delete_from_swap_cache()
155 * add_to_swap - allocate swap space for a page
156 * @page: page we want to move to swap
158 * Allocate swap space for the page and add the page to the
159 * swap cache. Caller needs to hold the page lock.
161 int add_to_swap(struct page *page, struct list_head *list) add_to_swap() argument
166 VM_BUG_ON_PAGE(!PageLocked(page), page); add_to_swap()
167 VM_BUG_ON_PAGE(!PageUptodate(page), page); add_to_swap()
173 if (unlikely(PageTransHuge(page))) add_to_swap()
174 if (unlikely(split_huge_page_to_list(page, list))) { add_to_swap()
181 * completely exhaust the page allocator. __GFP_NOMEMALLOC add_to_swap()
190 err = add_to_swap_cache(page, entry, add_to_swap()
194 SetPageDirty(page); add_to_swap()
209 * It will never put the page into the free list,
210 * the caller has a reference on the page.
212 void delete_from_swap_cache(struct page *page) delete_from_swap_cache() argument
217 entry.val = page_private(page); delete_from_swap_cache()
221 __delete_from_swap_cache(page); delete_from_swap_cache()
225 page_cache_release(page); delete_from_swap_cache()
231 * It's OK to check for PageSwapCache without the page lock
236 static inline void free_swap_cache(struct page *page) free_swap_cache() argument
238 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { free_swap_cache()
239 try_to_free_swap(page); free_swap_cache()
240 unlock_page(page); free_swap_cache()
246 * this page if it is the last user of the page.
248 void free_page_and_swap_cache(struct page *page) free_page_and_swap_cache() argument
250 free_swap_cache(page); free_page_and_swap_cache()
251 page_cache_release(page); free_page_and_swap_cache()
258 void free_pages_and_swap_cache(struct page **pages, int nr) free_pages_and_swap_cache()
260 struct page **pagep = pages; free_pages_and_swap_cache()
270 * Lookup a swap entry in the swap cache. A found page will be returned
272 * lock getting page table operations atomic even if we drop the page
275 struct page * lookup_swap_cache(swp_entry_t entry) lookup_swap_cache()
277 struct page *page; lookup_swap_cache() local
279 page = find_get_page(swap_address_space(entry), entry.val); lookup_swap_cache()
281 if (page) { lookup_swap_cache()
283 if (TestClearPageReadahead(page)) lookup_swap_cache()
288 return page; lookup_swap_cache()
292 * Locate a page of swap in physical memory, reserving swap cache space
294 * A failure return means that either the page allocation failed or that
297 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, read_swap_cache_async()
300 struct page *found_page, *new_page = NULL; read_swap_cache_async()
315 * Get a new page to read into from swap. read_swap_cache_async()
338 * across a SWAP_HAS_CACHE swap_map entry whose page read_swap_cache_async()
366 * Initiate read into locked page and return. read_swap_cache_async()
438 * Returns the struct page for entry and addr, after queueing swapin.
450 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, swapin_readahead()
453 struct page *page; swapin_readahead() local
467 if (!start_offset) /* First page is swap header. */ swapin_readahead()
473 page = read_swap_cache_async(swp_entry(swp_type(entry), offset), swapin_readahead()
475 if (!page) swapin_readahead()
478 SetPageReadahead(page); swapin_readahead()
479 page_cache_release(page); swapin_readahead()
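swapin_readahead() above reads a window of swap offsets around the faulting one and skips offset 0 because that slot holds the swap header. One plausible way to compute such an aligned window is sketched below; the power-of-two cluster size and the masking are assumptions for illustration, not the kernel's exact code:

#include <stdio.h>

/*
 * Illustrative readahead window around a faulting swap offset: align the
 * window to a power-of-two cluster and skip offset 0 (the swap header).
 */
static void readahead_window(unsigned long offset, unsigned long cluster,
			     unsigned long *start, unsigned long *end)
{
	unsigned long mask = cluster - 1;	/* cluster must be a power of two */

	*start = offset & ~mask;
	*end = offset | mask;
	if (*start == 0)
		*start = 1;			/* first page is the swap header */
}

int main(void)
{
	unsigned long start, end;

	readahead_window(37, 8, &start, &end);
	printf("read offsets %lu..%lu around 37\n", start, end);	/* 32..39 */
	return 0;
}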
H A Dinternal.h20 static inline void set_page_count(struct page *page, int v) set_page_count() argument
22 atomic_set(&page->_count, v); set_page_count()
40 * Turn a non-refcounted page (->_count == 0) into refcounted with
43 static inline void set_page_refcounted(struct page *page) set_page_refcounted() argument
45 VM_BUG_ON_PAGE(PageTail(page), page); set_page_refcounted() local
46 VM_BUG_ON_PAGE(atomic_read(&page->_count), page); set_page_refcounted()
47 set_page_count(page, 1); set_page_refcounted()
50 static inline void __get_page_tail_foll(struct page *page, __get_page_tail_foll() argument
54 * If we're getting a tail page, the elevated page->_count is __get_page_tail_foll()
55 * required only in the head page and we will elevate the head __get_page_tail_foll()
56 * page->_count and tail page->_mapcount. __get_page_tail_foll()
61 * speculative page access (like in __get_page_tail_foll()
64 VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page); __get_page_tail_foll()
66 atomic_inc(&page->first_page->_count); __get_page_tail_foll()
67 get_huge_page_tail(page); __get_page_tail_foll()
73 * lock while the pte (or pmd_trans_huge) is still mapping the page.
75 static inline void get_page_foll(struct page *page) get_page_foll() argument
77 if (unlikely(PageTail(page))) get_page_foll()
83 __get_page_tail_foll(page, true); get_page_foll()
86 * Getting a normal page or the head of a compound page get_page_foll()
87 * requires to already have an elevated page->_count. get_page_foll()
89 VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); get_page_foll()
90 atomic_inc(&page->_count); get_page_foll()
99 extern int isolate_lru_page(struct page *page);
100 extern void putback_lru_page(struct page *page);
135 * Locate the struct page for both the matching buddy in our
136 * pair (buddy1) and the combined O(n+1) page they form (page).
157 extern int __isolate_free_page(struct page *page, unsigned int order);
158 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
160 extern void prep_compound_page(struct page *page, unsigned int order);
162 extern bool is_free_buddy_page(struct page *page);
210 * This function returns the order of a free page in the buddy system. In
211 * general, page_zone(page)->lock must be held by the caller to prevent the
212 * page from being allocated in parallel and returning garbage as the order.
213 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
214 * page cannot be allocated or merged in parallel. Alternatively, it must
217 static inline unsigned int page_order(struct page *page) page_order() argument
220 return page_private(page); page_order()
230 * decide to remove the variable and inline the page_private(page) multiple
234 #define page_order_unsafe(page) READ_ONCE(page_private(page))
256 * must be called with vma's mmap_sem held for read or write, and page locked.
258 extern void mlock_vma_page(struct page *page);
259 extern unsigned int munlock_vma_page(struct page *page);
262 * Clear the page's PageMlocked(). This can be useful in a situation where
263 * we want to unconditionally remove a page from the pagecache -- e.g.,
266 * It is legal to call this function for any page, mlocked or not.
267 * If called for a page that is still mapped by mlocked vmas, all we do
270 extern void clear_page_mlock(struct page *page);
274 * migrate the Mlocked page flag; update statistics.
276 static inline void mlock_migrate_page(struct page *newpage, struct page *page) mlock_migrate_page() argument
278 if (TestClearPageMlocked(page)) { mlock_migrate_page()
280 int nr_pages = hpage_nr_pages(page); mlock_migrate_page()
283 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); mlock_migrate_page()
293 extern unsigned long vma_address(struct page *page,
297 static inline void clear_page_mlock(struct page *page) { } mlock_vma_page() argument
298 static inline void mlock_vma_page(struct page *page) { } mlock_migrate_page() argument
299 static inline void mlock_migrate_page(struct page *new, struct page *old) { } mlock_migrate_page()
305 * the maximally aligned gigantic page 'base'. Handle any discontiguity mlock_migrate_page()
308 static inline struct page *mem_map_offset(struct page *base, int offset) mem_map_offset()
317 * page 'base'. Handle any discontiguity in the mem_map.
319 static inline struct page *mem_map_next(struct page *iter, mem_map_next()
320 struct page *base, int offset) mem_map_next()
365 extern void mminit_verify_page_links(struct page *page,
380 static inline void mminit_verify_page_links(struct page *page, mminit_verify_page_links() argument
406 extern int hwpoison_filter(struct page *p);
H A Dmigrate.c79 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
84 struct page *page; putback_movable_pages() local
85 struct page *page2; putback_movable_pages()
87 list_for_each_entry_safe(page, page2, l, lru) { list_for_each_entry_safe()
88 if (unlikely(PageHuge(page))) { list_for_each_entry_safe()
89 putback_active_hugepage(page); list_for_each_entry_safe()
92 list_del(&page->lru); list_for_each_entry_safe()
93 dec_zone_page_state(page, NR_ISOLATED_ANON + list_for_each_entry_safe()
94 page_is_file_cache(page)); list_for_each_entry_safe()
95 if (unlikely(isolated_balloon_page(page))) list_for_each_entry_safe()
96 balloon_page_putback(page); list_for_each_entry_safe()
98 putback_lru_page(page); list_for_each_entry_safe()
105 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, remove_migration_pte()
183 * references to the indicated page.
185 static void remove_migration_ptes(struct page *old, struct page *new) remove_migration_ptes()
196 * Something used the pte of a page under migration. We need to
197 * get to the page and wait until migration is finished.
205 struct page *page; __migration_entry_wait() local
216 page = migration_entry_to_page(entry); __migration_entry_wait()
219 * Once radix-tree replacement of page migration started, page_count __migration_entry_wait()
221 * against a page without get_page(). __migration_entry_wait()
222 * So, we use get_page_unless_zero(), here. Even failed, page fault __migration_entry_wait()
225 if (!get_page_unless_zero(page)) __migration_entry_wait()
228 wait_on_page_locked(page); __migration_entry_wait()
229 put_page(page); __migration_entry_wait()
301 * Replace the page in the mapping.
309 struct page *newpage, struct page *page, migrate_page_move_mapping()
317 /* Anonymous page without mapping */ migrate_page_move_mapping()
318 if (page_count(page) != expected_count) migrate_page_move_mapping()
326 page_index(page)); migrate_page_move_mapping()
328 expected_count += 1 + page_has_private(page); migrate_page_move_mapping()
329 if (page_count(page) != expected_count || migrate_page_move_mapping()
330 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { migrate_page_move_mapping()
335 if (!page_freeze_refs(page, expected_count)) { migrate_page_move_mapping()
341 * In the async migration case of moving a page with buffers, lock the migrate_page_move_mapping()
344 * the mapping back due to an elevated page count, we would have to migrate_page_move_mapping()
349 page_unfreeze_refs(page, expected_count); migrate_page_move_mapping()
355 * Now we know that no one else is looking at the page. migrate_page_move_mapping()
358 if (PageSwapCache(page)) { migrate_page_move_mapping()
360 set_page_private(newpage, page_private(page)); migrate_page_move_mapping()
366 * Drop cache reference from old page by unfreezing migrate_page_move_mapping()
370 page_unfreeze_refs(page, expected_count - 1); migrate_page_move_mapping()
374 * the page for that zone. Other VM counters will be migrate_page_move_mapping()
376 * new page and drop references to the old page. migrate_page_move_mapping()
382 __dec_zone_page_state(page, NR_FILE_PAGES); migrate_page_move_mapping()
384 if (!PageSwapCache(page) && PageSwapBacked(page)) { migrate_page_move_mapping()
385 __dec_zone_page_state(page, NR_SHMEM); migrate_page_move_mapping()
398 struct page *newpage, struct page *page) migrate_huge_page_move_mapping()
404 if (page_count(page) != 1) migrate_huge_page_move_mapping()
412 page_index(page)); migrate_huge_page_move_mapping()
414 expected_count = 2 + page_has_private(page); migrate_huge_page_move_mapping()
415 if (page_count(page) != expected_count || migrate_huge_page_move_mapping()
416 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { migrate_huge_page_move_mapping()
421 if (!page_freeze_refs(page, expected_count)) { migrate_huge_page_move_mapping()
430 page_unfreeze_refs(page, expected_count - 1); migrate_huge_page_move_mapping()
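Both move-mapping paths above proceed only once page_freeze_refs() has atomically confirmed that the reference count is exactly the expected value and taken it to zero, so no new reference can appear mid-migration; on success the count is later restored minus the old cache reference. A user-space sketch of that compare-and-exchange idea (the function names here are stand-ins):

#include <stdatomic.h>
#include <stdio.h>

/* Succeed only if the count is exactly 'expected', atomically taking it to 0. */
static int freeze_refs(atomic_int *count, int expected)
{
	return atomic_compare_exchange_strong(count, &expected, 0);
}

static void unfreeze_refs(atomic_int *count, int value)
{
	atomic_store(count, value);	/* hand the references back */
}

int main(void)
{
	atomic_int count = ATOMIC_VAR_INIT(3);	/* e.g. mapping + private + caller */

	if (!freeze_refs(&count, 2))
		printf("unexpected extra reference, migration backs off\n");
	if (freeze_refs(&count, 3))
		printf("frozen, safe to move the page\n");
	unfreeze_refs(&count, 2);	/* success path: drop the old cache reference */
	return 0;
}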
437 * Gigantic pages are so large that we do not guarantee that page++ pointer
438 * arithmetic will work across the entire page. We need something more
441 static void __copy_gigantic_page(struct page *dst, struct page *src, __copy_gigantic_page()
445 struct page *dst_base = dst; __copy_gigantic_page()
446 struct page *src_base = src; __copy_gigantic_page()
458 static void copy_huge_page(struct page *dst, struct page *src) copy_huge_page()
464 /* hugetlbfs page */ copy_huge_page()
473 /* thp page */ copy_huge_page()
485 * Copy the page to its new location
487 void migrate_page_copy(struct page *newpage, struct page *page) migrate_page_copy() argument
491 if (PageHuge(page) || PageTransHuge(page)) migrate_page_copy()
492 copy_huge_page(newpage, page); migrate_page_copy()
494 copy_highpage(newpage, page); migrate_page_copy()
496 if (PageError(page)) migrate_page_copy()
498 if (PageReferenced(page)) migrate_page_copy()
500 if (PageUptodate(page)) migrate_page_copy()
502 if (TestClearPageActive(page)) { migrate_page_copy()
503 VM_BUG_ON_PAGE(PageUnevictable(page), page); migrate_page_copy() local
505 } else if (TestClearPageUnevictable(page)) migrate_page_copy()
507 if (PageChecked(page)) migrate_page_copy()
509 if (PageMappedToDisk(page)) migrate_page_copy()
512 if (PageDirty(page)) { migrate_page_copy()
513 clear_page_dirty_for_io(page); migrate_page_copy()
515 * Want to mark the page and the radix tree as dirty, and migrate_page_copy()
518 * is actually a signal that all of the page has become dirty. migrate_page_copy()
519 * Whereas only part of our page may be dirty. migrate_page_copy()
521 if (PageSwapBacked(page)) migrate_page_copy()
528 * Copy NUMA information to the new page, to prevent over-eager migrate_page_copy()
529 * future migrations of this same page. migrate_page_copy()
531 cpupid = page_cpupid_xchg_last(page, -1); migrate_page_copy()
534 mlock_migrate_page(newpage, page); migrate_page_copy()
535 ksm_migrate_page(newpage, page); migrate_page_copy()
540 if (PageSwapCache(page)) migrate_page_copy()
541 ClearPageSwapCache(page); migrate_page_copy()
542 ClearPagePrivate(page); migrate_page_copy()
543 set_page_private(page, 0); migrate_page_copy()
546 * If any waiters have accumulated on the new page then migrate_page_copy()
558 * Common logic to directly migrate a single page suitable for
564 struct page *newpage, struct page *page, migrate_page()
569 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ migrate_page()
571 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); migrate_page()
576 migrate_page_copy(newpage, page); migrate_page()
584 * if the underlying filesystem guarantees that no other references to "page"
588 struct page *newpage, struct page *page, enum migrate_mode mode) buffer_migrate_page()
593 if (!page_has_buffers(page)) buffer_migrate_page()
594 return migrate_page(mapping, newpage, page, mode); buffer_migrate_page()
596 head = page_buffers(page); buffer_migrate_page()
598 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); buffer_migrate_page()
611 ClearPagePrivate(page); buffer_migrate_page()
612 set_page_private(newpage, page_private(page)); buffer_migrate_page()
613 set_page_private(page, 0); buffer_migrate_page()
614 put_page(page); buffer_migrate_page()
626 migrate_page_copy(newpage, page); buffer_migrate_page()
642 * Writeback a page to clean the dirty state
644 static int writeout(struct address_space *mapping, struct page *page) writeout() argument
659 if (!clear_page_dirty_for_io(page)) writeout()
664 * A dirty page may imply that the underlying filesystem has writeout()
665 * the page on some queue. So the page must be clean for writeout()
667 * page state is no longer what we checked for earlier. writeout()
671 remove_migration_ptes(page, page); writeout()
673 rc = mapping->a_ops->writepage(page, &wbc); writeout()
677 lock_page(page); writeout()
686 struct page *newpage, struct page *page, enum migrate_mode mode) fallback_migrate_page()
688 if (PageDirty(page)) { fallback_migrate_page()
692 return writeout(mapping, page); fallback_migrate_page()
699 if (page_has_private(page) && fallback_migrate_page()
700 !try_to_release_page(page, GFP_KERNEL)) fallback_migrate_page()
703 return migrate_page(mapping, newpage, page, mode); fallback_migrate_page()
707 * Move a page to a newly allocated page
708 * The page is locked and all ptes have been successfully removed.
710 * The new page will have replaced the old page if this function
717 static int move_to_new_page(struct page *newpage, struct page *page, move_to_new_page() argument
724 * Block others from accessing the page when we get around to move_to_new_page()
726 * holding a reference to the new page at this point. move_to_new_page()
731 /* Prepare mapping for the new page.*/ move_to_new_page()
732 newpage->index = page->index; move_to_new_page()
733 newpage->mapping = page->mapping; move_to_new_page()
734 if (PageSwapBacked(page)) move_to_new_page()
737 mapping = page_mapping(page); move_to_new_page()
739 rc = migrate_page(mapping, newpage, page, mode); move_to_new_page()
745 * is the most common path for page migration. move_to_new_page()
748 newpage, page, mode); move_to_new_page()
750 rc = fallback_migrate_page(mapping, newpage, page, mode); move_to_new_page()
755 mem_cgroup_migrate(page, newpage, false); move_to_new_page()
757 remove_migration_ptes(page, newpage); move_to_new_page()
758 page->mapping = NULL; move_to_new_page()
766 static int __unmap_and_move(struct page *page, struct page *newpage, __unmap_and_move() argument
773 if (!trylock_page(page)) { __unmap_and_move()
779 * For example, during page readahead pages are added locked __unmap_and_move()
784 * second or third page, the process can end up locking __unmap_and_move()
785 * the same page twice and deadlocking. Rather than __unmap_and_move()
793 lock_page(page); __unmap_and_move()
796 if (PageWriteback(page)) { __unmap_and_move()
809 wait_on_page_writeback(page); __unmap_and_move()
812 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, __unmap_and_move()
813 * we cannot notice that anon_vma is freed while we migrate a page. __unmap_and_move()
817 * we only need to care about anon pages here. __unmap_and_move()
819 if (PageAnon(page) && !PageKsm(page)) { __unmap_and_move()
824 anon_vma = page_get_anon_vma(page); __unmap_and_move()
827 * Anon page __unmap_and_move()
829 } else if (PageSwapCache(page)) { __unmap_and_move()
832 * swapcache page is safe to use because we don't __unmap_and_move()
833 * know in advance if the VMA that this page belonged __unmap_and_move()
847 if (unlikely(isolated_balloon_page(page))) { __unmap_and_move()
849 * A ballooned page does not need any special attention from __unmap_and_move()
853 * the page migration right away (protected by page lock). __unmap_and_move()
855 rc = balloon_page_migrate(newpage, page, mode); __unmap_and_move()
861 * 1. When a new swap-cache page is read into, it is added to the LRU __unmap_and_move()
863 * Calling try_to_unmap() against a page->mapping==NULL page will __unmap_and_move()
865 * 2. An orphaned page (see truncate_complete_page) might have __unmap_and_move()
866 * fs-private metadata. The page can be picked up due to memory __unmap_and_move()
867 * offlining. Everywhere else except page reclaim, the page is __unmap_and_move()
868 * invisible to the vm, so the page can not be migrated. So try to __unmap_and_move()
869 * free the metadata, so the page can be freed. __unmap_and_move()
871 if (!page->mapping) { __unmap_and_move()
872 VM_BUG_ON_PAGE(PageAnon(page), page); __unmap_and_move() local
873 if (page_has_private(page)) { __unmap_and_move()
874 try_to_free_buffers(page); __unmap_and_move()
881 if (page_mapped(page)) { __unmap_and_move()
882 try_to_unmap(page, __unmap_and_move()
888 if (!page_mapped(page)) __unmap_and_move()
889 rc = move_to_new_page(newpage, page, page_was_mapped, mode); __unmap_and_move()
892 remove_migration_ptes(page, page); __unmap_and_move()
899 unlock_page(page); __unmap_and_move()
915 * Obtain the lock on page, remove all ptes and migrate the page
916 * to the newly allocated page in newpage.
920 unsigned long private, struct page *page, unmap_and_move()
926 struct page *newpage = get_new_page(page, private, &result); unmap_and_move()
931 if (page_count(page) == 1) { unmap_and_move()
932 /* page was freed from under us. So we are done. */ unmap_and_move()
936 if (unlikely(PageTransHuge(page))) unmap_and_move()
937 if (unlikely(split_huge_page(page))) unmap_and_move()
940 rc = __unmap_and_move(page, newpage, force, mode); unmap_and_move()
945 * A page that has been migrated has all references unmap_and_move()
946 * removed and will be freed. A page that has not been unmap_and_move()
950 list_del(&page->lru); unmap_and_move()
951 dec_zone_page_state(page, NR_ISOLATED_ANON + unmap_and_move()
952 page_is_file_cache(page)); unmap_and_move()
953 /* Soft-offlined page shouldn't go through lru cache list */ unmap_and_move()
955 put_page(page); unmap_and_move()
957 putback_lru_page(page); unmap_and_move()
969 /* drop our reference, page already in the balloon */ unmap_and_move()
991 * count of the head page (i.e. if all subpages of a 2MB hugepage are
992 * under direct I/O, the reference of the head page is 512 and a bit more.)
997 * There is also no race when direct I/O is issued on the page under migration,
999 * will wait in the page fault for migration to complete.
1003 struct page *hpage, int force, unmap_and_move_huge_page()
1009 struct page *new_hpage; unmap_and_move_huge_page()
1015 * like soft offline and memory hotremove don't walk through page unmap_and_move_huge_page()
1083 * supplied as the target for the page migration
1087 * as the target of the page migration.
1092 * page migration, if any.
1093 * @reason: The reason for page migration.
1110 struct page *page; migrate_pages() local
1111 struct page *page2; migrate_pages()
1121 list_for_each_entry_safe(page, page2, from, lru) { list_for_each_entry_safe()
1124 if (PageHuge(page)) list_for_each_entry_safe()
1126 put_new_page, private, page, list_for_each_entry_safe()
1130 private, page, pass > 2, mode, list_for_each_entry_safe()
1145 * unlike -EAGAIN case, the failed page is list_for_each_entry_safe()
1146 * removed from migration page list and not list_for_each_entry_safe()
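migrate_pages() above retries the pages that come back with -EAGAIN over a bounded number of passes, and the `pass > 2` argument suggests force is applied only after a few attempts. A user-space sketch of that retry shape; the pass limit, the fake migrate_one(), and the bookkeeping are assumptions for illustration:

#include <stdio.h>
#include <errno.h>

#define MAX_PASSES 10	/* assumption: bounded retry, as in the loop above */

/* Fake per-item work: item i succeeds once we have made i passes. */
static int migrate_one(int item, int pass, int force)
{
	(void)force;
	return pass >= item ? 0 : -EAGAIN;
}

static int migrate_all(int nr_items)
{
	int done[16] = { 0 };
	int failed = 0;

	for (int pass = 0; pass < MAX_PASSES; pass++) {
		int retry = 0;

		for (int i = 0; i < nr_items; i++) {
			if (done[i])
				continue;	/* already migrated or failed */
			int rc = migrate_one(i, pass, pass > 2 /* force late */);
			if (rc == -EAGAIN) {
				retry++;	/* leave it for the next pass */
			} else {
				if (rc)
					failed++;	/* permanent failure */
				done[i] = 1;
			}
		}
		if (!retry)
			break;
	}
	return failed;
}

int main(void)
{
	printf("failed: %d\n", migrate_all(4));	/* 0: everything succeeds eventually */
	return 0;
}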
1174 struct page *page; member in struct:page_to_node
1179 static struct page *new_page_node(struct page *p, unsigned long private, new_page_node()
1184 while (pm->node != MAX_NUMNODES && pm->page != p) new_page_node()
1202 * field must be set to the virtual address of the page to be moved
1221 struct page *page; do_move_page_to_node_array() local
1228 page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT); do_move_page_to_node_array()
1230 err = PTR_ERR(page); do_move_page_to_node_array()
1231 if (IS_ERR(page)) do_move_page_to_node_array()
1235 if (!page) do_move_page_to_node_array()
1238 /* Use PageReserved to check for zero page */ do_move_page_to_node_array()
1239 if (PageReserved(page)) do_move_page_to_node_array()
1242 pp->page = page; do_move_page_to_node_array()
1243 err = page_to_nid(page); do_move_page_to_node_array()
1252 if (page_mapcount(page) > 1 && do_move_page_to_node_array()
1256 if (PageHuge(page)) { do_move_page_to_node_array()
1257 if (PageHead(page)) do_move_page_to_node_array()
1258 isolate_huge_page(page, &pagelist); do_move_page_to_node_array()
1262 err = isolate_lru_page(page); do_move_page_to_node_array()
1264 list_add_tail(&page->lru, &pagelist); do_move_page_to_node_array()
1265 inc_zone_page_state(page, NR_ISOLATED_ANON + do_move_page_to_node_array()
1266 page_is_file_cache(page)); do_move_page_to_node_array()
1271 * isolate_lru_page() or drop the page ref if it was do_move_page_to_node_array()
1274 put_page(page); do_move_page_to_node_array()
1292 * Migrate an array of page address onto an array of nodes and fill
1314 * Store a chunk of page_to_node array in a page, do_pages_move()
1391 struct page *page; do_pages_stat_array() local
1398 page = follow_page(vma, addr, 0); do_pages_stat_array()
1400 err = PTR_ERR(page); do_pages_stat_array()
1401 if (IS_ERR(page)) do_pages_stat_array()
1405 /* Use PageReserved to check for zero page */ do_pages_stat_array()
1406 if (!page || PageReserved(page)) do_pages_stat_array()
1409 err = page_to_nid(page); do_pages_stat_array()
1555 static struct page *alloc_misplaced_dst_page(struct page *page, alloc_misplaced_dst_page() argument
1560 struct page *newpage; alloc_misplaced_dst_page()
1572 * page migration rate limiting control.
1611 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) numamigrate_isolate_page() argument
1615 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); numamigrate_isolate_page()
1618 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) numamigrate_isolate_page()
1621 if (isolate_lru_page(page)) numamigrate_isolate_page()
1625 * migrate_misplaced_transhuge_page() skips page migration's usual numamigrate_isolate_page()
1626 * check on page_count(), so we must do it here, now that the page numamigrate_isolate_page()
1628 * The expected page count is 3: 1 for page's mapcount and 1 for the numamigrate_isolate_page()
1631 if (PageTransHuge(page) && page_count(page) != 3) { numamigrate_isolate_page()
1632 putback_lru_page(page); numamigrate_isolate_page()
1636 page_lru = page_is_file_cache(page); numamigrate_isolate_page()
1637 mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, numamigrate_isolate_page()
1638 hpage_nr_pages(page)); numamigrate_isolate_page()
1641 * Isolating the page has taken another reference, so the numamigrate_isolate_page()
1642 * caller's reference can be safely dropped without the page numamigrate_isolate_page()
1645 put_page(page); numamigrate_isolate_page()
1651 struct page *page = pmd_page(pmd); pmd_trans_migrating() local
1652 return PageLocked(page); pmd_trans_migrating()
1656 * Attempt to migrate a misplaced page to the specified destination
1658 * the page that will be dropped by this function before returning.
1660 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, migrate_misplaced_page() argument
1672 if (page_mapcount(page) != 1 && page_is_file_cache(page) && migrate_misplaced_page()
1684 isolated = numamigrate_isolate_page(pgdat, page); migrate_misplaced_page()
1688 list_add(&page->lru, &migratepages); migrate_misplaced_page()
1694 list_del(&page->lru); migrate_misplaced_page()
1695 dec_zone_page_state(page, NR_ISOLATED_ANON + migrate_misplaced_page()
1696 page_is_file_cache(page)); migrate_misplaced_page()
1697 putback_lru_page(page); migrate_misplaced_page()
1706 put_page(page); migrate_misplaced_page()
1713 * Migrates a THP to a given target node. page must be locked and is unlocked
1720 struct page *page, int node) migrate_misplaced_transhuge_page()
1725 struct page *new_page = NULL; migrate_misplaced_transhuge_page()
1726 int page_lru = page_is_file_cache(page); migrate_misplaced_transhuge_page()
1745 isolated = numamigrate_isolate_page(pgdat, page); migrate_misplaced_transhuge_page()
1754 /* Prepare a page as a migration target */ migrate_misplaced_transhuge_page()
1758 /* anon mapping, we can simply copy page->mapping to the new page: */ migrate_misplaced_transhuge_page()
1759 new_page->mapping = page->mapping; migrate_misplaced_transhuge_page()
1760 new_page->index = page->index; migrate_misplaced_transhuge_page()
1761 migrate_page_copy(new_page, page); migrate_misplaced_transhuge_page()
1767 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { migrate_misplaced_transhuge_page()
1774 SetPageActive(page); migrate_misplaced_transhuge_page()
1776 SetPageUnevictable(page); migrate_misplaced_transhuge_page()
1777 mlock_migrate_page(page, new_page); migrate_misplaced_transhuge_page()
1783 get_page(page); migrate_misplaced_transhuge_page()
1784 putback_lru_page(page); migrate_misplaced_transhuge_page()
1785 mod_zone_page_state(page_zone(page), migrate_misplaced_transhuge_page()
1798 * Any parallel GUP will either observe the old page blocking on the migrate_misplaced_transhuge_page()
1799 * page lock, block on the page table lock or observe the new page. migrate_misplaced_transhuge_page()
1800 * The SetPageUptodate on the new page and page_add_new_anon_rmap migrate_misplaced_transhuge_page()
1810 if (page_count(page) != 2) { migrate_misplaced_transhuge_page()
1819 mem_cgroup_migrate(page, new_page, false); migrate_misplaced_transhuge_page()
1821 page_remove_rmap(page); migrate_misplaced_transhuge_page()
1826 /* Take an "isolate" reference and put new page on the LRU. */ migrate_misplaced_transhuge_page()
1831 unlock_page(page); migrate_misplaced_transhuge_page()
1832 put_page(page); /* Drop the rmap reference */ migrate_misplaced_transhuge_page()
1833 put_page(page); /* Drop the LRU isolation reference */ migrate_misplaced_transhuge_page()
1838 mod_zone_page_state(page_zone(page), migrate_misplaced_transhuge_page()
1855 unlock_page(page); migrate_misplaced_transhuge_page()
1856 put_page(page); migrate_misplaced_transhuge_page()
308 migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, struct buffer_head *head, enum migrate_mode mode, int extra_count) migrate_page_move_mapping() argument
397 migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page) migrate_huge_page_move_mapping() argument
563 migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) migrate_page() argument
587 buffer_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) buffer_migrate_page() argument
685 fallback_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) fallback_migrate_page() argument
918 unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, unsigned long private, struct page *page, int force, enum migrate_mode mode, enum migrate_reason reason) unmap_and_move() argument
1716 migrate_misplaced_transhuge_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, pmd_t entry, unsigned long address, struct page *page, int node) migrate_misplaced_transhuge_page() argument
H A Dzsmalloc.c16 * struct page(s) to form a zspage.
18 * Usage of struct page fields:
19 * page->first_page: points to the first component (0-order) page
20 * page->index (union with page->freelist): offset of the first object
21 * starting in this page. For the first page, this is
24 * page->lru: links together all component pages (except the first page)
27 * For _first_ page only:
29 * page->private (union with page->first_page): refers to the
30 * component page after the first page
31 * If the page is first_page for huge object, it stores handle.
33 * page->freelist: points to the first free object in zspage.
36 * page->objects: maximum number of objects we can store in this
38 * page->lru: links together first pages of various zspages.
40 * page->mapping: class index and fullness group of the zspage
42 * Usage of struct page flags:
43 * PG_private: identifies the first component page
44 * PG_private2: identifies the last component page
75 * span more than 1 page which avoids complex case of mapping 2 pages simply
94 * page <PFN> it is stored in, so for each sub-page belonging
115 * encoding <page, obj_idx> and the encoded value has a room
142 * On systems with 4K page size, this gives 255 size classes! There is a
144 * - A large number of size classes is potentially wasteful as free pages are
192 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
226 struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
265 * are encoded in its (first)page->mapping
399 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
402 static int is_first_page(struct page *page) is_first_page() argument
404 return PagePrivate(page); is_first_page()
407 static int is_last_page(struct page *page) is_last_page() argument
409 return PagePrivate2(page); is_last_page()
412 static void get_zspage_mapping(struct page *page, unsigned int *class_idx, get_zspage_mapping() argument
416 BUG_ON(!is_first_page(page)); get_zspage_mapping()
418 m = (unsigned long)page->mapping; get_zspage_mapping()
423 static void set_zspage_mapping(struct page *page, unsigned int class_idx, set_zspage_mapping() argument
427 BUG_ON(!is_first_page(page)); set_zspage_mapping()
431 page->mapping = (struct address_space *)m; set_zspage_mapping()
630 * status of the given page.
632 static enum fullness_group get_fullness_group(struct page *page) get_fullness_group() argument
636 BUG_ON(!is_first_page(page)); get_fullness_group()
638 inuse = page->inuse; get_fullness_group()
639 max_objects = page->objects; get_fullness_group()
659 static void insert_zspage(struct page *page, struct size_class *class, insert_zspage() argument
662 struct page **head; insert_zspage()
664 BUG_ON(!is_first_page(page)); insert_zspage()
671 list_add_tail(&page->lru, &(*head)->lru); insert_zspage()
673 *head = page; insert_zspage()
682 static void remove_zspage(struct page *page, struct size_class *class, remove_zspage() argument
685 struct page **head; remove_zspage()
687 BUG_ON(!is_first_page(page)); remove_zspage()
696 else if (*head == page) remove_zspage()
697 *head = (struct page *)list_entry((*head)->lru.next, remove_zspage()
698 struct page, lru); remove_zspage()
700 list_del_init(&page->lru); remove_zspage()
708 * objects, the fullness status of the page can change, say, from ALMOST_FULL
710 * a status change has occurred for the given page and accordingly moves the
711 * page from the freelist of the old fullness group to that of the new
715 struct page *page) fix_fullness_group()
720 BUG_ON(!is_first_page(page)); fix_fullness_group()
722 get_zspage_mapping(page, &class_idx, &currfg); fix_fullness_group()
723 newfg = get_fullness_group(page); fix_fullness_group()
727 remove_zspage(page, class, currfg); fix_fullness_group()
728 insert_zspage(page, class, newfg); fix_fullness_group()
729 set_zspage_mapping(page, class_idx, newfg); fix_fullness_group()
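fix_fullness_group() above recomputes the group from inuse/objects and, on a change, re-files the first page from the old group's list to the new one. A userspace sketch of that bookkeeping; the 3/4 "almost full" threshold is an assumption for illustration, not the driver's actual cutoff:

#include <stdio.h>

enum fullness_group { ZS_ALMOST_FULL, ZS_ALMOST_EMPTY, ZS_FULL, ZS_EMPTY, NR_GROUPS };

struct zspage_stub {
	int inuse;    /* objects currently allocated */
	int objects;  /* capacity of this zspage */
	enum fullness_group group;
};

/* Assumed threshold: "almost full" above 3/4 utilization. */
static enum fullness_group get_fullness(const struct zspage_stub *z)
{
	if (z->inuse == 0)
		return ZS_EMPTY;
	if (z->inuse == z->objects)
		return ZS_FULL;
	return (z->inuse * 4 > z->objects * 3) ? ZS_ALMOST_FULL : ZS_ALMOST_EMPTY;
}

/* Re-file the zspage if its fullness changed; returns the new group. */
static enum fullness_group fix_fullness(struct zspage_stub *z)
{
	enum fullness_group newfg = get_fullness(z);

	if (newfg != z->group) {
		/* in the kernel: remove_zspage(old group) + insert_zspage(new group) */
		z->group = newfg;
	}
	return newfg;
}

int main(void)
{
	struct zspage_stub z = { .inuse = 2, .objects = 8, .group = ZS_ALMOST_EMPTY };

	z.inuse = 7;                                  /* several allocations later */
	printf("group now %d\n", fix_fullness(&z));   /* prints ZS_ALMOST_FULL (0) */
	return 0;
}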
773 * linked together using fields in struct page. This function finds
774 * the first/head page, given any component page of a zspage.
776 static struct page *get_first_page(struct page *page) get_first_page() argument
778 if (is_first_page(page)) get_first_page()
779 return page; get_first_page()
781 return page->first_page; get_first_page()
784 static struct page *get_next_page(struct page *page) get_next_page() argument
786 struct page *next; get_next_page()
788 if (is_last_page(page)) get_next_page()
790 else if (is_first_page(page)) get_next_page()
791 next = (struct page *)page_private(page); get_next_page()
793 next = list_entry(page->lru.next, struct page, lru); get_next_page()
799 * Encode <page, obj_idx> as a single handle value.
802 static void *location_to_obj(struct page *page, unsigned long obj_idx) location_to_obj() argument
806 if (!page) { location_to_obj()
811 obj = page_to_pfn(page) << OBJ_INDEX_BITS; location_to_obj()
819 * Decode <page, obj_idx> pair from the given object handle. We adjust the
823 static void obj_to_location(unsigned long obj, struct page **page, obj_to_location() argument
827 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); obj_to_location()
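location_to_obj()/obj_to_location() above pack a page frame number and an object index into one unsigned long. A hedged sketch of that packing and unpacking; OBJ_INDEX_BITS is chosen arbitrarily here, and the handle tag-bit handling of the real code is omitted:

#include <stdio.h>

#define OBJ_INDEX_BITS  10                              /* illustrative only */
#define OBJ_INDEX_MASK  ((1UL << OBJ_INDEX_BITS) - 1)

/* Encode <pfn, obj_idx> into a single handle-like value. */
static unsigned long location_to_obj(unsigned long pfn, unsigned long obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

/* Decode it again. */
static void obj_to_location(unsigned long obj, unsigned long *pfn,
			    unsigned long *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn, idx;
	unsigned long obj = location_to_obj(0x12345, 37);

	obj_to_location(obj, &pfn, &idx);
	printf("pfn=%#lx idx=%lu\n", pfn, idx);         /* pfn=0x12345 idx=37 */
	return 0;
}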
836 static unsigned long obj_to_head(struct size_class *class, struct page *page, obj_to_head() argument
840 VM_BUG_ON(!is_first_page(page)); obj_to_head()
841 return *(unsigned long *)page_private(page); obj_to_head()
846 static unsigned long obj_idx_to_offset(struct page *page, obj_idx_to_offset() argument
851 if (!is_first_page(page)) obj_idx_to_offset()
852 off = page->index; obj_idx_to_offset()
876 static void reset_page(struct page *page) reset_page() argument
878 clear_bit(PG_private, &page->flags); reset_page()
879 clear_bit(PG_private_2, &page->flags); reset_page()
880 set_page_private(page, 0); reset_page()
881 page->mapping = NULL; reset_page()
882 page->freelist = NULL; reset_page()
883 page_mapcount_reset(page); reset_page()
886 static void free_zspage(struct page *first_page) free_zspage()
888 struct page *nextp, *tmp, *head_extra; free_zspage()
893 head_extra = (struct page *)page_private(first_page); free_zspage()
898 /* zspage with only 1 system page */ free_zspage()
912 static void init_zspage(struct page *first_page, struct size_class *class) init_zspage()
915 struct page *page = first_page; init_zspage() local
918 while (page) { init_zspage()
919 struct page *next_page; init_zspage()
925 * page->index stores offset of first object starting init_zspage()
926 * in the page. For the first page, this is always 0, init_zspage()
930 if (page != first_page) init_zspage()
931 page->index = off; init_zspage()
933 vaddr = kmap_atomic(page); init_zspage()
937 link->next = location_to_obj(page, i++); init_zspage()
943 * page, which must point to the first object on the next init_zspage()
944 * page (if present) init_zspage()
946 next_page = get_next_page(page); init_zspage()
949 page = next_page; init_zspage()
957 static struct page *alloc_zspage(struct size_class *class, gfp_t flags) alloc_zspage()
960 struct page *first_page = NULL, *uninitialized_var(prev_page); alloc_zspage()
964 * 1. first page->private = first sub-page alloc_zspage()
965 * 2. all sub-pages are linked together using page->lru alloc_zspage()
966 * 3. each sub-page is linked to the first page using page->first_page alloc_zspage()
969 * page->lru. Also, we set PG_private to identify the first page alloc_zspage()
970 * (i.e. no other sub-page has this flag set) and PG_private_2 to alloc_zspage()
971 * identify the last page. alloc_zspage()
975 struct page *page; alloc_zspage() local
977 page = alloc_page(flags); alloc_zspage()
978 if (!page) alloc_zspage()
981 INIT_LIST_HEAD(&page->lru); alloc_zspage()
982 if (i == 0) { /* first page */ alloc_zspage()
983 SetPagePrivate(page); alloc_zspage()
984 set_page_private(page, 0); alloc_zspage()
985 first_page = page; alloc_zspage()
989 set_page_private(first_page, (unsigned long)page); alloc_zspage()
991 page->first_page = first_page; alloc_zspage()
993 list_add(&page->lru, &prev_page->lru); alloc_zspage()
994 if (i == class->pages_per_zspage - 1) /* last page */ alloc_zspage()
995 SetPagePrivate2(page); alloc_zspage()
996 prev_page = page; alloc_zspage()
1016 static struct page *find_get_zspage(struct size_class *class) find_get_zspage()
1019 struct page *page; find_get_zspage() local
1022 page = class->fullness_list[i]; find_get_zspage()
1023 if (page) find_get_zspage()
1027 return page; find_get_zspage()
1053 struct page *pages[2], int off, int size) __zs_map_object()
1061 struct page *pages[2], int off, int size) __zs_unmap_object()
1091 struct page *pages[2], int off, int size) __zs_map_object()
1097 /* disable page faults to match kmap_atomic() return conditions */ __zs_map_object()
1119 struct page *pages[2], int off, int size) __zs_unmap_object()
1148 /* enable page faults to match kunmap_atomic() return conditions */ __zs_unmap_object()
1234 static bool zspage_full(struct page *page) zspage_full() argument
1236 BUG_ON(!is_first_page(page)); zspage_full()
1238 return page->inuse == page->objects; zspage_full()
1259 * This function returns with preemption and page faults disabled.
1264 struct page *page; zs_map_object() local
1271 struct page *pages[2]; zs_map_object()
1287 obj_to_location(obj, &page, &obj_idx); zs_map_object()
1288 get_zspage_mapping(get_first_page(page), &class_idx, &fg); zs_map_object()
1290 off = obj_idx_to_offset(page, obj_idx, class->size); zs_map_object()
1295 /* this object is contained entirely within a page */ zs_map_object()
1296 area->vm_addr = kmap_atomic(page); zs_map_object()
1302 pages[0] = page; zs_map_object()
1303 pages[1] = get_next_page(page); zs_map_object()
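When the object straddles a page boundary, zs_map_object() takes the two-page path above and the mapping helper has to present the object contiguously. A userspace sketch of that split copy, assuming a plain memcpy-based fallback rather than the per-cpu mapping area:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/*
 * Copy an object of 'size' bytes that starts at 'off' in pages[0] and
 * continues at the start of pages[1] into a contiguous buffer.
 */
static void map_object_copy(char *buf, char *pages[2], int off, int size)
{
	int first = PAGE_SIZE - off;          /* bytes available in page 0 */

	if (size <= first) {                  /* fits entirely in one page */
		memcpy(buf, pages[0] + off, size);
		return;
	}
	memcpy(buf, pages[0] + off, first);
	memcpy(buf + first, pages[1], size - first);
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE], buf[64];
	char *pages[2] = { p0, p1 };

	memset(p0 + PAGE_SIZE - 16, 'A', 16); /* object tail in page 0 */
	memset(p1, 'B', 48);                  /* object head in page 1 */
	map_object_copy(buf, pages, PAGE_SIZE - 16, 64);

	printf("%c...%c\n", buf[0], buf[63]); /* A...B */
	return 0;
}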
1317 struct page *page; zs_unmap_object() local
1328 obj_to_location(obj, &page, &obj_idx); zs_unmap_object()
1329 get_zspage_mapping(get_first_page(page), &class_idx, &fg); zs_unmap_object()
1331 off = obj_idx_to_offset(page, obj_idx, class->size); zs_unmap_object()
1337 struct page *pages[2]; zs_unmap_object()
1339 pages[0] = page; zs_unmap_object()
1340 pages[1] = get_next_page(page); zs_unmap_object()
1350 static unsigned long obj_malloc(struct page *first_page, obj_malloc()
1356 struct page *m_page; obj_malloc()
1395 struct page *first_page; zs_malloc()
1442 struct page *first_page, *f_page; obj_free()
1472 struct page *first_page, *f_page; zs_free()
1509 struct page *s_page, *d_page; zs_object_copy()
1575 static unsigned long find_alloced_obj(struct page *page, int index, find_alloced_obj() argument
1581 void *addr = kmap_atomic(page); find_alloced_obj()
1583 if (!is_first_page(page)) find_alloced_obj()
1584 offset = page->index; find_alloced_obj()
1588 head = obj_to_head(class, page, addr + offset); find_alloced_obj()
1605 /* Source page for migration which could be a subpage of zspage. */
1606 struct page *s_page;
1607 /* Destination page for migration which should be a first page
1609 struct page *d_page;
1622 struct page *s_page = cc->s_page; migrate_zspage()
1623 struct page *d_page = cc->d_page; migrate_zspage()
1670 static struct page *alloc_target_page(struct size_class *class) alloc_target_page()
1673 struct page *page; alloc_target_page() local
1676 page = class->fullness_list[i]; alloc_target_page()
1677 if (page) { alloc_target_page()
1678 remove_zspage(page, class, i); alloc_target_page()
1683 return page; alloc_target_page()
1687 struct page *first_page) putback_zspage()
1707 static struct page *isolate_source_page(struct size_class *class) isolate_source_page()
1709 struct page *page; isolate_source_page() local
1711 page = class->fullness_list[ZS_ALMOST_EMPTY]; isolate_source_page()
1712 if (page) isolate_source_page()
1713 remove_zspage(page, class, ZS_ALMOST_EMPTY); isolate_source_page()
1715 return page; isolate_source_page()
1723 struct page *src_page; __zs_compact()
1724 struct page *dst_page = NULL; __zs_compact()
1732 /* The goal is to migrate all live objects in the source page */ __zs_compact()
714 fix_fullness_group(struct size_class *class, struct page *page) fix_fullness_group() argument
H A Dpage_owner.c49 void __reset_page_owner(struct page *page, unsigned int order) __reset_page_owner() argument
55 page_ext = lookup_page_ext(page + i); __reset_page_owner()
60 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) __set_page_owner() argument
62 struct page_ext *page_ext = lookup_page_ext(page); __set_page_owner()
81 struct page *page, struct page_ext *page_ext) print_page_owner()
103 pageblock_mt = get_pfnblock_migratetype(page, pfn); print_page_owner()
111 PageLocked(page) ? "K" : " ", print_page_owner()
112 PageError(page) ? "E" : " ", print_page_owner()
113 PageReferenced(page) ? "R" : " ", print_page_owner()
114 PageUptodate(page) ? "U" : " ", print_page_owner()
115 PageDirty(page) ? "D" : " ", print_page_owner()
116 PageLRU(page) ? "L" : " ", print_page_owner()
117 PageActive(page) ? "A" : " ", print_page_owner()
118 PageSlab(page) ? "S" : " ", print_page_owner()
119 PageWriteback(page) ? "W" : " ", print_page_owner()
120 PageCompound(page) ? "C" : " ", print_page_owner()
121 PageSwapCache(page) ? "B" : " ", print_page_owner()
122 PageMappedToDisk(page) ? "M" : " "); print_page_owner()
150 struct page *page; read_page_owner() local
156 page = NULL; read_page_owner()
165 /* Find an allocated page */ read_page_owner()
168 * If the new page is in a new MAX_ORDER_NR_PAGES area, read_page_owner()
180 page = pfn_to_page(pfn); read_page_owner()
181 if (PageBuddy(page)) { read_page_owner()
182 unsigned long freepage_order = page_order_unsafe(page); read_page_owner()
189 page_ext = lookup_page_ext(page); read_page_owner()
201 return print_page_owner(buf, count, pfn, page, page_ext); read_page_owner()
209 struct page *page; init_pages_in_zone() local
219 * Walk the zone in pageblock_nr_pages steps. If a page block spans init_pages_in_zone()
232 page = pfn_to_page(pfn); init_pages_in_zone()
238 page = pfn_to_page(pfn); init_pages_in_zone()
244 if (PageBuddy(page)) { init_pages_in_zone()
245 pfn += (1UL << page_order(page)) - 1; init_pages_in_zone()
249 if (PageReserved(page)) init_pages_in_zone()
252 page_ext = lookup_page_ext(page); init_pages_in_zone()
258 /* Found early allocated page */ init_pages_in_zone()
259 set_page_owner(page, 0, 0); init_pages_in_zone()
264 pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n", init_pages_in_zone()
80 print_page_owner(char __user *buf, size_t count, unsigned long pfn, struct page *page, struct page_ext *page_ext) print_page_owner() argument
H A Dpercpu-vm.c9 * Chunks are mapped into vmalloc areas and populated page by page.
13 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, pcpu_chunk_page()
26 * Returns pointer to array of pointers to struct page which can be indexed
33 static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc) pcpu_get_pages()
35 static struct page **pages; pcpu_get_pages()
49 * @page_start: page index of the first page to be freed
50 * @page_end: page index of the last page to be freed + 1
56 struct page **pages, int page_start, int page_end) pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; for_each_possible_cpu() local
65 if (page) for_each_possible_cpu()
66 __free_page(page); for_each_possible_cpu()
75 * @page_start: page index of the first page to be allocated
76 * @page_end: page index of the last page to be allocated + 1
83 struct page **pages, int page_start, int page_end) pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; for_each_possible_cpu()
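The pages array returned by pcpu_get_pages() is flat and indexed per CPU via pcpu_page_idx(cpu, i). A sketch of that flattening, assuming the index is simply cpu * unit_pages + page_idx (the helper's body is not part of this listing):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS     4       /* illustrative */
#define UNIT_PAGES  8       /* pages per CPU unit, illustrative */

/* Assumed layout: all of CPU 0's slots, then CPU 1's, and so on. */
static int pcpu_page_idx(int cpu, int page_idx)
{
	return cpu * UNIT_PAGES + page_idx;
}

int main(void)
{
	/* one pointer slot per (cpu, page) pair, as in pcpu_get_pages() */
	void **pages = calloc(NR_CPUS * UNIT_PAGES, sizeof(*pages));
	int dummy;

	pages[pcpu_page_idx(2, 5)] = &dummy;
	printf("slot for cpu 2, page 5: %d (set=%d)\n",
	       pcpu_page_idx(2, 5), pages[pcpu_page_idx(2, 5)] != NULL);
	free(pages);
	return 0;
}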
116 * @page_start: page index of the first page to be flushed
117 * @page_end: page index of the last page to be flushed + 1
142 * @page_start: page index of the first page to unmap
143 * @page_end: page index of the last page to unmap + 1
152 struct page **pages, int page_start, int page_end) pcpu_unmap_pages()
159 struct page *page; for_each_possible_cpu() local
161 page = pcpu_chunk_page(chunk, cpu, i); for_each_possible_cpu()
162 WARN_ON(!page); for_each_possible_cpu()
163 pages[pcpu_page_idx(cpu, i)] = page; for_each_possible_cpu()
173 * @page_start: page index of the first page to be flushed
174 * @page_end: page index of the last page to be flushed + 1
191 static int __pcpu_map_pages(unsigned long addr, struct page **pages, __pcpu_map_pages()
202 * @page_start: page index of the first page to map
203 * @page_end: page index of the last page to map + 1
213 struct page **pages, int page_start, int page_end) pcpu_map_pages()
244 * @page_start: page index of the first page to be flushed
245 * @page_end: page index of the last page to be flushed + 1
264 * @page_start: the start page
265 * @page_end: the end page
276 struct page **pages; pcpu_populate_chunk()
297 * @page_start: the start page
298 * @page_end: the end page
309 struct page **pages; pcpu_depopulate_chunk()
357 static struct page *pcpu_addr_to_page(void *addr) pcpu_addr_to_page()
H A Dmemory-failure.c16 * Handles page cache pages in various states. The tricky part
17 * here is that we can access any page asynchronously with respect to
40 #include <linux/page-flags.h>
41 #include <linux/kernel-page-flags.h>
50 #include <linux/page-isolation.h>
79 static int hwpoison_filter_dev(struct page *p) hwpoison_filter_dev()
109 static int hwpoison_filter_flags(struct page *p) hwpoison_filter_flags()
128 * can only guarantee that the page either belongs to the memcg tasks, or is
129 * a freed page.
134 static int hwpoison_filter_task(struct page *p) hwpoison_filter_task()
157 static int hwpoison_filter_task(struct page *p) { return 0; } hwpoison_filter_task()
160 int hwpoison_filter(struct page *p) hwpoison_filter()
177 int hwpoison_filter(struct page *p) hwpoison_filter()
186 * Send all the processes who have the page mapped a signal.
191 unsigned long pfn, struct page *page, int flags) kill_proc()
205 si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; kill_proc()
227 * When an unknown page type is encountered, drain as many buffers as possible
228 * in the hope of turning the page into an LRU or free page, which we can handle.
230 void shake_page(struct page *p, int access) shake_page()
251 * Kill all processes that have a poisoned page mapped and then isolate
252 * the page.
255 * Find all processes having the page mapped and kill them.
256 * But we keep a page reference around so that the page is not
258 * Then stash the page away
289 static void add_to_kill(struct task_struct *tsk, struct page *p, add_to_kill()
311 * In theory we don't have to kill when the page was add_to_kill()
335 int fail, struct page *page, unsigned long pfn, kill_procs()
349 "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", list_for_each_entry_safe()
361 pfn, page, flags) < 0) list_for_each_entry_safe()
391 * to be signaled when some page under the process is hwpoisoned.
412 * Collect processes when the error hit an anonymous page.
414 static void collect_procs_anon(struct page *page, struct list_head *to_kill, collect_procs_anon() argument
422 av = page_lock_anon_vma_read(page); collect_procs_anon()
426 pgoff = page_to_pgoff(page); collect_procs_anon()
437 if (!page_mapped_in_vma(page, vma)) for_each_process()
440 add_to_kill(t, page, vma, to_kill, tkc); for_each_process()
448 * Collect processes when the error hit a file mapped page.
450 static void collect_procs_file(struct page *page, struct list_head *to_kill, collect_procs_file() argument
455 struct address_space *mapping = page->mapping; collect_procs_file()
460 pgoff_t pgoff = page_to_pgoff(page); for_each_process()
469 * the page but the corrupted page is not necessarily for_each_process()
475 add_to_kill(t, page, vma, to_kill, tkc); for_each_process()
483 * Collect the processes who have the corrupted page mapped to kill.
488 static void collect_procs(struct page *page, struct list_head *tokill, collect_procs() argument
493 if (!page->mapping) collect_procs()
499 if (PageAnon(page)) collect_procs()
500 collect_procs_anon(page, tokill, &tk, force_early); collect_procs()
502 collect_procs_file(page, tokill, &tk, force_early); collect_procs()
548 [MSG_KERNEL] = "reserved kernel page",
549 [MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
550 [MSG_SLAB] = "kernel slab page",
551 [MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
552 [MSG_POISONED_HUGE] = "huge page already hardware poisoned",
553 [MSG_HUGE] = "huge page",
554 [MSG_FREE_HUGE] = "free huge page",
555 [MSG_UNMAP_FAILED] = "unmapping failed page",
556 [MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
557 [MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
558 [MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
559 [MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
560 [MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
561 [MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
562 [MSG_DIRTY_LRU] = "dirty LRU page",
563 [MSG_CLEAN_LRU] = "clean LRU page",
564 [MSG_TRUNCATED_LRU] = "already truncated LRU page",
565 [MSG_BUDDY] = "free buddy page",
566 [MSG_BUDDY_2ND] = "free buddy page (2nd try)",
567 [MSG_UNKNOWN] = "unknown page",
571 * XXX: It is possible that a page is isolated from LRU cache,
572 * and then kept in swap cache or failed to remove from page cache.
573 * The page count will stop it from being freed by unpoison.
576 static int delete_from_lru_cache(struct page *p) delete_from_lru_cache()
580 * Clear sensible page flags, so that the buddy system won't delete_from_lru_cache()
581 * complain when the page is unpoison-and-freed. delete_from_lru_cache()
586 * drop the page count elevated by isolate_lru_page() delete_from_lru_cache()
595 * Error hit kernel page.
599 static int me_kernel(struct page *p, unsigned long pfn) me_kernel()
607 static int me_unknown(struct page *p, unsigned long pfn) me_unknown()
609 printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); me_unknown()
614 * Clean (or cleaned) page cache page.
616 static int me_pagecache_clean(struct page *p, unsigned long pfn) me_pagecache_clean()
632 * Now truncate the page in the page cache. This is really me_pagecache_clean()
654 printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", me_pagecache_clean()
677 * Dirty pagecache page
678 * Issues: when the error hit a hole page the error is not properly
681 static int me_pagecache_dirty(struct page *p, unsigned long pfn) me_pagecache_dirty()
700 * and then through the PageError flag in the page. me_pagecache_dirty()
707 * when the page is reread or dropped. If an me_pagecache_dirty()
710 * and the page is dropped between then the error me_pagecache_dirty()
715 * report through AS_EIO) or when the page is dropped me_pagecache_dirty()
731 * Dirty swap cache page is tricky to handle. The page could live both in page
732 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
740 * a later page fault, we know the application is accessing
744 * Clean swap cache pages can be directly isolated. A later page fault will
747 static int me_swapcache_dirty(struct page *p, unsigned long pfn) me_swapcache_dirty()
759 static int me_swapcache_clean(struct page *p, unsigned long pfn) me_swapcache_clean()
772 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
773 * To narrow down kill region to one page, we need to break up pmd.
775 static int me_huge_page(struct page *p, unsigned long pfn) me_huge_page()
778 struct page *hpage = compound_head(p); me_huge_page()
783 * page->lru because it can be used in other hugepage operations, me_huge_page()
786 * We assume that this function is called with page lock held, me_huge_page()
798 * Various page states we can handle.
800 * A page state is defined by its current page->flags bits.
803 * This is quite tricky because we can access page at any time
827 int (*action)(struct page *p, unsigned long pfn);
836 * Could in theory check if slab page is free or if we can drop
882 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
890 static int page_action(struct page_state *ps, struct page *p, page_action()
909 /* Could do more checks here if page looks ok */ page_action()
911 * Could adjust zone counters here to correct for the missing page. page_action()
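The error handler above dispatches through a table of page states: each entry pairs a flags mask/value with an action callback, and page_action() runs the first entry whose mask matches the saved flags. A minimal sketch of that table-driven dispatch; the flag bits and actions below are made up for illustration and are not the kernel's PG_* values:

#include <stdio.h>

/* Illustrative flag bits, not the kernel's PG_* values. */
#define PG_DIRTY  (1UL << 0)
#define PG_LRU    (1UL << 1)

struct page_stub { unsigned long flags; };

static int me_dirty_lru(struct page_stub *p, unsigned long pfn)
{ printf("pfn %#lx: dirty LRU page\n", pfn); return 0; }
static int me_clean_lru(struct page_stub *p, unsigned long pfn)
{ printf("pfn %#lx: clean LRU page\n", pfn); return 0; }
static int me_unknown(struct page_stub *p, unsigned long pfn)
{ printf("pfn %#lx: unknown page state\n", pfn); return -1; }

struct page_state {
	unsigned long mask;   /* which flag bits to look at */
	unsigned long res;    /* required value of those bits */
	int (*action)(struct page_stub *p, unsigned long pfn);
};

static const struct page_state error_states[] = {
	{ PG_LRU | PG_DIRTY, PG_LRU | PG_DIRTY, me_dirty_lru },
	{ PG_LRU,            PG_LRU,            me_clean_lru },
	{ 0,                 0,                 me_unknown   },  /* catch-all */
};

static int page_action(struct page_stub *p, unsigned long pfn)
{
	const struct page_state *ps;

	/* first matching entry wins; the last entry matches everything */
	for (ps = error_states; (p->flags & ps->mask) != ps->res; ps++)
		;
	return ps->action(p, pfn);
}

int main(void)
{
	struct page_stub p = { .flags = PG_LRU };

	return page_action(&p, 0x1234) ? 1 : 0;
}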
921 static int hwpoison_user_mappings(struct page *p, unsigned long pfn, hwpoison_user_mappings()
922 int trapno, int flags, struct page **hpagep) hwpoison_user_mappings()
929 struct page *hpage = *hpagep; hwpoison_user_mappings()
930 struct page *ppage; hwpoison_user_mappings()
955 "MCE %#lx: keeping poisoned page in swap cache\n", pfn); hwpoison_user_mappings()
960 * Propagate the dirty bit from PTEs to struct page first, because we hwpoison_user_mappings()
961 * need this to decide if we should kill or just drop the page. hwpoison_user_mappings()
963 * be called inside page lock (it's recommended but not enforced). hwpoison_user_mappings()
974 "MCE %#lx: corrupted page was clean: dropped without side effects\n", hwpoison_user_mappings()
980 * ppage: poisoned page hwpoison_user_mappings()
981 * if p is regular page(4k page) hwpoison_user_mappings()
982 * ppage == real poisoned page; hwpoison_user_mappings()
983 * else p is hugetlb or THP, ppage == head page. hwpoison_user_mappings()
989 * Verify that this isn't a hugetlbfs head page, the check for hwpoison_user_mappings()
992 * anything that isn't an anon page. PageAnon can't go away fro hwpoison_user_mappings()
1004 * survive if the page is freed later. hwpoison_user_mappings()
1013 * We pinned the head page for hwpoison handling, hwpoison_user_mappings()
1015 * the hwpoisoned raw page, so move the refcount hwpoison_user_mappings()
1016 * to it. Similarly, page lock is shifted. hwpoison_user_mappings()
1027 /* THP is split, so ppage should be the real poisoned page. */ hwpoison_user_mappings()
1033 * First collect all the processes that have the page hwpoison_user_mappings()
1045 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", hwpoison_user_mappings()
1050 * struct page and all unmaps done we can decide if hwpoison_user_mappings()
1051 * killing is needed or not. Only kill when the page hwpoison_user_mappings()
1065 static void set_page_hwpoison_huge_page(struct page *hpage) set_page_hwpoison_huge_page()
1073 static void clear_page_hwpoison_huge_page(struct page *hpage) clear_page_hwpoison_huge_page()
1082 * memory_failure - Handle memory failure of a page.
1083 * @pfn: Page Number of the corrupted page
1089 * of a page. It tries its best to recover, which includes
1102 struct page *p; memory_failure()
1103 struct page *hpage; memory_failure()
1109 panic("Memory failure from trap %d on page %lx", trapno, pfn); memory_failure()
1129 * measurement is done in normal page units. So nr_pages should be one memory_failure()
1134 else /* normal page or thp */ memory_failure()
1140 * 1) it's a free page, and therefore in safe hand: memory_failure()
1145 * 3) it's part of a non-compound high order page. memory_failure()
1147 * R/W the page; let's pray that the page has been memory_failure()
1149 * In fact it's dangerous to directly bump up page count from 0, memory_failure()
1188 * walked by the page reclaim code, however that's not a big loss. memory_failure()
1211 * The page could have changed compound pages during the locking. memory_failure()
1221 * We use page flags to determine what action should be taken, but memory_failure()
1223 * example is an mlocked page, where PG_mlocked is cleared by memory_failure()
1224 * page_remove_rmap() in try_to_unmap_one(). So to determine page status memory_failure()
1225 * correctly, we save a copy of the page flags at this time. memory_failure()
1230 * unpoison always clear PG_hwpoison inside page lock memory_failure()
1251 * For error on the tail page, we should set PG_hwpoison memory_failure()
1252 * on the head page to show that the hugepage is hwpoisoned memory_failure()
1263 * Since we have done TestSetPageHWPoison() for the head page with memory_failure()
1264 * page lock held, we can safely set PG_hwpoison bits on tail pages. memory_failure()
1277 * Abort on fail: __delete_from_page_cache() assumes unmapped page. memory_failure()
1279 * When the raw error page is thp tail page, hpage points to the raw memory_failure()
1280 * page after thp split. memory_failure()
1301 * The first check uses the current page flags which may not have any memory_failure()
1302 * relevant information. The second check with the saved page flags is memory_failure()
1303 * carried out only if the first check can't determine the page status. memory_failure()
1341 * memory_failure_queue - Schedule handling memory failure of a page.
1342 * @pfn: Page Number of the corrupted page
1347 * when it detects hardware memory corruption of a page. It schedules
1348 * the recovering of error page, including dropping pages, killing
1417 * unpoison_memory - Unpoison a previously poisoned page
1418 * @pfn: Page number of the to be unpoisoned page
1420 * Software-unpoison a page that has been poisoned by
1430 struct page *page; unpoison_memory() local
1431 struct page *p; unpoison_memory()
1439 page = compound_head(p); unpoison_memory()
1448 * worked by memory_failure() and the page lock is not held yet. unpoison_memory()
1451 if (!PageHuge(page) && PageTransHuge(page)) { unpoison_memory()
1456 nr_pages = 1 << compound_order(page); unpoison_memory()
1458 if (!get_page_unless_zero(page)) { unpoison_memory()
1465 if (PageHuge(page)) { unpoison_memory()
1471 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); unpoison_memory()
1475 lock_page(page); unpoison_memory()
1477 * This test is racy because PG_hwpoison is set outside of page lock. unpoison_memory()
1479 * the PG_hwpoison page will be caught and isolated on the entrance to unpoison_memory()
1480 * the free buddy page pool. unpoison_memory()
1482 if (TestClearPageHWPoison(page)) { unpoison_memory()
1483 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); unpoison_memory()
1486 if (PageHuge(page)) unpoison_memory()
1487 clear_page_hwpoison_huge_page(page); unpoison_memory()
1489 unlock_page(page); unpoison_memory()
1491 put_page(page); unpoison_memory()
1493 put_page(page); unpoison_memory()
1499 static struct page *new_page(struct page *p, unsigned long private, int **x) new_page()
1510 * Safely get reference count of an arbitrary page.
1511 * Returns 0 for a free page, -EIO for a zero refcount page
1512 * that is not free, and 1 for any other page type.
1513 * For 1 the page is returned with increased page count, otherwise not.
1515 static int __get_any_page(struct page *p, unsigned long pfn, int flags) __get_any_page()
1523 * When the target page is a free hugepage, just remove it __get_any_page()
1528 pr_info("%s: %#lx free huge page\n", __func__, pfn); __get_any_page()
1531 pr_info("%s: %#lx free buddy page\n", __func__, pfn); __get_any_page()
1534 pr_info("%s: %#lx: unknown zero refcount page type %lx\n", __get_any_page()
1539 /* Not a free page */ __get_any_page()
1545 static int get_any_page(struct page *page, unsigned long pfn, int flags) get_any_page() argument
1547 int ret = __get_any_page(page, pfn, flags); get_any_page()
1549 if (ret == 1 && !PageHuge(page) && !PageLRU(page)) { get_any_page()
1553 put_page(page); get_any_page()
1554 shake_page(page, 1); get_any_page()
1559 ret = __get_any_page(page, pfn, 0); get_any_page()
1560 if (ret == 1 && !PageLRU(page)) { get_any_page()
1561 /* Drop page reference which is from __get_any_page() */ get_any_page()
1562 put_page(page); get_any_page()
1563 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", get_any_page()
1564 pfn, page->flags); get_any_page()
1571 static int soft_offline_huge_page(struct page *page, int flags) soft_offline_huge_page() argument
1574 unsigned long pfn = page_to_pfn(page); soft_offline_huge_page()
1575 struct page *hpage = compound_head(page); soft_offline_huge_page()
1606 pfn, ret, page->flags); soft_offline_huge_page()
1616 /* overcommit hugetlb page will be freed to buddy */ soft_offline_huge_page()
1617 if (PageHuge(page)) { soft_offline_huge_page()
1623 SetPageHWPoison(page); soft_offline_huge_page()
1630 static int __soft_offline_page(struct page *page, int flags) __soft_offline_page() argument
1633 unsigned long pfn = page_to_pfn(page); __soft_offline_page()
1636 * Check PageHWPoison again inside page lock because PageHWPoison __soft_offline_page()
1637 * is set by memory_failure() outside page lock. Note that __soft_offline_page()
1638 * memory_failure() also double-checks PageHWPoison inside page lock, __soft_offline_page()
1641 lock_page(page); __soft_offline_page()
1642 wait_on_page_writeback(page); __soft_offline_page()
1643 if (PageHWPoison(page)) { __soft_offline_page()
1644 unlock_page(page); __soft_offline_page()
1645 put_page(page); __soft_offline_page()
1646 pr_info("soft offline: %#lx page already poisoned\n", pfn); __soft_offline_page()
1651 * non dirty unmapped page cache pages. __soft_offline_page()
1653 ret = invalidate_inode_page(page); __soft_offline_page()
1654 unlock_page(page); __soft_offline_page()
1660 put_page(page); __soft_offline_page()
1662 SetPageHWPoison(page); __soft_offline_page()
1669 * Try to migrate to a new page instead. migrate.c __soft_offline_page()
1672 ret = isolate_lru_page(page); __soft_offline_page()
1674 * Drop the page reference which came from get_any_page() __soft_offline_page()
1677 put_page(page); __soft_offline_page()
1680 inc_zone_page_state(page, NR_ISOLATED_ANON + __soft_offline_page()
1681 page_is_file_cache(page)); __soft_offline_page()
1682 list_add(&page->lru, &pagelist); __soft_offline_page()
1687 list_del(&page->lru); __soft_offline_page()
1688 dec_zone_page_state(page, NR_ISOLATED_ANON + __soft_offline_page()
1689 page_is_file_cache(page)); __soft_offline_page()
1690 putback_lru_page(page); __soft_offline_page()
1694 pfn, ret, page->flags); __soft_offline_page()
1698 SetPageHWPoison(page); __soft_offline_page()
1702 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", __soft_offline_page()
1703 pfn, ret, page_count(page), page->flags); __soft_offline_page()
1709 * soft_offline_page - Soft offline a page.
1710 * @page: page to offline
1715 * Soft offline a page, by migration or invalidation,
1717 * a page is not corrupted yet (so it's still valid to access),
1730 int soft_offline_page(struct page *page, int flags) soft_offline_page() argument
1733 unsigned long pfn = page_to_pfn(page); soft_offline_page()
1734 struct page *hpage = compound_head(page); soft_offline_page()
1736 if (PageHWPoison(page)) { soft_offline_page()
1737 pr_info("soft offline: %#lx page already poisoned\n", pfn); soft_offline_page()
1740 if (!PageHuge(page) && PageTransHuge(hpage)) { soft_offline_page()
1750 ret = get_any_page(page, pfn, flags); soft_offline_page()
1753 if (PageHuge(page)) soft_offline_page()
1754 ret = soft_offline_huge_page(page, flags); soft_offline_page()
1756 ret = __soft_offline_page(page, flags); soft_offline_page()
1758 if (PageHuge(page)) { soft_offline_page()
1764 if (!TestSetPageHWPoison(page)) soft_offline_page()
190 kill_proc(struct task_struct *t, unsigned long addr, int trapno, unsigned long pfn, struct page *page, int flags) kill_proc() argument
334 kill_procs(struct list_head *to_kill, int forcekill, int trapno, int fail, struct page *page, unsigned long pfn, int flags) kill_procs() argument
H A Dtruncate.c38 * Regular page slots are stabilized by the page lock even clear_exceptional_entry()
67 * do_invalidatepage - invalidate part or all of a page
68 * @page: the page which is affected
72 * do_invalidatepage() is called when all or part of the page has become
81 void do_invalidatepage(struct page *page, unsigned int offset, do_invalidatepage() argument
84 void (*invalidatepage)(struct page *, unsigned int, unsigned int); do_invalidatepage()
86 invalidatepage = page->mapping->a_ops->invalidatepage; do_invalidatepage()
92 (*invalidatepage)(page, offset, length); do_invalidatepage()
96 * If truncate cannot remove the fs-private metadata from the page, the page
100 * We need to bale out if page->mapping is no longer equal to the original
101 * mapping. This happens a) when the VM reclaimed the page while we waited on
103 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
106 truncate_complete_page(struct address_space *mapping, struct page *page) truncate_complete_page() argument
108 if (page->mapping != mapping) truncate_complete_page()
111 if (page_has_private(page)) truncate_complete_page()
112 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); truncate_complete_page()
115 * Some filesystems seem to re-dirty the page even after truncate_complete_page()
119 if (TestClearPageDirty(page)) truncate_complete_page()
120 account_page_cleaned(page, mapping); truncate_complete_page()
122 ClearPageMappedToDisk(page); truncate_complete_page()
123 delete_from_page_cache(page); truncate_complete_page()
133 * Returns non-zero if the page was successfully invalidated.
136 invalidate_complete_page(struct address_space *mapping, struct page *page) invalidate_complete_page() argument
140 if (page->mapping != mapping) invalidate_complete_page()
143 if (page_has_private(page) && !try_to_release_page(page, 0)) invalidate_complete_page()
146 ret = remove_mapping(mapping, page); invalidate_complete_page()
151 int truncate_inode_page(struct address_space *mapping, struct page *page) truncate_inode_page() argument
153 if (page_mapped(page)) { truncate_inode_page()
155 (loff_t)page->index << PAGE_CACHE_SHIFT, truncate_inode_page()
158 return truncate_complete_page(mapping, page); truncate_inode_page()
164 int generic_error_remove_page(struct address_space *mapping, struct page *page) generic_error_remove_page() argument
174 return truncate_inode_page(mapping, page); generic_error_remove_page()
179 * Safely invalidate one page from its pagecache mapping.
180 * It only drops clean, unused pages. The page must be locked.
182 * Returns 1 if the page is successfully invalidated, otherwise 0.
184 int invalidate_inode_page(struct page *page) invalidate_inode_page() argument
186 struct address_space *mapping = page_mapping(page); invalidate_inode_page()
189 if (PageDirty(page) || PageWriteback(page)) invalidate_inode_page()
191 if (page_mapped(page)) invalidate_inode_page()
193 return invalidate_complete_page(mapping, page); invalidate_inode_page()
202 * Truncate the page cache, removing the pages that are between
204 * if lstart or lend + 1 is not page aligned).
207 * block on page locks and it will not block on writeback. The second pass
212 * We pass down the cache-hot hint to the page freeing code. Even if the
218 * page aligned properly.
263 struct page *page = pvec.pages[i]; truncate_inode_pages_range() local
265 /* We rely upon deletion not changing page->index */ truncate_inode_pages_range()
270 if (radix_tree_exceptional_entry(page)) { truncate_inode_pages_range()
271 clear_exceptional_entry(mapping, index, page); truncate_inode_pages_range()
275 if (!trylock_page(page)) truncate_inode_pages_range()
277 WARN_ON(page->index != index); truncate_inode_pages_range()
278 if (PageWriteback(page)) { truncate_inode_pages_range()
279 unlock_page(page); truncate_inode_pages_range()
282 truncate_inode_page(mapping, page); truncate_inode_pages_range()
283 unlock_page(page); truncate_inode_pages_range()
292 struct page *page = find_lock_page(mapping, start - 1); truncate_inode_pages_range() local
293 if (page) { truncate_inode_pages_range()
296 /* Truncation within a single page */ truncate_inode_pages_range()
300 wait_on_page_writeback(page); truncate_inode_pages_range()
301 zero_user_segment(page, partial_start, top); truncate_inode_pages_range()
302 cleancache_invalidate_page(mapping, page); truncate_inode_pages_range()
303 if (page_has_private(page)) truncate_inode_pages_range()
304 do_invalidatepage(page, partial_start, truncate_inode_pages_range()
306 unlock_page(page); truncate_inode_pages_range()
307 page_cache_release(page); truncate_inode_pages_range()
311 struct page *page = find_lock_page(mapping, end); truncate_inode_pages_range() local
312 if (page) { truncate_inode_pages_range()
313 wait_on_page_writeback(page); truncate_inode_pages_range()
314 zero_user_segment(page, 0, partial_end); truncate_inode_pages_range()
315 cleancache_invalidate_page(mapping, page); truncate_inode_pages_range()
316 if (page_has_private(page)) truncate_inode_pages_range()
317 do_invalidatepage(page, 0, truncate_inode_pages_range()
319 unlock_page(page); truncate_inode_pages_range()
320 page_cache_release(page); truncate_inode_pages_range()
324 * If the truncation happened within a single page no pages truncate_inode_pages_range()
349 struct page *page = pvec.pages[i]; truncate_inode_pages_range() local
351 /* We rely upon deletion not changing page->index */ truncate_inode_pages_range()
359 if (radix_tree_exceptional_entry(page)) { truncate_inode_pages_range()
360 clear_exceptional_entry(mapping, index, page); truncate_inode_pages_range()
364 lock_page(page); truncate_inode_pages_range()
365 WARN_ON(page->index != index); truncate_inode_pages_range()
366 wait_on_page_writeback(page); truncate_inode_pages_range()
367 truncate_inode_page(mapping, page); truncate_inode_pages_range()
368 unlock_page(page); truncate_inode_pages_range()
385 * Note: When this function returns, there can be a page in the process of
471 struct page *page = pvec.pages[i]; invalidate_mapping_pages() local
473 /* We rely upon deletion not changing page->index */ invalidate_mapping_pages()
478 if (radix_tree_exceptional_entry(page)) { invalidate_mapping_pages()
479 clear_exceptional_entry(mapping, index, page); invalidate_mapping_pages()
483 if (!trylock_page(page)) invalidate_mapping_pages()
485 WARN_ON(page->index != index); invalidate_mapping_pages()
486 ret = invalidate_inode_page(page); invalidate_mapping_pages()
487 unlock_page(page); invalidate_mapping_pages()
489 * Invalidation is a hint that the page is no longer invalidate_mapping_pages()
493 deactivate_file_page(page); invalidate_mapping_pages()
506 * This is like invalidate_complete_page(), except it ignores the page's
513 invalidate_complete_page2(struct address_space *mapping, struct page *page) invalidate_complete_page2() argument
515 if (page->mapping != mapping) invalidate_complete_page2()
518 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) invalidate_complete_page2()
522 if (PageDirty(page)) invalidate_complete_page2()
525 BUG_ON(page_has_private(page)); invalidate_complete_page2()
526 __delete_from_page_cache(page, NULL); invalidate_complete_page2()
530 mapping->a_ops->freepage(page); invalidate_complete_page2()
532 page_cache_release(page); /* pagecache ref */ invalidate_complete_page2()
539 static int do_launder_page(struct address_space *mapping, struct page *page) do_launder_page() argument
541 if (!PageDirty(page)) do_launder_page()
543 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) do_launder_page()
545 return mapping->a_ops->launder_page(page); do_launder_page()
551 * @start: the page offset 'from' which to invalidate
552 * @end: the page offset 'to' which to invalidate (inclusive)
577 struct page *page = pvec.pages[i]; invalidate_inode_pages2_range() local
579 /* We rely upon deletion not changing page->index */ invalidate_inode_pages2_range()
584 if (radix_tree_exceptional_entry(page)) { invalidate_inode_pages2_range()
585 clear_exceptional_entry(mapping, index, page); invalidate_inode_pages2_range()
589 lock_page(page); invalidate_inode_pages2_range()
590 WARN_ON(page->index != index); invalidate_inode_pages2_range()
591 if (page->mapping != mapping) { invalidate_inode_pages2_range()
592 unlock_page(page); invalidate_inode_pages2_range()
595 wait_on_page_writeback(page); invalidate_inode_pages2_range()
596 if (page_mapped(page)) { invalidate_inode_pages2_range()
609 * Just zap this page invalidate_inode_pages2_range()
616 BUG_ON(page_mapped(page)); invalidate_inode_pages2_range()
617 ret2 = do_launder_page(mapping, page); invalidate_inode_pages2_range()
619 if (!invalidate_complete_page2(mapping, page)) invalidate_inode_pages2_range()
624 unlock_page(page); invalidate_inode_pages2_range()
663 * situations such as writepage being called for a page that has already
674 * single-page unmaps. However after this first call, and truncate_pagecache()
717 * write starting after current i_size. We mark the page straddling current
719 * the page. This way filesystem can be sure that page_mkwrite() is called on
720 * the page before user writes to the page via mmap after the i_size has been
723 * The function must be called after i_size is updated so that page fault
724 * coming after we unlock the page will already see the new i_size.
733 struct page *page; pagecache_isize_extended() local
746 page = find_lock_page(inode->i_mapping, index); pagecache_isize_extended()
748 if (!page) pagecache_isize_extended()
754 if (page_mkclean(page)) pagecache_isize_extended()
755 set_page_dirty(page); pagecache_isize_extended()
756 unlock_page(page); pagecache_isize_extended()
757 page_cache_release(page); pagecache_isize_extended()
771 * situations such as writepage being called for a page that has already
783 * doing their own page rounding first. Note that unmap_mapping_range truncate_pagecache_range()
H A Dmmzone.c4 * management codes for pgdats, zones and page flags
76 struct page *page, struct zone *zone) memmap_valid_within()
78 if (page_to_pfn(page) != pfn) memmap_valid_within()
81 if (page_zone(page) != zone) memmap_valid_within()
99 int page_cpupid_xchg_last(struct page *page, int cpupid) page_cpupid_xchg_last() argument
105 old_flags = flags = page->flags; page_cpupid_xchg_last()
106 last_cpupid = page_cpupid_last(page); page_cpupid_xchg_last()
110 } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags)); page_cpupid_xchg_last()
75 memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone) memmap_valid_within() argument
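page_cpupid_xchg_last() above is a lock-free read-modify-write: snapshot page->flags, splice the new cpupid into its bitfield, and retry if another CPU raced. A userspace sketch with C11 atomics; the field shift and width are arbitrary stand-ins for the kernel's cpupid layout:

#include <stdatomic.h>
#include <stdio.h>

#define CPUPID_SHIFT 8        /* illustrative */
#define CPUPID_MASK  0xffUL   /* illustrative */

static _Atomic unsigned long page_flags;

/* Returns the previous cpupid, like page_cpupid_xchg_last(). */
static unsigned long cpupid_xchg_last(unsigned long cpupid)
{
	unsigned long old_flags, new_flags, last;

	do {
		old_flags = atomic_load(&page_flags);
		last = (old_flags >> CPUPID_SHIFT) & CPUPID_MASK;
		new_flags = old_flags & ~(CPUPID_MASK << CPUPID_SHIFT);
		new_flags |= (cpupid & CPUPID_MASK) << CPUPID_SHIFT;
		/* retry if another thread changed the word in the meantime */
	} while (!atomic_compare_exchange_weak(&page_flags, &old_flags,
					       new_flags));
	return last;
}

int main(void)
{
	cpupid_xchg_last(0x21);
	printf("previous cpupid: %#lx\n", cpupid_xchg_last(0x42)); /* 0x21 */
	return 0;
}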
H A Ddmapool.c13 * the given device. It uses the dma_alloc_coherent page allocator to get
19 * allocated pages. Each page in the page_list is split into blocks of at
21 * list of free blocks within the page. Used blocks aren't tracked, but we
22 * keep a count of how many are currently allocated from each page.
73 struct dma_page *page; show_pools() local
89 list_for_each_entry(page, &pool->page_list, page_list) { show_pools()
91 blocks += page->in_use; show_pools()
206 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) pool_initialise_page() argument
217 *(int *)(page->vaddr + offset) = next; pool_initialise_page()
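pool_initialise_page() above seeds the embedded free list: each free block's first int holds the offset of the next free block, so dma_pool_alloc() can pop the head and dma_pool_free() can push a block back with no side storage. A userspace sketch of that chain over a malloc'd "page" (block and page sizes are illustrative, and the "page full" and error checks of the real pool are omitted):

#include <stdio.h>
#include <stdlib.h>

#define ALLOCATION  4096   /* bytes per pool page, illustrative */
#define BLOCK_SIZE  64     /* bytes per block, illustrative */

struct pool_page {
	char *vaddr;
	unsigned int offset;   /* offset of the first free block */
	unsigned int in_use;
};

static void pool_initialise_page(struct pool_page *pg)
{
	unsigned int off, next;

	for (off = 0; off < ALLOCATION; off = next) {
		next = off + BLOCK_SIZE;
		/* each free block stores the offset of the next free block */
		*(int *)(pg->vaddr + off) = (next < ALLOCATION) ? (int)next : -1;
	}
	pg->offset = 0;
	pg->in_use = 0;
}

static void *pool_alloc(struct pool_page *pg)
{
	unsigned int off = pg->offset;

	pg->offset = *(int *)(pg->vaddr + off);   /* pop the free-list head */
	pg->in_use++;
	return pg->vaddr + off;
}

static void pool_free(struct pool_page *pg, void *vaddr)
{
	unsigned int off = (char *)vaddr - pg->vaddr;

	*(int *)vaddr = pg->offset;               /* push back on the chain */
	pg->offset = off;
	pg->in_use--;
}

int main(void)
{
	struct pool_page pg = { .vaddr = malloc(ALLOCATION) };
	void *a, *b;

	pool_initialise_page(&pg);
	a = pool_alloc(&pg);
	b = pool_alloc(&pg);
	printf("a=+%ld b=+%ld in_use=%u\n",
	       (long)((char *)a - pg.vaddr), (long)((char *)b - pg.vaddr),
	       pg.in_use);                        /* a=+0 b=+64 in_use=2 */
	pool_free(&pg, a);
	pool_free(&pg, b);
	free(pg.vaddr);
	return 0;
}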
224 struct dma_page *page; pool_alloc_page() local
226 page = kmalloc(sizeof(*page), mem_flags); pool_alloc_page()
227 if (!page) pool_alloc_page()
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, pool_alloc_page()
230 &page->dma, mem_flags); pool_alloc_page()
231 if (page->vaddr) { pool_alloc_page()
233 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); pool_alloc_page()
235 pool_initialise_page(pool, page); pool_alloc_page()
236 page->in_use = 0; pool_alloc_page()
237 page->offset = 0; pool_alloc_page()
239 kfree(page); pool_alloc_page()
240 page = NULL; pool_alloc_page()
242 return page; pool_alloc_page()
245 static inline int is_page_busy(struct dma_page *page) is_page_busy() argument
247 return page->in_use != 0; is_page_busy()
250 static void pool_free_page(struct dma_pool *pool, struct dma_page *page) pool_free_page() argument
252 dma_addr_t dma = page->dma; pool_free_page()
255 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); pool_free_page()
257 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); pool_free_page()
258 list_del(&page->page_list); pool_free_page()
259 kfree(page); pool_free_page()
285 struct dma_page *page; dma_pool_destroy() local
286 page = list_entry(pool->page_list.next, dma_pool_destroy()
288 if (is_page_busy(page)) { dma_pool_destroy()
292 pool->name, page->vaddr); dma_pool_destroy()
296 pool->name, page->vaddr); dma_pool_destroy()
298 list_del(&page->page_list); dma_pool_destroy()
299 kfree(page); dma_pool_destroy()
301 pool_free_page(pool, page); dma_pool_destroy()
322 struct dma_page *page; dma_pool_alloc() local
329 list_for_each_entry(page, &pool->page_list, page_list) { dma_pool_alloc()
330 if (page->offset < pool->allocation) dma_pool_alloc()
337 page = pool_alloc_page(pool, mem_flags); dma_pool_alloc()
338 if (!page) dma_pool_alloc()
343 list_add(&page->page_list, &pool->page_list); dma_pool_alloc()
345 page->in_use++; dma_pool_alloc()
346 offset = page->offset; dma_pool_alloc()
347 page->offset = *(int *)(page->vaddr + offset); dma_pool_alloc()
348 retval = offset + page->vaddr; dma_pool_alloc()
349 *handle = offset + page->dma; dma_pool_alloc()
354 /* page->offset is stored in first 4 bytes */ dma_pool_alloc()
355 for (i = sizeof(page->offset); i < pool->size; i++) { dma_pool_alloc()
384 struct dma_page *page; pool_find_page() local
386 list_for_each_entry(page, &pool->page_list, page_list) { pool_find_page()
387 if (dma < page->dma) pool_find_page()
389 if (dma < (page->dma + pool->allocation)) pool_find_page()
390 return page; pool_find_page()
406 struct dma_page *page; dma_pool_free() local
411 page = pool_find_page(pool, dma); dma_pool_free()
412 if (!page) { dma_pool_free()
424 offset = vaddr - page->vaddr; dma_pool_free()
426 if ((dma - page->dma) != offset) { dma_pool_free()
439 unsigned int chain = page->offset; dma_pool_free()
442 chain = *(int *)(page->vaddr + chain); dma_pool_free()
460 page->in_use--; dma_pool_free()
461 *(int *)vaddr = page->offset; dma_pool_free()
462 page->offset = offset; dma_pool_free()
465 * if (!is_page_busy(page)) pool_free_page(pool, page); dma_pool_free()
H A Dhighmem.c12 * Rewrote high memory support to move the page cache into
55 * Determine color of virtual address where the page should be mapped.
57 static inline unsigned int get_pkmap_color(struct page *page) get_pkmap_color() argument
64 * Get next index for mapping inside PKMAP region for page with given color.
75 * Determine if page index inside PKMAP region (pkmap_nr) of given color
154 struct page *kmap_to_page(void *vaddr) kmap_to_page()
175 struct page *page; flush_all_zero_pkmaps() local
192 * no-one has the page mapped, and cannot get at flush_all_zero_pkmaps()
197 page = pte_page(pkmap_page_table[i]); flush_all_zero_pkmaps()
200 set_page_address(page, NULL); flush_all_zero_pkmaps()
217 static inline unsigned long map_new_virtual(struct page *page) map_new_virtual() argument
222 unsigned int color = get_pkmap_color(page); map_new_virtual()
254 if (page_address(page)) map_new_virtual()
255 return (unsigned long)page_address(page); map_new_virtual()
263 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); map_new_virtual()
266 set_page_address(page, (void *)vaddr); map_new_virtual()
272 * kmap_high - map a highmem page into memory
273 * @page: &struct page to map
275 * Returns the page's virtual memory address.
279 void *kmap_high(struct page *page) kmap_high() argument
288 vaddr = (unsigned long)page_address(page); kmap_high()
290 vaddr = map_new_virtual(page); kmap_high()
301 * kmap_high_get - pin a highmem page into memory
302 * @page: &struct page to pin
304 * Returns the page's current virtual memory address, or NULL if no mapping
310 void *kmap_high_get(struct page *page) kmap_high_get() argument
315 vaddr = (unsigned long)page_address(page); kmap_high_get()
326 * kunmap_high - unmap a highmem page into memory
327 * @page: &struct page to unmap
332 void kunmap_high(struct page *page) kunmap_high() argument
338 unsigned int color = get_pkmap_color(page); kunmap_high()
342 vaddr = (unsigned long)page_address(page); kunmap_high()
383 * Describes one page->virtual association
386 struct page *page; member in struct:page_address_map
401 static struct page_address_slot *page_slot(const struct page *page) page_slot() argument
403 return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; page_slot()
407 * page_address - get the mapped virtual address of a page
408 * @page: &struct page to get the virtual address of
410 * Returns the page's virtual address.
412 void *page_address(const struct page *page) page_address() argument
418 if (!PageHighMem(page)) page_address()
419 return lowmem_page_address(page); page_address()
421 pas = page_slot(page); page_address()
428 if (pam->page == page) { page_address()
442 * set_page_address - set a page's virtual address
443 * @page: &struct page to set
446 void set_page_address(struct page *page, void *virtual) set_page_address() argument
452 BUG_ON(!PageHighMem(page)); set_page_address()
454 pas = page_slot(page); set_page_address()
457 pam->page = page; set_page_address()
466 if (pam->page == page) { set_page_address()
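For highmem pages, page_address() above cannot compute the virtual address directly, so it keeps page->virtual associations in a small hash table keyed by the struct page pointer: page_slot() picks the bucket and the lookup walks the chain comparing pam->page. A sketch of that pointer-hashed lookup, with a trivial hash in place of hash_ptr():

#include <stdio.h>
#include <stdint.h>

#define PA_HASH_ORDER 4
#define NR_SLOTS      (1 << PA_HASH_ORDER)

struct page_stub { int dummy; };

struct page_address_map {
	const struct page_stub *page;
	void *virtual_addr;
	struct page_address_map *next;   /* bucket chain */
};

static struct page_address_map *buckets[NR_SLOTS];

/* Trivial pointer hash; the kernel uses hash_ptr() instead. */
static unsigned int page_slot(const struct page_stub *page)
{
	return ((uintptr_t)page >> 4) % NR_SLOTS;
}

static void set_page_address(struct page_address_map *pam,
			     const struct page_stub *page, void *virt)
{
	unsigned int slot = page_slot(page);

	pam->page = page;
	pam->virtual_addr = virt;
	pam->next = buckets[slot];
	buckets[slot] = pam;
}

static void *page_address(const struct page_stub *page)
{
	const struct page_address_map *pam;

	for (pam = buckets[page_slot(page)]; pam; pam = pam->next)
		if (pam->page == page)
			return pam->virtual_addr;
	return NULL;                     /* not currently mapped */
}

int main(void)
{
	struct page_stub pg;
	struct page_address_map pam;
	int mapping;

	set_page_address(&pam, &pg, &mapping);
	printf("found mapping: %d\n", page_address(&pg) == (void *)&mapping);
	return 0;
}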
H A Dslub.c46 * 3. slab_lock(page) (Only on some arches and for debugging)
55 * double word in the page struct. Meaning
56 * A. page->freelist -> List of free objects in a page
57 * B. page->counters -> Counters of objects
58 * C. page->frozen -> frozen state
62 * perform list operations on the page. Other processors may put objects
64 * one that can retrieve the objects from the page's freelist.
92 * minimal so we rely on the page allocator's per-cpu caches for
95 * Overloading of page flags that are otherwise used for LRU management.
175 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
227 /* Verify that a pointer has an address that is valid within a slab page */ check_valid_pointer()
229 struct page *page, const void *object) check_valid_pointer()
236 base = page_address(page); check_valid_pointer()
237 if (object < base || object >= base + page->objects * s->size || check_valid_pointer()
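check_valid_pointer() above accepts an object pointer only if it lies inside the slab page and falls on an object boundary. A standalone version of the same test, with base, object count and size passed in directly instead of being read from struct page and struct kmem_cache:

#include <stdio.h>
#include <stddef.h>

/* Is 'object' a valid object address for a slab starting at 'base'? */
static int check_valid_pointer(const char *base, size_t objects, size_t size,
			       const void *object)
{
	const char *p = object;

	if (!object)
		return 1;                 /* NULL is allowed (empty freelist) */
	if (p < base || p >= base + objects * size)
		return 0;                 /* outside the slab page */
	if ((size_t)(p - base) % size)
		return 0;                 /* not on an object boundary */
	return 1;
}

int main(void)
{
	static char slab[8 * 64];                         /* 8 objects of 64 bytes */

	printf("%d %d %d\n",
	       check_valid_pointer(slab, 8, 64, slab + 128),    /* 1: object #2 */
	       check_valid_pointer(slab, 8, 64, slab + 100),    /* 0: misaligned */
	       check_valid_pointer(slab, 8, 64, slab + 8 * 64)); /* 0: past the end */
	return 0;
}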
339 static __always_inline void slab_lock(struct page *page) slab_lock() argument
341 bit_spin_lock(PG_locked, &page->flags); slab_lock()
344 static __always_inline void slab_unlock(struct page *page) slab_unlock() argument
346 __bit_spin_unlock(PG_locked, &page->flags); slab_unlock()
349 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) set_page_slub_counters() argument
351 struct page tmp; set_page_slub_counters()
354 * page->counters can cover frozen/inuse/objects as well set_page_slub_counters()
355 * as page->_count. If we assign to ->counters directly set_page_slub_counters()
356 * we run the risk of losing updates to page->_count, so set_page_slub_counters()
359 page->frozen = tmp.frozen; set_page_slub_counters()
360 page->inuse = tmp.inuse; set_page_slub_counters()
361 page->objects = tmp.objects; set_page_slub_counters()
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, __cmpxchg_double_slab() argument
374 if (cmpxchg_double(&page->freelist, &page->counters, __cmpxchg_double_slab()
381 slab_lock(page); __cmpxchg_double_slab()
382 if (page->freelist == freelist_old && __cmpxchg_double_slab()
383 page->counters == counters_old) { __cmpxchg_double_slab()
384 page->freelist = freelist_new; __cmpxchg_double_slab()
385 set_page_slub_counters(page, counters_new); __cmpxchg_double_slab()
386 slab_unlock(page); __cmpxchg_double_slab()
389 slab_unlock(page); __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, cmpxchg_double_slab() argument
410 if (cmpxchg_double(&page->freelist, &page->counters, cmpxchg_double_slab()
420 slab_lock(page); cmpxchg_double_slab()
421 if (page->freelist == freelist_old && cmpxchg_double_slab()
422 page->counters == counters_old) { cmpxchg_double_slab()
423 page->freelist = freelist_new; cmpxchg_double_slab()
424 set_page_slub_counters(page, counters_new); cmpxchg_double_slab()
425 slab_unlock(page); cmpxchg_double_slab()
429 slab_unlock(page); cmpxchg_double_slab()
445 * Determine a map of object in use on a page.
447 * Node listlock must be held to guarantee that the page does
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) get_map() argument
453 void *addr = page_address(page); get_map()
455 for (p = page->freelist; p; p = get_freepointer(s, p)) get_map()
582 static void print_page_info(struct page *page) print_page_info() argument
585 page, page->objects, page->inuse, page->freelist, page->flags); print_page_info()
617 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) print_trailer() argument
620 u8 *addr = page_address(page); print_trailer()
624 print_page_info(page); print_trailer()
653 void object_err(struct kmem_cache *s, struct page *page, object_err() argument
657 print_trailer(s, page, object); object_err()
660 static void slab_err(struct kmem_cache *s, struct page *page, slab_err() argument
670 print_page_info(page); slab_err()
694 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, check_bytes_and_report() argument
714 print_trailer(s, page, object); check_bytes_and_report()
758 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) check_pad_bytes() argument
773 return check_bytes_and_report(s, page, p, "Object padding", check_pad_bytes()
777 /* Check the pad bytes at the end of a slab page */ slab_pad_check()
778 static int slab_pad_check(struct kmem_cache *s, struct page *page) slab_pad_check() argument
789 start = page_address(page); slab_pad_check()
790 length = (PAGE_SIZE << compound_order(page)) - s->reserved; slab_pad_check()
804 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); slab_pad_check()
811 static int check_object(struct kmem_cache *s, struct page *page, check_object() argument
818 if (!check_bytes_and_report(s, page, object, "Redzone", check_object()
823 check_bytes_and_report(s, page, p, "Alignment padding", check_object()
831 (!check_bytes_and_report(s, page, p, "Poison", p, check_object()
833 !check_bytes_and_report(s, page, p, "Poison", check_object()
839 check_pad_bytes(s, page, p); check_object()
850 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { check_object()
851 object_err(s, page, p, "Freepointer corrupt"); check_object()
863 static int check_slab(struct kmem_cache *s, struct page *page) check_slab() argument
869 if (!PageSlab(page)) { check_slab()
870 slab_err(s, page, "Not a valid slab page"); check_slab()
874 maxobj = order_objects(compound_order(page), s->size, s->reserved); check_slab()
875 if (page->objects > maxobj) { check_slab()
876 slab_err(s, page, "objects %u > max %u", check_slab()
877 page->objects, maxobj); check_slab()
880 if (page->inuse > page->objects) { check_slab()
881 slab_err(s, page, "inuse %u > max %u", check_slab()
882 page->inuse, page->objects); check_slab()
886 slab_pad_check(s, page); check_slab()
891 * Determine if a certain object on a page is on the freelist. Must hold the
894 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) on_freelist() argument
901 fp = page->freelist; on_freelist()
902 while (fp && nr <= page->objects) { on_freelist()
905 if (!check_valid_pointer(s, page, fp)) { on_freelist()
907 object_err(s, page, object, on_freelist()
911 slab_err(s, page, "Freepointer corrupt"); on_freelist()
912 page->freelist = NULL; on_freelist()
913 page->inuse = page->objects; on_freelist()
924 max_objects = order_objects(compound_order(page), s->size, s->reserved); on_freelist()
928 if (page->objects != max_objects) { on_freelist()
929 slab_err(s, page, "Wrong number of objects. Found %d but " on_freelist()
930 "should be %d", page->objects, max_objects); on_freelist()
931 page->objects = max_objects; on_freelist()
934 if (page->inuse != page->objects - nr) { on_freelist()
935 slab_err(s, page, "Wrong object count. Counter is %d but " on_freelist()
936 "counted were %d", page->inuse, page->objects - nr); on_freelist()
937 page->inuse = page->objects - nr; on_freelist()
943 static void trace(struct kmem_cache *s, struct page *page, void *object, trace() argument
950 object, page->inuse, trace()
951 page->freelist); trace()
965 struct kmem_cache_node *n, struct page *page) add_full()
971 list_add(&page->lru, &n->full); add_full()
974 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) remove_full() argument
980 list_del(&page->lru); remove_full()
1020 static void setup_object_debug(struct kmem_cache *s, struct page *page, setup_object_debug() argument
1031 struct page *page, alloc_debug_processing()
1034 if (!check_slab(s, page)) alloc_debug_processing()
1037 if (!check_valid_pointer(s, page, object)) { alloc_debug_processing()
1038 object_err(s, page, object, "Freelist Pointer check fails"); alloc_debug_processing()
1042 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) alloc_debug_processing()
1048 trace(s, page, object, 1); alloc_debug_processing()
1053 if (PageSlab(page)) { alloc_debug_processing()
1055 * If this is a slab page then lets do the best we can alloc_debug_processing()
1060 page->inuse = page->objects; alloc_debug_processing()
1061 page->freelist = NULL; alloc_debug_processing()
1067 struct kmem_cache *s, struct page *page, void *object, free_debug_processing()
1070 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); free_debug_processing()
1073 slab_lock(page); free_debug_processing()
1075 if (!check_slab(s, page)) free_debug_processing()
1078 if (!check_valid_pointer(s, page, object)) { free_debug_processing()
1079 slab_err(s, page, "Invalid object pointer 0x%p", object); free_debug_processing()
1083 if (on_freelist(s, page, object)) { free_debug_processing()
1084 object_err(s, page, object, "Object already free"); free_debug_processing()
1088 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) free_debug_processing()
1091 if (unlikely(s != page->slab_cache)) { free_debug_processing()
1092 if (!PageSlab(page)) { free_debug_processing()
1093 slab_err(s, page, "Attempt to free object(0x%p) " free_debug_processing()
1095 } else if (!page->slab_cache) { free_debug_processing()
1100 object_err(s, page, object, free_debug_processing()
1101 "page slab pointer corrupt."); free_debug_processing()
1107 trace(s, page, object, 0); free_debug_processing()
1110 slab_unlock(page); free_debug_processing()
1118 slab_unlock(page); free_debug_processing()
1207 struct page *page, void *object) {}
1210 struct page *page, void *object, unsigned long addr) { return 0; }
1213 struct kmem_cache *s, struct page *page, void *object, free_debug_processing()
1216 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) slab_pad_check() argument
1218 static inline int check_object(struct kmem_cache *s, struct page *page, check_object() argument
1221 struct page *page) {} remove_full()
1223 struct page *page) {} kmem_cache_flags()
1312 static inline struct page *alloc_slab_page(struct kmem_cache *s, alloc_slab_page()
1315 struct page *page; alloc_slab_page() local
1324 page = alloc_pages(flags, order); alloc_slab_page()
1326 page = alloc_pages_exact_node(node, flags, order); alloc_slab_page()
1328 if (!page) alloc_slab_page()
1331 return page; alloc_slab_page()
1334 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) allocate_slab()
1336 struct page *page; allocate_slab() local
1353 page = alloc_slab_page(s, alloc_gfp, node, oo); allocate_slab()
1354 if (unlikely(!page)) { allocate_slab()
1361 page = alloc_slab_page(s, alloc_gfp, node, oo); allocate_slab()
1363 if (page) allocate_slab()
1367 if (kmemcheck_enabled && page allocate_slab()
1371 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node); allocate_slab()
1378 kmemcheck_mark_uninitialized_pages(page, pages); allocate_slab()
1380 kmemcheck_mark_unallocated_pages(page, pages); allocate_slab()
1385 if (!page) allocate_slab()
1388 page->objects = oo_objects(oo); allocate_slab()
1389 mod_zone_page_state(page_zone(page), allocate_slab()
1394 return page; allocate_slab()
1397 static void setup_object(struct kmem_cache *s, struct page *page, setup_object() argument
1400 setup_object_debug(s, page, object); setup_object()
1408 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) new_slab()
1410 struct page *page; new_slab() local
1421 page = allocate_slab(s, new_slab()
1423 if (!page) new_slab()
1426 order = compound_order(page); new_slab()
1427 inc_slabs_node(s, page_to_nid(page), page->objects); new_slab()
1428 page->slab_cache = s; new_slab()
1429 __SetPageSlab(page); new_slab()
1430 if (page_is_pfmemalloc(page)) new_slab()
1431 SetPageSlabPfmemalloc(page); new_slab()
1433 start = page_address(page); new_slab()
1438 kasan_poison_slab(page); new_slab()
1440 for_each_object_idx(p, idx, s, start, page->objects) { new_slab()
1441 setup_object(s, page, p); new_slab()
1442 if (likely(idx < page->objects)) new_slab()
1448 page->freelist = start; new_slab()
1449 page->inuse = page->objects; new_slab()
1450 page->frozen = 1; new_slab()
1452 return page; new_slab()
1455 static void __free_slab(struct kmem_cache *s, struct page *page) __free_slab() argument
1457 int order = compound_order(page); __free_slab()
1463 slab_pad_check(s, page); __free_slab()
1464 for_each_object(p, s, page_address(page), __free_slab()
1465 page->objects) __free_slab()
1466 check_object(s, page, p, SLUB_RED_INACTIVE); __free_slab()
1469 kmemcheck_free_shadow(page, compound_order(page)); __free_slab()
1471 mod_zone_page_state(page_zone(page), __free_slab()
1476 __ClearPageSlabPfmemalloc(page); __free_slab()
1477 __ClearPageSlab(page); __free_slab()
1479 page_mapcount_reset(page); __free_slab()
1482 __free_pages(page, order); __free_slab()
1487 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1491 struct page *page; rcu_free_slab() local
1494 page = virt_to_head_page(h); rcu_free_slab()
1496 page = container_of((struct list_head *)h, struct page, lru); rcu_free_slab()
1498 __free_slab(page->slab_cache, page); rcu_free_slab()
1501 static void free_slab(struct kmem_cache *s, struct page *page) free_slab() argument
1507 int order = compound_order(page); free_slab()
1511 head = page_address(page) + offset; free_slab()
1516 head = (void *)&page->lru; free_slab()
1521 __free_slab(s, page); free_slab()
1524 static void discard_slab(struct kmem_cache *s, struct page *page) discard_slab() argument
1526 dec_slabs_node(s, page_to_nid(page), page->objects); discard_slab()
1527 free_slab(s, page); discard_slab()
1534 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) __add_partial() argument
1538 list_add_tail(&page->lru, &n->partial); __add_partial()
1540 list_add(&page->lru, &n->partial); __add_partial()
1544 struct page *page, int tail) add_partial()
1547 __add_partial(n, page, tail); add_partial()
1551 __remove_partial(struct kmem_cache_node *n, struct page *page) __remove_partial() argument
1553 list_del(&page->lru); __remove_partial()
1558 struct page *page) remove_partial()
1561 __remove_partial(n, page); remove_partial()
1571 struct kmem_cache_node *n, struct page *page, acquire_slab()
1576 struct page new; acquire_slab()
1585 freelist = page->freelist; acquire_slab()
1586 counters = page->counters; acquire_slab()
1590 new.inuse = page->objects; acquire_slab()
1599 if (!__cmpxchg_double_slab(s, page, acquire_slab()
1605 remove_partial(n, page); acquire_slab()
1610 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1611 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1619 struct page *page, *page2; get_partial_node() local
1634 list_for_each_entry_safe(page, page2, &n->partial, lru) { get_partial_node()
1637 if (!pfmemalloc_match(page, flags)) get_partial_node()
1640 t = acquire_slab(s, n, page, object == NULL, &objects); get_partial_node()
1646 c->page = page; get_partial_node()
1650 put_cpu_partial(s, page, 0); get_partial_node()
1663 * Get a page from somewhere. Search in increasing NUMA distances.
1727 * Get a partial page, lock it and return it.
1817 static void deactivate_slab(struct kmem_cache *s, struct page *page, deactivate_slab() argument
1821 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); deactivate_slab()
1826 struct page new; deactivate_slab()
1827 struct page old; deactivate_slab()
1829 if (page->freelist) { deactivate_slab()
1836 * to the page freelist while it is still frozen. Leave the deactivate_slab()
1839 * There is no need to take the list->lock because the page deactivate_slab()
1847 prior = page->freelist; deactivate_slab()
1848 counters = page->counters; deactivate_slab()
1854 } while (!__cmpxchg_double_slab(s, page, deactivate_slab()
1863 * Stage two: Ensure that the page is unfrozen while the deactivate_slab()
1868 * with the count. If there is a mismatch then the page deactivate_slab()
1869 * is not unfrozen but the page is on the wrong list. deactivate_slab()
1872 * the page from the list that we just put it on again deactivate_slab()
1878 old.freelist = page->freelist; deactivate_slab()
1879 old.counters = page->counters; deactivate_slab()
1901 * that acquire_slab() will see a slab page that deactivate_slab()
1923 remove_partial(n, page); deactivate_slab()
1927 remove_full(s, n, page); deactivate_slab()
1931 add_partial(n, page, tail); deactivate_slab()
1937 add_full(s, n, page); deactivate_slab()
1943 if (!__cmpxchg_double_slab(s, page, deactivate_slab()
1954 discard_slab(s, page); deactivate_slab()
1971 struct page *page, *discard_page = NULL; unfreeze_partials() local
1973 while ((page = c->partial)) { unfreeze_partials()
1974 struct page new; unfreeze_partials()
1975 struct page old; unfreeze_partials()
1977 c->partial = page->next; unfreeze_partials()
1979 n2 = get_node(s, page_to_nid(page)); unfreeze_partials()
1990 old.freelist = page->freelist; unfreeze_partials()
1991 old.counters = page->counters; unfreeze_partials()
1999 } while (!__cmpxchg_double_slab(s, page, unfreeze_partials()
2005 page->next = discard_page; unfreeze_partials()
2006 discard_page = page; unfreeze_partials()
2008 add_partial(n, page, DEACTIVATE_TO_TAIL); unfreeze_partials()
2017 page = discard_page; unfreeze_partials()
2021 discard_slab(s, page); unfreeze_partials()
2028 * Put a page that was just frozen (in __slab_free) into a partial page
2030 * preemption disabled. The cmpxchg is racy and may put the partial page
2036 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) put_cpu_partial() argument
2039 struct page *oldpage; put_cpu_partial()
2069 pobjects += page->objects - page->inuse; put_cpu_partial()
2071 page->pages = pages; put_cpu_partial()
2072 page->pobjects = pobjects; put_cpu_partial()
2073 page->next = oldpage; put_cpu_partial()
2075 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) put_cpu_partial()
2091 deactivate_slab(s, c->page, c->freelist); flush_slab()
2094 c->page = NULL; flush_slab()
2108 if (c->page) __flush_cpu_slab()
2127 return c->page || c->partial; has_cpu_slab()
2139 static inline int node_match(struct page *page, int node) node_match() argument
2142 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) node_match()
2149 static int count_free(struct page *page) count_free() argument
2151 return page->objects - page->inuse; count_free()
2162 int (*get_count)(struct page *)) count_partial()
2166 struct page *page; count_partial() local
2169 list_for_each_entry(page, &n->partial, lru) count_partial()
2170 x += get_count(page); count_partial()
2218 struct page *page; new_slab_objects() local
2225 page = new_slab(s, flags, node); new_slab_objects()
2226 if (page) { new_slab_objects()
2228 if (c->page) new_slab_objects()
2232 * No other reference to the page yet so we can new_slab_objects()
2235 freelist = page->freelist; new_slab_objects()
2236 page->freelist = NULL; new_slab_objects()
2239 c->page = page; new_slab_objects()
2247 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) pfmemalloc_match() argument
2249 if (unlikely(PageSlabPfmemalloc(page))) pfmemalloc_match()
2256 * Check the page->freelist of a page and either transfer the freelist to the
2257 * per cpu freelist or deactivate the page.
2259 * The page is still frozen if the return value is not NULL.
2261 * If this function returns NULL then the page has been unfrozen.
2265 static inline void *get_freelist(struct kmem_cache *s, struct page *page) get_freelist() argument
2267 struct page new; get_freelist()
2272 freelist = page->freelist; get_freelist()
2273 counters = page->counters; get_freelist()
2278 new.inuse = page->objects; get_freelist()
2281 } while (!__cmpxchg_double_slab(s, page, get_freelist()
2303 * a call to the page allocator and the setup of a new slab.
2309 struct page *page; __slab_alloc() local
2322 page = c->page; __slab_alloc()
2323 if (!page) __slab_alloc()
2327 if (unlikely(!node_match(page, node))) { __slab_alloc()
2333 if (unlikely(!node_match(page, searchnode))) { __slab_alloc()
2335 deactivate_slab(s, page, c->freelist); __slab_alloc()
2336 c->page = NULL; __slab_alloc()
2343 * By rights, we should be searching for a slab page that was __slab_alloc()
2345 * information when the page leaves the per-cpu allocator __slab_alloc()
2347 if (unlikely(!pfmemalloc_match(page, gfpflags))) { __slab_alloc()
2348 deactivate_slab(s, page, c->freelist); __slab_alloc()
2349 c->page = NULL; __slab_alloc()
2359 freelist = get_freelist(s, page); __slab_alloc()
2362 c->page = NULL; __slab_alloc()
2372 * page is pointing to the page from which the objects are obtained. __slab_alloc()
2373 * That page must be frozen for per cpu allocations to work. __slab_alloc()
2375 VM_BUG_ON(!c->page->frozen); __slab_alloc()
2384 page = c->page = c->partial; __slab_alloc()
2385 c->partial = page->next; __slab_alloc()
2399 page = c->page; __slab_alloc()
2400 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) __slab_alloc()
2405 !alloc_debug_processing(s, page, freelist, addr)) __slab_alloc()
2408 deactivate_slab(s, page, get_freepointer(s, freelist)); __slab_alloc()
2409 c->page = NULL; __slab_alloc()
2430 struct page *page; slab_alloc_node() local
2456 * on c to guarantee that object and page associated with previous tid slab_alloc_node()
2458 * page could be one associated with next tid and our alloc/free slab_alloc_node()
2471 page = c->page; slab_alloc_node()
2472 if (unlikely(!object || !node_match(page, node))) { slab_alloc_node()
2574 * lock and free the item. If there is no additional partial page
2577 static void __slab_free(struct kmem_cache *s, struct page *page, __slab_free() argument
2583 struct page new; __slab_free()
2591 !(n = free_debug_processing(s, page, x, addr, &flags))) __slab_free()
2599 prior = page->freelist; __slab_free()
2600 counters = page->counters; __slab_free()
2619 n = get_node(s, page_to_nid(page)); __slab_free()
2633 } while (!cmpxchg_double_slab(s, page, __slab_free()
2641 * If we just froze the page then put it onto the __slab_free()
2645 put_cpu_partial(s, page, 1); __slab_free()
2666 remove_full(s, n, page); __slab_free()
2667 add_partial(n, page, DEACTIVATE_TO_TAIL); __slab_free()
2678 remove_partial(n, page); __slab_free()
2682 remove_full(s, n, page); __slab_free()
2687 discard_slab(s, page); __slab_free()
2702 struct page *page, void *x, unsigned long addr) slab_free()
2726 if (likely(page == c->page)) { slab_free()
2739 __slab_free(s, page, x, addr); slab_free()
2781 * order 0 does not cause fragmentation in the page allocator. Larger objects
2793 * we try to keep the page order as low as possible. So we accept more waste
2794 * of space in favor of a small page order.
2926 struct page *page; early_kmem_cache_node_alloc() local
2931 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); early_kmem_cache_node_alloc()
2933 BUG_ON(!page); early_kmem_cache_node_alloc()
2934 if (page_to_nid(page) != node) { early_kmem_cache_node_alloc()
2939 n = page->freelist; early_kmem_cache_node_alloc()
2941 page->freelist = get_freepointer(kmem_cache_node, n); early_kmem_cache_node_alloc()
2942 page->inuse = 1; early_kmem_cache_node_alloc()
2943 page->frozen = 0; early_kmem_cache_node_alloc()
2951 inc_slabs_node(kmem_cache_node, node, page->objects); early_kmem_cache_node_alloc()
2957 __add_partial(n, page, DEACTIVATE_TO_HEAD); early_kmem_cache_node_alloc()
3151 * list to avoid pounding the page allocator excessively. kmem_cache_open()
3202 static void list_slab_objects(struct kmem_cache *s, struct page *page, list_slab_objects() argument
3206 void *addr = page_address(page); list_slab_objects()
3208 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * list_slab_objects()
3212 slab_err(s, page, text, s->name); list_slab_objects()
3213 slab_lock(page); list_slab_objects()
3215 get_map(s, page, map); list_slab_objects()
3216 for_each_object(p, s, addr, page->objects) { list_slab_objects()
3223 slab_unlock(page); list_slab_objects()
3235 struct page *page, *h; free_partial() local
3237 list_for_each_entry_safe(page, h, &n->partial, lru) { free_partial()
3238 if (!page->inuse) { free_partial()
3239 __remove_partial(n, page); free_partial()
3240 discard_slab(s, page); free_partial()
3242 list_slab_objects(s, page, free_partial()
3331 struct page *page; kmalloc_large_node() local
3335 page = alloc_kmem_pages_node(node, flags, get_order(size)); kmalloc_large_node()
3336 if (page) kmalloc_large_node()
3337 ptr = page_address(page); kmalloc_large_node()
3376 struct page *page; __ksize() local
3381 page = virt_to_head_page(object); __ksize()
3383 if (unlikely(!PageSlab(page))) { __ksize()
3384 WARN_ON(!PageCompound(page)); __ksize()
3385 return PAGE_SIZE << compound_order(page); __ksize()
3388 return slab_ksize(page->slab_cache); __ksize()
3403 struct page *page; kfree() local
3411 page = virt_to_head_page(x); kfree()
3412 if (unlikely(!PageSlab(page))) { kfree()
3413 BUG_ON(!PageCompound(page)); kfree()
3415 __free_kmem_pages(page, compound_order(page)); kfree()
3418 slab_free(page->slab_cache, page, object, _RET_IP_); kfree()
3438 struct page *page; __kmem_cache_shrink() local
3439 struct page *t; __kmem_cache_shrink()
3472 * list_lock. page->inuse here is the upper limit. for_each_kmem_cache_node()
3474 list_for_each_entry_safe(page, t, &n->partial, lru) { for_each_kmem_cache_node()
3475 int free = page->objects - page->inuse; for_each_kmem_cache_node()
3477 /* Do not reread page->inuse */ for_each_kmem_cache_node()
3483 if (free == page->objects) { for_each_kmem_cache_node()
3484 list_move(&page->lru, &discard); for_each_kmem_cache_node()
3487 list_move(&page->lru, promote + free - 1); for_each_kmem_cache_node()
3500 list_for_each_entry_safe(page, t, &discard, lru) for_each_kmem_cache_node()
3501 discard_slab(s, page); for_each_kmem_cache_node()
3635 * the page allocator. Allocate them properly then fix up the pointers
3654 struct page *p; for_each_kmem_cache_node()
3861 static int count_inuse(struct page *page) count_inuse() argument
3863 return page->inuse; count_inuse()
3866 static int count_total(struct page *page) count_total() argument
3868 return page->objects; count_total()
3873 static int validate_slab(struct kmem_cache *s, struct page *page, validate_slab() argument
3877 void *addr = page_address(page); validate_slab()
3879 if (!check_slab(s, page) || validate_slab()
3880 !on_freelist(s, page, NULL)) validate_slab()
3884 bitmap_zero(map, page->objects); validate_slab()
3886 get_map(s, page, map); validate_slab()
3887 for_each_object(p, s, addr, page->objects) { validate_slab()
3889 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) validate_slab()
3893 for_each_object(p, s, addr, page->objects) validate_slab()
3895 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) validate_slab()
3900 static void validate_slab_slab(struct kmem_cache *s, struct page *page, validate_slab_slab() argument
3903 slab_lock(page); validate_slab_slab()
3904 validate_slab(s, page, map); validate_slab_slab()
3905 slab_unlock(page); validate_slab_slab()
3912 struct page *page; validate_slab_node() local
3917 list_for_each_entry(page, &n->partial, lru) { validate_slab_node()
3918 validate_slab_slab(s, page, map); validate_slab_node()
3928 list_for_each_entry(page, &n->full, lru) { validate_slab_node()
3929 validate_slab_slab(s, page, map); validate_slab_node()
4085 struct page *page, enum track_item alloc, process_slab()
4088 void *addr = page_address(page); process_slab()
4091 bitmap_zero(map, page->objects); process_slab()
4092 get_map(s, page, map); process_slab()
4094 for_each_object(p, s, addr, page->objects) process_slab()
4120 struct page *page; for_each_kmem_cache_node() local
4126 list_for_each_entry(page, &n->partial, lru) for_each_kmem_cache_node()
4127 process_slab(&t, s, page, alloc, map); for_each_kmem_cache_node()
4128 list_for_each_entry(page, &n->full, lru) for_each_kmem_cache_node()
4129 process_slab(&t, s, page, alloc, map); for_each_kmem_cache_node()
4278 struct page *page; for_each_possible_cpu() local
4280 page = READ_ONCE(c->page); for_each_possible_cpu()
4281 if (!page) for_each_possible_cpu()
4284 node = page_to_nid(page); for_each_possible_cpu()
4286 x = page->objects; for_each_possible_cpu()
4288 x = page->inuse; for_each_possible_cpu()
4295 page = READ_ONCE(c->partial); for_each_possible_cpu()
4296 if (page) { for_each_possible_cpu()
4297 node = page_to_nid(page); for_each_possible_cpu()
4303 x = page->pages; for_each_possible_cpu()
4523 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; for_each_online_cpu() local
4525 if (page) { for_each_online_cpu()
4526 pages += page->pages; for_each_online_cpu()
4527 objects += page->pobjects; for_each_online_cpu()
4535 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; for_each_online_cpu() local
4537 if (page && len < PAGE_SIZE - 20) for_each_online_cpu()
4539 page->pobjects, page->pages); for_each_online_cpu()
5059 * size, but sysfs allows buffers up to a page, so they can memcg_propagate_slab_attrs()
228 check_valid_pointer(struct kmem_cache *s, struct page *page, const void *object) check_valid_pointer() argument
964 add_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) add_full() argument
1030 alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) alloc_debug_processing() argument
1066 free_debug_processing( struct kmem_cache *s, struct page *page, void *object, unsigned long addr, unsigned long *flags) free_debug_processing() argument
1206 setup_object_debug(struct kmem_cache *s, struct page *page, void *object) setup_object_debug() argument
1209 alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) alloc_debug_processing() argument
1212 free_debug_processing( struct kmem_cache *s, struct page *page, void *object, unsigned long addr, unsigned long *flags) free_debug_processing() argument
1220 add_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) add_full() argument
1222 remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) remove_full() argument
1543 add_partial(struct kmem_cache_node *n, struct page *page, int tail) add_partial() argument
1557 remove_partial(struct kmem_cache_node *n, struct page *page) remove_partial() argument
1570 acquire_slab(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page, int mode, int *objects) acquire_slab() argument
2701 slab_free(struct kmem_cache *s, struct page *page, void *x, unsigned long addr) slab_free() argument
4084 process_slab(struct loc_track *t, struct kmem_cache *s, struct page *page, enum track_item alloc, unsigned long *map) process_slab() argument
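The mm/slub.c hits above include on_freelist() ("Determine if a certain object on a page is on the freelist"), which walks the slab's singly linked freelist: each free object stores a pointer to the next free object, the walk is bounded by the object count, and an invalid pointer is reported as corruption. A minimal user-space sketch of that walk follows, under assumed simplifications (one flat buffer of fixed-size objects, the free pointer stored at offset 0, no locking or counter fix-ups); it is an illustration, not the kernel code.

/*
 * Sketch of a SLUB-style freelist walk (cf. on_freelist() above).
 * Assumptions: objects live in one contiguous buffer, each free object
 * stores the pointer to the next free object at offset 0, and the walk
 * is bounded by the object count so a corrupted (cyclic) list cannot
 * loop forever.
 */
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   64
#define NR_OBJECTS 8

/* Is `search` reachable from `freelist`?  Also counts the free objects. */
static int on_freelist_sketch(char *slab_base, void *freelist, void *search,
                              int *nr_free)
{
        void *fp = freelist;
        int nr = 0, found = 0;

        while (fp && nr <= NR_OBJECTS) {
                char *p = fp;

                /* validity check: inside the slab and on an object boundary */
                if (p < slab_base || p >= slab_base + NR_OBJECTS * OBJ_SIZE ||
                    (p - slab_base) % OBJ_SIZE != 0) {
                        fprintf(stderr, "freepointer corrupt at hop %d\n", nr);
                        break;
                }
                if (fp == search)
                        found = 1;
                memcpy(&fp, p, sizeof(void *)); /* follow the free pointer */
                nr++;
        }
        *nr_free = nr;
        return found;
}

int main(void)
{
        static char slab[NR_OBJECTS * OBJ_SIZE];
        void *freelist = NULL;
        int nr_free;

        /* build a freelist out of the odd-numbered objects */
        for (int i = NR_OBJECTS - 1; i >= 1; i -= 2) {
                char *obj = slab + i * OBJ_SIZE;

                memcpy(obj, &freelist, sizeof(void *));
                freelist = obj;
        }

        printf("object 2 free: %d\n",
               on_freelist_sketch(slab, freelist, slab + 2 * OBJ_SIZE, &nr_free));
        printf("object 3 free: %d\n",
               on_freelist_sketch(slab, freelist, slab + 3 * OBJ_SIZE, &nr_free));
        printf("free objects counted: %d\n", nr_free);
        return 0;
}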
/linux-4.1.27/arch/m68k/include/asm/
H A Dmotorola_pgalloc.h32 struct page *page; pte_alloc_one() local
35 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); pte_alloc_one()
36 if(!page) pte_alloc_one()
38 if (!pgtable_page_ctor(page)) { pte_alloc_one()
39 __free_page(page); pte_alloc_one()
43 pte = kmap(page); pte_alloc_one()
47 kunmap(page); pte_alloc_one()
48 return page; pte_alloc_one()
51 static inline void pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
53 pgtable_page_dtor(page); pte_free()
54 cache_page(kmap(page)); pte_free()
55 kunmap(page); pte_free()
56 __free_page(page); pte_free()
59 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, __pte_free_tlb() argument
62 pgtable_page_dtor(page); __pte_free_tlb()
63 cache_page(kmap(page)); __pte_free_tlb()
64 kunmap(page); __pte_free_tlb()
65 __free_page(page); __pte_free_tlb()
102 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
104 pmd_set(pmd, page_address(page)); pmd_populate()
H A Dsun3_pgalloc.h25 static inline void pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
27 pgtable_page_dtor(page); pte_free()
28 __free_page(page); pte_free()
40 unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT); pte_alloc_one_kernel() local
42 if (!page) pte_alloc_one_kernel()
45 memset((void *)page, 0, PAGE_SIZE); pte_alloc_one_kernel()
46 return (pte_t *) (page); pte_alloc_one_kernel()
52 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); pte_alloc_one() local
54 if (page == NULL) pte_alloc_one()
57 clear_highpage(page); pte_alloc_one()
58 if (!pgtable_page_ctor(page)) { pte_alloc_one()
59 __free_page(page); pte_alloc_one()
62 return page; pte_alloc_one()
71 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
73 pmd_val(*pmd) = __pa((unsigned long)page_address(page)); pmd_populate()
H A Dmcf_pgalloc.h17 unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); pte_alloc_one_kernel() local
19 if (!page) pte_alloc_one_kernel()
22 memset((void *)page, 0, PAGE_SIZE); pte_alloc_one_kernel()
23 return (pte_t *) (page); pte_alloc_one_kernel()
36 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
37 (unsigned long)(page_address(page)))
43 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, __pte_free_tlb() argument
46 __free_page(page); __pte_free_tlb()
51 static inline struct page *pte_alloc_one(struct mm_struct *mm, pte_alloc_one()
54 struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); pte_alloc_one() local
57 if (!page) pte_alloc_one()
59 if (!pgtable_page_ctor(page)) { pte_alloc_one()
60 __free_page(page); pte_alloc_one()
64 pte = kmap(page); pte_alloc_one()
71 kunmap(page); pte_alloc_one()
73 return page; pte_alloc_one()
76 extern inline void pte_free(struct mm_struct *mm, struct page *page) pte_free() argument
78 __free_page(page); pte_free()
H A Dpage_no.h10 #define free_user_page(page, addr) free_page(addr)
12 #define clear_page(page) memset((page), 0, PAGE_SIZE)
15 #define clear_user_page(page, vaddr, pg) clear_page(page)
29 #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
32 #define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
H A Dvirtconvert.h13 #include <asm/page.h>
28 /* Permanent address of a page. */
30 #define page_to_phys(page) \
31 __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
33 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/linux-4.1.27/arch/avr32/mm/
H A Ddma-coherent.c40 static struct page *__dma_alloc(struct device *dev, size_t size, __dma_alloc()
43 struct page *page, *free, *end; __dma_alloc() local
56 page = alloc_pages(gfp, order); __dma_alloc()
57 if (!page) __dma_alloc()
59 split_page(page, order); __dma_alloc()
70 invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size); __dma_alloc()
72 *handle = page_to_bus(page); __dma_alloc()
73 free = page + (size >> PAGE_SHIFT); __dma_alloc()
74 end = page + (1 << order); __dma_alloc()
84 return page; __dma_alloc()
88 struct page *page, dma_addr_t handle) __dma_free()
90 struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT); __dma_free()
92 while (page < end) __dma_free()
93 __free_page(page++); __dma_free()
99 struct page *page; dma_alloc_coherent() local
102 page = __dma_alloc(dev, size, handle, gfp); dma_alloc_coherent()
103 if (page) dma_alloc_coherent()
104 ret = phys_to_uncached(page_to_phys(page)); dma_alloc_coherent()
114 struct page *page; dma_free_coherent() local
119 page = virt_to_page(addr); dma_free_coherent()
120 __dma_free(dev, size, page, handle); dma_free_coherent()
127 struct page *page; dma_alloc_writecombine() local
130 page = __dma_alloc(dev, size, handle, gfp); dma_alloc_writecombine()
131 if (!page) dma_alloc_writecombine()
134 phys = page_to_phys(page); dma_alloc_writecombine()
137 /* Now, map the page into P3 with write-combining turned on */ dma_alloc_writecombine()
145 struct page *page; dma_free_writecombine() local
149 page = phys_to_page(handle); dma_free_writecombine()
150 __dma_free(dev, size, page, handle); dma_free_writecombine()
87 __dma_free(struct device *dev, size_t size, struct page *page, dma_addr_t handle) __dma_free() argument
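The avr32 __dma_alloc() hits above show a common allocate-then-trim pattern: the request is rounded up to a power-of-two number of pages, the compound allocation is split into single pages, and every page past the requested size is freed again (the "free = page + (size >> PAGE_SHIFT); end = page + (1 << order);" lines). A small sketch of just that arithmetic, with a stand-in for get_order() and an assumed 4 KiB page size; the helpers below are not kernel APIs.

/*
 * Sketch of the allocate-then-trim sizing used by __dma_alloc() above.
 * get_order_sketch() is a stand-in that returns the smallest order whose
 * 2^order pages cover `size` (for size > 0); PAGE_SHIFT 12 is assumed.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int get_order_sketch(unsigned long size)
{
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                size >>= 1;
                order++;
        }
        return order;
}

int main(void)
{
        unsigned long size = 5 * PAGE_SIZE + 100;       /* 5 pages and a bit */
        unsigned int order = get_order_sketch(size);
        unsigned long allocated = 1UL << order;                    /* pages handed out */
        unsigned long used = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* pages kept */

        /* __dma_alloc() frees the tail range [page + used, page + allocated) */
        printf("order %u -> %lu pages allocated, %lu used, %lu freed back\n",
               order, allocated, used, allocated - used);
        return 0;
}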
/linux-4.1.27/fs/afs/
H A Dfile.c21 static int afs_readpage(struct file *file, struct page *page);
22 static void afs_invalidatepage(struct page *page, unsigned int offset,
24 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
25 static int afs_launder_page(struct page *page);
106 * deal with notification that a page was read from the cache
108 static void afs_file_readpage_read_complete(struct page *page, afs_file_readpage_read_complete() argument
112 _enter("%p,%p,%d", page, data, error); afs_file_readpage_read_complete()
114 /* if the read completes with an error, we just unlock the page and let afs_file_readpage_read_complete()
117 SetPageUptodate(page); afs_file_readpage_read_complete()
118 unlock_page(page); afs_file_readpage_read_complete()
123 * read page from file, directory or symlink, given a key to use
125 int afs_page_filler(void *data, struct page *page) afs_page_filler() argument
127 struct inode *inode = page->mapping->host; afs_page_filler()
134 _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index); afs_page_filler()
136 BUG_ON(!PageLocked(page)); afs_page_filler()
145 page, afs_page_filler()
153 /* read BIO submitted (page in cache) */ afs_page_filler()
157 /* page not yet cached */ afs_page_filler()
162 /* page will not be cached */ afs_page_filler()
167 offset = page->index << PAGE_CACHE_SHIFT; afs_page_filler()
171 * page */ afs_page_filler()
172 ret = afs_vnode_fetch_data(vnode, key, offset, len, page); afs_page_filler()
182 fscache_uncache_page(vnode->cache, page); afs_page_filler()
184 BUG_ON(PageFsCache(page)); afs_page_filler()
188 SetPageUptodate(page); afs_page_filler()
190 /* send the page to the cache */ afs_page_filler()
192 if (PageFsCache(page) && afs_page_filler()
193 fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) { afs_page_filler()
194 fscache_uncache_page(vnode->cache, page); afs_page_filler()
195 BUG_ON(PageFsCache(page)); afs_page_filler()
198 unlock_page(page); afs_page_filler()
205 SetPageError(page); afs_page_filler()
206 unlock_page(page); afs_page_filler()
212 * read page from file, directory or symlink, given a file to nominate the key
215 static int afs_readpage(struct file *file, struct page *page) afs_readpage() argument
223 ret = afs_page_filler(key, page); afs_readpage()
225 struct inode *inode = page->mapping->host; afs_readpage()
230 ret = afs_page_filler(key, page); afs_readpage()
298 * write back a dirty page
300 static int afs_launder_page(struct page *page) afs_launder_page() argument
302 _enter("{%lu}", page->index); afs_launder_page()
308 * invalidate part or all of a page
309 * - release a page and clean up its private data if offset is 0 (indicating
310 * the entire page)
312 static void afs_invalidatepage(struct page *page, unsigned int offset, afs_invalidatepage() argument
315 struct afs_writeback *wb = (struct afs_writeback *) page_private(page); afs_invalidatepage()
317 _enter("{%lu},%u,%u", page->index, offset, length); afs_invalidatepage()
319 BUG_ON(!PageLocked(page)); afs_invalidatepage()
321 /* we clean up only if the entire page is being invalidated */ afs_invalidatepage()
324 if (PageFsCache(page)) { afs_invalidatepage()
325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); afs_invalidatepage()
326 fscache_wait_on_page_write(vnode->cache, page); afs_invalidatepage()
327 fscache_uncache_page(vnode->cache, page); afs_invalidatepage()
331 if (PagePrivate(page)) { afs_invalidatepage()
332 if (wb && !PageWriteback(page)) { afs_invalidatepage()
333 set_page_private(page, 0); afs_invalidatepage()
337 if (!page_private(page)) afs_invalidatepage()
338 ClearPagePrivate(page); afs_invalidatepage()
346 * release a page and clean up its private state if it's not busy
347 * - return true if the page can now be released, false if not
349 static int afs_releasepage(struct page *page, gfp_t gfp_flags) afs_releasepage() argument
351 struct afs_writeback *wb = (struct afs_writeback *) page_private(page); afs_releasepage()
352 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); afs_releasepage()
355 vnode->fid.vid, vnode->fid.vnode, page->index, page->flags, afs_releasepage()
358 /* deny if page is being written to the cache and the caller hasn't afs_releasepage()
361 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) { afs_releasepage()
367 if (PagePrivate(page)) { afs_releasepage()
369 set_page_private(page, 0); afs_releasepage()
372 ClearPagePrivate(page); afs_releasepage()
375 /* indicate that the page can be released */ afs_releasepage()
H A Dwrite.c20 struct page *page);
23 * mark a page as having been made dirty and thus needing writeback
25 int afs_set_page_dirty(struct page *page) afs_set_page_dirty() argument
28 return __set_page_dirty_nobuffers(page); afs_set_page_dirty()
84 * partly or wholly fill a page that's under preparation for writing
87 loff_t pos, struct page *page) afs_fill_page()
101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page); afs_fill_page()
116 * prepare to perform part of a write to a page
120 struct page **pagep, void **fsdata) afs_write_begin()
124 struct page *page; afs_write_begin() local
146 page = grab_cache_page_write_begin(mapping, index, flags); afs_write_begin()
147 if (!page) { afs_write_begin()
151 *pagep = page; afs_write_begin()
152 /* page won't leak in error case: it eventually gets cleaned off LRU */ afs_write_begin()
154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { afs_write_begin()
155 ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); afs_write_begin()
161 SetPageUptodate(page); afs_write_begin()
167 /* see if this page is already pending a writeback under a suitable key afs_write_begin()
169 wb = (struct afs_writeback *) page_private(page); afs_write_begin()
178 * append this page to */ afs_write_begin()
189 SetPagePrivate(page); afs_write_begin()
190 set_page_private(page, (unsigned long) candidate); afs_write_begin()
212 SetPagePrivate(page); afs_write_begin()
213 set_page_private(page, (unsigned long) wb); afs_write_begin()
218 /* the page is currently bound to another context, so if it's dirty we afs_write_begin()
225 if (PageDirty(page)) { afs_write_begin()
226 ret = afs_write_back_from_locked_page(wb, page); afs_write_begin()
234 /* the page holds a ref on the writeback record */ afs_write_begin()
236 set_page_private(page, 0); afs_write_begin()
237 ClearPagePrivate(page); afs_write_begin()
242 * finalise part of a write to a page
246 struct page *page, void *fsdata) afs_write_end()
252 vnode->fid.vid, vnode->fid.vnode, page->index); afs_write_end()
265 set_page_dirty(page); afs_write_end()
266 if (PageDirty(page)) afs_write_end()
268 unlock_page(page); afs_write_end()
269 page_cache_release(page); afs_write_end()
312 * synchronously write back the locked page and any subsequent non-locked dirty
316 struct page *primary_page) afs_write_back_from_locked_page()
318 struct page *pages[8], *page; afs_write_back_from_locked_page() local
333 * page that is not immediately lockable, is not dirty or is missing, afs_write_back_from_locked_page()
357 page = pages[loop]; afs_write_back_from_locked_page()
358 if (page->index > wb->last) afs_write_back_from_locked_page()
360 if (!trylock_page(page)) afs_write_back_from_locked_page()
362 if (!PageDirty(page) || afs_write_back_from_locked_page()
363 page_private(page) != (unsigned long) wb) { afs_write_back_from_locked_page()
364 unlock_page(page); afs_write_back_from_locked_page()
367 if (!clear_page_dirty_for_io(page)) afs_write_back_from_locked_page()
369 if (test_set_page_writeback(page)) afs_write_back_from_locked_page()
371 unlock_page(page); afs_write_back_from_locked_page()
372 put_page(page); afs_write_back_from_locked_page()
386 * and the dirty mark cleared; the first page is locked and must remain afs_write_back_from_locked_page()
434 * write a page back to the server
435 * - the caller locked the page for us
437 int afs_writepage(struct page *page, struct writeback_control *wbc) afs_writepage() argument
442 _enter("{%lx},", page->index); afs_writepage()
444 wb = (struct afs_writeback *) page_private(page); afs_writepage()
447 ret = afs_write_back_from_locked_page(wb, page); afs_writepage()
448 unlock_page(page); afs_writepage()
468 struct page *page; afs_writepages_region() local
475 1, &page); afs_writepages_region()
479 _debug("wback %lx", page->index); afs_writepages_region()
481 if (page->index > end) { afs_writepages_region()
483 page_cache_release(page); afs_writepages_region()
489 * the page itself: the page may be truncated or invalidated afs_writepages_region()
490 * (changing page->mapping to NULL), or even swizzled back from afs_writepages_region()
493 lock_page(page); afs_writepages_region()
495 if (page->mapping != mapping) { afs_writepages_region()
496 unlock_page(page); afs_writepages_region()
497 page_cache_release(page); afs_writepages_region()
502 wait_on_page_writeback(page); afs_writepages_region()
504 if (PageWriteback(page) || !PageDirty(page)) { afs_writepages_region()
505 unlock_page(page); afs_writepages_region()
509 wb = (struct afs_writeback *) page_private(page); afs_writepages_region()
516 ret = afs_write_back_from_locked_page(wb, page); afs_writepages_region()
517 unlock_page(page); afs_writepages_region()
518 page_cache_release(page); afs_writepages_region()
598 struct page *page = pv.pages[loop]; afs_pages_written_back() local
599 end_page_writeback(page); afs_pages_written_back()
600 if (page_private(page) == (unsigned long) wb) { afs_pages_written_back()
601 set_page_private(page, 0); afs_pages_written_back()
602 ClearPagePrivate(page); afs_pages_written_back()
743 * notification that a previously read-only page is about to become writable
746 int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page) afs_page_mkwrite() argument
751 vnode->fid.vid, vnode->fid.vnode, page->index); afs_page_mkwrite()
753 /* wait for the page to be written to the cache before we allow it to afs_page_mkwrite()
756 fscache_wait_on_page_write(vnode->cache, page); afs_page_mkwrite()
86 afs_fill_page(struct afs_vnode *vnode, struct key *key, loff_t pos, struct page *page) afs_fill_page() argument
244 afs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) afs_write_end() argument
/linux-4.1.27/net/ceph/
H A Dpagelist.c11 struct page *page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_unmap_tail() local
12 kunmap(page); ceph_pagelist_unmap_tail()
23 struct page *page = list_first_entry(&pl->head, struct page, ceph_pagelist_release() local
25 list_del(&page->lru); ceph_pagelist_release()
26 __free_page(page); ceph_pagelist_release()
35 struct page *page; ceph_pagelist_addpage() local
38 page = __page_cache_alloc(GFP_NOFS); ceph_pagelist_addpage()
40 page = list_first_entry(&pl->free_list, struct page, lru); ceph_pagelist_addpage()
41 list_del(&page->lru); ceph_pagelist_addpage()
44 if (!page) ceph_pagelist_addpage()
48 list_add_tail(&page->lru, &pl->head); ceph_pagelist_addpage()
49 pl->mapped_tail = kmap(page); ceph_pagelist_addpage()
89 struct page *page = __page_cache_alloc(GFP_NOFS); ceph_pagelist_reserve() local
90 if (!page) ceph_pagelist_reserve()
92 list_add_tail(&page->lru, &pl->free_list); ceph_pagelist_reserve()
103 struct page *page = list_first_entry(&pl->free_list, ceph_pagelist_free_reserve() local
104 struct page, lru); ceph_pagelist_free_reserve()
105 list_del(&page->lru); ceph_pagelist_free_reserve()
106 __free_page(page); ceph_pagelist_free_reserve()
132 struct page *page; ceph_pagelist_truncate() local
138 page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_truncate()
140 list_move_tail(&page->lru, &pl->free_list); ceph_pagelist_truncate()
145 page = list_entry(pl->head.prev, struct page, lru); ceph_pagelist_truncate()
146 pl->mapped_tail = kmap(page); ceph_pagelist_truncate()
H A Dpagevec.c15 struct page **ceph_get_direct_page_vector(const void __user *data, ceph_get_direct_page_vector()
18 struct page **pages; ceph_get_direct_page_vector()
45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) ceph_put_page_vector()
61 void ceph_release_page_vector(struct page **pages, int num_pages) ceph_release_page_vector()
74 struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags) ceph_alloc_page_vector()
76 struct page **pages; ceph_alloc_page_vector()
94 * copy user data into a page vector
96 int ceph_copy_user_to_page_vector(struct page **pages, ceph_copy_user_to_page_vector()
122 void ceph_copy_to_page_vector(struct page **pages, ceph_copy_to_page_vector()
145 void ceph_copy_from_page_vector(struct page **pages, ceph_copy_from_page_vector()
169 * Zero an extent within a page vector. Offset is relative to the
170 * start of the first page.
172 void ceph_zero_page_vector_range(int off, int len, struct page **pages) ceph_zero_page_vector_range()
180 /* leading partial page? */ ceph_zero_page_vector_range()
195 /* trailing partial page? */ ceph_zero_page_vector_range()
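The net/ceph/pagevec.c hits above describe zeroing an extent within a page vector, where the offset is relative to the start of the first page and a leading partial page, any whole pages, and a trailing partial page are handled in turn. A self-contained sketch of that three-step split, with pages modelled as plain heap buffers and an assumed 4 KiB page size (the kernel version works on struct page pointers and maps each page before clearing it).

/*
 * Sketch of "zero an extent within a page vector" (cf. the pagevec.c
 * comments above).  `off` is relative to the start of the first page;
 * the leading partial page, whole pages, and trailing partial page are
 * cleared separately.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void zero_page_vector_range(size_t off, size_t len, char **pages)
{
        size_t i = off / PAGE_SIZE;             /* first page touched */

        /* leading partial page? */
        if (off % PAGE_SIZE) {
                size_t end = PAGE_SIZE - off % PAGE_SIZE;

                if (end > len)
                        end = len;
                memset(pages[i] + off % PAGE_SIZE, 0, end);
                len -= end;
                i++;
        }
        /* whole pages */
        while (len >= PAGE_SIZE) {
                memset(pages[i++], 0, PAGE_SIZE);
                len -= PAGE_SIZE;
        }
        /* trailing partial page? */
        if (len)
                memset(pages[i], 0, len);
}

int main(void)
{
        char *pages[3];

        for (int i = 0; i < 3; i++) {
                pages[i] = malloc(PAGE_SIZE);
                memset(pages[i], 0xff, PAGE_SIZE);
        }
        /* zero from byte 100 of page 0 up to byte 50 of page 2 */
        zero_page_vector_range(100, 2 * PAGE_SIZE - 100 + 50, pages);

        printf("page0[99]=%u page0[100]=%u page2[49]=%u page2[50]=%u\n",
               (unsigned char)pages[0][99], (unsigned char)pages[0][100],
               (unsigned char)pages[2][49], (unsigned char)pages[2][50]);

        for (int i = 0; i < 3; i++)
                free(pages[i]);
        return 0;
}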
/linux-4.1.27/arch/tile/include/asm/
H A Dhighmem.h53 void *kmap_high(struct page *page);
54 void kunmap_high(struct page *page);
55 void *kmap(struct page *page);
56 void kunmap(struct page *page);
57 void *kmap_fix_kpte(struct page *page, int finished);
59 /* This macro is used only in map_new_virtual() to map "page". */
60 #define kmap_prot page_to_kpgprot(page)
62 void *kmap_atomic(struct page *page);
66 struct page *kmap_atomic_to_page(void *ptr);
67 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
68 void kmap_atomic_fix_kpte(struct page *page, int finished);
H A Dhomecache.h20 #include <asm/page.h>
23 struct page;
29 * Coherence point for the page is its memory controller.
35 * Is this page immutable (unwritable) and thus able to be cached more
41 * Each cpu considers its own cache to be the home for the page,
46 /* Home for the page is distributed via hash-for-home. */
64 * Change a kernel page's homecache. It must not be mapped in user space.
66 * no other cpu can reference the page, and causes a full-chip cache/TLB flush.
68 extern void homecache_change_page_home(struct page *, int order, int home);
71 * Flush a page out of whatever cache(s) it is in.
76 extern void homecache_finv_page(struct page *);
79 * Flush a page out of the specified home cache.
80 * Note that the specified home need not be the actual home of the page,
83 extern void homecache_finv_map_page(struct page *, int home);
86 * Allocate a page with the given GFP flags, home, and optionally
92 extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
94 extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
103 * back to the default before returning the page to the allocator.
105 void __homecache_free_pages(struct page *, unsigned int order);
107 #define __homecache_free_page(page) __homecache_free_pages((page), 0)
108 #define homecache_free_page(page) homecache_free_pages((page), 0)
112 * Report the page home for LOWMEM pages by examining their kernel PTE,
115 extern int page_home(struct page *);
H A Dpgalloc.h22 #include <asm/page.h>
25 /* Bits for the size of the second-level page table. */
28 /* How big is a kernel L2 page table? */
31 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */
38 /* How many pages do we need, as an "order", for a user L2 page table? */
58 pgtable_t page) pmd_populate()
60 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))), pmd_populate()
65 * Allocate and free page tables.
73 extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
81 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free()
100 extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
102 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, __pte_free_tlb()
111 * Get the small-page pte_t lowmem entry for a given pfn.
113 * huge-page entry for the page has already been shattered.
128 /* Bits for the size of the L1 (intermediate) page table. */
131 /* How big is a kernel L2 page table? */
134 /* We currently allocate L1 page tables by page. */
141 /* How many pages do we need, as an "order", for an L1 page table? */
146 struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER); pmd_alloc_one()
57 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) pmd_populate() argument
/linux-4.1.27/arch/s390/mm/
H A Dpage-states.c4 * Guest page hinting for unused pages.
54 static inline void set_page_unstable(struct page *page, int order) set_page_unstable() argument
61 : "a" (page_to_phys(page + i)), set_page_unstable()
65 void arch_free_page(struct page *page, int order) arch_free_page() argument
69 set_page_unstable(page, order); arch_free_page()
72 static inline void set_page_stable(struct page *page, int order) set_page_stable() argument
79 : "a" (page_to_phys(page + i)), set_page_stable()
83 void arch_alloc_page(struct page *page, int order) arch_alloc_page() argument
87 set_page_stable(page, order); arch_alloc_page()
94 struct page *page; arch_set_page_states() local
105 page = list_entry(l, struct page, lru); for_each_migratetype_order()
107 set_page_stable(page, order); for_each_migratetype_order()
109 set_page_unstable(page, order); for_each_migratetype_order()
/linux-4.1.27/fs/squashfs/
H A Dfile_direct.c23 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
24 int pages, struct page **page);
26 /* Read separately compressed datablock directly into page cache */ squashfs_readpage_block()
27 int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) squashfs_readpage_block()
38 struct page **page; squashfs_readpage_block() local
47 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); squashfs_readpage_block()
48 if (page == NULL) squashfs_readpage_block()
52 * Create a "page actor" which will kmap and kunmap the squashfs_readpage_block()
53 * page cache pages appropriately within the decompressor squashfs_readpage_block()
55 actor = squashfs_page_actor_init_special(page, pages, 0); squashfs_readpage_block()
61 page[i] = (n == target_page->index) ? target_page : squashfs_readpage_block()
64 if (page[i] == NULL) { squashfs_readpage_block()
69 if (PageUptodate(page[i])) { squashfs_readpage_block()
70 unlock_page(page[i]); squashfs_readpage_block()
71 page_cache_release(page[i]); squashfs_readpage_block()
72 page[i] = NULL; squashfs_readpage_block()
79 * Couldn't get one or more pages, this page has either squashfs_readpage_block()
80 * been VM reclaimed, but others are still in the page cache squashfs_readpage_block()
86 page); squashfs_readpage_block()
93 /* Decompress directly into the page cache buffers */ squashfs_readpage_block()
98 /* Last page may have trailing bytes not filled */ squashfs_readpage_block()
101 pageaddr = kmap_atomic(page[pages - 1]); squashfs_readpage_block()
108 flush_dcache_page(page[i]); squashfs_readpage_block()
109 SetPageUptodate(page[i]); squashfs_readpage_block()
110 unlock_page(page[i]); squashfs_readpage_block()
111 if (page[i] != target_page) squashfs_readpage_block()
112 page_cache_release(page[i]); squashfs_readpage_block()
116 kfree(page); squashfs_readpage_block()
125 if (page[i] == NULL || page[i] == target_page) squashfs_readpage_block()
127 flush_dcache_page(page[i]); squashfs_readpage_block()
128 SetPageError(page[i]); squashfs_readpage_block()
129 unlock_page(page[i]); squashfs_readpage_block()
130 page_cache_release(page[i]); squashfs_readpage_block()
135 kfree(page); squashfs_readpage_block()
140 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, squashfs_read_cache() argument
141 int pages, struct page **page) squashfs_read_cache()
150 ERROR("Unable to read page, block %llx, size %x\n", block, squashfs_read_cache()
159 if (page[n] == NULL) squashfs_read_cache()
162 pageaddr = kmap_atomic(page[n]); squashfs_read_cache()
166 flush_dcache_page(page[n]); squashfs_read_cache()
167 SetPageUptodate(page[n]); squashfs_read_cache()
168 unlock_page(page[n]); squashfs_read_cache()
169 if (page[n] != target_page) squashfs_read_cache()
170 page_cache_release(page[n]); squashfs_read_cache()
H A Dfile_cache.c22 /* Read separately compressed datablock and memcopy into page cache */ squashfs_readpage_block()
23 int squashfs_readpage_block(struct page *page, u64 block, int bsize) squashfs_readpage_block() argument
25 struct inode *i = page->mapping->host; squashfs_readpage_block()
31 ERROR("Unable to read page, block %llx, size %x\n", block, squashfs_readpage_block()
34 squashfs_copy_cache(page, buffer, buffer->length, 0); squashfs_readpage_block()
H A Dpage_actor.h13 void **page; member in struct:squashfs_page_actor
19 static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page, squashfs_page_actor_init() argument
28 actor->page = page; squashfs_page_actor_init()
37 return actor->page[0]; squashfs_first_page()
43 actor->page[actor->next_page++]; squashfs_next_page()
54 struct page **page; member in union:squashfs_page_actor::__anon11301
66 extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
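The squashfs file_direct.c hits above follow a gather-or-fall-back pattern: grab every page cache page that a compressed block decompresses into, and if any page cannot be obtained (reclaimed, or another thread got there first), fall back to the slower cache-copy path. The rough control-flow sketch below models pages as heap buffers and ignores the "already uptodate" case; unlike the real code, which keeps the pages it did get and fills them from the block cache via squashfs_read_cache(), the sketch simply releases them to stay short. It is not the squashfs API.

/* Sketch of the gather-or-fall-back flow in squashfs_readpage_block() above. */
#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_BLOCK 4

/* Pretend that page 2 is unavailable (e.g. reclaimed). */
static char *grab_page(int index)
{
        if (index == 2)
                return NULL;
        return calloc(1, 4096);
}

static int read_block_direct(char **pages)
{
        for (int i = 0; i < PAGES_PER_BLOCK; i++) {
                pages[i] = grab_page(i);
                if (!pages[i]) {
                        /* release what was gathered and report failure */
                        while (--i >= 0)
                                free(pages[i]);
                        return -1;
                }
        }
        /* ... decompress directly into pages[] here ... */
        for (int i = 0; i < PAGES_PER_BLOCK; i++)
                free(pages[i]);
        return 0;
}

int main(void)
{
        char *pages[PAGES_PER_BLOCK] = { NULL };

        if (read_block_direct(pages) < 0)
                printf("direct read failed, falling back to the cache-copy path\n");
        else
                printf("decompressed straight into the page cache\n");
        return 0;
}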
/linux-4.1.27/fs/minix/
H A Ddir.c28 static inline void dir_put_page(struct page *page) dir_put_page() argument
30 kunmap(page); dir_put_page()
31 page_cache_release(page); dir_put_page()
35 * Return the offset into page `page_nr' of the last valid
36 * byte in that page, plus one.
53 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) dir_commit_chunk() argument
55 struct address_space *mapping = page->mapping; dir_commit_chunk()
58 block_write_end(NULL, mapping, pos, len, len, page, NULL); dir_commit_chunk()
65 err = write_one_page(page, 1); dir_commit_chunk()
67 unlock_page(page); dir_commit_chunk()
71 static struct page * dir_get_page(struct inode *dir, unsigned long n) dir_get_page()
74 struct page *page = read_mapping_page(mapping, n, NULL); dir_get_page() local
75 if (!IS_ERR(page)) dir_get_page()
76 kmap(page); dir_get_page()
77 return page; dir_get_page()
105 struct page *page = dir_get_page(inode, n); minix_readdir() local
107 if (IS_ERR(page)) minix_readdir()
109 kaddr = (char *)page_address(page); minix_readdir()
128 dir_put_page(page); minix_readdir()
134 dir_put_page(page); minix_readdir()
155 minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) minix_find_entry()
164 struct page *page = NULL; minix_find_entry() local
174 page = dir_get_page(dir, n); minix_find_entry()
175 if (IS_ERR(page)) minix_find_entry()
178 kaddr = (char*)page_address(page); minix_find_entry()
195 dir_put_page(page); minix_find_entry()
200 *res_page = page; minix_find_entry()
211 struct page *page = NULL; minix_add_link() local
224 * This code plays outside i_size, so it locks the page minix_add_link()
230 page = dir_get_page(dir, n); minix_add_link()
231 err = PTR_ERR(page); minix_add_link()
232 if (IS_ERR(page)) minix_add_link()
234 lock_page(page); minix_add_link()
235 kaddr = (char*)page_address(page); minix_add_link()
262 unlock_page(page); minix_add_link()
263 dir_put_page(page); minix_add_link()
269 pos = page_offset(page) + p - (char *)page_address(page); minix_add_link()
270 err = minix_prepare_chunk(page, pos, sbi->s_dirsize); minix_add_link()
281 err = dir_commit_chunk(page, pos, sbi->s_dirsize); minix_add_link()
285 dir_put_page(page); minix_add_link()
289 unlock_page(page); minix_add_link()
293 int minix_delete_entry(struct minix_dir_entry *de, struct page *page) minix_delete_entry() argument
295 struct inode *inode = page->mapping->host; minix_delete_entry()
296 char *kaddr = page_address(page); minix_delete_entry()
297 loff_t pos = page_offset(page) + (char*)de - kaddr; minix_delete_entry()
302 lock_page(page); minix_delete_entry()
303 err = minix_prepare_chunk(page, pos, len); minix_delete_entry()
309 err = dir_commit_chunk(page, pos, len); minix_delete_entry()
311 unlock_page(page); minix_delete_entry()
313 dir_put_page(page); minix_delete_entry()
321 struct page *page = grab_cache_page(inode->i_mapping, 0); minix_make_empty() local
326 if (!page) minix_make_empty()
328 err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize); minix_make_empty()
330 unlock_page(page); minix_make_empty()
334 kaddr = kmap_atomic(page); minix_make_empty()
356 err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); minix_make_empty()
358 page_cache_release(page); minix_make_empty()
367 struct page *page = NULL; minix_empty_dir() local
376 page = dir_get_page(inode, i); minix_empty_dir()
377 if (IS_ERR(page)) minix_empty_dir()
380 kaddr = (char *)page_address(page); minix_empty_dir()
406 dir_put_page(page); minix_empty_dir()
411 dir_put_page(page); minix_empty_dir()
415 /* Releases the page */ minix_set_link()
416 void minix_set_link(struct minix_dir_entry *de, struct page *page, minix_set_link() argument
419 struct inode *dir = page->mapping->host; minix_set_link()
421 loff_t pos = page_offset(page) + minix_set_link()
422 (char *)de-(char*)page_address(page); minix_set_link()
425 lock_page(page); minix_set_link()
427 err = minix_prepare_chunk(page, pos, sbi->s_dirsize); minix_set_link()
433 err = dir_commit_chunk(page, pos, sbi->s_dirsize); minix_set_link()
435 unlock_page(page); minix_set_link()
437 dir_put_page(page); minix_set_link()
442 struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p) minix_dotdot()
444 struct page *page = dir_get_page(dir, 0); minix_dotdot() local
448 if (!IS_ERR(page)) { minix_dotdot()
449 de = minix_next_entry(page_address(page), sbi); minix_dotdot()
450 *p = page; minix_dotdot()
457 struct page *page; minix_inode_by_name() local
458 struct minix_dir_entry *de = minix_find_entry(dentry, &page); minix_inode_by_name()
462 struct address_space *mapping = page->mapping; minix_inode_by_name()
470 dir_put_page(page); minix_inode_by_name()
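The fs/minix/dir.c hits above read the directory in page-sized chunks via dir_get_page(), and the comment near the top describes a helper that returns "the offset into page `page_nr' of the last valid byte in that page, plus one": every chunk is full except the one containing EOF. A sketch of that calculation follows; minix_last_byte() in the real source takes an inode, whereas here the directory size is passed directly and a 4 KiB page is assumed.

/*
 * Sketch of the "last valid byte in this page, plus one" calculation the
 * dir.c comment above describes: only the page containing EOF is partial.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long dir_last_byte(unsigned long dir_size, unsigned long page_nr)
{
        unsigned long last_byte = PAGE_SIZE;

        /* only the page containing EOF is partially filled */
        if (page_nr == dir_size / PAGE_SIZE)
                last_byte = dir_size & (PAGE_SIZE - 1);
        return last_byte;
}

int main(void)
{
        unsigned long dir_size = 2 * PAGE_SIZE + 160;   /* 2 full pages + 160 bytes */

        for (unsigned long n = 0; n <= dir_size / PAGE_SIZE; n++)
                printf("page %lu: valid bytes [0, %lu)\n",
                       n, dir_last_byte(dir_size, n));
        return 0;
}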
/linux-4.1.27/arch/xtensa/include/asm/
H A Dhighmem.h32 static inline int get_pkmap_color(struct page *page) get_pkmap_color() argument
34 return DCACHE_ALIAS(page_to_phys(page)); get_pkmap_color()
66 void *kmap_high(struct page *page);
67 void kunmap_high(struct page *page);
69 static inline void *kmap(struct page *page) kmap() argument
72 if (!PageHighMem(page)) kmap()
73 return page_address(page); kmap()
74 return kmap_high(page); kmap()
77 static inline void kunmap(struct page *page) kunmap() argument
80 if (!PageHighMem(page)) kunmap()
82 kunmap_high(page); kunmap()
90 void *kmap_atomic(struct page *page);
H A Dpage.h2 * include/asm-xtensa/page.h
29 * PAGE_SHIFT determines the page size
49 * If the cache size for one way is greater than the page size, we have to
50 * deal with cache aliasing. The cache index is wider than the page size:
61 * When the page number is translated to the physical page address, the lowest
65 * The kernel does not provide a mechanism to ensure that the page color
68 * the page might also change.
105 typedef struct { unsigned long pte; } pte_t; /* page table entry */
108 typedef struct page *pgtable_t;
138 struct page;
140 extern void clear_page(void *page);
154 void clear_user_highpage(struct page *page, unsigned long vaddr);
156 void copy_user_highpage(struct page *to, struct page *from,
159 # define clear_user_page(page, vaddr, pg) clear_page(page)
182 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
184 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
H A Dpgalloc.h26 #define pmd_populate(mm, pmdp, page) \
27 (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
59 struct page *page; pte_alloc_one() local
64 page = virt_to_page(pte); pte_alloc_one()
65 if (!pgtable_page_ctor(page)) { pte_alloc_one()
66 __free_page(page); pte_alloc_one()
69 return page; pte_alloc_one()
/linux-4.1.27/arch/sparc/include/asm/
H A Dpage.h4 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
H A Dcacheflush_64.h4 #include <asm/page.h>
25 #define flush_cache_page(vma, page, pfn) \
37 void flush_dcache_page_impl(struct page *page);
39 void smp_flush_dcache_page_impl(struct page *page, int cpu);
40 void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
42 #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
43 #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
48 void flush_dcache_page(struct page *page);
53 void flush_ptrace_access(struct vm_area_struct *, struct page *,
57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
61 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
64 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
66 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
68 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
H A Dhighmem.h52 void *kmap_high(struct page *page);
53 void kunmap_high(struct page *page);
55 static inline void *kmap(struct page *page) kmap() argument
58 if (!PageHighMem(page)) kmap()
59 return page_address(page); kmap()
60 return kmap_high(page); kmap()
63 static inline void kunmap(struct page *page) kunmap() argument
66 if (!PageHighMem(page)) kunmap()
68 kunmap_high(page); kunmap()
71 void *kmap_atomic(struct page *page);
H A Dsparsemem.h6 #include <asm/page.h>
H A Dagp.h6 #define map_page_into_agp(page)
7 #define unmap_page_from_agp(page)
H A Dcacheflush_32.h21 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 flush_cache_page(vma, vaddr, page_to_pfn(page));\
26 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
39 void sparc_flush_page_to_ram(struct page *page);
42 #define flush_dcache_page(page) sparc_flush_page_to_ram(page)
/linux-4.1.27/fs/nilfs2/
H A Dpage.c2 * page.c - buffer/page management specific to NILFS
28 #include <linux/page-flags.h>
34 #include "page.h"
43 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, __nilfs_get_page_block() argument
50 if (!page_has_buffers(page)) __nilfs_get_page_block()
51 create_empty_buffers(page, 1 << blkbits, b_state); __nilfs_get_page_block()
54 bh = nilfs_page_get_nth_block(page, block - first_block); __nilfs_get_page_block()
68 struct page *page; nilfs_grab_buffer() local
71 page = grab_cache_page(mapping, index); nilfs_grab_buffer()
72 if (unlikely(!page)) nilfs_grab_buffer()
75 bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); nilfs_grab_buffer()
77 unlock_page(page); nilfs_grab_buffer()
78 page_cache_release(page); nilfs_grab_buffer()
91 struct page *page = bh->b_page; nilfs_forget_buffer() local
99 if (nilfs_page_buffers_clean(page)) nilfs_forget_buffer()
100 __nilfs_clear_page_dirty(page); nilfs_forget_buffer()
103 ClearPageUptodate(page); nilfs_forget_buffer()
104 ClearPageMappedToDisk(page); nilfs_forget_buffer()
118 struct page *spage = sbh->b_page, *dpage = dbh->b_page; nilfs_copy_buffer()
149 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
150 * @page: page to be checked
152 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
155 int nilfs_page_buffers_clean(struct page *page) nilfs_page_buffers_clean() argument
159 bh = head = page_buffers(page); nilfs_page_buffers_clean()
168 void nilfs_page_bug(struct page *page) nilfs_page_bug() argument
173 if (unlikely(!page)) { nilfs_page_bug()
178 m = page->mapping; nilfs_page_bug()
183 page, atomic_read(&page->_count), nilfs_page_bug()
184 (unsigned long long)page->index, page->flags, m, ino); nilfs_page_bug()
186 if (page_has_buffers(page)) { nilfs_page_bug()
190 bh = head = page_buffers(page); nilfs_page_bug()
202 * nilfs_copy_page -- copy the page with buffers
203 * @dst: destination page
204 * @src: source page
205 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
208 * should be treated by caller. The page must not be under i/o.
209 * Both src and dst page must be locked
211 static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty) nilfs_copy_page()
270 struct page *page = pvec.pages[i], *dpage; nilfs_copy_dirty_pages() local
272 lock_page(page); nilfs_copy_dirty_pages()
273 if (unlikely(!PageDirty(page))) nilfs_copy_dirty_pages()
274 NILFS_PAGE_BUG(page, "inconsistent dirty state"); nilfs_copy_dirty_pages()
276 dpage = grab_cache_page(dmap, page->index); nilfs_copy_dirty_pages()
278 /* No empty page is added to the page cache */ nilfs_copy_dirty_pages()
280 unlock_page(page); nilfs_copy_dirty_pages()
283 if (unlikely(!page_has_buffers(page))) nilfs_copy_dirty_pages()
284 NILFS_PAGE_BUG(page, nilfs_copy_dirty_pages()
285 "found empty page in dat page cache"); nilfs_copy_dirty_pages()
287 nilfs_copy_page(dpage, page, 1); nilfs_copy_dirty_pages()
292 unlock_page(page); nilfs_copy_dirty_pages()
304 * @dmap: destination page cache
305 * @smap: source page cache
326 struct page *page = pvec.pages[i], *dpage; nilfs_copy_back_pages() local
327 pgoff_t offset = page->index; nilfs_copy_back_pages()
329 lock_page(page); nilfs_copy_back_pages()
332 /* override existing page on the destination cache */ nilfs_copy_back_pages()
334 nilfs_copy_page(dpage, page, 0); nilfs_copy_back_pages()
338 struct page *page2; nilfs_copy_back_pages()
340 /* move the page to the destination cache */ nilfs_copy_back_pages()
343 WARN_ON(page2 != page); nilfs_copy_back_pages()
349 err = radix_tree_insert(&dmap->page_tree, offset, page); nilfs_copy_back_pages()
352 page->mapping = NULL; nilfs_copy_back_pages()
353 page_cache_release(page); /* for cache */ nilfs_copy_back_pages()
355 page->mapping = dmap; nilfs_copy_back_pages()
357 if (PageDirty(page)) nilfs_copy_back_pages()
364 unlock_page(page); nilfs_copy_back_pages()
388 struct page *page = pvec.pages[i]; nilfs_clear_dirty_pages() local
390 lock_page(page); nilfs_clear_dirty_pages()
391 nilfs_clear_dirty_page(page, silent); nilfs_clear_dirty_pages()
392 unlock_page(page); nilfs_clear_dirty_pages()
400 * nilfs_clear_dirty_page - discard dirty page
401 * @page: dirty page that will be discarded
404 void nilfs_clear_dirty_page(struct page *page, bool silent) nilfs_clear_dirty_page() argument
406 struct inode *inode = page->mapping->host; nilfs_clear_dirty_page()
409 BUG_ON(!PageLocked(page)); nilfs_clear_dirty_page()
413 "discard page: offset %lld, ino %lu", nilfs_clear_dirty_page()
414 page_offset(page), inode->i_ino); nilfs_clear_dirty_page()
417 ClearPageUptodate(page); nilfs_clear_dirty_page()
418 ClearPageMappedToDisk(page); nilfs_clear_dirty_page()
420 if (page_has_buffers(page)) { nilfs_clear_dirty_page()
427 bh = head = page_buffers(page); nilfs_clear_dirty_page()
440 __nilfs_clear_page_dirty(page); nilfs_clear_dirty_page()
443 unsigned nilfs_page_count_clean_buffers(struct page *page, nilfs_page_count_clean_buffers() argument
450 for (bh = head = page_buffers(page), block_start = 0; nilfs_page_count_clean_buffers()
473 * page dirty flags when it copies back pages from the shadow cache
480 int __nilfs_clear_page_dirty(struct page *page) __nilfs_clear_page_dirty() argument
482 struct address_space *mapping = page->mapping; __nilfs_clear_page_dirty()
486 if (test_bit(PG_dirty, &page->flags)) { __nilfs_clear_page_dirty()
488 page_index(page), __nilfs_clear_page_dirty()
491 return clear_page_dirty_for_io(page); __nilfs_clear_page_dirty()
496 return TestClearPageDirty(page); __nilfs_clear_page_dirty()
521 struct page *page; nilfs_find_uncommitted_extent() local
543 page = pvec.pages[i]; nilfs_find_uncommitted_extent()
545 lock_page(page); nilfs_find_uncommitted_extent()
546 if (page_has_buffers(page)) { nilfs_find_uncommitted_extent()
549 bh = head = page_buffers(page); nilfs_find_uncommitted_extent()
567 unlock_page(page); nilfs_find_uncommitted_extent()
571 index = page->index + 1; nilfs_find_uncommitted_extent()
577 unlock_page(page); nilfs_find_uncommitted_extent()
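Several of the nilfs helpers above (nilfs_page_buffers_clean, nilfs_clear_dirty_page, nilfs_page_count_clean_buffers) walk a page's buffer ring with the same idiom: page_buffers() yields the first buffer_head and b_this_page links the heads in a ring, so the loop stops when it returns to the head. A hedged sketch of that idiom on its own (the function name is hypothetical, and the caller is assumed to have checked page_has_buffers()):

#include <linux/buffer_head.h>

static int any_buffer_dirty(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);		/* caller ensures page_has_buffers() */
	do {
		if (buffer_dirty(bh))
			return 1;
	} while ((bh = bh->b_this_page) != head);
	return 0;
}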
H A Ddir.c47 #include "page.h"
58 static inline void nilfs_put_page(struct page *page) nilfs_put_page() argument
60 kunmap(page); nilfs_put_page()
61 page_cache_release(page); nilfs_put_page()
70 * Return the offset into page `page_nr' of the last valid
71 * byte in that page, plus one.
83 static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to) nilfs_prepare_chunk() argument
85 loff_t pos = page_offset(page) + from; nilfs_prepare_chunk()
86 return __block_write_begin(page, pos, to - from, nilfs_get_block); nilfs_prepare_chunk()
89 static void nilfs_commit_chunk(struct page *page, nilfs_commit_chunk() argument
94 loff_t pos = page_offset(page) + from; nilfs_commit_chunk()
99 nr_dirty = nilfs_page_count_clean_buffers(page, from, to); nilfs_commit_chunk()
100 copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); nilfs_commit_chunk()
107 unlock_page(page); nilfs_commit_chunk()
110 static void nilfs_check_page(struct page *page) nilfs_check_page() argument
112 struct inode *dir = page->mapping->host; nilfs_check_page()
115 char *kaddr = page_address(page); nilfs_check_page()
121 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { nilfs_check_page()
144 SetPageChecked(page); nilfs_check_page()
169 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, nilfs_check_page()
176 "entry in directory #%lu spans the page boundary" nilfs_check_page()
178 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, nilfs_check_page()
181 SetPageChecked(page); nilfs_check_page()
182 SetPageError(page); nilfs_check_page()
185 static struct page *nilfs_get_page(struct inode *dir, unsigned long n) nilfs_get_page()
188 struct page *page = read_mapping_page(mapping, n, NULL); nilfs_get_page() local
190 if (!IS_ERR(page)) { nilfs_get_page()
191 kmap(page); nilfs_get_page()
192 if (!PageChecked(page)) nilfs_get_page()
193 nilfs_check_page(page); nilfs_get_page()
194 if (PageError(page)) nilfs_get_page()
197 return page; nilfs_get_page()
200 nilfs_put_page(page); nilfs_get_page()
220 * p is at least 6 bytes before the end of page
275 struct page *page = nilfs_get_page(inode, n); nilfs_readdir() local
277 if (IS_ERR(page)) { nilfs_readdir()
278 nilfs_error(sb, __func__, "bad page in #%lu", nilfs_readdir()
283 kaddr = page_address(page); nilfs_readdir()
291 nilfs_put_page(page); nilfs_readdir()
304 nilfs_put_page(page); nilfs_readdir()
310 nilfs_put_page(page); nilfs_readdir()
319 * returns the page in which the entry was found, and the entry itself
325 struct page **res_page) nilfs_find_entry()
332 struct page *page = NULL; nilfs_find_entry() local
348 page = nilfs_get_page(dir, n); nilfs_find_entry()
349 if (!IS_ERR(page)) { nilfs_find_entry()
350 kaddr = page_address(page); nilfs_find_entry()
357 nilfs_put_page(page); nilfs_find_entry()
364 nilfs_put_page(page); nilfs_find_entry()
368 /* next page is past the blocks we've got */ nilfs_find_entry()
381 *res_page = page; nilfs_find_entry()
386 struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p) nilfs_dotdot()
388 struct page *page = nilfs_get_page(dir, 0); nilfs_dotdot() local
391 if (!IS_ERR(page)) { nilfs_dotdot()
393 (struct nilfs_dir_entry *)page_address(page)); nilfs_dotdot()
394 *p = page; nilfs_dotdot()
403 struct page *page; nilfs_inode_by_name() local
405 de = nilfs_find_entry(dir, qstr, &page); nilfs_inode_by_name()
408 kunmap(page); nilfs_inode_by_name()
409 page_cache_release(page); nilfs_inode_by_name()
414 /* Releases the page */ nilfs_set_link()
416 struct page *page, struct inode *inode) nilfs_set_link()
418 unsigned from = (char *) de - (char *) page_address(page); nilfs_set_link()
420 struct address_space *mapping = page->mapping; nilfs_set_link()
423 lock_page(page); nilfs_set_link()
424 err = nilfs_prepare_chunk(page, from, to); nilfs_set_link()
428 nilfs_commit_chunk(page, mapping, from, to); nilfs_set_link()
429 nilfs_put_page(page); nilfs_set_link()
444 struct page *page = NULL; nilfs_add_link() local
454 * This code plays outside i_size, so it locks the page nilfs_add_link()
460 page = nilfs_get_page(dir, n); nilfs_add_link()
461 err = PTR_ERR(page); nilfs_add_link()
462 if (IS_ERR(page)) nilfs_add_link()
464 lock_page(page); nilfs_add_link()
465 kaddr = page_address(page); nilfs_add_link()
495 unlock_page(page); nilfs_add_link()
496 nilfs_put_page(page); nilfs_add_link()
502 from = (char *)de - (char *)page_address(page); nilfs_add_link()
504 err = nilfs_prepare_chunk(page, from, to); nilfs_add_link()
519 nilfs_commit_chunk(page, page->mapping, from, to); nilfs_add_link()
524 nilfs_put_page(page); nilfs_add_link()
528 unlock_page(page); nilfs_add_link()
534 * previous entry. Page is up-to-date. Releases the page.
536 int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) nilfs_delete_entry() argument
538 struct address_space *mapping = page->mapping; nilfs_delete_entry()
540 char *kaddr = page_address(page); nilfs_delete_entry()
559 from = (char *)pde - (char *)page_address(page); nilfs_delete_entry()
560 lock_page(page); nilfs_delete_entry()
561 err = nilfs_prepare_chunk(page, from, to); nilfs_delete_entry()
566 nilfs_commit_chunk(page, mapping, from, to); nilfs_delete_entry()
569 nilfs_put_page(page); nilfs_delete_entry()
579 struct page *page = grab_cache_page(mapping, 0); nilfs_make_empty() local
585 if (!page) nilfs_make_empty()
588 err = nilfs_prepare_chunk(page, 0, chunk_size); nilfs_make_empty()
590 unlock_page(page); nilfs_make_empty()
593 kaddr = kmap_atomic(page); nilfs_make_empty()
609 nilfs_commit_chunk(page, mapping, 0, chunk_size); nilfs_make_empty()
611 page_cache_release(page); nilfs_make_empty()
620 struct page *page = NULL; nilfs_empty_dir() local
627 page = nilfs_get_page(inode, i); nilfs_empty_dir()
628 if (IS_ERR(page)) nilfs_empty_dir()
631 kaddr = page_address(page); nilfs_empty_dir()
657 nilfs_put_page(page); nilfs_empty_dir()
662 nilfs_put_page(page); nilfs_empty_dir()
415 nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct page *page, struct inode *inode) nilfs_set_link() argument
H A Dpage.h2 * page.h - buffer/page management specific to NILFS
47 int __nilfs_clear_page_dirty(struct page *);
53 int nilfs_page_buffers_clean(struct page *);
54 void nilfs_page_bug(struct page *);
58 void nilfs_clear_dirty_page(struct page *, bool);
61 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
66 #define NILFS_PAGE_BUG(page, m, a...) \
67 do { nilfs_page_bug(page); BUG(); } while (0)
70 nilfs_page_get_nth_block(struct page *page, unsigned int count) nilfs_page_get_nth_block() argument
72 struct buffer_head *bh = page_buffers(page); nilfs_page_get_nth_block()
H A DMakefile2 nilfs2-y := inode.o file.o dir.o super.o namei.o page.o mdt.o \
/linux-4.1.27/fs/jfs/
H A Djfs_metapage.c38 uint pagealloc; /* # of page allocations */
39 uint pagefree; /* # of page frees */
61 unlock_page(mp->page); __lock_metapage()
63 lock_page(mp->page); __lock_metapage()
71 * Must have mp->page locked
92 #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
94 static inline struct metapage *page_to_mp(struct page *page, int offset) page_to_mp() argument
96 if (!PagePrivate(page)) page_to_mp()
98 return mp_anchor(page)->mp[offset >> L2PSIZE]; page_to_mp()
101 static inline int insert_metapage(struct page *page, struct metapage *mp) insert_metapage() argument
107 if (PagePrivate(page)) insert_metapage()
108 a = mp_anchor(page); insert_metapage()
113 set_page_private(page, (unsigned long)a); insert_metapage()
114 SetPagePrivate(page); insert_metapage()
115 kmap(page); insert_metapage()
119 l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; insert_metapage()
128 static inline void remove_metapage(struct page *page, struct metapage *mp) remove_metapage() argument
130 struct meta_anchor *a = mp_anchor(page); remove_metapage()
131 int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; remove_metapage()
141 set_page_private(page, 0); remove_metapage()
142 ClearPagePrivate(page); remove_metapage()
143 kunmap(page); remove_metapage()
147 static inline void inc_io(struct page *page) inc_io() argument
149 atomic_inc(&mp_anchor(page)->io_count); inc_io()
152 static inline void dec_io(struct page *page, void (*handler) (struct page *)) dec_io() argument
154 if (atomic_dec_and_test(&mp_anchor(page)->io_count)) dec_io()
155 handler(page); dec_io()
159 static inline struct metapage *page_to_mp(struct page *page, int offset) page_to_mp() argument
161 return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL; page_to_mp()
164 static inline int insert_metapage(struct page *page, struct metapage *mp) insert_metapage() argument
167 set_page_private(page, (unsigned long)mp); insert_metapage()
168 SetPagePrivate(page); insert_metapage()
169 kmap(page); insert_metapage()
174 static inline void remove_metapage(struct page *page, struct metapage *mp) remove_metapage() argument
176 set_page_private(page, 0); remove_metapage()
177 ClearPagePrivate(page); remove_metapage()
178 kunmap(page); remove_metapage()
181 #define inc_io(page) do {} while(0)
182 #define dec_io(page, handler) handler(page)
233 static inline void drop_metapage(struct page *page, struct metapage *mp) drop_metapage() argument
238 remove_metapage(page, mp); drop_metapage()
272 static void last_read_complete(struct page *page) last_read_complete() argument
274 if (!PageError(page)) last_read_complete()
275 SetPageUptodate(page); last_read_complete()
276 unlock_page(page); last_read_complete()
281 struct page *page = bio->bi_private; metapage_read_end_io() local
285 SetPageError(page); metapage_read_end_io()
288 dec_io(page, last_read_complete); metapage_read_end_io()
314 static void last_write_complete(struct page *page) last_write_complete() argument
320 mp = page_to_mp(page, offset); last_write_complete()
328 * safe unless I have the page locked last_write_complete()
331 end_page_writeback(page); last_write_complete()
336 struct page *page = bio->bi_private; metapage_write_end_io() local
338 BUG_ON(!PagePrivate(page)); metapage_write_end_io()
342 SetPageError(page); metapage_write_end_io()
344 dec_io(page, last_write_complete); metapage_write_end_io()
348 static int metapage_writepage(struct page *page, struct writeback_control *wbc) metapage_writepage() argument
351 int block_offset; /* block offset of mp within page */ metapage_writepage()
352 struct inode *inode = page->mapping->host; metapage_writepage()
368 page_start = (sector_t)page->index << metapage_writepage()
370 BUG_ON(!PageLocked(page)); metapage_writepage()
371 BUG_ON(PageWriteback(page)); metapage_writepage()
372 set_page_writeback(page); metapage_writepage()
375 mp = page_to_mp(page, offset); metapage_writepage()
383 * Make sure this page isn't blocked indefinitely. metapage_writepage()
404 if (bio_add_page(bio, page, bio_bytes, bio_offset) < metapage_writepage()
411 inc_io(page); metapage_writepage()
418 inc_io(page); metapage_writepage()
425 * with dec_io() until we're done with the page metapage_writepage()
436 bio->bi_private = page; metapage_writepage()
446 if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) metapage_writepage()
455 redirty_page_for_writepage(wbc, page); metapage_writepage()
457 unlock_page(page); metapage_writepage()
463 end_page_writeback(page); metapage_writepage()
475 unlock_page(page); metapage_writepage()
476 dec_io(page, last_write_complete); metapage_writepage()
479 dec_io(page, last_write_complete); metapage_writepage()
483 static int metapage_readpage(struct file *fp, struct page *page) metapage_readpage() argument
485 struct inode *inode = page->mapping->host; metapage_readpage()
489 sector_t page_start; /* address of page in fs blocks */ metapage_readpage()
495 BUG_ON(!PageLocked(page)); metapage_readpage()
496 page_start = (sector_t)page->index << metapage_readpage()
505 if (!PagePrivate(page)) metapage_readpage()
506 insert_metapage(page, NULL); metapage_readpage()
507 inc_io(page); metapage_readpage()
516 bio->bi_private = page; metapage_readpage()
519 if (bio_add_page(bio, page, len, offset) < len) metapage_readpage()
528 unlock_page(page); metapage_readpage()
535 dec_io(page, last_read_complete); metapage_readpage()
539 static int metapage_releasepage(struct page *page, gfp_t gfp_mask) metapage_releasepage() argument
546 mp = page_to_mp(page, offset); metapage_releasepage()
561 remove_metapage(page, mp); metapage_releasepage()
568 static void metapage_invalidatepage(struct page *page, unsigned int offset, metapage_invalidatepage() argument
573 BUG_ON(PageWriteback(page)); metapage_invalidatepage()
575 metapage_releasepage(page, 0); metapage_invalidatepage()
594 struct page *page; __get_metapage() local
606 jfs_err("MetaData crosses page boundary!!"); __get_metapage()
625 page = grab_cache_page(mapping, page_index); __get_metapage()
626 if (!page) { __get_metapage()
630 SetPageUptodate(page); __get_metapage()
632 page = read_mapping_page(mapping, page_index, NULL); __get_metapage()
633 if (IS_ERR(page) || !PageUptodate(page)) { __get_metapage()
637 lock_page(page); __get_metapage()
640 mp = page_to_mp(page, page_offset); __get_metapage()
664 mp->page = page; __get_metapage()
670 mp->data = page_address(page) + page_offset; __get_metapage()
672 if (unlikely(insert_metapage(page, mp))) { __get_metapage()
684 unlock_page(page); __get_metapage()
689 unlock_page(page); __get_metapage()
696 page_cache_get(mp->page); grab_metapage()
697 lock_page(mp->page); grab_metapage()
700 unlock_page(mp->page); grab_metapage()
705 struct page *page = mp->page; force_metapage() local
709 page_cache_get(page); force_metapage()
710 lock_page(page); force_metapage()
711 set_page_dirty(page); force_metapage()
712 write_one_page(page, 1); force_metapage()
714 page_cache_release(page); force_metapage()
719 lock_page(mp->page); hold_metapage()
726 unlock_page(mp->page); put_metapage()
729 page_cache_get(mp->page); put_metapage()
732 unlock_page(mp->page); put_metapage()
738 struct page *page = mp->page; release_metapage() local
741 BUG_ON(!page); release_metapage()
743 lock_page(page); release_metapage()
748 unlock_page(page); release_metapage()
749 page_cache_release(page); release_metapage()
754 set_page_dirty(page); release_metapage()
757 write_one_page(page, 1); release_metapage()
758 lock_page(page); /* write_one_page unlocks the page */ release_metapage()
764 drop_metapage(page, mp); release_metapage()
766 unlock_page(page); release_metapage()
767 page_cache_release(page); release_metapage()
779 struct page *page; __invalidate_metapages() local
788 page = find_lock_page(mapping, lblock >> l2BlocksPerPage); __invalidate_metapages()
789 if (!page) __invalidate_metapages()
792 mp = page_to_mp(page, offset); __invalidate_metapages()
805 unlock_page(page); __invalidate_metapages()
806 page_cache_release(page); __invalidate_metapages()
816 "page allocations = %d\n" jfs_mpstat_proc_show()
817 "page frees = %d\n" jfs_mpstat_proc_show()
/linux-4.1.27/fs/exofs/
H A Ddir.c41 static inline void exofs_put_page(struct page *page) exofs_put_page() argument
43 kunmap(page); exofs_put_page()
44 page_cache_release(page); exofs_put_page()
63 static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len) exofs_commit_chunk() argument
65 struct address_space *mapping = page->mapping; exofs_commit_chunk()
71 if (!PageUptodate(page)) exofs_commit_chunk()
72 SetPageUptodate(page); exofs_commit_chunk()
78 set_page_dirty(page); exofs_commit_chunk()
81 err = write_one_page(page, 1); exofs_commit_chunk()
83 unlock_page(page); exofs_commit_chunk()
88 static void exofs_check_page(struct page *page) exofs_check_page() argument
90 struct inode *dir = page->mapping->host; exofs_check_page()
92 char *kaddr = page_address(page); exofs_check_page()
98 /* if the page is the last one in the directory */ exofs_check_page()
99 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { exofs_check_page()
122 SetPageChecked(page); exofs_check_page()
147 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, exofs_check_page()
154 "entry in directory(0x%lx) spans the page boundary" exofs_check_page()
156 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, exofs_check_page()
159 SetPageChecked(page); exofs_check_page()
160 SetPageError(page); exofs_check_page()
163 static struct page *exofs_get_page(struct inode *dir, unsigned long n) exofs_get_page()
166 struct page *page = read_mapping_page(mapping, n, NULL); exofs_get_page() local
168 if (!IS_ERR(page)) { exofs_get_page()
169 kmap(page); exofs_get_page()
170 if (!PageChecked(page)) exofs_get_page()
171 exofs_check_page(page); exofs_get_page()
172 if (PageError(page)) exofs_get_page()
175 return page; exofs_get_page()
178 exofs_put_page(page); exofs_get_page()
258 struct page *page = exofs_get_page(inode, n); exofs_readdir() local
260 if (IS_ERR(page)) { exofs_readdir()
261 EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", exofs_readdir()
264 return PTR_ERR(page); exofs_readdir()
266 kaddr = page_address(page); exofs_readdir()
284 exofs_put_page(page); exofs_readdir()
298 exofs_put_page(page); exofs_readdir()
304 exofs_put_page(page); exofs_readdir()
310 struct dentry *dentry, struct page **res_page) exofs_find_entry()
317 struct page *page = NULL; exofs_find_entry() local
332 page = exofs_get_page(dir, n); exofs_find_entry()
333 if (!IS_ERR(page)) { exofs_find_entry()
334 kaddr = page_address(page); exofs_find_entry()
342 exofs_put_page(page); exofs_find_entry()
349 exofs_put_page(page); exofs_find_entry()
358 *res_page = page; exofs_find_entry()
363 struct exofs_dir_entry *exofs_dotdot(struct inode *dir, struct page **p) exofs_dotdot()
365 struct page *page = exofs_get_page(dir, 0); exofs_dotdot() local
368 if (!IS_ERR(page)) { exofs_dotdot()
370 (struct exofs_dir_entry *)page_address(page)); exofs_dotdot()
371 *p = page; exofs_dotdot()
378 struct page *page; exofs_parent_ino() local
382 de = exofs_dotdot(d_inode(child), &page); exofs_parent_ino()
387 exofs_put_page(page); exofs_parent_ino()
395 struct page *page; exofs_inode_by_name() local
397 de = exofs_find_entry(dir, dentry, &page); exofs_inode_by_name()
400 exofs_put_page(page); exofs_inode_by_name()
406 struct page *page, struct inode *inode) exofs_set_link()
408 loff_t pos = page_offset(page) + exofs_set_link()
409 (char *) de - (char *) page_address(page); exofs_set_link()
413 lock_page(page); exofs_set_link()
414 err = exofs_write_begin(NULL, page->mapping, pos, len, exofs_set_link()
415 AOP_FLAG_UNINTERRUPTIBLE, &page, NULL); exofs_set_link()
423 err = exofs_commit_chunk(page, pos, len); exofs_set_link()
424 exofs_put_page(page); exofs_set_link()
438 struct page *page = NULL; exofs_add_link() local
450 page = exofs_get_page(dir, n); exofs_add_link()
451 err = PTR_ERR(page); exofs_add_link()
452 if (IS_ERR(page)) exofs_add_link()
454 lock_page(page); exofs_add_link()
455 kaddr = page_address(page); exofs_add_link()
485 unlock_page(page); exofs_add_link()
486 exofs_put_page(page); exofs_add_link()
494 pos = page_offset(page) + exofs_add_link()
495 (char *)de - (char *)page_address(page); exofs_add_link()
496 err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0, exofs_add_link()
497 &page, NULL); exofs_add_link()
511 err = exofs_commit_chunk(page, pos, rec_len); exofs_add_link()
517 exofs_put_page(page); exofs_add_link()
521 unlock_page(page); exofs_add_link()
525 int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page) exofs_delete_entry() argument
527 struct address_space *mapping = page->mapping; exofs_delete_entry()
530 char *kaddr = page_address(page); exofs_delete_entry()
550 from = (char *)pde - (char *)page_address(page); exofs_delete_entry()
551 pos = page_offset(page) + from; exofs_delete_entry()
552 lock_page(page); exofs_delete_entry()
553 err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0, exofs_delete_entry()
554 &page, NULL); exofs_delete_entry()
562 err = exofs_commit_chunk(page, pos, to - from); exofs_delete_entry()
567 exofs_put_page(page); exofs_delete_entry()
578 struct page *page = grab_cache_page(mapping, 0); exofs_make_empty() local
584 if (!page) exofs_make_empty()
587 err = exofs_write_begin(NULL, page->mapping, 0, chunk_size, 0, exofs_make_empty()
588 &page, NULL); exofs_make_empty()
590 unlock_page(page); exofs_make_empty()
594 kaddr = kmap_atomic(page); exofs_make_empty()
609 err = exofs_commit_chunk(page, 0, chunk_size); exofs_make_empty()
611 page_cache_release(page); exofs_make_empty()
617 struct page *page = NULL; exofs_empty_dir() local
623 page = exofs_get_page(inode, i); exofs_empty_dir()
625 if (IS_ERR(page)) exofs_empty_dir()
628 kaddr = page_address(page); exofs_empty_dir()
654 exofs_put_page(page); exofs_empty_dir()
659 exofs_put_page(page); exofs_empty_dir()
405 exofs_set_link(struct inode *dir, struct exofs_dir_entry *de, struct page *page, struct inode *inode) exofs_set_link() argument
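exofs_get_page()/exofs_put_page() above follow the same read, map, use, release shape as the nilfs directory code earlier in these results. A minimal sketch of that shape using only calls visible in the excerpts (the wrapper name and the entry walk are placeholders):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/err.h>

static int with_dir_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);
	char *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);
	kaddr = kmap(page);
	/* ... parse directory entries starting at kaddr ... */
	kunmap(page);
	page_cache_release(page);
	return 0;
}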
/linux-4.1.27/fs/logfs/
H A Dfile.c14 struct page **pagep, void **fsdata) logfs_write_begin()
17 struct page *page; logfs_write_begin() local
20 page = grab_cache_page_write_begin(mapping, index, flags); logfs_write_begin()
21 if (!page) logfs_write_begin()
23 *pagep = page; logfs_write_begin()
25 if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) logfs_write_begin()
32 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); logfs_write_begin()
35 return logfs_readpage_nolock(page); logfs_write_begin()
39 loff_t pos, unsigned len, unsigned copied, struct page *page, logfs_write_end()
43 pgoff_t index = page->index; logfs_write_end()
49 BUG_ON(page->index > I3_BLOCKS); logfs_write_end()
54 * to retry the entire page. logfs_write_end()
56 if (!PageUptodate(page)) { logfs_write_end()
69 SetPageUptodate(page); logfs_write_end()
70 if (!PageDirty(page)) { logfs_write_end()
71 if (!get_page_reserve(inode, page)) logfs_write_end()
72 __set_page_dirty_nobuffers(page); logfs_write_end()
74 ret = logfs_write_buf(inode, page, WF_LOCK); logfs_write_end()
77 unlock_page(page); logfs_write_end()
78 page_cache_release(page); logfs_write_end()
82 int logfs_readpage(struct file *file, struct page *page) logfs_readpage() argument
86 ret = logfs_readpage_nolock(page); logfs_readpage()
87 unlock_page(page); logfs_readpage()
91 /* Clear the page's dirty flag in the radix tree. */
94 * for page writeback to finish (i.e. any compressing filesystem).
96 static void clear_radix_tree_dirty(struct page *page) clear_radix_tree_dirty() argument
98 BUG_ON(PagePrivate(page) || page->private); clear_radix_tree_dirty()
99 set_page_writeback(page); clear_radix_tree_dirty()
100 end_page_writeback(page); clear_radix_tree_dirty()
103 static int __logfs_writepage(struct page *page) __logfs_writepage() argument
105 struct inode *inode = page->mapping->host; __logfs_writepage()
108 err = logfs_write_buf(inode, page, WF_LOCK); __logfs_writepage()
110 set_page_dirty(page); __logfs_writepage()
112 clear_radix_tree_dirty(page); __logfs_writepage()
113 unlock_page(page); __logfs_writepage()
117 static int logfs_writepage(struct page *page, struct writeback_control *wbc) logfs_writepage() argument
119 struct inode *inode = page->mapping->host; logfs_writepage()
126 log_file("logfs_writepage(%lx, %lx, %p)\n", inode->i_ino, page->index, logfs_writepage()
127 page); logfs_writepage()
129 logfs_unpack_index(page->index, &bix, &level); logfs_writepage()
133 return __logfs_writepage(page); logfs_writepage()
140 /* Is the page fully inside i_size? */ logfs_writepage()
142 return __logfs_writepage(page); logfs_writepage()
144 /* Is the page fully outside i_size? (truncate in progress) */ logfs_writepage()
147 unlock_page(page); logfs_writepage()
152 * The page straddles i_size. It must be zeroed out on each and every logfs_writepage()
154 * in multiples of the page size. For a file that is not a multiple of logfs_writepage()
155 * the page size, the remaining memory is zeroed when mapped, and logfs_writepage()
158 zero_user_segment(page, offset, PAGE_CACHE_SIZE); logfs_writepage()
159 return __logfs_writepage(page); logfs_writepage()
162 static void logfs_invalidatepage(struct page *page, unsigned int offset, logfs_invalidatepage() argument
165 struct logfs_block *block = logfs_block(page); logfs_invalidatepage()
168 struct super_block *sb = page->mapping->host->i_sb; logfs_invalidatepage()
175 move_page_to_btree(page); logfs_invalidatepage()
176 BUG_ON(PagePrivate(page) || page->private); logfs_invalidatepage()
179 static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this) logfs_releasepage() argument
38 logfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) logfs_write_end() argument
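logfs_writepage() above distinguishes pages fully inside i_size, fully outside it (a racing truncate), and pages that straddle i_size, which get their tail zeroed before writing. The sketch below spells out that bounds check with the PAGE_CACHE_* names used throughout these results; the function name and return convention are illustrative, not the real logfs code.

/* Hypothetical: returns 1 if the page should be written, 0 if skipped. */
static int classify_page_vs_isize(struct page *page, loff_t i_size)
{
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

	if (page->index < end_index)
		return 1;			/* fully inside i_size: write it all */
	if (page->index > end_index || offset == 0)
		return 0;			/* fully outside i_size: skip */
	/* straddles i_size: zero the tail beyond EOF before writing */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return 1;
}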
H A Dreadwrite.c52 * set, the actual block index (bix) and level can be derived from the page
205 static void prelock_page(struct super_block *sb, struct page *page, int lock) prelock_page() argument
209 BUG_ON(!PageLocked(page)); prelock_page()
211 BUG_ON(PagePreLocked(page)); prelock_page()
212 SetPagePreLocked(page); prelock_page()
215 if (PagePreLocked(page)) prelock_page()
218 SetPagePreLocked(page); prelock_page()
222 static void preunlock_page(struct super_block *sb, struct page *page, int lock) preunlock_page() argument
226 BUG_ON(!PageLocked(page)); preunlock_page()
228 ClearPagePreLocked(page); preunlock_page()
231 BUG_ON(!PagePreLocked(page)); preunlock_page()
235 ClearPagePreLocked(page); preunlock_page()
241 * s_write_mutex with a locked page and GC tries to get that page while holding
243 * To solve this issue logfs will ignore the page lock iff the page in question
247 void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock) logfs_get_wblocks() argument
251 if (page) logfs_get_wblocks()
252 prelock_page(sb, page, lock); logfs_get_wblocks()
262 void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock) logfs_put_wblocks() argument
266 if (page) logfs_put_wblocks()
267 preunlock_page(sb, page, lock); logfs_put_wblocks()
274 static struct page *logfs_get_read_page(struct inode *inode, u64 bix, logfs_get_read_page()
281 static void logfs_put_read_page(struct page *page) logfs_put_read_page() argument
283 unlock_page(page); logfs_put_read_page()
284 page_cache_release(page); logfs_put_read_page()
287 static void logfs_lock_write_page(struct page *page) logfs_lock_write_page() argument
291 while (unlikely(!trylock_page(page))) { logfs_lock_write_page()
297 if (PagePreLocked(page)) { logfs_lock_write_page()
298 /* Holder of page lock is waiting for us, it logfs_lock_write_page()
299 * is safe to use this page. */ logfs_lock_write_page()
302 /* Some other process has this page locked and has logfs_lock_write_page()
307 BUG_ON(!PageLocked(page)); logfs_lock_write_page()
310 static struct page *logfs_get_write_page(struct inode *inode, u64 bix, logfs_get_write_page()
315 struct page *page; logfs_get_write_page() local
319 page = find_get_page(mapping, index); logfs_get_write_page()
320 if (!page) { logfs_get_write_page()
321 page = __page_cache_alloc(GFP_NOFS); logfs_get_write_page()
322 if (!page) logfs_get_write_page()
324 err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS); logfs_get_write_page()
326 page_cache_release(page); logfs_get_write_page()
331 } else logfs_lock_write_page(page); logfs_get_write_page()
332 BUG_ON(!PageLocked(page)); logfs_get_write_page()
333 return page; logfs_get_write_page()
336 static void logfs_unlock_write_page(struct page *page) logfs_unlock_write_page() argument
338 if (!PagePreLocked(page)) logfs_unlock_write_page()
339 unlock_page(page); logfs_unlock_write_page()
342 static void logfs_put_write_page(struct page *page) logfs_put_write_page() argument
344 logfs_unlock_write_page(page); logfs_put_write_page()
345 page_cache_release(page); logfs_put_write_page()
348 static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level, logfs_get_page()
357 static void logfs_put_page(struct page *page, int rw) logfs_put_page() argument
360 logfs_put_read_page(page); logfs_put_page()
362 logfs_put_write_page(page); logfs_put_page()
391 struct page *page; indirect_write_block() local
395 page = block->page; indirect_write_block()
396 inode = page->mapping->host; indirect_write_block()
397 logfs_lock_write_page(page); indirect_write_block()
398 ret = logfs_write_buf(inode, page, 0); indirect_write_block()
399 logfs_unlock_write_page(page); indirect_write_block()
507 struct page *page = block->page; indirect_write_alias() local
518 ino = page->mapping->host->i_ino; indirect_write_alias()
519 logfs_unpack_index(page->index, &bix, &level); indirect_write_alias()
520 child = kmap_atomic(page); indirect_write_alias()
561 struct page *page = block->page; indirect_free_block() local
563 if (PagePrivate(page)) { indirect_free_block()
564 ClearPagePrivate(page); indirect_free_block()
565 page_cache_release(page); indirect_free_block()
566 set_page_private(page, 0); indirect_free_block()
615 void initialize_block_counters(struct page *page, struct logfs_block *block, initialize_block_counters() argument
624 if (page->index < first_indirect_block()) { initialize_block_counters()
628 if (page->index == first_indirect_block()) { initialize_block_counters()
644 static void alloc_data_block(struct inode *inode, struct page *page) alloc_data_block() argument
650 if (PagePrivate(page)) alloc_data_block()
653 logfs_unpack_index(page->index, &bix, &level); alloc_data_block()
655 block->page = page; alloc_data_block()
657 SetPagePrivate(page); alloc_data_block()
658 page_cache_get(page); alloc_data_block()
659 set_page_private(page, (unsigned long) block); alloc_data_block()
664 static void alloc_indirect_block(struct inode *inode, struct page *page, alloc_indirect_block() argument
670 if (PagePrivate(page)) alloc_indirect_block()
673 alloc_data_block(inode, page); alloc_indirect_block()
675 block = logfs_block(page); alloc_indirect_block()
676 array = kmap_atomic(page); alloc_indirect_block()
677 initialize_block_counters(page, block, array, page_is_empty); alloc_indirect_block()
681 static void block_set_pointer(struct page *page, int index, u64 ptr) block_set_pointer() argument
683 struct logfs_block *block = logfs_block(page); block_set_pointer()
688 array = kmap_atomic(page); block_set_pointer()
692 SetPageUptodate(page); block_set_pointer()
699 static u64 block_get_pointer(struct page *page, int index) block_get_pointer() argument
704 block = kmap_atomic(page); block_get_pointer()
710 static int logfs_read_empty(struct page *page) logfs_read_empty() argument
712 zero_user_segment(page, 0, PAGE_CACHE_SIZE); logfs_read_empty()
716 static int logfs_read_direct(struct inode *inode, struct page *page) logfs_read_direct() argument
719 pgoff_t index = page->index; logfs_read_direct()
724 return logfs_read_empty(page); logfs_read_direct()
726 return logfs_segment_read(inode, page, block, index, 0); logfs_read_direct()
729 static int logfs_read_loop(struct inode *inode, struct page *page, logfs_read_loop() argument
736 struct page *ipage; logfs_read_loop()
738 logfs_unpack_index(page->index, &bix, &target_level); logfs_read_loop()
740 return logfs_read_empty(page); logfs_read_loop()
743 return logfs_read_empty(page); logfs_read_loop()
761 return logfs_read_empty(page); logfs_read_loop()
764 return logfs_segment_read(inode, page, bofs, bix, 0); logfs_read_loop()
767 static int logfs_read_block(struct inode *inode, struct page *page, logfs_read_block() argument
770 pgoff_t index = page->index; logfs_read_block()
773 return logfs_read_direct(inode, page); logfs_read_block()
774 return logfs_read_loop(inode, page, rw_context); logfs_read_block()
783 struct page *ipage; logfs_exist_loop()
836 struct page *page; seek_holedata_loop() local
842 page = logfs_get_read_page(inode, bix, level); seek_holedata_loop()
843 if (!page) seek_holedata_loop()
846 ret = logfs_segment_read(inode, page, bofs, bix, level); seek_holedata_loop()
848 logfs_put_read_page(page); seek_holedata_loop()
853 rblock = kmap_atomic(page); seek_holedata_loop()
865 logfs_put_read_page(page); seek_holedata_loop()
870 logfs_put_read_page(page); seek_holedata_loop()
967 struct page *page; __logfs_is_valid_loop() local
970 page = logfs_get_write_page(inode, bix, level); __logfs_is_valid_loop()
971 BUG_ON(!page); __logfs_is_valid_loop()
973 ret = logfs_segment_read(inode, page, bofs, bix, level); __logfs_is_valid_loop()
975 logfs_put_write_page(page); __logfs_is_valid_loop()
979 bofs = block_get_pointer(page, get_bits(bix, SUBLEVEL(level))); __logfs_is_valid_loop()
980 logfs_put_write_page(page); __logfs_is_valid_loop()
1063 int logfs_readpage_nolock(struct page *page) logfs_readpage_nolock() argument
1065 struct inode *inode = page->mapping->host; logfs_readpage_nolock()
1068 ret = logfs_read_block(inode, page, READ); logfs_readpage_nolock()
1071 ClearPageUptodate(page); logfs_readpage_nolock()
1072 SetPageError(page); logfs_readpage_nolock()
1074 SetPageUptodate(page); logfs_readpage_nolock()
1075 ClearPageError(page); logfs_readpage_nolock()
1077 flush_dcache_page(page); logfs_readpage_nolock()
1101 int get_page_reserve(struct inode *inode, struct page *page) get_page_reserve() argument
1104 struct logfs_block *block = logfs_block(page); get_page_reserve()
1110 logfs_get_wblocks(inode->i_sb, page, WF_LOCK); get_page_reserve()
1118 alloc_data_block(inode, page); get_page_reserve()
1119 block = logfs_block(page); get_page_reserve()
1124 logfs_put_wblocks(inode->i_sb, page, WF_LOCK); get_page_reserve()
1253 * @inode: Inode owning the page
1254 * @page: Struct page that was written
1270 * the page's tree, in case it was an indirect block. If a page is
1272 * the page parameter is left NULL.
1274 static void fill_shadow_tree(struct inode *inode, struct page *page, fill_shadow_tree() argument
1278 struct logfs_block *block = logfs_block(page); fill_shadow_tree()
1281 if (PagePrivate(page)) { fill_shadow_tree()
1340 static int logfs_write_i0(struct inode *inode, struct page *page, logfs_write_i0() argument
1348 logfs_unpack_index(page->index, &bix, &level); logfs_write_i0()
1355 err = logfs_segment_write(inode, page, shadow); logfs_write_i0()
1366 alloc_indirect_block(inode, page, 0); logfs_write_i0()
1367 full = logfs_block(page)->full == LOGFS_BLOCK_FACTOR; logfs_write_i0()
1369 fill_shadow_tree(inode, page, shadow); logfs_write_i0()
1376 static int logfs_write_direct(struct inode *inode, struct page *page, logfs_write_direct() argument
1381 .ofs = li->li_data[page->index], logfs_write_direct()
1388 err = logfs_write_i0(inode, page, &wc); logfs_write_direct()
1392 li->li_data[page->index] = wc.ofs; logfs_write_direct()
1394 page->index + INODE_POINTER_OFS); logfs_write_direct()
1398 static int ptr_change(u64 ofs, struct page *page) ptr_change() argument
1400 struct logfs_block *block = logfs_block(page); ptr_change()
1416 static int __logfs_write_rec(struct inode *inode, struct page *page, __logfs_write_rec() argument
1422 struct page *ipage; __logfs_write_rec()
1443 ret = __logfs_write_rec(inode, page, &child_wc, bix, __logfs_write_rec()
1446 ret = logfs_write_i0(inode, page, &child_wc); __logfs_write_rec()
1467 static int logfs_write_rec(struct inode *inode, struct page *page, logfs_write_rec() argument
1480 ret = __logfs_write_rec(inode, page, &wc, bix, target_level, logfs_write_rec()
1483 ret = logfs_write_i0(inode, page, &wc); logfs_write_rec()
1513 struct page *page; grow_inode() local
1521 page = logfs_get_write_page(inode, I0_BLOCKS + 1, grow_inode()
1523 if (!page) grow_inode()
1525 logfs_read_empty(page); grow_inode()
1526 alloc_indirect_block(inode, page, 1); grow_inode()
1527 block_set_pointer(page, 0, li->li_data[INDIRECT_INDEX]); grow_inode()
1528 err = logfs_write_i0(inode, page, &wc); grow_inode()
1529 logfs_put_write_page(page); grow_inode()
1540 static int __logfs_write_buf(struct inode *inode, struct page *page, long flags) __logfs_write_buf() argument
1543 pgoff_t index = page->index; __logfs_write_buf()
1552 if (logfs_block(page) && logfs_block(page)->reserved_bytes) __logfs_write_buf()
1553 super->s_dirty_pages -= logfs_block(page)->reserved_bytes; __logfs_write_buf()
1556 return logfs_write_direct(inode, page, flags); __logfs_write_buf()
1562 return logfs_write_rec(inode, page, bix, level, flags); __logfs_write_buf()
1565 int logfs_write_buf(struct inode *inode, struct page *page, long flags) logfs_write_buf() argument
1570 logfs_get_wblocks(sb, page, flags & WF_LOCK); logfs_write_buf()
1571 ret = __logfs_write_buf(inode, page, flags); logfs_write_buf()
1572 logfs_put_wblocks(sb, page, flags & WF_LOCK); logfs_write_buf()
1576 static int __logfs_delete(struct inode *inode, struct page *page) __logfs_delete() argument
1583 if (page->index < I0_BLOCKS) __logfs_delete()
1584 return logfs_write_direct(inode, page, flags); __logfs_delete()
1585 err = grow_inode(inode, page->index, 0); __logfs_delete()
1588 return logfs_write_rec(inode, page, page->index, 0, flags); __logfs_delete()
1595 struct page *page; logfs_delete() local
1598 page = logfs_get_read_page(inode, index, 0); logfs_delete()
1599 if (!page) logfs_delete()
1602 logfs_get_wblocks(sb, page, 1); logfs_delete()
1603 ret = __logfs_delete(inode, page); logfs_delete()
1604 logfs_put_wblocks(sb, page, 1); logfs_delete()
1606 logfs_put_read_page(page); logfs_delete()
1615 struct page *page; logfs_rewrite_block() local
1618 page = logfs_get_write_page(inode, bix, level); logfs_rewrite_block()
1619 if (!page) logfs_rewrite_block()
1622 err = logfs_segment_read(inode, page, ofs, bix, level); logfs_rewrite_block()
1625 alloc_indirect_block(inode, page, 0); logfs_rewrite_block()
1626 err = logfs_write_buf(inode, page, flags); logfs_rewrite_block()
1636 err = __logfs_write_inode(inode, page, flags); logfs_rewrite_block()
1640 logfs_put_write_page(page); logfs_rewrite_block()
1644 static int truncate_data_block(struct inode *inode, struct page *page, truncate_data_block() argument
1647 loff_t pageofs = page->index << inode->i_sb->s_blocksize_bits; truncate_data_block()
1652 /* Does truncation happen within this page? */ truncate_data_block()
1656 logfs_unpack_index(page->index, &bix, &level); truncate_data_block()
1659 err = logfs_segment_read(inode, page, ofs, bix, level); truncate_data_block()
1663 zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE); truncate_data_block()
1664 return logfs_segment_write(inode, page, shadow); truncate_data_block()
1667 static int logfs_truncate_i0(struct inode *inode, struct page *page, logfs_truncate_i0() argument
1675 logfs_unpack_index(page->index, &bix, &level); logfs_truncate_i0()
1679 err = truncate_data_block(inode, page, wc->ofs, shadow, size); logfs_truncate_i0()
1687 fill_shadow_tree(inode, page, shadow); logfs_truncate_i0()
1696 struct page *page; logfs_truncate_direct() local
1710 page = logfs_get_write_page(inode, e, 0); logfs_truncate_direct()
1711 if (!page) logfs_truncate_direct()
1713 err = logfs_segment_read(inode, page, wc.ofs, e, 0); logfs_truncate_direct()
1715 logfs_put_write_page(page); logfs_truncate_direct()
1718 err = logfs_truncate_i0(inode, page, &wc, size); logfs_truncate_direct()
1719 logfs_put_write_page(page); logfs_truncate_direct()
1765 static int __logfs_truncate_rec(struct inode *inode, struct page *ipage, __logfs_truncate_rec()
1772 struct page *page; __logfs_truncate_rec() local
1790 page = logfs_get_write_page(inode, child_bix, SUBLEVEL(level)); __logfs_truncate_rec()
1791 if (!page) __logfs_truncate_rec()
1795 err = __logfs_truncate_rec(inode, page, &child_wc, size); __logfs_truncate_rec()
1797 err = logfs_truncate_i0(inode, page, &child_wc, size); __logfs_truncate_rec()
1798 logfs_put_write_page(page); __logfs_truncate_rec()
1825 struct page *page; logfs_truncate_rec() local
1833 page = logfs_get_write_page(inode, 0, LEVEL(li->li_height)); logfs_truncate_rec()
1834 if (!page) logfs_truncate_rec()
1837 err = __logfs_truncate_rec(inode, page, &wc, size); logfs_truncate_rec()
1838 logfs_put_write_page(page); logfs_truncate_rec()
1904 static void move_page_to_inode(struct inode *inode, struct page *page) move_page_to_inode() argument
1907 struct logfs_block *block = logfs_block(page); move_page_to_inode()
1919 block->page = NULL; move_page_to_inode()
1920 if (PagePrivate(page)) { move_page_to_inode()
1921 ClearPagePrivate(page); move_page_to_inode()
1922 page_cache_release(page); move_page_to_inode()
1923 set_page_private(page, 0); move_page_to_inode()
1927 static void move_inode_to_page(struct page *page, struct inode *inode) move_inode_to_page() argument
1937 BUG_ON(PagePrivate(page)); move_inode_to_page()
1939 block->page = page; move_inode_to_page()
1941 if (!PagePrivate(page)) { move_inode_to_page()
1942 SetPagePrivate(page); move_inode_to_page()
1943 page_cache_get(page); move_inode_to_page()
1944 set_page_private(page, (unsigned long) block); move_inode_to_page()
1956 struct page *page; logfs_read_inode() local
1965 page = read_cache_page(master_inode->i_mapping, ino, logfs_read_inode()
1967 if (IS_ERR(page)) logfs_read_inode()
1968 return PTR_ERR(page); logfs_read_inode()
1970 di = kmap_atomic(page); logfs_read_inode()
1973 move_page_to_inode(inode, page); logfs_read_inode()
1974 page_cache_release(page); logfs_read_inode()
1978 /* Caller must logfs_put_write_page(page); */ inode_to_page()
1979 static struct page *inode_to_page(struct inode *inode) inode_to_page()
1983 struct page *page; inode_to_page() local
1987 page = logfs_get_write_page(master_inode, inode->i_ino, 0); inode_to_page()
1988 if (!page) inode_to_page()
1991 di = kmap_atomic(page); inode_to_page()
1994 move_inode_to_page(page, inode); inode_to_page()
1995 return page; inode_to_page()
2003 struct page *page; do_write_inode() local
2014 page = inode_to_page(inode); do_write_inode()
2015 if (!page) do_write_inode()
2019 err = logfs_write_buf(master_inode, page, 0); do_write_inode()
2021 move_page_to_inode(inode, page); do_write_inode()
2023 logfs_put_write_page(page); do_write_inode()
2034 struct page *page; logfs_mod_segment_entry() local
2043 page = logfs_get_write_page(inode, page_no, 0); logfs_mod_segment_entry()
2044 BUG_ON(!page); /* FIXME: We need some reserve page for this case */ logfs_mod_segment_entry()
2045 if (!PageUptodate(page)) logfs_mod_segment_entry()
2046 logfs_read_block(inode, page, WRITE); logfs_mod_segment_entry()
2049 alloc_indirect_block(inode, page, 0); logfs_mod_segment_entry()
2050 se = kmap_atomic(page); logfs_mod_segment_entry()
2053 logfs_set_alias(sb, logfs_block(page), child_no); logfs_mod_segment_entry()
2058 logfs_put_write_page(page); logfs_mod_segment_entry()
2132 int __logfs_write_inode(struct inode *inode, struct page *page, long flags) __logfs_write_inode() argument
2137 logfs_get_wblocks(sb, page, flags & WF_LOCK); __logfs_write_inode()
2139 logfs_put_wblocks(sb, page, flags & WF_LOCK); __logfs_write_inode()
2147 struct page *page; do_delete_inode() local
2150 page = logfs_get_write_page(master_inode, inode->i_ino, 0); do_delete_inode()
2151 if (!page) do_delete_inode()
2154 move_inode_to_page(page, inode); do_delete_inode()
2156 logfs_get_wblocks(sb, page, 1); do_delete_inode()
2157 ret = __logfs_delete(master_inode, page); do_delete_inode()
2158 logfs_put_wblocks(sb, page, 1); do_delete_inode()
2160 logfs_put_write_page(page); do_delete_inode()
2173 struct page *page; logfs_evict_inode() local
2198 page = inode_to_page(inode); logfs_evict_inode()
2199 BUG_ON(!page); /* FIXME: Use emergency page */ logfs_evict_inode()
2200 logfs_put_write_page(page); logfs_evict_inode()
2206 struct page *page; btree_write_block() local
2210 page = logfs_get_write_page(inode, block->bix, block->level); btree_write_block()
2212 err = logfs_readpage_nolock(page); btree_write_block()
2214 BUG_ON(!PagePrivate(page)); btree_write_block()
2215 BUG_ON(logfs_block(page) != block); btree_write_block()
2216 err = __logfs_write_buf(inode, page, 0); btree_write_block()
2218 BUG_ON(PagePrivate(page) || page->private); btree_write_block()
2220 logfs_put_write_page(page); btree_write_block()
2243 struct page *page; logfs_inode_write() local
2248 page = logfs_get_write_page(inode, bix, 0); logfs_inode_write()
2249 if (!page) logfs_inode_write()
2252 pagebuf = kmap_atomic(page); logfs_inode_write()
2254 flush_dcache_page(page); logfs_inode_write()
2260 err = logfs_write_buf(inode, page, flags); logfs_inode_write()
2261 logfs_put_write_page(page); logfs_inode_write()
/linux-4.1.27/sound/pci/trident/
H A Dtrident_memory.c6 * Trident 4DWave-NX memory page allocation (TLB area)
34 /* page arguments of these two macros are Trident page (4096 bytes), not like
37 #define __set_tlb_bus(trident,page,ptr,addr) \
38 do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
39 (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
40 #define __tlb_to_ptr(trident,page) \
41 (void*)((trident)->tlb.shadow_entries[page])
42 #define __tlb_to_addr(trident,page) \
43 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
46 /* page size == SNDRV_TRIDENT_PAGE_SIZE */
47 #define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
49 /* fill TLB entrie(s) corresponding to page with ptr */
50 #define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
51 /* fill TLB entrie(s) corresponding to page with silence pointer */
52 #define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
53 /* get aligned page from offset address */
55 /* get offset address from aligned page */
56 #define aligned_page_offset(page) ((page) << 12)
57 /* get buffer address from aligned page */
58 #define page_to_ptr(trident,page) __tlb_to_ptr(trident, page)
59 /* get PCI physical address from aligned page */
60 #define page_to_addr(trident,page) __tlb_to_addr(trident, page)
63 /* page size == SNDRV_TRIDENT_PAGE_SIZE x 2*/
67 #define aligned_page_offset(page) ((page) << 13)
68 #define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) << 1)
69 #define page_to_addr(trident,page) __tlb_to_addr(trident, (page) << 1)
72 static inline void set_tlb_bus(struct snd_trident *trident, int page, set_tlb_bus() argument
75 page <<= 1; set_tlb_bus()
76 __set_tlb_bus(trident, page, ptr, addr); set_tlb_bus()
77 __set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE); set_tlb_bus()
79 static inline void set_silent_tlb(struct snd_trident *trident, int page) set_silent_tlb() argument
81 page <<= 1; set_silent_tlb()
82 __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr); set_silent_tlb()
83 __set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr); set_silent_tlb()
93 * of accessing page in set_tlb_bus and set_silent_tlb. search_empty()
97 #define aligned_page_offset(page) ((page) * ALIGN_PAGE_SIZE)
98 #define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) * UNIT_PAGES)
99 #define page_to_addr(trident,page) __tlb_to_addr(trident, (page) * UNIT_PAGES)
102 static inline void set_tlb_bus(struct snd_trident *trident, int page, set_tlb_bus() argument
106 page *= UNIT_PAGES; set_tlb_bus()
107 for (i = 0; i < UNIT_PAGES; i++, page++) { set_tlb_bus()
108 __set_tlb_bus(trident, page, ptr, addr); set_tlb_bus()
113 static inline void set_silent_tlb(struct snd_trident *trident, int page) set_silent_tlb() argument
116 page *= UNIT_PAGES; set_silent_tlb()
117 for (i = 0; i < UNIT_PAGES; i++, page++) set_silent_tlb()
118 __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr); set_silent_tlb()
143 int page, psize; search_empty() local
147 page = 0; search_empty()
150 if (page + psize <= firstpg(blk)) search_empty()
152 page = lastpg(blk) + 1; search_empty()
154 if (page + psize > MAX_ALIGN_PAGES) search_empty()
162 blk->offset = aligned_page_offset(page); /* set aligned offset */ search_empty()
163 firstpg(blk) = page; search_empty()
164 lastpg(blk) = page + psize - 1; search_empty()
179 snd_printk(KERN_ERR "page is not aligned\n"); is_valid_page()
186 * page allocation for DMA (Scatter-Gather version)
195 int idx, page; snd_trident_alloc_sg_pages() local
216 for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) { snd_trident_alloc_sg_pages()
226 set_tlb_bus(trident, page, ptr, addr); snd_trident_alloc_sg_pages()
233 * page allocation for DMA (contiguous version)
241 int page; snd_trident_alloc_cont_pages() local
264 for (page = firstpg(blk); page <= lastpg(blk); page++, snd_trident_alloc_cont_pages()
271 set_tlb_bus(trident, page, ptr, addr); snd_trident_alloc_cont_pages()
278 * page allocation for DMA
294 * release DMA buffer from page table
300 int page; snd_trident_free_pages() local
308 for (page = firstpg(blk); page <= lastpg(blk); page++) snd_trident_free_pages()
309 set_silent_tlb(trident, page); snd_trident_free_pages()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dhighmem.h28 #include <asm/page.h>
44 * in case of 16K/64K/256K page sizes.
61 extern void *kmap_high(struct page *page);
62 extern void kunmap_high(struct page *page);
63 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
66 static inline void *kmap(struct page *page) kmap() argument
69 if (!PageHighMem(page)) kmap()
70 return page_address(page); kmap()
71 return kmap_high(page); kmap()
74 static inline void kunmap(struct page *page) kunmap() argument
77 if (!PageHighMem(page)) kunmap()
79 kunmap_high(page); kunmap()
82 static inline void *kmap_atomic(struct page *page) kmap_atomic() argument
84 return kmap_atomic_prot(page, kmap_prot); kmap_atomic()
87 static inline struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
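The kmap()/kunmap() pair above only does real mapping work for highmem pages; lowmem pages short-circuit to page_address(). A minimal usage sketch (not from the kernel tree) that zeroes a possibly-highmem page:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Zero one page that may sit in highmem. kmap() maps it (or returns the
 * existing lowmem address); kunmap() drops the temporary mapping again. */
static void ex_zero_page(struct page *page)
{
        void *vaddr = kmap(page);

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);
}

The kernel's own clear_highpage() helper performs the same operation with kmap_atomic().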
H A Dpage_32.h23 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
25 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
39 struct page;
40 extern void clear_pages(void *page, int order); clear_page()
41 static inline void clear_page(void *page) { clear_pages(page, 0); } argument
H A Dpgtable-ppc64-4k.h4 * Entries per page directory level. The PTE level must use a 64b record
5 * for each page table entry. The PMD and PGD level use a 32b record for
6 * each entry by assuming that each entry is page aligned.
25 /* PMD_SHIFT determines what a second-level page table entry can map */
30 /* With 4k base page size, hugepage PTEs go at the PMD level */
33 /* PUD_SHIFT determines what a third-level page table entry can map */
38 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
43 /* Bits to mask out from a PMD to get to the PTE page */
45 /* Bits to mask out from a PUD to get to the PMD page */
47 /* Bits to mask out from a PGD to get to the PUD page */
52 * 4-level page tables related bits
72 extern struct page *pgd_page(pgd_t pgd);
/linux-4.1.27/fs/ceph/
H A Daddr.c22 * The page->private field is used to reference a struct
23 * ceph_snap_context for _every_ dirty page. This indicates which
24 * snapshot the page was logically dirtied in, and thus which snap
29 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
49 * Invalidate and so forth must take care to ensure the dirty page
58 static inline struct ceph_snap_context *page_snap_context(struct page *page) page_snap_context() argument
60 if (PagePrivate(page)) page_snap_context()
61 return (void *)page->private; page_snap_context()
66 * Dirty a page. Optimistically adjust accounting, on the assumption
69 static int ceph_set_page_dirty(struct page *page) ceph_set_page_dirty() argument
71 struct address_space *mapping = page->mapping; ceph_set_page_dirty()
78 return !TestSetPageDirty(page); ceph_set_page_dirty()
80 if (PageDirty(page)) { ceph_set_page_dirty()
82 mapping->host, page, page->index); ceph_set_page_dirty()
83 BUG_ON(!PagePrivate(page)); ceph_set_page_dirty()
106 mapping->host, page, page->index, ceph_set_page_dirty()
113 * Reference snap context in page->private. Also set ceph_set_page_dirty()
116 BUG_ON(PagePrivate(page)); ceph_set_page_dirty()
117 page->private = (unsigned long)snapc; ceph_set_page_dirty()
118 SetPagePrivate(page); ceph_set_page_dirty()
120 ret = __set_page_dirty_nobuffers(page); ceph_set_page_dirty()
121 WARN_ON(!PageLocked(page)); ceph_set_page_dirty()
122 WARN_ON(!page->mapping); ceph_set_page_dirty()
128 * If we are truncating the full page (i.e. offset == 0), adjust the
129 * dirty page counters appropriately. Only called if there is private
130 * data on the page.
132 static void ceph_invalidatepage(struct page *page, unsigned int offset, ceph_invalidatepage() argument
137 struct ceph_snap_context *snapc = page_snap_context(page); ceph_invalidatepage()
139 inode = page->mapping->host; ceph_invalidatepage()
143 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", ceph_invalidatepage()
144 inode, page, page->index, offset, length); ceph_invalidatepage()
148 ceph_invalidate_fscache_page(inode, page); ceph_invalidatepage()
150 if (!PagePrivate(page)) ceph_invalidatepage()
158 if (!PageDirty(page)) ceph_invalidatepage()
159 pr_err("%p invalidatepage %p page not dirty\n", inode, page); ceph_invalidatepage()
161 ClearPageChecked(page); ceph_invalidatepage()
163 dout("%p invalidatepage %p idx %lu full dirty page\n", ceph_invalidatepage()
164 inode, page, page->index); ceph_invalidatepage()
168 page->private = 0; ceph_invalidatepage()
169 ClearPagePrivate(page); ceph_invalidatepage()
172 static int ceph_releasepage(struct page *page, gfp_t g) ceph_releasepage() argument
174 struct inode *inode = page->mapping ? page->mapping->host : NULL; ceph_releasepage()
175 dout("%p releasepage %p idx %lu\n", inode, page, page->index); ceph_releasepage()
176 WARN_ON(PageDirty(page)); ceph_releasepage()
178 /* Can we release the page from the cache? */ ceph_releasepage()
179 if (!ceph_release_fscache_page(page, g)) ceph_releasepage()
182 return !PagePrivate(page); ceph_releasepage()
186 * read a single page, without unlocking it.
188 static int readpage_nounlock(struct file *filp, struct page *page) readpage_nounlock() argument
195 u64 off = page_offset(page); readpage_nounlock()
199 zero_user_segment(page, 0, PAGE_CACHE_SIZE); readpage_nounlock()
200 SetPageUptodate(page); readpage_nounlock()
207 * into page cache while getting Fcr caps. readpage_nounlock()
211 zero_user_segment(page, 0, PAGE_CACHE_SIZE); readpage_nounlock()
212 SetPageUptodate(page); readpage_nounlock()
216 err = ceph_readpage_from_fscache(inode, page); readpage_nounlock()
220 dout("readpage inode %p file %p page %p index %lu\n", readpage_nounlock()
221 inode, filp, page, page->index); readpage_nounlock()
225 &page, 1, 0); readpage_nounlock()
229 SetPageError(page); readpage_nounlock()
230 ceph_fscache_readpage_cancel(inode, page); readpage_nounlock()
234 /* zero fill remainder of page */ readpage_nounlock()
235 zero_user_segment(page, err, PAGE_CACHE_SIZE); readpage_nounlock()
237 flush_dcache_page(page); readpage_nounlock()
239 SetPageUptodate(page); readpage_nounlock()
240 ceph_readpage_to_fscache(inode, page); readpage_nounlock()
246 static int ceph_readpage(struct file *filp, struct page *page) ceph_readpage() argument
248 int r = readpage_nounlock(filp, page); ceph_readpage()
249 unlock_page(page); ceph_readpage()
273 struct page *page = osd_data->pages[i]; finish_read() local
278 /* zero (remainder of) page */ finish_read()
280 zero_user_segment(page, s, PAGE_CACHE_SIZE); finish_read()
282 dout("finish_read %p uptodate %p idx %lu\n", inode, page, finish_read()
283 page->index); finish_read()
284 flush_dcache_page(page); finish_read()
285 SetPageUptodate(page); finish_read()
286 ceph_readpage_to_fscache(inode, page); finish_read()
288 unlock_page(page); finish_read()
289 page_cache_release(page); finish_read()
295 static void ceph_unlock_page_vector(struct page **pages, int num_pages) ceph_unlock_page_vector()
312 struct page *page = list_entry(page_list->prev, struct page, lru); start_read() local
318 struct page **pages; start_read()
323 off = (u64) page_offset(page); start_read()
326 next_index = page->index; list_for_each_entry_reverse()
327 list_for_each_entry_reverse(page, page_list, lru) { list_for_each_entry_reverse()
328 if (page->index != next_index) list_for_each_entry_reverse()
347 /* build page vector */
354 page = list_entry(page_list->prev, struct page, lru);
355 BUG_ON(PageLocked(page));
356 list_del(&page->lru);
358 dout("start_read %p adding %p idx %lu\n", inode, page,
359 page->index);
360 if (add_to_page_cache_lru(page, &inode->i_data, page->index,
362 ceph_fscache_uncache_page(inode, page);
363 page_cache_release(page);
365 inode, page);
369 pages[i] = page;
466 * Write a single page, but leave the page locked.
468 * If we get a write error, set the page error bit, but still adjust the
469 * dirty page accounting (i.e., page is no longer dirty).
471 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) writepage_nounlock() argument
478 loff_t page_off = page_offset(page); writepage_nounlock()
484 dout("writepage %p idx %lu\n", page, page->index); writepage_nounlock()
486 if (!page->mapping || !page->mapping->host) { writepage_nounlock()
487 dout("writepage %p - no mapping\n", page); writepage_nounlock()
490 inode = page->mapping->host; writepage_nounlock()
496 snapc = page_snap_context(page); writepage_nounlock()
498 dout("writepage %p page %p not dirty?\n", inode, page); writepage_nounlock()
503 dout("writepage %p page %p snapc %p not writeable - noop\n", writepage_nounlock()
504 inode, page, snapc); writepage_nounlock()
519 /* is this a partial page at end of file? */ writepage_nounlock()
521 dout("%p page eof %llu\n", page, snap_size); writepage_nounlock()
527 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n", writepage_nounlock()
528 inode, page, page->index, page_off, len, snapc); writepage_nounlock()
535 ceph_readpage_to_fscache(inode, page); writepage_nounlock()
537 set_page_writeback(page); writepage_nounlock()
542 &inode->i_mtime, &page, 1); writepage_nounlock()
544 dout("writepage setting page/mapping error %d %p\n", err, page); writepage_nounlock()
545 SetPageError(page); writepage_nounlock()
550 dout("writepage cleaned page %p\n", page); writepage_nounlock()
553 page->private = 0; writepage_nounlock()
554 ClearPagePrivate(page); writepage_nounlock()
555 end_page_writeback(page); writepage_nounlock()
557 ceph_put_snap_context(snapc); /* page's reference */ writepage_nounlock()
562 static int ceph_writepage(struct page *page, struct writeback_control *wbc) ceph_writepage() argument
565 struct inode *inode = page->mapping->host; ceph_writepage()
568 err = writepage_nounlock(page, wbc); ceph_writepage()
569 unlock_page(page); ceph_writepage()
579 static void ceph_release_pages(struct page **pages, int num) ceph_release_pages()
596 * page error bits.
605 struct page *page; writepages_finish() local
637 page = osd_data->pages[i]; writepages_finish()
638 BUG_ON(!page); writepages_finish()
639 WARN_ON(!PageUptodate(page)); writepages_finish()
648 ceph_put_snap_context(page_snap_context(page)); writepages_finish()
649 page->private = 0; writepages_finish()
650 ClearPagePrivate(page); writepages_finish()
651 dout("unlocking %d %p\n", i, page); writepages_finish()
652 end_page_writeback(page); writepages_finish()
655 * We lost the cache cap, need to truncate the page before writepages_finish()
657 * page truncation thread, possibly losing some data that writepages_finish()
661 generic_error_remove_page(inode->i_mapping, page); writepages_finish()
663 unlock_page(page); writepages_finish()
778 struct page **pages = NULL; ceph_writepages_start()
780 struct page *page; ceph_writepages_start() local
802 page = pvec.pages[i]; ceph_writepages_start()
803 dout("? %p idx %lu\n", page, page->index); ceph_writepages_start()
805 lock_page(page); /* first page */ ceph_writepages_start()
806 else if (!trylock_page(page)) ceph_writepages_start()
810 if (unlikely(!PageDirty(page)) || ceph_writepages_start()
811 unlikely(page->mapping != mapping)) { ceph_writepages_start()
812 dout("!dirty or !mapping %p\n", page); ceph_writepages_start()
813 unlock_page(page); ceph_writepages_start()
816 if (!wbc->range_cyclic && page->index > end) { ceph_writepages_start()
817 dout("end of range %p\n", page); ceph_writepages_start()
819 unlock_page(page); ceph_writepages_start()
822 if (next && (page->index != next)) { ceph_writepages_start()
823 dout("not consecutive %p\n", page); ceph_writepages_start()
824 unlock_page(page); ceph_writepages_start()
828 dout("waiting on writeback %p\n", page); ceph_writepages_start()
829 wait_on_page_writeback(page); ceph_writepages_start()
831 if (page_offset(page) >= snap_size) { ceph_writepages_start()
832 dout("%p page eof %llu\n", page, snap_size); ceph_writepages_start()
834 unlock_page(page); ceph_writepages_start()
837 if (PageWriteback(page)) { ceph_writepages_start()
838 dout("%p under writeback\n", page); ceph_writepages_start()
839 unlock_page(page); ceph_writepages_start()
844 pgsnapc = page_snap_context(page); ceph_writepages_start()
846 dout("page snapc %p %lld > oldest %p %lld\n", ceph_writepages_start()
848 unlock_page(page); ceph_writepages_start()
854 if (!clear_page_dirty_for_io(page)) { ceph_writepages_start()
855 dout("%p !clear_page_dirty_for_io\n", page); ceph_writepages_start()
856 unlock_page(page); ceph_writepages_start()
862 * the first locked page this time through, ceph_writepages_start()
863 * allocate an osd request and a page array ceph_writepages_start()
869 offset = (u64)page_offset(page); ceph_writepages_start()
882 unlock_page(page); ceph_writepages_start()
902 /* note position of first page in pvec */ ceph_writepages_start()
905 dout("%p will write page %p idx %lu\n", ceph_writepages_start()
906 inode, page, page->index); ceph_writepages_start()
916 set_page_writeback(page); ceph_writepages_start()
917 pages[locked_pages] = page; ceph_writepages_start()
919 next = page->index + 1; ceph_writepages_start()
939 dout(" pvec leftover page %p\n", ceph_writepages_start()
1022 * We are only allowed to write into/dirty the page if the page is
1025 * called with page locked.
1026 * return success with page locked,
1027 * or any failure (incl -EAGAIN) with page unlocked.
1031 struct page *page) ceph_update_writeable_page()
1044 /* writepages currently holds page lock, but if we change that later, */ ceph_update_writeable_page()
1045 wait_on_page_writeback(page); ceph_update_writeable_page()
1051 snapc = page_snap_context(page); ceph_update_writeable_page()
1054 * this page is already dirty in another (older) snap ceph_update_writeable_page()
1062 dout(" page %p snapc %p not current or oldest\n", ceph_update_writeable_page()
1063 page, snapc); ceph_update_writeable_page()
1069 unlock_page(page); ceph_update_writeable_page()
1080 /* yay, writeable, do it now (without dropping page lock) */ ceph_update_writeable_page()
1081 dout(" page %p snapc %p not current, but oldest\n", ceph_update_writeable_page()
1082 page, snapc); ceph_update_writeable_page()
1083 if (!clear_page_dirty_for_io(page)) ceph_update_writeable_page()
1085 r = writepage_nounlock(page, NULL); ceph_update_writeable_page()
1091 if (PageUptodate(page)) { ceph_update_writeable_page()
1092 dout(" page %p already uptodate\n", page); ceph_update_writeable_page()
1096 /* full page? */ ceph_update_writeable_page()
1107 page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); ceph_update_writeable_page()
1108 zero_user_segments(page, ceph_update_writeable_page()
1116 r = readpage_nounlock(file, page); ceph_update_writeable_page()
1121 unlock_page(page); ceph_update_writeable_page()
1126 * We are only allowed to write into/dirty the page if the page is
1131 struct page **pagep, void **fsdata) ceph_write_begin()
1134 struct page *page; ceph_write_begin() local
1139 /* get a page */ ceph_write_begin()
1140 page = grab_cache_page_write_begin(mapping, index, 0); ceph_write_begin()
1141 if (!page) ceph_write_begin()
1143 *pagep = page; ceph_write_begin()
1145 dout("write_begin file %p inode %p page %p %d~%d\n", file, ceph_write_begin()
1146 inode, page, (int)pos, (int)len); ceph_write_begin()
1148 r = ceph_update_writeable_page(file, pos, len, page); ceph_write_begin()
1150 page_cache_release(page); ceph_write_begin()
1152 *pagep = page; ceph_write_begin()
1160 * except adjust dirty page accounting and drop read lock on
1165 struct page *page, void *fsdata) ceph_write_end()
1173 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, ceph_write_end()
1174 inode, page, (int)pos, (int)copied, (int)len); ceph_write_end()
1176 /* zero the stale part of the page if we did a short copy */ ceph_write_end()
1178 zero_user_segment(page, from+copied, len); ceph_write_end()
1185 if (!PageUptodate(page)) ceph_write_end()
1186 SetPageUptodate(page); ceph_write_end()
1188 set_page_dirty(page); ceph_write_end()
1190 unlock_page(page); ceph_write_end()
1192 page_cache_release(page); ceph_write_end()
1234 struct page *pinned_page = NULL; ceph_filemap_fault()
1280 struct page *page = find_or_create_page(mapping, 0, ceph_filemap_fault() local
1283 if (!page) { ceph_filemap_fault()
1287 ret1 = __ceph_do_getattr(inode, page, ceph_filemap_fault()
1290 unlock_page(page); ceph_filemap_fault()
1291 page_cache_release(page); ceph_filemap_fault()
1296 zero_user_segment(page, ret1, PAGE_CACHE_SIZE); ceph_filemap_fault()
1298 flush_dcache_page(page); ceph_filemap_fault()
1299 SetPageUptodate(page); ceph_filemap_fault()
1300 vmf->page = page; ceph_filemap_fault()
1318 struct page *page = vmf->page; ceph_page_mkwrite() local
1319 loff_t off = page_offset(page); ceph_page_mkwrite()
1325 struct page *locked_page = NULL; ceph_page_mkwrite()
1327 lock_page(page); ceph_page_mkwrite()
1328 locked_page = page; ceph_page_mkwrite()
1362 /* Update time before taking page lock */ ceph_page_mkwrite()
1365 lock_page(page); ceph_page_mkwrite()
1369 (page->mapping != inode->i_mapping)) ceph_page_mkwrite()
1372 ret = ceph_update_writeable_page(vma->vm_file, off, len, page); ceph_page_mkwrite()
1374 /* success. we'll keep the page locked. */ ceph_page_mkwrite()
1375 set_page_dirty(page); ceph_page_mkwrite()
1386 unlock_page(page); ceph_page_mkwrite()
1405 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, ceph_fill_inline_data()
1409 struct page *page; ceph_fill_inline_data() local
1412 page = locked_page; ceph_fill_inline_data()
1416 page = find_or_create_page(mapping, 0, ceph_fill_inline_data()
1418 if (!page) ceph_fill_inline_data()
1420 if (PageUptodate(page)) { ceph_fill_inline_data()
1421 unlock_page(page); ceph_fill_inline_data()
1422 page_cache_release(page); ceph_fill_inline_data()
1431 void *kaddr = kmap_atomic(page); ceph_fill_inline_data()
1436 if (page != locked_page) { ceph_fill_inline_data()
1438 zero_user_segment(page, len, PAGE_CACHE_SIZE); ceph_fill_inline_data()
1440 flush_dcache_page(page); ceph_fill_inline_data()
1442 SetPageUptodate(page); ceph_fill_inline_data()
1443 unlock_page(page); ceph_fill_inline_data()
1444 page_cache_release(page); ceph_fill_inline_data()
1448 int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_uninline_data()
1454 struct page *page = NULL; ceph_uninline_data() local
1471 page = locked_page; ceph_uninline_data()
1472 WARN_ON(!PageUptodate(page)); ceph_uninline_data()
1475 page = find_get_page(inode->i_mapping, 0); ceph_uninline_data()
1476 if (page) { ceph_uninline_data()
1477 if (PageUptodate(page)) { ceph_uninline_data()
1479 lock_page(page); ceph_uninline_data()
1481 page_cache_release(page); ceph_uninline_data()
1482 page = NULL; ceph_uninline_data()
1487 if (page) { ceph_uninline_data()
1492 page = __page_cache_alloc(GFP_NOFS); ceph_uninline_data()
1493 if (!page) { ceph_uninline_data()
1497 err = __ceph_do_getattr(inode, page, ceph_uninline_data()
1539 osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); ceph_uninline_data()
1572 if (page && page != locked_page) { ceph_uninline_data()
1574 unlock_page(page); ceph_uninline_data()
1575 page_cache_release(page); ceph_uninline_data()
1577 __free_pages(page, 0); ceph_uninline_data()
1029 ceph_update_writeable_page(struct file *file, loff_t pos, unsigned len, struct page *page) ceph_update_writeable_page() argument
1163 ceph_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) ceph_write_end() argument
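ceph_set_page_dirty() and page_snap_context() above rely on stashing a reference-counted cookie in page->private and flagging it with PG_private. A generic sketch of that attach/detach pattern (not the ceph code itself):

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Attach an opaque cookie to a page; the page must not already carry one. */
static void ex_attach_cookie(struct page *page, void *cookie)
{
        BUG_ON(PagePrivate(page));
        page->private = (unsigned long)cookie;
        SetPagePrivate(page);
}

/* Detach and return the cookie, or NULL if none was attached. */
static void *ex_detach_cookie(struct page *page)
{
        void *cookie = NULL;

        if (PagePrivate(page)) {
                cookie = (void *)page->private;
                page->private = 0;
                ClearPagePrivate(page);
        }
        return cookie;
}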
H A Dcache.h42 int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
47 void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
48 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
63 struct page *page) ceph_fscache_uncache_page()
66 return fscache_uncache_page(ci->fscache, page); ceph_fscache_uncache_page()
69 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) ceph_release_fscache_page() argument
71 struct inode* inode = page->mapping->host; ceph_release_fscache_page()
73 return fscache_maybe_release_page(ci->fscache, page, gfp); ceph_release_fscache_page()
77 struct page *page) ceph_fscache_readpage_cancel()
80 if (fscache_cookie_valid(ci->fscache) && PageFsCache(page)) ceph_fscache_readpage_cancel()
81 __fscache_uncache_page(ci->fscache, page); ceph_fscache_readpage_cancel()
121 struct page *pages) ceph_fscache_uncache_page()
126 struct page *page) ceph_readpage_from_fscache()
140 struct page *page) ceph_readpage_to_fscache()
153 struct page *page) ceph_invalidate_fscache_page()
161 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) ceph_release_fscache_page() argument
167 struct page *page) ceph_fscache_readpage_cancel()
62 ceph_fscache_uncache_page(struct inode *inode, struct page *page) ceph_fscache_uncache_page() argument
76 ceph_fscache_readpage_cancel(struct inode *inode, struct page *page) ceph_fscache_readpage_cancel() argument
125 ceph_readpage_from_fscache(struct inode* inode, struct page *page) ceph_readpage_from_fscache() argument
139 ceph_readpage_to_fscache(struct inode *inode, struct page *page) ceph_readpage_to_fscache() argument
152 ceph_invalidate_fscache_page(struct inode *inode, struct page *page) ceph_invalidate_fscache_page() argument
166 ceph_fscache_readpage_cancel(struct inode *inode, struct page *page) ceph_fscache_readpage_cancel() argument
/linux-4.1.27/fs/nfs/
H A Dsymlink.c8 * Jun 7 1999, cache symlink lookups in the page cache. -DaveM
25 /* Symlink caching in the page cache is even more simplistic
29 static int nfs_symlink_filler(struct inode *inode, struct page *page) nfs_symlink_filler() argument
33 error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); nfs_symlink_filler()
36 SetPageUptodate(page); nfs_symlink_filler()
37 unlock_page(page); nfs_symlink_filler()
41 SetPageError(page); nfs_symlink_filler()
42 unlock_page(page); nfs_symlink_filler()
49 struct page *page; nfs_follow_link() local
55 page = read_cache_page(&inode->i_data, 0, nfs_follow_link()
57 if (IS_ERR(page)) { nfs_follow_link()
58 err = page; nfs_follow_link()
61 nd_set_link(nd, kmap(page)); nfs_follow_link()
62 return page; nfs_follow_link()
H A Dfscache.c256 * Release the caching state associated with a page, if the page isn't busy
258 * - Returns true (can release page) or false (page busy).
260 int nfs_fscache_release_page(struct page *page, gfp_t gfp) nfs_fscache_release_page() argument
262 if (PageFsCache(page)) { nfs_fscache_release_page()
263 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host); nfs_fscache_release_page()
267 cookie, page, NFS_I(page->mapping->host)); nfs_fscache_release_page()
269 if (!fscache_maybe_release_page(cookie, page, gfp)) nfs_fscache_release_page()
272 nfs_inc_fscache_stats(page->mapping->host, nfs_fscache_release_page()
280 * Release the caching state associated with a page if undergoing complete page
283 void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode) __nfs_fscache_invalidate_page() argument
290 cookie, page, NFS_I(inode)); __nfs_fscache_invalidate_page()
292 fscache_wait_on_page_write(cookie, page); __nfs_fscache_invalidate_page()
294 BUG_ON(!PageLocked(page)); __nfs_fscache_invalidate_page()
295 fscache_uncache_page(cookie, page); __nfs_fscache_invalidate_page()
296 nfs_inc_fscache_stats(page->mapping->host, __nfs_fscache_invalidate_page()
301 * Handle completion of a page being read from the cache.
304 static void nfs_readpage_from_fscache_complete(struct page *page, nfs_readpage_from_fscache_complete() argument
310 page, context, error); nfs_readpage_from_fscache_complete()
312 /* if the read completes with an error, we just unlock the page and let nfs_readpage_from_fscache_complete()
315 SetPageUptodate(page); nfs_readpage_from_fscache_complete()
316 unlock_page(page); nfs_readpage_from_fscache_complete()
318 error = nfs_readpage_async(context, page->mapping->host, page); nfs_readpage_from_fscache_complete()
320 unlock_page(page); nfs_readpage_from_fscache_complete()
325 * Retrieve a page from fscache
328 struct inode *inode, struct page *page) __nfs_readpage_from_fscache()
334 nfs_i_fscache(inode), page, page->index, page->flags, inode); __nfs_readpage_from_fscache()
337 page, __nfs_readpage_from_fscache()
343 case 0: /* read BIO submitted (page in fscache) */ __nfs_readpage_from_fscache()
350 case -ENODATA: /* page not in cache */ __nfs_readpage_from_fscache()
402 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret); __nfs_readpages_from_fscache()
414 * Store a newly fetched page in fscache
415 * - PG_fscache must be set on the page
417 void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync) __nfs_readpage_to_fscache() argument
423 nfs_i_fscache(inode), page, page->index, page->flags, sync); __nfs_readpage_to_fscache()
425 ret = fscache_write_page(nfs_i_fscache(inode), page, GFP_KERNEL); __nfs_readpage_to_fscache()
428 page, page->index, page->flags, ret); __nfs_readpage_to_fscache()
431 fscache_uncache_page(nfs_i_fscache(inode), page); __nfs_readpage_to_fscache() local
327 __nfs_readpage_from_fscache(struct nfs_open_context *ctx, struct inode *inode, struct page *page) __nfs_readpage_from_fscache() argument
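nfs_fscache_release_page() above gates page-cache reclaim on the cache backend: the page may only be dropped once fscache no longer needs it. A trimmed sketch of that releasepage-style gate; ex_cookie_of() is a hypothetical accessor standing in for nfs_i_fscache(), not an NFS function.

#include <linux/fs.h>
#include <linux/fscache.h>
#include <linux/mm.h>

/* Hypothetical per-inode cookie accessor. */
struct fscache_cookie *ex_cookie_of(struct inode *inode);

static int ex_release_page(struct page *page, gfp_t gfp)
{
        if (PageFsCache(page)) {
                struct fscache_cookie *cookie = ex_cookie_of(page->mapping->host);

                if (!fscache_maybe_release_page(cookie, page, gfp))
                        return 0;       /* cache still busy with the page */
        }
        return 1;                       /* safe for the VM to drop it */
}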
H A Dfscache.h83 extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
84 extern int nfs_fscache_release_page(struct page *, gfp_t);
87 struct inode *, struct page *);
91 extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
94 * wait for a page to complete writing to the cache
97 struct page *page) nfs_fscache_wait_on_page_write()
99 if (PageFsCache(page)) nfs_fscache_wait_on_page_write()
100 fscache_wait_on_page_write(nfsi->fscache, page); nfs_fscache_wait_on_page_write()
104 * release the caching state associated with a page if undergoing complete page
107 static inline void nfs_fscache_invalidate_page(struct page *page, nfs_fscache_invalidate_page() argument
110 if (PageFsCache(page)) nfs_fscache_invalidate_page()
111 __nfs_fscache_invalidate_page(page, inode); nfs_fscache_invalidate_page()
115 * Retrieve a page from an inode data storage object.
119 struct page *page) nfs_readpage_from_fscache()
122 return __nfs_readpage_from_fscache(ctx, inode, page); nfs_readpage_from_fscache()
142 * Store a page newly fetched from the server in an inode data storage object
146 struct page *page, nfs_readpage_to_fscache()
149 if (PageFsCache(page)) nfs_readpage_to_fscache()
150 __nfs_readpage_to_fscache(inode, page, sync); nfs_readpage_to_fscache()
193 static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) nfs_fscache_release_page() argument
195 return 1; /* True: may release page */ nfs_fscache_release_page()
197 static inline void nfs_fscache_invalidate_page(struct page *page, nfs_fscache_invalidate_page() argument
200 struct page *page) {}
204 struct page *page) nfs_readpage_from_fscache()
217 struct page *page, int sync) {}
96 nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi, struct page *page) nfs_fscache_wait_on_page_write() argument
117 nfs_readpage_from_fscache(struct nfs_open_context *ctx, struct inode *inode, struct page *page) nfs_readpage_from_fscache() argument
145 nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync) nfs_readpage_to_fscache() argument
199 nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi, struct page *page) nfs_fscache_wait_on_page_write() argument
202 nfs_readpage_from_fscache(struct nfs_open_context *ctx, struct inode *inode, struct page *page) nfs_readpage_from_fscache() argument
216 nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync) nfs_readpage_to_fscache() argument
/linux-4.1.27/include/xen/
H A Dpage.h4 #include <asm/xen/page.h>
6 static inline unsigned long page_to_mfn(struct page *page) page_to_mfn() argument
8 return pfn_to_mfn(page_to_pfn(page)); page_to_mfn()
/linux-4.1.27/arch/arm/include/asm/
H A Dlimits.h5 #include <asm/page.h>
H A Dhighmem.h23 extern void *kmap_high(struct page *page);
24 extern void kunmap_high(struct page *page);
28 * page usage count does not decrease to zero while we're using its
53 extern void *kmap_high_get(struct page *page);
55 static inline void *kmap_high_get(struct page *page) kmap_high_get() argument
66 extern void *kmap(struct page *page);
67 extern void kunmap(struct page *page);
68 extern void *kmap_atomic(struct page *page);
71 extern struct page *kmap_atomic_to_page(const void *ptr);
H A Dpage.h2 * arch/arm/include/asm/page.h
13 /* PAGE_SHIFT determines the page size */
22 #include <asm/page-nommu.h>
33 * page-based copying and clearing for user space for the particular
110 struct page;
114 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
115 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
130 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
131 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
135 #define clear_user_highpage(page,vaddr) \
136 __cpu_clear_user_highpage(page, vaddr)
142 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
157 typedef struct page *pgtable_t;
H A Dpage-nommu.h2 * arch/arm/include/asm/page-nommu.h
21 #define free_user_page(page, addr) free_page(addr)
23 #define clear_page(page) memset((page), 0, PAGE_SIZE)
26 #define clear_user_page(page, vaddr, pg) clear_page(page)
/linux-4.1.27/fs/coda/
H A Dsymlink.c23 static int coda_symlink_filler(struct file *file, struct page *page) coda_symlink_filler() argument
25 struct inode *inode = page->mapping->host; coda_symlink_filler()
29 char *p = kmap(page); coda_symlink_filler()
36 SetPageUptodate(page); coda_symlink_filler()
37 kunmap(page); coda_symlink_filler()
38 unlock_page(page); coda_symlink_filler()
42 SetPageError(page); coda_symlink_filler()
43 kunmap(page); coda_symlink_filler()
44 unlock_page(page); coda_symlink_filler()
/linux-4.1.27/fs/efs/
H A Dsymlink.c14 static int efs_symlink_readpage(struct file *file, struct page *page) efs_symlink_readpage() argument
16 char *link = kmap(page); efs_symlink_readpage()
18 struct inode * inode = page->mapping->host; efs_symlink_readpage()
41 SetPageUptodate(page); efs_symlink_readpage()
42 kunmap(page); efs_symlink_readpage()
43 unlock_page(page); efs_symlink_readpage()
46 SetPageError(page); efs_symlink_readpage()
47 kunmap(page); efs_symlink_readpage()
48 unlock_page(page); efs_symlink_readpage()
/linux-4.1.27/fs/affs/
H A Dsymlink.c13 static int affs_symlink_readpage(struct file *file, struct page *page) affs_symlink_readpage() argument
16 struct inode *inode = page->mapping->host; affs_symlink_readpage()
17 char *link = kmap(page); affs_symlink_readpage()
61 SetPageUptodate(page); affs_symlink_readpage()
62 kunmap(page); affs_symlink_readpage()
63 unlock_page(page); affs_symlink_readpage()
66 SetPageError(page); affs_symlink_readpage()
67 kunmap(page); affs_symlink_readpage()
68 unlock_page(page); affs_symlink_readpage()
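The coda/efs/affs fillers above (and the NFS one earlier) all follow the same readpage-filler shape: kmap the page, produce the link text, mark the page uptodate or flag an error, and always kunmap and unlock before returning. A skeleton of that shape; ex_read_link_target() is a hypothetical filesystem-specific lookup, not a kernel function.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical lookup that writes the link target into 'buf'. */
int ex_read_link_target(struct inode *inode, char *buf, unsigned long buflen);

static int ex_symlink_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        char *link = kmap(page);
        int err = ex_read_link_target(inode, link, PAGE_SIZE);

        if (err)
                goto fail;
        SetPageUptodate(page);
        kunmap(page);
        unlock_page(page);
        return 0;

fail:
        SetPageError(page);
        kunmap(page);
        unlock_page(page);
        return err;
}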
/linux-4.1.27/arch/xtensa/mm/
H A Dcache.c32 #include <asm/page.h>
41 * The kernel provides one architecture bit PG_arch_1 in the page flags that
48 * are coherent. The kernel clears this bit whenever a page is added to the
49 * page cache. At that time, the caches might not be in sync. We, therefore,
56 * page.
63 static inline void kmap_invalidate_coherent(struct page *page, kmap_invalidate_coherent() argument
66 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { kmap_invalidate_coherent()
69 if (!PageHighMem(page)) { kmap_invalidate_coherent()
70 kvaddr = (unsigned long)page_to_virt(page); kmap_invalidate_coherent()
75 (page_to_phys(page) & DCACHE_ALIAS_MASK); kmap_invalidate_coherent()
78 page_to_phys(page)); kmap_invalidate_coherent()
83 static inline void *coherent_kvaddr(struct page *page, unsigned long base, coherent_kvaddr() argument
86 if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { coherent_kvaddr()
87 *paddr = page_to_phys(page); coherent_kvaddr()
91 return page_to_virt(page); coherent_kvaddr()
95 void clear_user_highpage(struct page *page, unsigned long vaddr) clear_user_highpage() argument
98 void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); clear_user_highpage()
101 kmap_invalidate_coherent(page, vaddr); clear_user_highpage()
102 set_bit(PG_arch_1, &page->flags); clear_user_highpage()
107 void copy_user_highpage(struct page *dst, struct page *src, copy_user_highpage()
128 * Any time the kernel writes to a user page cache page, or it is about to
129 * read from a page cache page this routine is called.
133 void flush_dcache_page(struct page *page) flush_dcache_page() argument
135 struct address_space *mapping = page_mapping(page); flush_dcache_page()
138 * If we have a mapping but the page is not mapped to user-space flush_dcache_page()
139 * yet, we simply mark this page dirty and defer flushing the flush_dcache_page()
144 if (!test_bit(PG_arch_1, &page->flags)) flush_dcache_page()
145 set_bit(PG_arch_1, &page->flags); flush_dcache_page()
150 unsigned long phys = page_to_phys(page); flush_dcache_page()
151 unsigned long temp = page->index << PAGE_SHIFT; flush_dcache_page()
156 * Flush the page in kernel space and user space. flush_dcache_page()
177 /* There shouldn't be an entry in the cache for this page anymore. */ flush_dcache_page()
193 * Remove any entry in the cache for this page.
217 struct page *page; update_mmu_cache() local
222 page = pfn_to_page(pfn); update_mmu_cache()
230 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { update_mmu_cache()
231 unsigned long phys = page_to_phys(page); update_mmu_cache()
240 clear_bit(PG_arch_1, &page->flags); update_mmu_cache()
243 if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags) update_mmu_cache()
245 unsigned long paddr = (unsigned long)kmap_atomic(page); update_mmu_cache()
248 set_bit(PG_arch_1, &page->flags); update_mmu_cache()
256 * flush_dcache_page() on the page.
261 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
265 unsigned long phys = page_to_phys(page); copy_to_user_page()
268 /* Flush and invalidate user page if aliased. */ copy_to_user_page()
280 * Flush and invalidate kernel page if aliased and synchronize copy_to_user_page()
297 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
301 unsigned long phys = page_to_phys(page); copy_from_user_page()
305 * Flush user page if aliased. copy_from_user_page()
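The DCACHE_ALIAS_EQ()/DCACHE_ALIAS() tests above compare the cache-index bits that lie above the page offset: two mappings of the same physical page only cause an aliasing problem when those "colour" bits differ. A compact illustration with assumed sizes, not Xtensa's real configuration:

/* Assumed geometry: 16 KiB cache colour span, 4 KiB pages. */
#define EX_PAGE_SHIFT        12
#define EX_DCACHE_WAY_SIZE   (16 * 1024)
#define EX_DCACHE_ALIAS_MASK (EX_DCACHE_WAY_SIZE - (1 << EX_PAGE_SHIFT))

/* Colour of an address: the index bits above the page offset. */
#define EX_DCACHE_ALIAS(a)       ((a) & EX_DCACHE_ALIAS_MASK)

/* True when both addresses hit the same cache sets, i.e. no alias issue. */
#define EX_DCACHE_ALIAS_EQ(a, b) (EX_DCACHE_ALIAS(a) == EX_DCACHE_ALIAS(b))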
H A Dhighmem.c40 void *kmap_atomic(struct page *page) kmap_atomic() argument
46 if (!PageHighMem(page)) kmap_atomic()
47 return page_address(page); kmap_atomic()
50 DCACHE_ALIAS(page_to_phys(page))); kmap_atomic()
55 set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC)); kmap_atomic()
71 * is a bad idea also, in case the page changes cacheability __kunmap_atomic()
72 * attributes or becomes a protected page in a hypervisor. __kunmap_atomic()
/linux-4.1.27/arch/unicore32/mm/
H A Dflush.c35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
47 * Copy user data from/to a page which is mapped into a different
53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
58 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
61 void __flush_dcache_page(struct address_space *mapping, struct page *page) __flush_dcache_page() argument
65 * page. This ensures that data in the physical page is mutually __flush_dcache_page()
68 __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE); __flush_dcache_page()
73 * of this page.
75 void flush_dcache_page(struct page *page) flush_dcache_page() argument
80 * The zero page is never written to, so never has any dirty flush_dcache_page()
83 if (page == ZERO_PAGE(0)) flush_dcache_page()
86 mapping = page_mapping(page); flush_dcache_page()
89 clear_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
91 __flush_dcache_page(mapping, page); flush_dcache_page()
94 set_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
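flush_dcache_page() above implements the usual deferred-flush policy: if the page has no user-space mappings yet, just mark it dirty (clear PG_dcache_clean) and flush later when it is actually mapped; otherwise flush now and mark it clean. A condensed sketch; ex_flush_page_kernel_dcache() stands in for the arch flush primitive, and PG_dcache_clean is the arch's alias for the generic PG_arch_1 bit used here.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Stand-in for the architecture's kernel-alias dcache flush primitive. */
void ex_flush_page_kernel_dcache(struct page *page);

static void ex_flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping && !mapping_mapped(mapping)) {
                /* no user mappings yet: defer the flush to mapping time */
                clear_bit(PG_arch_1, &page->flags);
        } else {
                ex_flush_page_kernel_dcache(page);
                if (mapping)
                        set_bit(PG_arch_1, &page->flags);
        }
}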
/linux-4.1.27/arch/microblaze/include/asm/
H A Dhighmem.h40 * in case of 16K/64K/256K page sizes.
53 extern void *kmap_high(struct page *page);
54 extern void kunmap_high(struct page *page);
55 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
58 static inline void *kmap(struct page *page) kmap() argument
61 if (!PageHighMem(page)) kmap()
62 return page_address(page); kmap()
63 return kmap_high(page); kmap()
66 static inline void kunmap(struct page *page) kunmap() argument
69 if (!PageHighMem(page)) kunmap()
71 kunmap_high(page); kunmap()
74 static inline void *kmap_atomic(struct page *page) kmap_atomic() argument
76 return kmap_atomic_prot(page, kmap_prot); kmap_atomic()
79 static inline struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
/linux-4.1.27/fs/f2fs/
H A Dinline.c33 void read_inline_data(struct page *page, struct page *ipage) read_inline_data() argument
37 if (PageUptodate(page)) read_inline_data()
40 f2fs_bug_on(F2FS_P_SB(page), page->index); read_inline_data()
42 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); read_inline_data()
46 dst_addr = kmap_atomic(page); read_inline_data()
48 flush_dcache_page(page); read_inline_data()
50 SetPageUptodate(page); read_inline_data()
53 bool truncate_inline_inode(struct page *ipage, u64 from) truncate_inline_inode()
68 int f2fs_read_inline_data(struct inode *inode, struct page *page) f2fs_read_inline_data() argument
70 struct page *ipage; f2fs_read_inline_data()
74 unlock_page(page); f2fs_read_inline_data()
83 if (page->index) f2fs_read_inline_data()
84 zero_user_segment(page, 0, PAGE_CACHE_SIZE); f2fs_read_inline_data()
86 read_inline_data(page, ipage); f2fs_read_inline_data()
88 SetPageUptodate(page); f2fs_read_inline_data()
90 unlock_page(page); f2fs_read_inline_data()
94 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) f2fs_convert_inline_page() argument
103 f2fs_bug_on(F2FS_I_SB(dn->inode), page->index); f2fs_convert_inline_page()
112 f2fs_wait_on_page_writeback(page, DATA); f2fs_convert_inline_page()
114 if (PageUptodate(page)) f2fs_convert_inline_page()
117 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); f2fs_convert_inline_page()
121 dst_addr = kmap_atomic(page); f2fs_convert_inline_page()
123 flush_dcache_page(page); f2fs_convert_inline_page()
125 SetPageUptodate(page); f2fs_convert_inline_page()
128 dirty = clear_page_dirty_for_io(page); f2fs_convert_inline_page()
130 /* write data page to try to make data consistent */ f2fs_convert_inline_page()
131 set_page_writeback(page); f2fs_convert_inline_page()
133 write_data_page(page, dn, &fio); f2fs_convert_inline_page()
136 f2fs_wait_on_page_writeback(page, DATA); f2fs_convert_inline_page()
157 struct page *ipage, *page; f2fs_convert_inline_inode() local
160 page = grab_cache_page(inode->i_mapping, 0); f2fs_convert_inline_inode()
161 if (!page) f2fs_convert_inline_inode()
175 err = f2fs_convert_inline_page(&dn, page); f2fs_convert_inline_inode()
181 f2fs_put_page(page, 1); f2fs_convert_inline_inode()
185 int f2fs_write_inline_data(struct inode *inode, struct page *page) f2fs_write_inline_data() argument
201 f2fs_bug_on(F2FS_I_SB(inode), page->index); f2fs_write_inline_data()
204 src_addr = kmap_atomic(page); f2fs_write_inline_data()
217 bool recover_inline_data(struct inode *inode, struct page *npage) recover_inline_data()
222 struct page *ipage; recover_inline_data()
270 struct qstr *name, struct page **res_page) find_in_inline_dir()
276 struct page *ipage; find_in_inline_dir()
302 struct page **p) f2fs_parent_inline_dir()
305 struct page *ipage; f2fs_parent_inline_dir()
321 struct page *ipage) make_empty_inline_dir()
341 static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, f2fs_convert_inline_dir()
344 struct page *page; f2fs_convert_inline_dir() local
349 page = grab_cache_page(dir->i_mapping, 0); f2fs_convert_inline_dir()
350 if (!page) f2fs_convert_inline_dir()
358 f2fs_wait_on_page_writeback(page, DATA); f2fs_convert_inline_dir()
359 zero_user_segment(page, 0, PAGE_CACHE_SIZE); f2fs_convert_inline_dir()
361 dentry_blk = kmap_atomic(page); f2fs_convert_inline_dir()
372 SetPageUptodate(page); f2fs_convert_inline_dir()
373 set_page_dirty(page); f2fs_convert_inline_dir()
388 f2fs_put_page(page, 1); f2fs_convert_inline_dir()
396 struct page *ipage; f2fs_add_inline_entry()
403 struct page *page = NULL; f2fs_add_inline_entry() local
422 page = init_inode_metadata(inode, dir, name, ipage); f2fs_add_inline_entry()
423 if (IS_ERR(page)) { f2fs_add_inline_entry()
424 err = PTR_ERR(page); f2fs_add_inline_entry()
440 update_inode(inode, page); f2fs_add_inline_entry()
441 f2fs_put_page(page, 1); f2fs_add_inline_entry()
458 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, f2fs_delete_inline_entry() argument
466 lock_page(page); f2fs_delete_inline_entry()
467 f2fs_wait_on_page_writeback(page, NODE); f2fs_delete_inline_entry()
469 inline_dentry = inline_data_addr(page); f2fs_delete_inline_entry()
475 set_page_dirty(page); f2fs_delete_inline_entry()
480 f2fs_drop_nlink(dir, inode, page); f2fs_delete_inline_entry()
482 f2fs_put_page(page, 1); f2fs_delete_inline_entry()
488 struct page *ipage; f2fs_empty_inline_dir()
513 struct page *ipage = NULL; f2fs_read_inline_dir()
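read_inline_data() and f2fs_convert_inline_page() above both hinge on one copy step the excerpt elides: pulling the MAX_INLINE_DATA bytes stored in the inode page out into a regular data page. A sketch of that copy, assuming f2fs's own f2fs.h for inline_data_addr() and MAX_INLINE_DATA as named in the listing:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>
#include "f2fs.h"   /* assumed: provides inline_data_addr(), MAX_INLINE_DATA */

/* Copy the inlined bytes from the inode page into the first data page.
 * Assumes the caller holds the page lock, as in read_inline_data() above. */
static void ex_copy_inline_to_page(struct page *page, struct page *ipage)
{
        void *src_addr = inline_data_addr(ipage);
        void *dst_addr = kmap_atomic(page);

        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap_atomic(dst_addr);
        flush_dcache_page(page);
        SetPageUptodate(page);
}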
H A Dnode.c79 static void clear_node_page_dirty(struct page *page) clear_node_page_dirty() argument
81 struct address_space *mapping = page->mapping; clear_node_page_dirty()
84 if (PageDirty(page)) { clear_node_page_dirty()
87 page_index(page), clear_node_page_dirty()
91 clear_page_dirty_for_io(page); clear_node_page_dirty()
94 ClearPageUptodate(page); clear_node_page_dirty()
97 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) get_current_nat_page()
103 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) get_next_nat_page()
105 struct page *src_page; get_next_nat_page()
106 struct page *dst_page; get_next_nat_page()
116 /* get current nat block page with lock */ get_next_nat_page()
353 struct page *page = NULL; get_node_info() local
385 /* Fill node_info from nat page */ get_node_info()
386 page = get_current_nat_page(sbi, start_nid); get_node_info()
387 nat_blk = (struct f2fs_nat_block *)page_address(page); get_node_info()
390 f2fs_put_page(page, 1); get_node_info()
483 struct page *npage[4]; get_dnode_of_data()
484 struct page *parent = NULL; get_dnode_of_data()
619 struct page *page; truncate_dnode() local
625 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); truncate_dnode()
626 if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) truncate_dnode()
628 else if (IS_ERR(page)) truncate_dnode()
629 return PTR_ERR(page); truncate_dnode()
632 dn->node_page = page; truncate_dnode()
643 struct page *page; truncate_nodes() local
655 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); truncate_nodes()
656 if (IS_ERR(page)) { truncate_nodes()
657 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); truncate_nodes()
658 return PTR_ERR(page); truncate_nodes()
661 rn = F2FS_NODE(page); truncate_nodes()
671 set_nid(page, i, 0, false); truncate_nodes()
684 set_nid(page, i, 0, false); truncate_nodes()
695 dn->node_page = page; truncate_nodes()
699 f2fs_put_page(page, 1); truncate_nodes()
705 f2fs_put_page(page, 1); truncate_nodes()
713 struct page *pages[2]; truncate_partial_nodes()
778 struct page *page; truncate_inode_blocks() local
784 page = get_node_page(sbi, inode->i_ino); truncate_inode_blocks()
785 if (IS_ERR(page)) { truncate_inode_blocks()
786 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); truncate_inode_blocks()
787 return PTR_ERR(page); truncate_inode_blocks()
790 set_new_dnode(&dn, inode, page, NULL, 0); truncate_inode_blocks()
791 unlock_page(page); truncate_inode_blocks()
793 ri = F2FS_INODE(page); truncate_inode_blocks()
846 lock_page(page); truncate_inode_blocks()
847 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { truncate_inode_blocks()
848 f2fs_put_page(page, 1); truncate_inode_blocks()
851 f2fs_wait_on_page_writeback(page, NODE); truncate_inode_blocks()
853 set_page_dirty(page); truncate_inode_blocks()
854 unlock_page(page); truncate_inode_blocks()
861 f2fs_put_page(page, 0); truncate_inode_blocks()
866 int truncate_xattr_node(struct inode *inode, struct page *page) truncate_xattr_node() argument
871 struct page *npage; truncate_xattr_node()
885 set_new_dnode(&dn, inode, page, npage, nid); truncate_xattr_node()
887 if (page) truncate_xattr_node()
923 struct page *new_inode_page(struct inode *inode) new_inode_page()
927 /* allocate inode page for new inode */ new_inode_page()
930 /* caller should f2fs_put_page(page, 1); */ new_inode_page()
934 struct page *new_node_page(struct dnode_of_data *dn, new_node_page()
935 unsigned int ofs, struct page *ipage) new_node_page()
939 struct page *page; new_node_page() local
945 page = grab_cache_page(NODE_MAPPING(sbi), dn->nid); new_node_page()
946 if (!page) new_node_page()
956 /* Reinitialize old_ni with new node page */ new_node_page()
962 f2fs_wait_on_page_writeback(page, NODE); new_node_page()
963 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); new_node_page()
964 set_cold_node(dn->inode, page); new_node_page()
965 SetPageUptodate(page); new_node_page()
966 set_page_dirty(page); new_node_page()
971 dn->node_page = page; new_node_page()
979 return page; new_node_page()
982 clear_node_page_dirty(page); new_node_page()
983 f2fs_put_page(page, 1); new_node_page()
989 * 0: f2fs_put_page(page, 0)
990 * LOCKED_PAGE: f2fs_put_page(page, 1)
993 static int read_node_page(struct page *page, int rw) read_node_page() argument
995 struct f2fs_sb_info *sbi = F2FS_P_SB(page); read_node_page()
1002 get_node_info(sbi, page->index, &ni); read_node_page()
1005 ClearPageUptodate(page); read_node_page()
1006 f2fs_put_page(page, 1); read_node_page()
1010 if (PageUptodate(page)) read_node_page()
1014 return f2fs_submit_page_bio(sbi, page, &fio); read_node_page()
1018 * Readahead a node page
1022 struct page *apage; ra_node_page()
1043 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) get_node_page()
1045 struct page *page; get_node_page() local
1048 page = grab_cache_page(NODE_MAPPING(sbi), nid); get_node_page()
1049 if (!page) get_node_page()
1052 err = read_node_page(page, READ_SYNC); get_node_page()
1056 lock_page(page); get_node_page()
1058 if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) { get_node_page()
1059 ClearPageUptodate(page); get_node_page()
1060 f2fs_put_page(page, 1); get_node_page()
1063 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { get_node_page()
1064 f2fs_put_page(page, 1); get_node_page()
1067 return page; get_node_page()
1071 * Return a locked page for the desired node page.
1074 struct page *get_node_page_ra(struct page *parent, int start) get_node_page_ra()
1078 struct page *page; get_node_page_ra() local
1087 page = grab_cache_page(NODE_MAPPING(sbi), nid); get_node_page_ra()
1088 if (!page) get_node_page_ra()
1091 err = read_node_page(page, READ_SYNC); get_node_page_ra()
1111 lock_page(page); get_node_page_ra()
1112 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { get_node_page_ra()
1113 f2fs_put_page(page, 1); get_node_page_ra()
1117 if (unlikely(!PageUptodate(page))) { get_node_page_ra()
1118 f2fs_put_page(page, 1); get_node_page_ra()
1121 return page; get_node_page_ra()
1162 struct page *page = pvec.pages[i]; sync_node_pages() local
1170 if (step == 0 && IS_DNODE(page)) sync_node_pages()
1172 if (step == 1 && (!IS_DNODE(page) || sync_node_pages()
1173 is_cold_node(page))) sync_node_pages()
1175 if (step == 2 && (!IS_DNODE(page) || sync_node_pages()
1176 !is_cold_node(page))) sync_node_pages()
1183 if (ino && ino_of_node(page) == ino) sync_node_pages()
1184 lock_page(page); sync_node_pages()
1185 else if (!trylock_page(page)) sync_node_pages()
1188 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { sync_node_pages()
1190 unlock_page(page); sync_node_pages()
1193 if (ino && ino_of_node(page) != ino) sync_node_pages()
1196 if (!PageDirty(page)) { sync_node_pages()
1201 if (!clear_page_dirty_for_io(page)) sync_node_pages()
1205 if (ino && IS_DNODE(page)) { sync_node_pages()
1206 set_fsync_mark(page, 1); sync_node_pages()
1207 if (IS_INODE(page)) { sync_node_pages()
1210 set_dentry_mark(page, 1); sync_node_pages()
1212 set_dentry_mark(page, 0); sync_node_pages()
1216 set_fsync_mark(page, 0); sync_node_pages()
1217 set_dentry_mark(page, 0); sync_node_pages()
1220 if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) sync_node_pages()
1221 unlock_page(page); sync_node_pages()
1264 struct page *page = pvec.pages[i]; wait_on_node_pages_writeback() local
1267 if (unlikely(page->index > end)) wait_on_node_pages_writeback()
1270 if (ino && ino_of_node(page) == ino) { wait_on_node_pages_writeback()
1271 f2fs_wait_on_page_writeback(page, NODE); wait_on_node_pages_writeback()
1272 if (TestClearPageError(page)) wait_on_node_pages_writeback()
1289 static int f2fs_write_node_page(struct page *page, f2fs_write_node_page() argument
1292 struct f2fs_sb_info *sbi = F2FS_P_SB(page); f2fs_write_node_page()
1300 trace_f2fs_writepage(page, NODE); f2fs_write_node_page()
1307 f2fs_wait_on_page_writeback(page, NODE); f2fs_write_node_page()
1309 /* get old block addr of this node page */ f2fs_write_node_page()
1310 nid = nid_of_node(page); f2fs_write_node_page()
1311 f2fs_bug_on(sbi, page->index != nid); f2fs_write_node_page()
1315 /* This page is already truncated */ f2fs_write_node_page()
1317 ClearPageUptodate(page); f2fs_write_node_page()
1319 unlock_page(page); f2fs_write_node_page()
1330 set_page_writeback(page); f2fs_write_node_page()
1332 write_node_page(sbi, page, nid, &fio); f2fs_write_node_page()
1333 set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page)); f2fs_write_node_page()
1336 unlock_page(page); f2fs_write_node_page()
1344 redirty_page_for_writepage(wbc, page); f2fs_write_node_page()
1374 static int f2fs_set_node_page_dirty(struct page *page) f2fs_set_node_page_dirty() argument
1376 trace_f2fs_set_page_dirty(page, NODE); f2fs_set_node_page_dirty()
1378 SetPageUptodate(page); f2fs_set_node_page_dirty()
1379 if (!PageDirty(page)) { f2fs_set_node_page_dirty()
1380 __set_page_dirty_nobuffers(page); f2fs_set_node_page_dirty()
1381 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); f2fs_set_node_page_dirty()
1382 SetPagePrivate(page); f2fs_set_node_page_dirty()
1383 f2fs_trace_pid(page); f2fs_set_node_page_dirty()
1482 struct page *nat_page, nid_t start_nid) scan_nat_page()
1521 struct page *page = get_current_nat_page(sbi, nid); build_free_nids() local
1523 scan_nat_page(sbi, page, nid); build_free_nids()
1524 f2fs_put_page(page, 1); build_free_nids()
1633 void recover_inline_xattr(struct inode *inode, struct page *page) recover_inline_xattr() argument
1637 struct page *ipage; recover_inline_xattr()
1643 ri = F2FS_INODE(page); recover_inline_xattr()
1650 src_addr = inline_xattr_addr(page); recover_inline_xattr()
1660 void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) recover_xattr_data() argument
1664 nid_t new_xnid = nid_of_node(page); recover_xattr_data()
1696 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) recover_inode_page() argument
1699 nid_t ino = ino_of_node(page); recover_inode_page()
1701 struct page *ipage; recover_inode_page()
1718 src = F2FS_INODE(page); recover_inode_page()
1761 struct page *page = get_meta_page(sbi, idx); restore_node_summary() local
1763 rn = F2FS_NODE(page); restore_node_summary()
1768 f2fs_put_page(page, 1); restore_node_summary()
1832 struct page *page = NULL; __flush_nat_entry_set() local
1838 * #2, flush nat entries to nat page. __flush_nat_entry_set()
1846 page = get_next_nat_page(sbi, start_nid); __flush_nat_entry_set()
1847 nat_blk = page_address(page); __flush_nat_entry_set()
1883 f2fs_put_page(page, 1); __flush_nat_entry_set()
H A Ddir.c91 static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, find_in_block()
93 struct page **res_page) find_in_block()
159 f2fs_hash_t namehash, struct page **res_page) find_in_level()
164 struct page *dentry_page; find_in_level()
205 * It returns the page where the entry was found (as a parameter - res_page),
210 struct qstr *child, struct page **res_page) f2fs_find_entry()
241 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) f2fs_parent_dir()
243 struct page *page; f2fs_parent_dir() local
250 page = get_lock_data_page(dir, 0); f2fs_parent_dir()
251 if (IS_ERR(page)) f2fs_parent_dir()
254 dentry_blk = kmap(page); f2fs_parent_dir()
256 *p = page; f2fs_parent_dir()
257 unlock_page(page); f2fs_parent_dir()
265 struct page *page; f2fs_inode_by_name() local
267 de = f2fs_find_entry(dir, qstr, &page); f2fs_inode_by_name()
270 f2fs_dentry_kunmap(dir, page); f2fs_inode_by_name()
271 f2fs_put_page(page, 0); f2fs_inode_by_name()
278 struct page *page, struct inode *inode) f2fs_set_link()
281 lock_page(page); f2fs_set_link()
282 f2fs_wait_on_page_writeback(page, type); f2fs_set_link()
285 f2fs_dentry_kunmap(dir, page); f2fs_set_link()
286 set_page_dirty(page); f2fs_set_link()
290 f2fs_put_page(page, 1); f2fs_set_link()
293 static void init_dent_inode(const struct qstr *name, struct page *ipage) init_dent_inode()
299 /* copy name info. to this inode page */ init_dent_inode()
308 struct page *page; update_dent_inode() local
310 page = get_node_page(F2FS_I_SB(inode), inode->i_ino); update_dent_inode()
311 if (IS_ERR(page)) update_dent_inode()
312 return PTR_ERR(page); update_dent_inode()
314 init_dent_inode(name, page); update_dent_inode()
315 f2fs_put_page(page, 1); update_dent_inode()
344 struct inode *parent, struct page *page) make_empty_dir()
346 struct page *dentry_page; make_empty_dir()
351 return make_empty_inline_dir(inode, parent, page); make_empty_dir()
353 dentry_page = get_new_data_page(inode, page, 0, true); make_empty_dir()
369 struct page *init_inode_metadata(struct inode *inode, struct inode *dir, init_inode_metadata()
370 const struct qstr *name, struct page *dpage) init_inode_metadata()
372 struct page *page; init_inode_metadata() local
376 page = new_inode_page(inode); init_inode_metadata()
377 if (IS_ERR(page)) init_inode_metadata()
378 return page; init_inode_metadata()
381 err = make_empty_dir(inode, dir, page); init_inode_metadata()
386 err = f2fs_init_acl(inode, dir, page, dpage); init_inode_metadata()
390 err = f2fs_init_security(inode, dir, name, page); init_inode_metadata()
394 page = get_node_page(F2FS_I_SB(dir), inode->i_ino); init_inode_metadata()
395 if (IS_ERR(page)) init_inode_metadata()
396 return page; init_inode_metadata()
398 set_cold_node(inode, page); init_inode_metadata()
402 init_dent_inode(name, page); init_inode_metadata()
418 return page; init_inode_metadata()
421 f2fs_put_page(page, 1); init_inode_metadata()
505 struct page *dentry_page = NULL; __f2fs_add_link()
509 struct page *page = NULL; __f2fs_add_link() local
565 page = init_inode_metadata(inode, dir, name, NULL); __f2fs_add_link()
566 if (IS_ERR(page)) { __f2fs_add_link()
567 err = PTR_ERR(page); __f2fs_add_link()
580 update_inode(inode, page); __f2fs_add_link()
581 f2fs_put_page(page, 1); __f2fs_add_link()
600 struct page *page; f2fs_do_tmpfile() local
604 page = init_inode_metadata(inode, dir, NULL, NULL); f2fs_do_tmpfile()
605 if (IS_ERR(page)) { f2fs_do_tmpfile()
606 err = PTR_ERR(page); f2fs_do_tmpfile()
610 update_inode(inode, page); f2fs_do_tmpfile()
611 f2fs_put_page(page, 1); f2fs_do_tmpfile()
619 void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page) f2fs_drop_nlink() argument
627 if (page) f2fs_drop_nlink()
628 update_inode(dir, page); f2fs_drop_nlink()
649 * It only removes the dentry from the dentry page, corresponding name
650 * entry in name page does not need to be touched during deletion.
652 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, f2fs_delete_entry() argument
661 return f2fs_delete_inline_entry(dentry, page, dir, inode); f2fs_delete_entry()
663 lock_page(page); f2fs_delete_entry()
664 f2fs_wait_on_page_writeback(page, DATA); f2fs_delete_entry()
666 dentry_blk = page_address(page); f2fs_delete_entry()
671 /* Let's check and deallocate this dentry page */ f2fs_delete_entry()
675 kunmap(page); /* kunmap - pair of f2fs_find_entry */ f2fs_delete_entry()
676 set_page_dirty(page); f2fs_delete_entry()
684 truncate_hole(dir, page->index, page->index + 1); f2fs_delete_entry()
685 clear_page_dirty_for_io(page); f2fs_delete_entry()
686 ClearPagePrivate(page); f2fs_delete_entry()
687 ClearPageUptodate(page); f2fs_delete_entry()
690 f2fs_put_page(page, 1); f2fs_delete_entry()
696 struct page *dentry_page; f2fs_empty_dir()
766 struct page *dentry_page = NULL; f2fs_readdir()
277 f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, struct page *page, struct inode *inode) f2fs_set_link() argument
343 make_empty_dir(struct inode *inode, struct inode *parent, struct page *page) make_empty_dir() argument
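f2fs_set_link() and f2fs_delete_entry() above share one discipline: lock the page, wait out any in-flight writeback, edit the block in place, then redirty it before unlocking. A generic sketch of that sequence using the generic wait_on_page_writeback() rather than f2fs's own wrapper; the edit callback is a placeholder.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Lock, wait out writeback, let the caller edit the page, then redirty. */
static void ex_edit_locked_page(struct page *page,
                                void (*edit)(struct page *page, void *arg),
                                void *arg)
{
        lock_page(page);
        wait_on_page_writeback(page);
        edit(page, arg);                /* caller-supplied in-place update */
        set_page_dirty(page);
        unlock_page(page);
}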
H A Dnode.h213 static inline void fill_node_footer(struct page *page, nid_t nid, fill_node_footer() argument
216 struct f2fs_node *rn = F2FS_NODE(page); fill_node_footer()
232 static inline void copy_node_footer(struct page *dst, struct page *src) copy_node_footer()
239 static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) fill_node_footer_blkaddr() argument
241 struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); fill_node_footer_blkaddr()
242 struct f2fs_node *rn = F2FS_NODE(page); fill_node_footer_blkaddr()
248 static inline nid_t ino_of_node(struct page *node_page) ino_of_node()
254 static inline nid_t nid_of_node(struct page *node_page) nid_of_node()
260 static inline unsigned int ofs_of_node(struct page *node_page) ofs_of_node()
267 static inline unsigned long long cpver_of_node(struct page *node_page) cpver_of_node()
273 static inline block_t next_blkaddr_of_node(struct page *node_page) next_blkaddr_of_node()
300 static inline bool IS_DNODE(struct page *node_page) IS_DNODE()
318 static inline void set_nid(struct page *p, int off, nid_t nid, bool i) set_nid()
331 static inline nid_t get_nid(struct page *p, int off, bool i) get_nid()
344 * - Mark cold data pages in page cache
368 static inline int is_cold_data(struct page *page) is_cold_data() argument
370 return PageChecked(page); is_cold_data()
373 static inline void set_cold_data(struct page *page) set_cold_data() argument
375 SetPageChecked(page); set_cold_data()
378 static inline void clear_cold_data(struct page *page) clear_cold_data() argument
380 ClearPageChecked(page); clear_cold_data()
383 static inline int is_node(struct page *page, int type) is_node() argument
385 struct f2fs_node *rn = F2FS_NODE(page); is_node()
389 #define is_cold_node(page) is_node(page, COLD_BIT_SHIFT)
390 #define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
391 #define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
393 static inline void set_cold_node(struct inode *inode, struct page *page) set_cold_node() argument
395 struct f2fs_node *rn = F2FS_NODE(page); set_cold_node()
405 static inline void set_mark(struct page *page, int mark, int type) set_mark() argument
407 struct f2fs_node *rn = F2FS_NODE(page); set_mark()
415 #define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
416 #define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
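The node.h helpers listed above carry f2fs's temperature and recovery hints: data pages are tagged cold through the PageChecked flag, while node pages keep cold/fsync/dentry bits in their on-disk footer. A minimal sketch of how a caller might set these hints, assuming a locked data page and an in-memory node page (the wrapper function name is illustrative only):

    /* Sketch: tagging f2fs pages with the inline helpers above. */
    static void example_tag_pages(struct inode *inode,
                                  struct page *data_page,
                                  struct page *node_page)
    {
            set_cold_data(data_page);          /* sets PageChecked on the data page */
            if (is_cold_data(data_page))
                    ;                          /* callers can branch on the hint */

            set_cold_node(inode, node_page);   /* records the hint in the node footer */
            set_fsync_mark(node_page, 1);      /* dnode written as part of fsync */
            set_dentry_mark(node_page, 0);     /* no dentry block to recover */
    }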
H A Drecovery.c70 static int recover_dentry(struct inode *inode, struct page *ipage) recover_dentry()
76 struct page *page; recover_dentry() local
95 de = f2fs_find_entry(dir, &name, &page); recover_dentry()
113 f2fs_delete_entry(de, page, dir, einode); recover_dentry()
131 f2fs_dentry_kunmap(dir, page); recover_dentry()
132 f2fs_put_page(page, 0); recover_dentry()
143 static void recover_inode(struct inode *inode, struct page *page) recover_inode() argument
145 struct f2fs_inode *raw = F2FS_INODE(page); recover_inode()
157 ino_of_node(page), F2FS_INODE(page)->i_name); recover_inode()
164 struct page *page = NULL; find_fsync_dnodes() local
180 page = get_meta_page(sbi, blkaddr); find_fsync_dnodes()
182 if (cp_ver != cpver_of_node(page)) find_fsync_dnodes()
185 if (!is_fsync_dnode(page)) find_fsync_dnodes()
188 entry = get_fsync_inode(head, ino_of_node(page)); find_fsync_dnodes()
190 if (IS_INODE(page) && is_dent_dnode(page)) { find_fsync_dnodes()
191 err = recover_inode_page(sbi, page); find_fsync_dnodes()
206 entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); find_fsync_dnodes()
220 if (IS_INODE(page)) { find_fsync_dnodes()
222 if (is_dent_dnode(page)) find_fsync_dnodes()
227 blkaddr = next_blkaddr_of_node(page); find_fsync_dnodes()
228 f2fs_put_page(page, 1); find_fsync_dnodes()
232 f2fs_put_page(page, 1); find_fsync_dnodes()
255 struct page *sum_page, *node_page; check_index_in_prev_nodes()
281 /* Use the locked dnode page and inode */ check_index_in_prev_nodes()
295 /* Get the node page */ check_index_in_prev_nodes()
305 /* Deallocate previous index in the node page */ check_index_in_prev_nodes()
317 * if inode page is locked, unlock temporarily, but its reference check_index_in_prev_nodes()
347 struct page *page, block_t blkaddr) do_recover_data()
357 if (IS_INODE(page)) { do_recover_data()
358 recover_inline_xattr(inode, page); do_recover_data()
359 } else if (f2fs_has_xattr_block(ofs_of_node(page))) { do_recover_data()
364 recover_xattr_data(inode, page, blkaddr); do_recover_data()
369 if (recover_inline_data(inode, page)) do_recover_data()
373 start = start_bidx_of_node(ofs_of_node(page), fi); do_recover_data()
374 end = start + ADDRS_PER_PAGE(page, fi); do_recover_data()
389 f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); do_recover_data()
390 f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page)); do_recover_data()
396 dest = datablock_addr(page, dn.ofs_in_node); do_recover_data()
407 /* Check the previous node page having this index */ do_recover_data()
414 /* write dummy data page */ do_recover_data()
427 copy_node_footer(dn.node_page, page); do_recover_data()
429 ofs_of_node(page), false); do_recover_data()
446 struct page *page = NULL; recover_data() local
462 page = get_meta_page(sbi, blkaddr); recover_data()
464 if (cp_ver != cpver_of_node(page)) { recover_data()
465 f2fs_put_page(page, 1); recover_data()
469 entry = get_fsync_inode(head, ino_of_node(page)); recover_data()
478 recover_inode(entry->inode, page); recover_data()
480 err = recover_dentry(entry->inode, page); recover_data()
482 f2fs_put_page(page, 1); recover_data()
486 err = do_recover_data(sbi, entry->inode, page, blkaddr); recover_data()
488 f2fs_put_page(page, 1); recover_data()
499 blkaddr = next_blkaddr_of_node(page); recover_data()
500 f2fs_put_page(page, 1); recover_data()
346 do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, block_t blkaddr) do_recover_data() argument
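Both loops above, find_fsync_dnodes() and recover_data(), walk the chain of node blocks written after the last checkpoint: each block is read with get_meta_page(), the walk stops once cpver_of_node() no longer matches the checkpoint version, and next_blkaddr_of_node() yields the next block in the chain. A condensed sketch of that walk, using only the helpers shown above:

    /* Sketch: the log-chain walk behind f2fs roll-forward recovery. */
    while (1) {
            struct page *page = get_meta_page(sbi, blkaddr);

            /* A version mismatch means we ran past the last fsynced block. */
            if (cp_ver != cpver_of_node(page)) {
                    f2fs_put_page(page, 1);
                    break;
            }

            /* ... recover inode / dentry / data from this node block ... */

            blkaddr = next_blkaddr_of_node(page);   /* follow the chain */
            f2fs_put_page(page, 1);
    }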
H A Ddata.c37 struct page *page = bvec->bv_page; bio_for_each_segment_all() local
40 SetPageUptodate(page); bio_for_each_segment_all()
42 ClearPageUptodate(page); bio_for_each_segment_all()
43 SetPageError(page); bio_for_each_segment_all()
45 unlock_page(page); bio_for_each_segment_all()
57 struct page *page = bvec->bv_page; bio_for_each_segment_all() local
60 set_page_dirty(page); bio_for_each_segment_all()
61 set_bit(AS_EIO, &page->mapping->flags); bio_for_each_segment_all()
64 end_page_writeback(page); bio_for_each_segment_all()
133 * Fill the locked page with data located in the block address.
134 * Return unlocked page.
136 int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, f2fs_submit_page_bio() argument
141 trace_f2fs_submit_page_bio(page, fio); f2fs_submit_page_bio()
142 f2fs_trace_ios(page, fio, 0); f2fs_submit_page_bio()
147 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { f2fs_submit_page_bio()
149 f2fs_put_page(page, 1); f2fs_submit_page_bio()
157 void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page, f2fs_submit_page_mbio() argument
184 if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < f2fs_submit_page_mbio()
191 f2fs_trace_ios(page, fio, 0); f2fs_submit_page_mbio()
194 trace_f2fs_submit_page_mbio(page, fio); f2fs_submit_page_mbio()
201 * update block addresses in the node page
207 struct page *node_page = dn->node_page; set_data_blkaddr()
908 struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) find_data_page()
912 struct page *page; find_data_page() local
928 page = find_get_page(mapping, index); find_data_page()
929 if (page && PageUptodate(page)) find_data_page()
930 return page; find_data_page()
931 f2fs_put_page(page, 0); find_data_page()
947 /* By fallocate(), there is no cached page, but with NEW_ADDR */ find_data_page()
952 page = grab_cache_page(mapping, index); find_data_page()
953 if (!page) find_data_page()
956 if (PageUptodate(page)) { find_data_page()
957 unlock_page(page); find_data_page()
958 return page; find_data_page()
962 err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio); find_data_page()
967 wait_on_page_locked(page); find_data_page()
968 if (unlikely(!PageUptodate(page))) { find_data_page()
969 f2fs_put_page(page, 0); find_data_page()
973 return page; find_data_page()
979 * whether this page exists or not.
981 struct page *get_lock_data_page(struct inode *inode, pgoff_t index) get_lock_data_page()
985 struct page *page; get_lock_data_page() local
993 page = grab_cache_page(mapping, index); get_lock_data_page()
994 if (!page) get_lock_data_page()
1005 f2fs_put_page(page, 1); get_lock_data_page()
1011 f2fs_put_page(page, 1); get_lock_data_page()
1016 if (PageUptodate(page)) get_lock_data_page()
1017 return page; get_lock_data_page()
1020 * A new dentry page is allocated but not able to be written, since its get_lock_data_page()
1021 * new inode page couldn't be allocated due to -ENOSPC. get_lock_data_page()
1026 zero_user_segment(page, 0, PAGE_CACHE_SIZE); get_lock_data_page()
1027 SetPageUptodate(page); get_lock_data_page()
1028 return page; get_lock_data_page()
1032 err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio); get_lock_data_page()
1036 lock_page(page); get_lock_data_page()
1037 if (unlikely(!PageUptodate(page))) { get_lock_data_page()
1038 f2fs_put_page(page, 1); get_lock_data_page()
1041 if (unlikely(page->mapping != mapping)) { get_lock_data_page()
1042 f2fs_put_page(page, 1); get_lock_data_page()
1045 return page; get_lock_data_page()
1049 * Caller ensures that this data page is never allocated.
1050 * A new zero-filled data page is allocated in the page cache.
1056 struct page *get_new_data_page(struct inode *inode, get_new_data_page()
1057 struct page *ipage, pgoff_t index, bool new_i_size) get_new_data_page()
1060 struct page *page; get_new_data_page() local
1069 page = grab_cache_page(mapping, index); get_new_data_page()
1070 if (!page) { get_new_data_page()
1075 if (PageUptodate(page)) get_new_data_page()
1076 return page; get_new_data_page()
1079 zero_user_segment(page, 0, PAGE_CACHE_SIZE); get_new_data_page()
1080 SetPageUptodate(page); get_new_data_page()
1087 err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio); get_new_data_page()
1091 lock_page(page); get_new_data_page()
1092 if (unlikely(!PageUptodate(page))) { get_new_data_page()
1093 f2fs_put_page(page, 1); get_new_data_page()
1097 if (unlikely(page->mapping != mapping)) { get_new_data_page()
1098 f2fs_put_page(page, 1); get_new_data_page()
1109 return page; get_new_data_page()
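get_new_data_page() returns a locked page that the caller must release with f2fs_put_page(); make_empty_dir() above uses it to obtain dentry block 0 of a freshly created directory. A hedged usage sketch (error handling trimmed; passing NULL for the inode page is assumed to be acceptable here):

    /* Sketch: allocate block 0 of a new directory as a dentry page. */
    struct page *dentry_page;

    dentry_page = get_new_data_page(dir, NULL, 0, true);
    if (IS_ERR(dentry_page))
            return PTR_ERR(dentry_page);

    /* ... fill in the "." and ".." entries ... */

    set_page_dirty(dentry_page);
    f2fs_put_page(dentry_page, 1);      /* unlock and drop the reference */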
1171 /* When reading holes, we need its node page */ __allocate_data_blocks()
1230 /* Get the page offset from the block offset(iblock) */ __get_data_block()
1241 /* When reading holes, we need its node page */ __get_data_block()
1342 static int f2fs_read_data_page(struct file *file, struct page *page) f2fs_read_data_page() argument
1344 struct inode *inode = page->mapping->host; f2fs_read_data_page()
1347 trace_f2fs_readpage(page, DATA); f2fs_read_data_page()
1351 ret = f2fs_read_inline_data(inode, page); f2fs_read_data_page()
1353 ret = mpage_readpage(page, get_data_block); f2fs_read_data_page()
1371 int do_write_data_page(struct page *page, struct f2fs_io_info *fio) do_write_data_page() argument
1373 struct inode *inode = page->mapping->host; do_write_data_page()
1378 err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE); do_write_data_page()
1384 /* This page is already truncated */ do_write_data_page()
1386 ClearPageUptodate(page); do_write_data_page()
1390 set_page_writeback(page); do_write_data_page()
1397 !is_cold_data(page) && do_write_data_page()
1399 rewrite_data_page(page, fio); do_write_data_page()
1401 trace_f2fs_do_write_data_page(page, IPU); do_write_data_page()
1403 write_data_page(page, &dn, fio); do_write_data_page()
1406 trace_f2fs_do_write_data_page(page, OPU); do_write_data_page()
1408 if (page->index == 0) do_write_data_page()
1416 static int f2fs_write_data_page(struct page *page, f2fs_write_data_page() argument
1419 struct inode *inode = page->mapping->host; f2fs_write_data_page()
1432 trace_f2fs_writepage(page, DATA); f2fs_write_data_page()
1434 if (page->index < end_index) f2fs_write_data_page()
1439 * this page does not have to be written to disk. f2fs_write_data_page()
1442 if ((page->index >= end_index + 1) || !offset) f2fs_write_data_page()
1445 zero_user_segment(page, offset, PAGE_CACHE_SIZE); f2fs_write_data_page()
1459 err = do_write_data_page(page, &fio); f2fs_write_data_page()
1465 SetPageError(page); f2fs_write_data_page()
1477 err = f2fs_write_inline_data(inode, page); f2fs_write_data_page()
1479 err = do_write_data_page(page, &fio); f2fs_write_data_page()
1485 clear_cold_data(page); f2fs_write_data_page()
1489 ClearPageUptodate(page); f2fs_write_data_page()
1490 unlock_page(page); f2fs_write_data_page()
1498 redirty_page_for_writepage(wbc, page); f2fs_write_data_page()
1502 static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, __f2fs_writepage() argument
1506 int ret = mapping->a_ops->writepage(page, wbc); __f2fs_writepage()
1569 struct page **pagep, void **fsdata) f2fs_write_begin()
1573 struct page *page, *ipage; f2fs_write_begin() local
1583 * We should check this at this moment to avoid deadlock on inode page f2fs_write_begin()
1584 * and #0 page. The locking rule for inline_data conversion should be: f2fs_write_begin()
1585 * lock_page(page #0) -> lock_page(inode_page) f2fs_write_begin()
1593 page = grab_cache_page_write_begin(mapping, index, flags); f2fs_write_begin()
1594 if (!page) { f2fs_write_begin()
1599 *pagep = page; f2fs_write_begin()
1614 read_inline_data(page, ipage); f2fs_write_begin()
1619 err = f2fs_convert_inline_page(&dn, page); f2fs_write_begin()
1630 if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) f2fs_write_begin()
1633 f2fs_wait_on_page_writeback(page, DATA); f2fs_write_begin()
1640 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); f2fs_write_begin()
1645 zero_user_segment(page, 0, PAGE_CACHE_SIZE); f2fs_write_begin()
1652 err = f2fs_submit_page_bio(sbi, page, &fio); f2fs_write_begin()
1656 lock_page(page); f2fs_write_begin()
1657 if (unlikely(!PageUptodate(page))) { f2fs_write_begin()
1658 f2fs_put_page(page, 1); f2fs_write_begin()
1662 if (unlikely(page->mapping != mapping)) { f2fs_write_begin()
1663 f2fs_put_page(page, 1); f2fs_write_begin()
1668 SetPageUptodate(page); f2fs_write_begin()
1669 clear_cold_data(page); f2fs_write_begin()
1676 f2fs_put_page(page, 1); f2fs_write_begin()
1685 struct page *page, void *fsdata) f2fs_write_end()
1687 struct inode *inode = page->mapping->host; f2fs_write_end()
1691 set_page_dirty(page); f2fs_write_end()
1699 f2fs_put_page(page, 1); f2fs_write_end()
1753 void f2fs_invalidate_page(struct page *page, unsigned int offset, f2fs_invalidate_page() argument
1756 struct inode *inode = page->mapping->host; f2fs_invalidate_page()
1763 if (PageDirty(page)) { f2fs_invalidate_page()
1771 ClearPagePrivate(page); f2fs_invalidate_page()
1774 int f2fs_release_page(struct page *page, gfp_t wait) f2fs_release_page() argument
1776 /* If this is dirty page, keep PagePrivate */ f2fs_release_page()
1777 if (PageDirty(page)) f2fs_release_page()
1780 ClearPagePrivate(page); f2fs_release_page()
1784 static int f2fs_set_data_page_dirty(struct page *page) f2fs_set_data_page_dirty() argument
1786 struct address_space *mapping = page->mapping; f2fs_set_data_page_dirty()
1789 trace_f2fs_set_page_dirty(page, DATA); f2fs_set_data_page_dirty()
1791 SetPageUptodate(page); f2fs_set_data_page_dirty()
1794 register_inmem_page(inode, page); f2fs_set_data_page_dirty()
1800 if (!PageDirty(page)) { f2fs_set_data_page_dirty()
1801 __set_page_dirty_nobuffers(page); f2fs_set_data_page_dirty()
1802 update_dirty_page(inode, page); f2fs_set_data_page_dirty()
1682 f2fs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) f2fs_write_end() argument
/linux-4.1.27/fs/ntfs/
H A Daops.h2 * aops.h - Defines for NTFS kernel address space operations and page cache
35 * ntfs_unmap_page - release a page that was mapped using ntfs_map_page()
36 * @page: the page to release
38 * Unpin, unmap and release a page that was obtained from ntfs_map_page().
40 static inline void ntfs_unmap_page(struct page *page) ntfs_unmap_page() argument
42 kunmap(page); ntfs_unmap_page()
43 page_cache_release(page); ntfs_unmap_page()
47 * ntfs_map_page - map a page into accessible memory, reading it if necessary
48 * @mapping: address space for which to obtain the page
49 * @index: index into the page cache for @mapping of the page to map
51 * Read a page from the page cache of the address space @mapping at position
54 * If the page is not in memory it is loaded from disk first using the readpage
55 * method defined in the address space operations of @mapping and the page is
56 * added to the page cache of @mapping in the process.
58 * If the page belongs to an mst protected attribute and it is marked as such
61 * the ntfs record(s) contained in the page are valid or not using one of the
 65 * If the page is in high memory it is mapped into memory directly addressable ntfs_map_page()
68 * Finally the page count is incremented, thus pinning the page into place.
70 * The above means that page_address(page) can be used on all pages obtained
71 * with ntfs_map_page() to get the kernel virtual address of the page.
73 * When finished with the page, the caller has to call ntfs_unmap_page() to
74 * unpin, unmap and release the page.
81 * The unlocked and uptodate page is returned on success or an encoded error
86 static inline struct page *ntfs_map_page(struct address_space *mapping, ntfs_map_page()
89 struct page *page = read_mapping_page(mapping, index, NULL); ntfs_map_page() local
91 if (!IS_ERR(page)) { ntfs_map_page()
92 kmap(page); ntfs_map_page()
93 if (!PageError(page)) ntfs_map_page()
94 return page; ntfs_map_page()
95 ntfs_unmap_page(page); ntfs_map_page()
98 return page; ntfs_map_page()
103 extern void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs);
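The kernel-doc above spells out the contract: ntfs_map_page() returns a pinned, kmapped, uptodate page (or an encoded error pointer), and every successful call must be balanced by ntfs_unmap_page(). A minimal caller sketch:

    /* Sketch: read and pin one page of an NTFS attribute's address space. */
    struct page *page = ntfs_map_page(mapping, index);
    u8 *kaddr;

    if (IS_ERR(page))
            return PTR_ERR(page);

    kaddr = page_address(page);         /* valid even for highmem: page is kmapped */
    /* ... read or modify the record at kaddr ... */

    ntfs_unmap_page(page);              /* kunmap + drop the page reference */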
H A Daops.c2 * aops.c - NTFS kernel address space operations and page cache handling.
52 * page has been completed and mark the page uptodate or set the error bit on
53 * the page. To determine the size of the records that need fixing up, we
62 struct page *page; ntfs_end_buffer_async_read() local
67 page = bh->b_page; ntfs_end_buffer_async_read()
68 vi = page->mapping->host; ntfs_end_buffer_async_read()
77 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + ntfs_end_buffer_async_read()
96 kaddr = kmap_atomic(page); ntfs_end_buffer_async_read()
99 flush_dcache_page(page); ntfs_end_buffer_async_read()
105 SetPageError(page); ntfs_end_buffer_async_read()
109 first = page_buffers(page); ntfs_end_buffer_async_read()
129 * If none of the buffers had errors then we can set the page uptodate, ntfs_end_buffer_async_read()
134 * rather than per page granularity. ntfs_end_buffer_async_read()
137 if (likely(page_uptodate && !PageError(page))) ntfs_end_buffer_async_read()
138 SetPageUptodate(page); ntfs_end_buffer_async_read()
149 kaddr = kmap_atomic(page); ntfs_end_buffer_async_read()
155 flush_dcache_page(page); ntfs_end_buffer_async_read()
156 if (likely(page_uptodate && !PageError(page))) ntfs_end_buffer_async_read()
157 SetPageUptodate(page); ntfs_end_buffer_async_read()
159 unlock_page(page); ntfs_end_buffer_async_read()
168 * ntfs_read_block - fill a @page of an address space with data
169 * @page: page cache page to fill with data
171 * Fill the page @page of the address space belonging to the @page->host inode.
174 * applies the mst fixups to the page before finally marking it uptodate and
184 static int ntfs_read_block(struct page *page) ntfs_read_block() argument
201 vi = page->mapping->host; ntfs_read_block()
211 if (!page_has_buffers(page)) { ntfs_read_block()
212 create_empty_buffers(page, blocksize, 0); ntfs_read_block()
213 if (unlikely(!page_has_buffers(page))) { ntfs_read_block()
214 unlock_page(page); ntfs_read_block()
218 bh = head = page_buffers(page); ntfs_read_block()
228 * on due to the runlist being incomplete and if the page is being ntfs_read_block()
232 iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); ntfs_read_block()
244 /* Loop through all the buffers in the page. */ ntfs_read_block()
322 SetPageError(page); ntfs_read_block()
335 * of the page and set the buffer uptodate. ntfs_read_block()
341 zero_user(page, i * blocksize, blocksize); ntfs_read_block()
372 if (likely(!PageError(page))) ntfs_read_block()
373 SetPageUptodate(page); ntfs_read_block()
376 unlock_page(page); ntfs_read_block()
381 * ntfs_readpage - fill a @page of a @file with data from the device
382 * @file: open file to which the page @page belongs or NULL
383 * @page: page cache page to fill with data
385 * For non-resident attributes, ntfs_readpage() fills the @page of the open
388 * associated with the page asynchronously.
390 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
398 static int ntfs_readpage(struct file *file, struct page *page) ntfs_readpage() argument
411 BUG_ON(!PageLocked(page)); ntfs_readpage()
412 vi = page->mapping->host; ntfs_readpage()
414 /* Is the page fully outside i_size? (truncate in progress) */ ntfs_readpage()
415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> ntfs_readpage()
417 zero_user(page, 0, PAGE_CACHE_SIZE); ntfs_readpage()
425 if (PageUptodate(page)) { ntfs_readpage()
426 unlock_page(page); ntfs_readpage()
449 return ntfs_read_compressed_block(page); ntfs_readpage()
455 return ntfs_read_block(page); ntfs_readpage()
460 * hence smaller than a page, so can simply zero out any pages with ntfs_readpage()
465 if (unlikely(page->index > 0)) { ntfs_readpage()
466 zero_user(page, 0, PAGE_CACHE_SIZE); ntfs_readpage()
506 addr = kmap_atomic(page); ntfs_readpage()
507 /* Copy the data to the page. */ ntfs_readpage()
511 /* Zero the remainder of the page. */ ntfs_readpage()
513 flush_dcache_page(page); ntfs_readpage()
520 SetPageUptodate(page); ntfs_readpage()
522 unlock_page(page); ntfs_readpage()
529 * ntfs_write_block - write a @page to the backing store
530 * @page: page cache page to write out
536 * For a page with buffers, map and write the dirty buffers asynchronously
537 * under page writeback. For a page without buffers, create buffers for the
538 * page, then proceed as above.
540 * If a page doesn't have buffers the page dirty state is definitive. If a page
541 * does have buffers, the page dirty state is just a hint, and the buffer dirty
543 * page is illegal. Other combinations are legal and need to be handled. In
544 * particular a dirty page containing clean buffers for example.)
550 static int ntfs_write_block(struct page *page, struct writeback_control *wbc) ntfs_write_block() argument
568 vi = page->mapping->host; ntfs_write_block()
572 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " ntfs_write_block()
573 "0x%lx.", ni->mft_no, ni->type, page->index); ntfs_write_block()
579 if (!page_has_buffers(page)) { ntfs_write_block()
580 BUG_ON(!PageUptodate(page)); ntfs_write_block()
581 create_empty_buffers(page, blocksize, ntfs_write_block()
583 if (unlikely(!page_has_buffers(page))) { ntfs_write_block()
584 ntfs_warning(vol->sb, "Error allocating page " ntfs_write_block()
585 "buffers. Redirtying page so we try " ntfs_write_block()
588 * Put the page back on mapping->dirty_pages, but leave ntfs_write_block()
591 redirty_page_for_writepage(wbc, page); ntfs_write_block()
592 unlock_page(page); ntfs_write_block()
596 bh = head = page_buffers(page); ntfs_write_block()
601 /* The first block in the page. */ ntfs_write_block()
602 block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); ntfs_write_block()
619 * then we just miss that fact, and the page stays dirty. ntfs_write_block()
626 * Loop through all the buffers in the page, mapping all the dirty ntfs_write_block()
638 * this page can be outside i_size when there is a ntfs_write_block()
644 * the page was within i_size but before we get here, ntfs_write_block()
660 * If this page is fully outside initialized size, zero ntfs_write_block()
662 * and the current page. Just use ntfs_readpage() to do ntfs_write_block()
667 // For each page do: ntfs_write_block()
669 // Again for each page do: ntfs_write_block()
671 // - Check (PageUptodate(page) && ntfs_write_block()
672 // !PageError(page)) ntfs_write_block()
675 // Again, for each page do: ntfs_write_block()
682 * The current page straddles initialized size. Zero ntfs_write_block()
685 * if the page is uptodate. ntfs_write_block()
686 * FIXME: For an uptodate page, the buffers may need to ntfs_write_block()
690 if (!PageUptodate(page)) { ntfs_write_block()
748 kaddr = kmap_atomic(page); ntfs_write_block()
795 zero_user(page, bh_offset(bh), blocksize); ntfs_write_block()
822 if (unlikely(!PageUptodate(page))) { ntfs_write_block()
832 SetPageUptodate(page); ntfs_write_block()
847 * dirty during attachment to a dirty page. ntfs_write_block()
860 "Redirtying page so we try again " ntfs_write_block()
863 * Put the page back on mapping->dirty_pages, but ntfs_write_block()
866 redirty_page_for_writepage(wbc, page); ntfs_write_block()
869 SetPageError(page); ntfs_write_block()
872 BUG_ON(PageWriteback(page)); ntfs_write_block()
873 set_page_writeback(page); /* Keeps try_to_free_buffers() away. */ ntfs_write_block()
885 unlock_page(page); ntfs_write_block()
889 end_page_writeback(page); ntfs_write_block()
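ntfs_write_block() follows the buffer_head pattern its comment describes: attach buffers to the page if it has none, then walk them via the circular b_this_page list, mapping and queuing the dirty ones. A stripped-down sketch of that walk (page assumed locked, blocksize known):

    /* Sketch: iterate the buffer_heads backing one page cache page. */
    struct buffer_head *bh, *head;

    if (!page_has_buffers(page))
            create_empty_buffers(page, blocksize, 0);
    bh = head = page_buffers(page);

    do {
            if (buffer_dirty(bh)) {
                    /* ... map the buffer and queue it for write-out ... */
            }
    } while ((bh = bh->b_this_page) != head);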
896 * ntfs_write_mst_block - write a @page to the backing store
897 * @page: page cache page to write out
905 * The page must remain locked for the duration of the write because we apply
907 * page before undoing the fixups, any other user of the page will see the
908 * page contents as corrupt.
910 * We clear the page uptodate flag for the duration of the function to ensure
919 static int ntfs_write_mst_block(struct page *page, ntfs_write_mst_block() argument
923 struct inode *vi = page->mapping->host; ntfs_write_mst_block()
937 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " ntfs_write_mst_block()
938 "0x%lx.", vi->i_ino, ni->type, page->index); ntfs_write_mst_block()
943 * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page ntfs_write_mst_block()
944 * in its page cache were to be marked dirty. However this should ntfs_write_mst_block()
960 bh = head = page_buffers(page); ntfs_write_mst_block()
968 /* The first block in the page. */ ntfs_write_mst_block()
969 rec_block = block = (sector_t)page->index << ntfs_write_mst_block()
1118 /* Map the page so we can access its contents. */ ntfs_write_mst_block()
1119 kaddr = kmap(page); ntfs_write_mst_block()
1120 /* Clear the page uptodate flag whilst the mst fixups are applied. */ ntfs_write_mst_block()
1121 BUG_ON(!PageUptodate(page)); ntfs_write_mst_block()
1122 ClearPageUptodate(page); ntfs_write_mst_block()
1136 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) ntfs_write_mst_block()
1144 * means we need to redirty the page before ntfs_write_mst_block()
1173 "page index 0x%lx, page offset 0x%x)!" ntfs_write_mst_block()
1175 ni->type, page->index, ofs); ntfs_write_mst_block()
1191 flush_dcache_page(page); ntfs_write_mst_block()
1220 "attribute type 0x%x, page index " ntfs_write_mst_block()
1221 "0x%lx, page offset 0x%lx)! Unmount " ntfs_write_mst_block()
1223 page->index, bh_offset(tbh)); ntfs_write_mst_block()
1227 * Set the buffer uptodate so the page and buffer ntfs_write_mst_block()
1252 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) ntfs_write_mst_block()
1272 flush_dcache_page(page); ntfs_write_mst_block()
1295 SetPageUptodate(page); ntfs_write_mst_block()
1296 kunmap(page); ntfs_write_mst_block()
1300 * Set page error if there is only one ntfs record in the page. ntfs_write_mst_block()
1304 SetPageError(page); ntfs_write_mst_block()
1309 "records. Redirtying the page starting at " ntfs_write_mst_block()
1310 "record 0x%lx.", page->index << ntfs_write_mst_block()
1312 redirty_page_for_writepage(wbc, page); ntfs_write_mst_block()
1313 unlock_page(page); ntfs_write_mst_block()
1318 * the page is clean. ntfs_write_mst_block()
1320 BUG_ON(PageWriteback(page)); ntfs_write_mst_block()
1321 set_page_writeback(page); ntfs_write_mst_block()
1322 unlock_page(page); ntfs_write_mst_block()
1323 end_page_writeback(page); ntfs_write_mst_block()
1331 * ntfs_writepage - write a @page to the backing store
1332 * @page: page cache page to write out
1335 * This is called from the VM when it wants to have a dirty ntfs page cache
1336 * page cleaned. The VM has already locked the page and marked it clean.
1338 * For non-resident attributes, ntfs_writepage() writes the @page by calling
1341 * buffers associated with the page asynchronously.
1343 * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
1347 * vm page dirty code path for the page the mft record is in.
1353 static int ntfs_writepage(struct page *page, struct writeback_control *wbc) ntfs_writepage() argument
1356 struct inode *vi = page->mapping->host; ntfs_writepage()
1365 BUG_ON(!PageLocked(page)); ntfs_writepage()
1367 /* Is the page fully outside i_size? (truncate in progress) */ ntfs_writepage()
1368 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> ntfs_writepage()
1371 * The page may have dirty, unmapped buffers. Make them ntfs_writepage()
1372 * freeable here, so the page does not leak. ntfs_writepage()
1374 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); ntfs_writepage()
1375 unlock_page(page); ntfs_writepage()
1390 unlock_page(page); ntfs_writepage()
1400 // return ntfs_write_compressed_block(page); ntfs_writepage()
1401 unlock_page(page); ntfs_writepage()
1408 unlock_page(page); ntfs_writepage()
1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { ntfs_writepage()
1418 /* The page straddles i_size. */ ntfs_writepage()
1420 zero_user_segment(page, ofs, PAGE_CACHE_SIZE); ntfs_writepage()
1424 return ntfs_write_mst_block(page, wbc); ntfs_writepage()
1426 return ntfs_write_block(page, wbc); ntfs_writepage()
1431 * record and hence smaller than a page, so can simply return error on ntfs_writepage()
1436 BUG_ON(page_has_buffers(page)); ntfs_writepage()
1437 BUG_ON(!PageUptodate(page)); ntfs_writepage()
1438 if (unlikely(page->index > 0)) { ntfs_writepage()
1439 ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. " ntfs_writepage()
1440 "Aborting write.", page->index); ntfs_writepage()
1441 BUG_ON(PageWriteback(page)); ntfs_writepage()
1442 set_page_writeback(page); ntfs_writepage()
1443 unlock_page(page); ntfs_writepage()
1444 end_page_writeback(page); ntfs_writepage()
1478 * PAGECACHE_TAG_DIRTY remains set even though the page is clean. ntfs_writepage()
1480 BUG_ON(PageWriteback(page)); ntfs_writepage()
1481 set_page_writeback(page); ntfs_writepage()
1482 unlock_page(page); ntfs_writepage()
1497 addr = kmap_atomic(page); ntfs_writepage()
1498 /* Copy the data from the page to the mft record. */ ntfs_writepage()
1502 /* Zero out of bounds area in the page cache page. */ ntfs_writepage()
1505 flush_dcache_page(page); ntfs_writepage()
1507 /* We are done with the page. */ ntfs_writepage()
1508 end_page_writeback(page); ntfs_writepage()
1517 "page so we try again later."); ntfs_writepage()
1519 * Put the page back on mapping->dirty_pages, but leave its ntfs_writepage()
1522 redirty_page_for_writepage(wbc, page); ntfs_writepage()
1527 SetPageError(page); ntfs_writepage()
1530 unlock_page(page); ntfs_writepage()
1700 .readpage = ntfs_readpage, /* Fill page with data. */
1702 .writepage = ntfs_writepage, /* Write dirty page to disk. */
1703 .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
1705 belonging to the page. */
1716 * @page: page containing the ntfs record to mark dirty
1717 * @ofs: byte offset within @page at which the ntfs record begins
1719 * Set the buffers and the page in which the ntfs record is located dirty.
1724 * If the page does not have buffers, we create them and set them uptodate.
1725 * The page may not be locked which is why we need to handle the buffers under
1729 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { mark_ntfs_record_dirty() argument
1730 struct address_space *mapping = page->mapping; mark_ntfs_record_dirty()
1735 BUG_ON(!PageUptodate(page)); mark_ntfs_record_dirty()
1739 if (unlikely(!page_has_buffers(page))) { mark_ntfs_record_dirty()
1741 bh = head = alloc_page_buffers(page, bh_size, 1); mark_ntfs_record_dirty()
1743 if (likely(!page_has_buffers(page))) { mark_ntfs_record_dirty()
1752 attach_page_buffers(page, head); mark_ntfs_record_dirty()
1756 bh = head = page_buffers(page); mark_ntfs_record_dirty()
1767 __set_page_dirty_nobuffers(page); mark_ntfs_record_dirty()
H A Dbitmap.c53 struct page *page; __ntfs_bitmap_set_bits_in_run() local
73 /* Get the page containing the first bit (@start_bit). */ __ntfs_bitmap_set_bits_in_run()
75 page = ntfs_map_page(mapping, index); __ntfs_bitmap_set_bits_in_run()
76 if (IS_ERR(page)) { __ntfs_bitmap_set_bits_in_run()
78 ntfs_error(vi->i_sb, "Failed to map first page (error " __ntfs_bitmap_set_bits_in_run()
79 "%li), aborting.", PTR_ERR(page)); __ntfs_bitmap_set_bits_in_run()
80 return PTR_ERR(page); __ntfs_bitmap_set_bits_in_run()
82 kaddr = page_address(page); __ntfs_bitmap_set_bits_in_run()
100 /* If we are done, unmap the page and return success. */ __ntfs_bitmap_set_bits_in_run()
108 * Depending on @value, modify all remaining whole bytes in the page up __ntfs_bitmap_set_bits_in_run()
115 /* Update @len to point to the first not-done byte in the page. */ __ntfs_bitmap_set_bits_in_run()
119 /* If we are not in the last page, deal with all subsequent pages. */ __ntfs_bitmap_set_bits_in_run()
123 /* Update @index and get the next page. */ __ntfs_bitmap_set_bits_in_run()
124 flush_dcache_page(page); __ntfs_bitmap_set_bits_in_run()
125 set_page_dirty(page); __ntfs_bitmap_set_bits_in_run()
126 ntfs_unmap_page(page); __ntfs_bitmap_set_bits_in_run()
127 page = ntfs_map_page(mapping, ++index); __ntfs_bitmap_set_bits_in_run()
128 if (IS_ERR(page)) __ntfs_bitmap_set_bits_in_run()
130 kaddr = page_address(page); __ntfs_bitmap_set_bits_in_run()
133 * page up to @cnt. __ntfs_bitmap_set_bits_in_run()
140 * The currently mapped page is the last one. If the last byte is __ntfs_bitmap_set_bits_in_run()
142 * position of the last byte inside the page. __ntfs_bitmap_set_bits_in_run()
159 /* We are done. Unmap the page and return success. */ __ntfs_bitmap_set_bits_in_run()
160 flush_dcache_page(page); __ntfs_bitmap_set_bits_in_run()
161 set_page_dirty(page); __ntfs_bitmap_set_bits_in_run()
162 ntfs_unmap_page(page); __ntfs_bitmap_set_bits_in_run()
172 return PTR_ERR(page); __ntfs_bitmap_set_bits_in_run()
180 ntfs_error(vi->i_sb, "Failed to map subsequent page (error " __ntfs_bitmap_set_bits_in_run()
181 "%li), aborting.", PTR_ERR(page)); __ntfs_bitmap_set_bits_in_run()
184 ntfs_error(vi->i_sb, "Failed to map subsequent page (error " __ntfs_bitmap_set_bits_in_run()
187 "Unmount and run chkdsk.", PTR_ERR(page), pos); __ntfs_bitmap_set_bits_in_run()
190 return PTR_ERR(page); __ntfs_bitmap_set_bits_in_run()
/linux-4.1.27/arch/arm/mm/
H A Dflush.c116 void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr, __flush_ptrace_access() argument
128 flush_pfn_alias(page_to_pfn(page), uaddr); __flush_ptrace_access()
137 flush_icache_alias(page_to_pfn(page), uaddr, len); __flush_ptrace_access()
147 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
155 __flush_ptrace_access(page, uaddr, kaddr, len, flags); flush_ptrace_access()
158 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, flush_uprobe_xol_access() argument
163 __flush_ptrace_access(page, uaddr, kaddr, len, flags); flush_uprobe_xol_access()
167 * Copy user data from/to a page which is mapped into a different
173 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
181 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
187 void __flush_dcache_page(struct address_space *mapping, struct page *page) __flush_dcache_page() argument
191 * page. This ensures that data in the physical page is mutually __flush_dcache_page()
194 if (!PageHighMem(page)) { __flush_dcache_page()
195 size_t page_size = PAGE_SIZE << compound_order(page); __flush_dcache_page()
196 __cpuc_flush_dcache_area(page_address(page), page_size); __flush_dcache_page()
200 for (i = 0; i < (1 << compound_order(page)); i++) { __flush_dcache_page()
201 void *addr = kmap_atomic(page + i); __flush_dcache_page()
206 for (i = 0; i < (1 << compound_order(page)); i++) { __flush_dcache_page()
207 void *addr = kmap_high_get(page + i); __flush_dcache_page()
210 kunmap_high(page + i); __flush_dcache_page()
217 * If this is a page cache page, and we have an aliasing VIPT cache, __flush_dcache_page()
219 * userspace colour, which is congruent with page->index. __flush_dcache_page()
222 flush_pfn_alias(page_to_pfn(page), __flush_dcache_page()
223 page->index << PAGE_CACHE_SHIFT); __flush_dcache_page()
226 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) __flush_dcache_aliases() argument
233 * There are possible user space mappings of this page: __flush_dcache_aliases()
235 * data in the current VM view associated with this page. __flush_dcache_aliases()
236 * - aliasing VIPT: we only need to find one mapping of this page. __flush_dcache_aliases()
238 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); __flush_dcache_aliases()
252 flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page)); __flush_dcache_aliases()
261 struct page *page; __sync_icache_dcache() local
271 page = pfn_to_page(pfn); __sync_icache_dcache()
273 mapping = page_mapping(page); __sync_icache_dcache()
277 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) __sync_icache_dcache()
278 __flush_dcache_page(mapping, page); __sync_icache_dcache()
287 * of this page.
296 * If the page only exists in the page cache and there are no user
304 void flush_dcache_page(struct page *page) flush_dcache_page() argument
309 * The zero page is never written to, so never has any dirty flush_dcache_page()
312 if (page == ZERO_PAGE(0)) flush_dcache_page()
315 mapping = page_mapping(page); flush_dcache_page()
318 mapping && !page_mapped(page)) flush_dcache_page()
319 clear_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
321 __flush_dcache_page(mapping, page); flush_dcache_page()
323 __flush_dcache_aliases(mapping, page); flush_dcache_page()
326 set_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
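flush_dcache_page() above is the ARM implementation of the generic cache-maintenance rule: whenever the kernel modifies a page cache page that user space may also have mapped, it must call flush_dcache_page() afterwards so both views stay coherent on aliasing caches. A generic (not ARM-specific) caller sketch:

    /* Sketch: kernel code writing into a page cache page. */
    void *kaddr = kmap_atomic(page);

    memcpy(kaddr, src, len);            /* modify the kernel mapping */
    kunmap_atomic(kaddr);

    flush_dcache_page(page);            /* make the change visible to user mappings */
    SetPageUptodate(page);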
332 * Ensure cache coherency for the kernel mapping of this page. We can
333 * assume that the page is pinned via kmap.
335 * If the page only exists in the page cache and there are no user
336 * space mappings, this is a no-op since the page was already marked
340 void flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
345 mapping = page_mapping(page); flush_kernel_dcache_page()
350 addr = page_address(page); flush_kernel_dcache_page()
352 * kmap_atomic() doesn't set the page virtual flush_kernel_dcache_page()
365 * Flush an anonymous page so that users of get_user_pages()
370 * memcpy() to/from page
371 * if written to page, flush_dcache_page()
373 void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) __flush_anon_page() argument
384 pfn = page_to_pfn(page); __flush_anon_page()
398 * in this mapping of the page. FIXME: this is overkill __flush_anon_page()
401 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); __flush_anon_page()
H A Dcopypage-v6.c30 * Copy the user page. No aliasing to deal with so we can just
33 static void v6_copy_user_highpage_nonaliasing(struct page *to, v6_copy_user_highpage_nonaliasing()
34 struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v6_copy_user_highpage_nonaliasing()
46 * Clear the user page. No aliasing to deal with so we can just
47 * attack the kernel's existing mapping of this page.
49 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) v6_clear_user_highpage_nonaliasing() argument
51 void *kaddr = kmap_atomic(page); v6_clear_user_highpage_nonaliasing()
57 * Discard data in the kernel mapping for the new page.
70 * Copy the page, taking account of the cache colour.
72 static void v6_copy_user_highpage_aliasing(struct page *to, v6_copy_user_highpage_aliasing()
73 struct page *from, unsigned long vaddr, struct vm_area_struct *vma) v6_copy_user_highpage_aliasing()
85 * Now copy the page using the same cache colour as the v6_copy_user_highpage_aliasing()
102 * Clear the user page. We need to deal with the aliasing issues,
103 * so remap the kernel page into the same cache colour as the user
104 * page.
106 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) v6_clear_user_highpage_aliasing() argument
111 discard_old_kernel_data(page_address(page)); v6_clear_user_highpage_aliasing()
114 * Now clear the page using the same cache colour as v6_clear_user_highpage_aliasing()
119 set_top_pte(to, mk_pte(page, PAGE_KERNEL)); v6_clear_user_highpage_aliasing()
H A Dhighmem.c37 void *kmap(struct page *page) kmap() argument
40 if (!PageHighMem(page)) kmap()
41 return page_address(page); kmap()
42 return kmap_high(page); kmap()
46 void kunmap(struct page *page) kunmap() argument
49 if (!PageHighMem(page)) kunmap()
51 kunmap_high(page); kunmap()
55 void *kmap_atomic(struct page *page) kmap_atomic() argument
63 if (!PageHighMem(page)) kmap_atomic()
64 return page_address(page); kmap_atomic()
75 kmap = kmap_high_get(page); kmap_atomic()
95 set_fixmap_pte(idx, mk_pte(page, kmap_prot)); kmap_atomic()
131 struct page *page = pfn_to_page(pfn); kmap_atomic_pfn() local
134 if (!PageHighMem(page)) kmap_atomic_pfn()
135 return page_address(page); kmap_atomic_pfn()
148 struct page *kmap_atomic_to_page(const void *ptr) kmap_atomic_to_page()
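The ARM kmap()/kmap_atomic() implementations above short-circuit for lowmem pages and only create a temporary mapping for highmem. Callers use the usual pairing, sketched here:

    /* Sketch: sleeping context - mapping persists until kunmap(). */
    void *va = kmap(page);
    memset(va, 0, PAGE_SIZE);
    kunmap(page);

    /* Sketch: atomic context - cheap, but no sleeping between map and unmap. */
    void *ava = kmap_atomic(page);
    memcpy(buf, ava, PAGE_SIZE);
    kunmap_atomic(ava);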
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dcl_page.c54 # define PASSERT(env, page, expr) \
57 CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
62 # define PINVRNT(env, page, exp) \
63 ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
 65 /* Disable page statistics by default due to the huge performance penalty. */
72 * Internal version of cl_page_top, it should be called if the page is
73 * known to be not freed, says with page referenced, or radix tree lock held,
74 * or page owned.
76 static struct cl_page *cl_page_top_trusted(struct cl_page *page) cl_page_top_trusted() argument
78 while (page->cp_parent != NULL) cl_page_top_trusted()
79 page = page->cp_parent; cl_page_top_trusted()
80 return page; cl_page_top_trusted()
87 * unreferenced cached object. It can be called only if concurrent page
88 * reclamation is somehow prevented, e.g., by locking page radix-tree
89 * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
90 * associated with \a page.
94 static void cl_page_get_trust(struct cl_page *page) cl_page_get_trust() argument
96 LASSERT(atomic_read(&page->cp_ref) > 0); cl_page_get_trust()
97 atomic_inc(&page->cp_ref); cl_page_get_trust()
101 * Returns a slice within a page, corresponding to the given layer in the
107 cl_page_at_trusted(const struct cl_page *page, cl_page_at_trusted() argument
112 page = cl_page_top_trusted((struct cl_page *)page); cl_page_at_trusted()
114 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { cl_page_at_trusted()
118 page = page->cp_child; cl_page_at_trusted()
119 } while (page != NULL); cl_page_at_trusted()
124 * Returns a page with given index in the given object, or NULL if no page is
125 * found. Acquires a reference on \a page.
131 struct cl_page *page; cl_page_lookup() local
135 page = radix_tree_lookup(&hdr->coh_tree, index); cl_page_lookup()
136 if (page != NULL) cl_page_lookup()
137 cl_page_get_trust(page); cl_page_lookup()
138 return page; cl_page_lookup()
152 * Return at least one page in @queue unless there is no covered page.
159 struct cl_page *page; cl_page_gang_lookup() local
180 page = pvec[i]; cl_page_gang_lookup()
183 LASSERT(page->cp_type == CPT_CACHEABLE); cl_page_gang_lookup()
184 if (page->cp_index > end) { cl_page_gang_lookup()
188 if (page->cp_state == CPS_FREEING) cl_page_gang_lookup()
191 slice = cl_page_at_trusted(page, dtype); cl_page_gang_lookup()
 193 * Pages for an lsm-less file have no sub-page underneath cl_page_gang_lookup()
196 PASSERT(env, page, slice != NULL); cl_page_gang_lookup()
198 page = slice->cpl_page; cl_page_gang_lookup()
203 * XXX not true, because @page is from object another cl_page_gang_lookup()
206 cl_page_get_trust(page); cl_page_gang_lookup()
207 lu_ref_add_atomic(&page->cp_reference, cl_page_gang_lookup()
209 pvec[j++] = page; cl_page_gang_lookup()
214 * holds a reference to a page, but has to own it before it cl_page_gang_lookup()
224 page = pvec[i]; cl_page_gang_lookup()
226 res = (*cb)(env, io, page, cbdata); cl_page_gang_lookup()
227 lu_ref_del(&page->cp_reference, cl_page_gang_lookup()
229 cl_page_put(env, page); cl_page_gang_lookup()
248 static void cl_page_free(const struct lu_env *env, struct cl_page *page) cl_page_free() argument
250 struct cl_object *obj = page->cp_obj; cl_page_free()
253 PASSERT(env, page, list_empty(&page->cp_batch)); cl_page_free()
254 PASSERT(env, page, page->cp_owner == NULL); cl_page_free()
255 PASSERT(env, page, page->cp_req == NULL); cl_page_free()
256 PASSERT(env, page, page->cp_parent == NULL); cl_page_free()
257 PASSERT(env, page, page->cp_state == CPS_FREEING); cl_page_free()
260 while (!list_empty(&page->cp_layers)) { cl_page_free()
263 slice = list_entry(page->cp_layers.next, cl_page_free()
265 list_del_init(page->cp_layers.next); cl_page_free()
269 CS_PAGESTATE_DEC(obj, page->cp_state); cl_page_free()
270 lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page); cl_page_free()
272 lu_ref_fini(&page->cp_reference); cl_page_free()
273 OBD_FREE(page, pagesize); cl_page_free()
277 * Helper function updating page state. This is the only place in the code
280 static inline void cl_page_state_set_trust(struct cl_page *page, cl_page_state_set_trust() argument
284 *(enum cl_page_state *)&page->cp_state = state; cl_page_state_set_trust()
288 struct cl_object *o, pgoff_t ind, struct page *vmpage, cl_page_alloc()
291 struct cl_page *page; cl_page_alloc() local
294 OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize, cl_page_alloc()
296 if (page != NULL) { cl_page_alloc()
298 atomic_set(&page->cp_ref, 1); cl_page_alloc()
300 atomic_inc(&page->cp_ref); cl_page_alloc()
301 page->cp_obj = o; cl_page_alloc()
303 lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page", cl_page_alloc()
304 page); cl_page_alloc()
305 page->cp_index = ind; cl_page_alloc()
306 cl_page_state_set_trust(page, CPS_CACHED); cl_page_alloc()
307 page->cp_type = type; cl_page_alloc()
308 INIT_LIST_HEAD(&page->cp_layers); cl_page_alloc()
309 INIT_LIST_HEAD(&page->cp_batch); cl_page_alloc()
310 INIT_LIST_HEAD(&page->cp_flight); cl_page_alloc()
311 mutex_init(&page->cp_mutex); cl_page_alloc()
312 lu_ref_init(&page->cp_reference); cl_page_alloc()
318 page, vmpage); cl_page_alloc()
320 cl_page_delete0(env, page, 0); cl_page_alloc()
321 cl_page_free(env, page); cl_page_alloc()
322 page = ERR_PTR(result); cl_page_alloc()
333 page = ERR_PTR(-ENOMEM); cl_page_alloc()
335 return page; cl_page_alloc()
340 * the VM page \a vmpage.
343 * cache (implemented as a per-object radix tree) is consulted. If page is
344 * found there, it is returned immediately. Otherwise new page is allocated
345 * and returned. In any case, additional reference to page is acquired.
351 pgoff_t idx, struct page *vmpage, cl_page_find0()
355 struct cl_page *page = NULL; cl_page_find0() local
382 page = cl_vmpage_page(vmpage, o); cl_page_find0()
383 PINVRNT(env, page, cl_page_find0()
384 ergo(page != NULL, cl_page_find0()
385 cl_page_vmpage(env, page) == vmpage && cl_page_find0()
387 idx) == page)); cl_page_find0()
390 if (page != NULL) { cl_page_find0()
392 return page; cl_page_find0()
396 page = cl_page_alloc(env, o, idx, vmpage, type); cl_page_find0()
397 if (IS_ERR(page)) cl_page_find0()
398 return page; cl_page_find0()
402 LASSERT(page->cp_parent == NULL); cl_page_find0()
403 page->cp_parent = parent; cl_page_find0()
404 parent->cp_child = page; cl_page_find0()
406 return page; cl_page_find0()
414 err = radix_tree_insert(&hdr->coh_tree, idx, page); cl_page_find0()
416 ghost = page; cl_page_find0()
430 page = ERR_PTR(err); cl_page_find0()
435 LASSERT(page->cp_parent == NULL); cl_page_find0()
436 page->cp_parent = parent; cl_page_find0()
437 parent->cp_child = page; cl_page_find0()
447 return page; cl_page_find0()
451 pgoff_t idx, struct page *vmpage, cl_page_find()
460 pgoff_t idx, struct page *vmpage, cl_page_find_sub()
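cl_page_find() is the lookup-or-create entry point described above: the per-object radix tree is consulted first, a new page is allocated through cl_page_alloc() on a miss, and in either case an extra reference is returned to the caller. A hedged caller sketch (the surrounding lu_env/object setup is assumed):

    /* Sketch: obtain the cl_page that backs a VM page of a Lustre object. */
    struct cl_page *page;

    page = cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
    if (IS_ERR(page))
            return PTR_ERR(page);

    /* ... own the page, queue it for I/O, etc. ... */

    cl_page_put(env, page);             /* drop the reference cl_page_find() took */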
494 * Either page is early in initialization (has neither child cl_page_invariant()
504 struct cl_page *page, enum cl_page_state state) cl_page_state_set0()
515 [CPS_OWNED] = 1, /* io finds existing cached page */ cl_page_state_set0()
550 old = page->cp_state; cl_page_state_set0()
551 PASSERT(env, page, allowed_transitions[old][state]); cl_page_state_set0()
552 CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state); cl_page_state_set0()
553 for (; page != NULL; page = page->cp_child) { cl_page_state_set0()
554 PASSERT(env, page, page->cp_state == old); cl_page_state_set0()
555 PASSERT(env, page, cl_page_state_set0()
556 equi(state == CPS_OWNED, page->cp_owner != NULL)); cl_page_state_set0()
558 CS_PAGESTATE_DEC(page->cp_obj, page->cp_state); cl_page_state_set0()
559 CS_PAGESTATE_INC(page->cp_obj, state); cl_page_state_set0()
560 cl_page_state_set_trust(page, state); cl_page_state_set0()
565 struct cl_page *page, enum cl_page_state state) cl_page_state_set()
567 cl_page_state_set0(env, page, state); cl_page_state_set()
571 * Acquires an additional reference to a page.
574 * page.
578 void cl_page_get(struct cl_page *page) cl_page_get() argument
580 cl_page_get_trust(page); cl_page_get()
585 * Releases a reference to a page.
587 * When last reference is released, page is returned to the cache, unless it
593 void cl_page_put(const struct lu_env *env, struct cl_page *page) cl_page_put() argument
595 PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent); cl_page_put()
597 CL_PAGE_HEADER(D_TRACE, env, page, "%d\n", cl_page_put()
598 atomic_read(&page->cp_ref)); cl_page_put()
600 if (atomic_dec_and_test(&page->cp_ref)) { cl_page_put()
601 LASSERT(page->cp_state == CPS_FREEING); cl_page_put()
603 LASSERT(atomic_read(&page->cp_ref) == 0); cl_page_put()
604 PASSERT(env, page, page->cp_owner == NULL); cl_page_put()
605 PASSERT(env, page, list_empty(&page->cp_batch)); cl_page_put()
610 cl_page_free(env, page); cl_page_put()
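cl_page_get()/cl_page_put() are plain reference counting, as the comments above note; the page is only handed to cl_page_free() once the final reference drops while the page sits in CPS_FREEING. A typical pin-across-use sketch:

    /* Sketch: keep a cl_page alive across blocking work. */
    cl_page_get(page);                  /* caller must already hold a reference */

    /* ... drop locks, sleep, do work that needs the page to stay around ... */

    cl_page_put(env, page);             /* may free the page if this was the last ref */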
616 * Returns a VM page associated with a given cl_page.
618 struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) cl_page_vmpage() argument
626 page = cl_page_top(page); cl_page_vmpage()
628 list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { cl_page_vmpage()
632 page = page->cp_child; cl_page_vmpage()
633 } while (page != NULL); cl_page_vmpage()
639 * Returns a cl_page associated with a VM page, and given cl_object.
641 struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) cl_vmpage_page()
644 struct cl_page *page; cl_vmpage_page() local
649 * NOTE: absence of races and liveness of data are guaranteed by page cl_vmpage_page()
655 * This loop assumes that ->private points to the top-most page. This cl_vmpage_page()
662 for (page = top; page != NULL; page = page->cp_child) { cl_vmpage_page()
663 if (cl_object_same(page->cp_obj, obj)) { cl_vmpage_page()
664 cl_page_get_trust(page); cl_vmpage_page()
668 LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE)); cl_vmpage_page()
669 return page; cl_vmpage_page()
674 * Returns the top-page for a given page.
678 struct cl_page *cl_page_top(struct cl_page *page) cl_page_top() argument
680 return cl_page_top_trusted(page); cl_page_top()
684 const struct cl_page_slice *cl_page_at(const struct cl_page *page, cl_page_at() argument
687 return cl_page_at_trusted(page, dtype); cl_page_at()
753 /* get to the bottom page. */ \
770 struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoke()
773 PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj)); cl_page_invoke()
774 return CL_PAGE_INVOKE(env, page, op, cl_page_invoke()
781 struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoid()
784 PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj)); cl_page_invoid()
785 CL_PAGE_INVOID(env, page, op, cl_page_invoid()
790 static void cl_page_owner_clear(struct cl_page *page) cl_page_owner_clear() argument
792 for (page = cl_page_top(page); page != NULL; page = page->cp_child) { cl_page_owner_clear()
793 if (page->cp_owner != NULL) { cl_page_owner_clear()
794 LASSERT(page->cp_owner->ci_owned_nr > 0); cl_page_owner_clear()
795 page->cp_owner->ci_owned_nr--; cl_page_owner_clear()
796 page->cp_owner = NULL; cl_page_owner_clear()
797 page->cp_task = NULL; cl_page_owner_clear()
802 static void cl_page_owner_set(struct cl_page *page) cl_page_owner_set() argument
804 for (page = cl_page_top(page); page != NULL; page = page->cp_child) { cl_page_owner_set()
805 LASSERT(page->cp_owner != NULL); cl_page_owner_set()
806 page->cp_owner->ci_owned_nr++; cl_page_owner_set()
834 * returns true, iff page is owned by the given io.
844 * Try to own a page by IO.
846 * Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
854 * \retval -ve failure, e.g., page was destroyed (and landed in
856 * or, page was owned by another thread, or in IO.
900 * Own a page, might be blocked.
924 * Assume page ownership.
926 * Called when page is already locked by the hosting VM.
951 * Releases page ownership without unlocking the page.
953 * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
954 * underlying VM page (as VM is supposed to do this itself).
979 * Releases page ownership.
981 * Moves page into cl_page_state::CPS_CACHED.
1001 * Called when page is to be removed from the object, e.g., as a result of
1023 * path. Doesn't check page invariant.
1039 * unexport the page firstly before freeing it so that cl_page_delete0()
1040 * the page content is considered to be invalid. cl_page_delete0()
1043 * Afterwards, if this page is found by other threads, then this cl_page_delete0()
1044 * page will be forced to reread. cl_page_delete0()
1076 * Called when a decision is made to throw page out of memory.
1078 * Notifies all layers about page destruction by calling
1081 * Moves page into cl_page_state::CPS_FREEING state (this is the only place
1084 * Eliminates all venues through which new references to the page can be
1087 * - removes page from the radix trees,
1089 * - breaks linkage from VM page to cl_page.
1091 * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
1092 * drain after some time, at which point page will be recycled.
1095 * \pre VM page is locked
1108 * Unmaps page from user virtual memory.
1111 * layer responsible for VM interaction has to unmap page from user space
1127 * Marks page up-to-date.
1130 * layer responsible for VM interaction has to mark/clear page as up-to-date
1184 * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
1185 * called top-to-bottom. Every layer either agrees to submit this page (by
1186 * returning 0), or requests to omit this page (by returning -EALREADY). Layer
1187 * handling interactions with the VM also has to inform VM that page is under
1202 * page. cl_page_prep()
1222 * that a transfer, of which this page is a part of has completed.
1261 * As page->cp_obj is pinned by a reference from page->cp_req, it is cl_page_completion()
1273 * Notify layers that transfer formation engine decided to yank this page from
1303 * Notify layers that high level io decided to place this page into a cache
1306 * The layer implementing transfer engine (osc) has to register this page in
1364 * Checks whether page is protected by any extent lock is at least required
1371 struct cl_page *page) cl_page_is_under_lock()
1375 PINVRNT(env, page, cl_page_invariant(page)); cl_page_is_under_lock()
1377 rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock), cl_page_is_under_lock()
1381 PASSERT(env, page, rc != 0); cl_page_is_under_lock()
1387 struct cl_page *page, void *cbdata) page_prune_cb()
1389 cl_page_own(env, io, page); page_prune_cb()
1390 cl_page_unmap(env, io, page); page_prune_cb()
1391 cl_page_discard(env, io, page); page_prune_cb()
1392 cl_page_disown(env, io, page); page_prune_cb()
1434 * Tells transfer engine that only part of a page is to be transmitted.
1458 "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n", cl_page_header_print()
1481 (*printer)(env, cookie, "end page@%p\n", pg); cl_page_print()
1486 * Cancel a page which is still in a transfer.
1488 int cl_page_cancel(const struct lu_env *env, struct cl_page *page) cl_page_cancel() argument
1490 return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel), cl_page_cancel()
1497 * Converts a byte offset within object \a obj into a page index.
1509 * Converts a page index into a byte offset within object \a obj.
1527 * Adds page slice to the compound page.
1530 * per-layer state to the page. New state is added at the end of
1535 void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice, cl_page_slice_add() argument
1539 list_add_tail(&slice->cpl_linkage, &page->cp_layers); cl_page_slice_add()
1542 slice->cpl_page = page; cl_page_slice_add()
503 cl_page_state_set0(const struct lu_env *env, struct cl_page *page, enum cl_page_state state) cl_page_state_set0() argument
564 cl_page_state_set(const struct lu_env *env, struct cl_page *page, enum cl_page_state state) cl_page_state_set() argument
769 cl_page_invoke(const struct lu_env *env, struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoke() argument
780 cl_page_invoid(const struct lu_env *env, struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoid() argument
1370 cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, struct cl_page *page) cl_page_is_under_lock() argument
1386 page_prune_cb(const struct lu_env *env, struct cl_io *io, struct cl_page *page, void *cbdata) page_prune_cb() argument
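A minimal sketch of the own/unmap/discard/disown sequence described above, mirroring the page_prune_cb() callback shown in this excerpt. discard_one_page() is a hypothetical helper, and the cl_* declarations from the Lustre cl_object.h header are assumed to be in scope:

/* assumes the cl_* declarations from the Lustre cl_object.h header */
static void discard_one_page(const struct lu_env *env, struct cl_io *io,
			     struct cl_page *page)
{
	cl_page_own(env, io, page);	/* take ownership (may block) */
	cl_page_unmap(env, io, page);	/* drop user-space mappings */
	cl_page_discard(env, io, page);	/* discard cached data for this page */
	cl_page_disown(env, io, page);	/* release ownership again */
}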
/linux-4.1.27/fs/ecryptfs/
H A Dmmap.c30 #include <linux/page-flags.h>
42 * Get one page from cache or lower f/s, return error otherwise.
44 * Returns locked and up-to-date page (if ok), with increased
47 struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index) ecryptfs_get_locked_page()
49 struct page *page = read_mapping_page(inode->i_mapping, index, NULL); ecryptfs_get_locked_page() local
50 if (!IS_ERR(page)) ecryptfs_get_locked_page()
51 lock_page(page); ecryptfs_get_locked_page()
52 return page; ecryptfs_get_locked_page()
57 * @page: Page that is locked before this call is made
65 static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) ecryptfs_writepage() argument
69 rc = ecryptfs_encrypt_page(page); ecryptfs_writepage()
72 "page (upper index [0x%.16lx])\n", page->index); ecryptfs_writepage()
73 ClearPageUptodate(page); ecryptfs_writepage()
76 SetPageUptodate(page); ecryptfs_writepage()
78 unlock_page(page); ecryptfs_writepage()
113 * @page: Sort of a ``virtual'' representation of the encrypted lower
122 ecryptfs_copy_up_encrypted_with_header(struct page *page, ecryptfs_copy_up_encrypted_with_header() argument
131 loff_t view_extent_num = ((((loff_t)page->index) ecryptfs_copy_up_encrypted_with_header()
141 page_virt = kmap_atomic(page); ecryptfs_copy_up_encrypted_with_header()
148 page_virt, page->mapping->host); ecryptfs_copy_up_encrypted_with_header()
155 flush_dcache_page(page); ecryptfs_copy_up_encrypted_with_header()
168 page, (lower_offset >> PAGE_CACHE_SHIFT), ecryptfs_copy_up_encrypted_with_header()
170 crypt_stat->extent_size, page->mapping->host); ecryptfs_copy_up_encrypted_with_header()
188 * @page: Page from eCryptfs inode mapping into which to stick the read data
190 * Read in a page, decrypting if necessary.
194 static int ecryptfs_readpage(struct file *file, struct page *page) ecryptfs_readpage() argument
197 &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; ecryptfs_readpage()
201 rc = ecryptfs_read_lower_page_segment(page, page->index, 0, ecryptfs_readpage()
203 page->mapping->host); ecryptfs_readpage()
206 rc = ecryptfs_copy_up_encrypted_with_header(page, ecryptfs_readpage()
219 page, page->index, 0, PAGE_CACHE_SIZE, ecryptfs_readpage()
220 page->mapping->host); ecryptfs_readpage()
222 printk(KERN_ERR "Error reading page; rc = " ecryptfs_readpage()
228 rc = ecryptfs_decrypt_page(page); ecryptfs_readpage()
230 ecryptfs_printk(KERN_ERR, "Error decrypting page; " ecryptfs_readpage()
237 ClearPageUptodate(page); ecryptfs_readpage()
239 SetPageUptodate(page); ecryptfs_readpage()
240 ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n", ecryptfs_readpage()
241 page->index); ecryptfs_readpage()
242 unlock_page(page); ecryptfs_readpage()
249 static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) fill_zeros_to_end_of_page() argument
251 struct inode *inode = page->mapping->host; fill_zeros_to_end_of_page()
254 if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) fill_zeros_to_end_of_page()
259 zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE); fill_zeros_to_end_of_page()
271 * @pagep: Pointer to return the page
281 struct page **pagep, void **fsdata) ecryptfs_write_begin()
284 struct page *page; ecryptfs_write_begin() local
288 page = grab_cache_page_write_begin(mapping, index, flags); ecryptfs_write_begin()
289 if (!page) ecryptfs_write_begin()
291 *pagep = page; ecryptfs_write_begin()
294 if (!PageUptodate(page)) { ecryptfs_write_begin()
300 page, index, 0, PAGE_CACHE_SIZE, mapping->host); ecryptfs_write_begin()
303 "lower page segment; rc = [%d]\n", ecryptfs_write_begin()
305 ClearPageUptodate(page); ecryptfs_write_begin()
308 SetPageUptodate(page); ecryptfs_write_begin()
312 page, crypt_stat); ecryptfs_write_begin()
320 ClearPageUptodate(page); ecryptfs_write_begin()
323 SetPageUptodate(page); ecryptfs_write_begin()
326 page, index, 0, PAGE_CACHE_SIZE, ecryptfs_write_begin()
330 "page; rc = [%d]\n", ecryptfs_write_begin()
332 ClearPageUptodate(page); ecryptfs_write_begin()
335 SetPageUptodate(page); ecryptfs_write_begin()
339 >= i_size_read(page->mapping->host)) { ecryptfs_write_begin()
340 zero_user(page, 0, PAGE_CACHE_SIZE); ecryptfs_write_begin()
341 SetPageUptodate(page); ecryptfs_write_begin()
343 rc = ecryptfs_decrypt_page(page); ecryptfs_write_begin()
346 "page at index [%ld]; " ecryptfs_write_begin()
348 __func__, page->index, rc); ecryptfs_write_begin()
349 ClearPageUptodate(page); ecryptfs_write_begin()
352 SetPageUptodate(page); ecryptfs_write_begin()
356 /* If creating a page or more of holes, zero them out via truncate. ecryptfs_write_begin()
359 if (prev_page_end_size > i_size_read(page->mapping->host)) { ecryptfs_write_begin()
371 /* Writing to a new page, and creating a small hole from start ecryptfs_write_begin()
372 * of page? Zero it out. */ ecryptfs_write_begin()
375 zero_user(page, 0, PAGE_CACHE_SIZE); ecryptfs_write_begin()
378 unlock_page(page); ecryptfs_write_begin()
379 page_cache_release(page); ecryptfs_write_begin()
475 * @page: The eCryptfs page
481 struct page *page, void *fsdata) ecryptfs_write_end()
492 "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); ecryptfs_write_end()
494 rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0, ecryptfs_write_end()
503 if (!PageUptodate(page)) { ecryptfs_write_end()
508 SetPageUptodate(page); ecryptfs_write_end()
511 rc = fill_zeros_to_end_of_page(page, to); ecryptfs_write_end()
514 "zeros in page with index = [0x%.16lx]\n", index); ecryptfs_write_end()
517 rc = ecryptfs_encrypt_page(page); ecryptfs_write_end()
519 ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " ecryptfs_write_end()
536 unlock_page(page); ecryptfs_write_end()
537 page_cache_release(page); ecryptfs_write_end()
478 ecryptfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) ecryptfs_write_end() argument
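A minimal sketch, assuming the 4.1-era page cache API, of the read-then-lock pattern that ecryptfs_get_locked_page() above uses. get_locked_page() is a hypothetical helper; the caller is expected to unlock_page() and page_cache_release() the page when finished:

#include <linux/err.h>
#include <linux/pagemap.h>

static struct page *get_locked_page(struct address_space *mapping, pgoff_t index)
{
	/* read_mapping_page() returns an up-to-date page or an ERR_PTR() */
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (!IS_ERR(page))
		lock_page(page);
	return page;
}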
/linux-4.1.27/kernel/power/
H A Dblock_io.c20 * @off physical offset of page.
21 * @page: page we're reading or writing.
25 * If we're reading, make sure the page is marked as dirty.
29 struct page *page, struct bio **bio_chain) submit()
39 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { submit()
40 printk(KERN_ERR "PM: Adding page to bio failed at %llu\n", submit()
46 lock_page(page); submit()
51 wait_on_page_locked(page); submit()
57 get_page(page); /* These pages are freed later */ submit()
90 struct page *page; hib_wait_on_bio_chain() local
93 page = bio->bi_io_vec[0].bv_page; hib_wait_on_bio_chain()
94 wait_on_page_locked(page); hib_wait_on_bio_chain()
95 if (!PageUptodate(page) || PageError(page)) hib_wait_on_bio_chain()
97 put_page(page); hib_wait_on_bio_chain()
28 submit(int rw, struct block_device *bdev, sector_t sector, struct page *page, struct bio **bio_chain) submit() argument
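A synchronous variant of the single-page bio pattern used by submit() above, as a minimal sketch assuming the 4.1-era block layer (bi_iter, submit_bio_wait()). write_one_page_sync() is a hypothetical helper:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int write_one_page_sync(struct block_device *bdev, sector_t sector,
			       struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int error;

	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	error = submit_bio_wait(WRITE, bio);	/* blocks until the I/O completes */
	bio_put(bio);
	return error;
}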
/linux-4.1.27/arch/mips/include/asm/
H A Dcacheflush.h21 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
24 * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
30 * - flush_data_cache_page() flushes a page from the data cache
34 * This flag is used to indicate that the page pointed to by a pte
39 #define Page_dcache_dirty(page) \
40 test_bit(PG_dcache_dirty, &(page)->flags)
41 #define SetPageDcacheDirty(page) \
42 set_bit(PG_dcache_dirty, &(page)->flags)
43 #define ClearPageDcacheDirty(page) \
44 clear_bit(PG_dcache_dirty, &(page)->flags)
52 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
53 extern void __flush_dcache_page(struct page *page);
56 static inline void flush_dcache_page(struct page *page) flush_dcache_page() argument
59 __flush_dcache_page(page); flush_dcache_page()
61 SetPageDcacheDirty(page); flush_dcache_page()
68 extern void __flush_anon_page(struct page *, unsigned long); flush_anon_page()
70 struct page *page, unsigned long vmaddr) flush_anon_page()
72 if (cpu_has_dc_aliases && PageAnon(page)) flush_anon_page()
73 __flush_anon_page(page, vmaddr); flush_anon_page()
77 struct page *page) flush_icache_page()
101 struct page *page, unsigned long vaddr, void *dst, const void *src,
105 struct page *page, unsigned long vaddr, void *dst, const void *src,
116 extern void *kmap_coherent(struct page *page, unsigned long addr);
118 extern void *kmap_noncoherent(struct page *page, unsigned long addr);
126 static inline void flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
128 BUG_ON(cpu_has_dc_aliases && PageHighMem(page)); flush_kernel_dcache_page()
69 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
76 flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
H A Dhighmem.h44 extern void * kmap_high(struct page *page);
45 extern void kunmap_high(struct page *page);
47 extern void *kmap(struct page *page);
48 extern void kunmap(struct page *page);
49 extern void *kmap_atomic(struct page *page);
52 extern struct page *kmap_atomic_to_page(void *ptr);
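A minimal sketch of how the kmap_atomic()/kunmap_atomic() pair declared above is typically used. fill_page() is a hypothetical helper; the flush_dcache_page() call is what keeps aliasing D-caches (see the MIPS cacheflush.h excerpt above) coherent with later user mappings:

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>

static void fill_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* no sleeping while mapped */

	memcpy(vaddr, src, min_t(size_t, len, PAGE_SIZE));
	kunmap_atomic(vaddr);
	flush_dcache_page(page);		/* for D-cache-aliasing CPUs */
}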
/linux-4.1.27/arch/frv/mm/
H A Dhighmem.c14 void *kmap(struct page *page) kmap() argument
17 if (!PageHighMem(page)) kmap()
18 return page_address(page); kmap()
19 return kmap_high(page); kmap()
24 void kunmap(struct page *page) kunmap() argument
28 if (!PageHighMem(page)) kunmap()
30 kunmap_high(page); kunmap()
35 struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
40 void *kmap_atomic(struct page *page) kmap_atomic() argument
47 paddr = page_to_phys(page); kmap_atomic()
H A Dcache-page.c1 /* cache-page.c: whole-page cache wrangling functions for MMU linux
19 * DCF takes a virtual address and the page may not currently have one
20 * - temporarily hijack a kmap_atomic() slot and attach the page to it
22 void flush_dcache_page(struct page *page) flush_dcache_page() argument
29 vaddr = kmap_atomic_primary(page); flush_dcache_page()
46 * ICI takes a virtual address and the page may not currently have one
47 * - so we temporarily attach the page to a bit of virtual space so that it can be flushed
49 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, flush_icache_user_range() argument
57 vaddr = kmap_atomic_primary(page); flush_icache_user_range()
H A Dpgalloc.c1 /* pgalloc.c: page directory & page table allocation
18 #include <asm/page.h>
33 struct page *page; pte_alloc_one() local
36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); pte_alloc_one()
38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); pte_alloc_one()
40 if (!page) pte_alloc_one()
43 clear_highpage(page); pte_alloc_one()
44 if (!pgtable_page_ctor(page)) { pte_alloc_one()
45 __free_page(page); pte_alloc_one()
48 flush_dcache_page(page); pte_alloc_one()
49 return page; pte_alloc_one()
87 struct page *pgd_list;
91 struct page *page = virt_to_page(pgd); pgd_list_add() local
92 page->index = (unsigned long) pgd_list; pgd_list_add()
94 set_page_private(pgd_list, (unsigned long) &page->index); pgd_list_add()
95 pgd_list = page; pgd_list_add()
96 set_page_private(page, (unsigned long)&pgd_list); pgd_list_add()
101 struct page *next, **pprev, *page = virt_to_page(pgd); pgd_list_del() local
102 next = (struct page *) page->index; pgd_list_del()
103 pprev = (struct page **) page_private(page); pgd_list_del()
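A minimal sketch of the allocate, construct, free-on-failure pattern that pte_alloc_one() above follows. alloc_pte_page() is a hypothetical helper and the GFP flags are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_pte_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {		/* split-ptlock / accounting setup */
		__free_page(page);
		return NULL;
	}
	return page;
}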
/linux-4.1.27/sound/pci/emu10k1/
H A Dmemory.c5 * EMU10K1 memory page allocation (PTB area)
33 /* page arguments of these two macros are Emu page (4096 bytes), not like
36 #define __set_ptb_entry(emu,page,addr) \
37 (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
42 /* get aligned page from offset address */
44 /* get offset address from aligned page */
45 #define aligned_page_offset(page) ((page) << PAGE_SHIFT)
48 /* page size == EMUPAGESIZE */
49 /* fill PTB entry(ies) corresponding to page with addr */
50 #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr)
51 /* fill PTB entry(ies) corresponding to page with silence pointer */
52 #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr)
55 static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) set_ptb_entry() argument
58 page *= UNIT_PAGES; set_ptb_entry()
59 for (i = 0; i < UNIT_PAGES; i++, page++) { set_ptb_entry()
60 __set_ptb_entry(emu, page, addr); set_ptb_entry()
64 static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page) set_silent_ptb() argument
67 page *= UNIT_PAGES; set_silent_ptb()
68 for (i = 0; i < UNIT_PAGES; i++, page++) set_silent_ptb()
70 __set_ptb_entry(emu, page, emu->silent_page.addr); set_silent_ptb()
99 * if an empty region is found, return the page and store the next mapped block
105 int page = 0, found_page = -ENOMEM; search_empty_map_area() local
115 size = blk->mapped_page - page; search_empty_map_area()
118 return page; search_empty_map_area()
124 found_page = page; search_empty_map_area()
126 page = blk->mapped_page + blk->pages; search_empty_map_area()
128 size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page; search_empty_map_area()
131 return page; search_empty_map_area()
144 int page, pg; map_memblk() local
147 page = search_empty_map_area(emu, blk->pages, &next); map_memblk()
148 if (page < 0) /* not found */ map_memblk()
149 return page; map_memblk()
154 blk->mapped_page = page; map_memblk()
157 set_ptb_entry(emu, page, emu->page_addr_table[pg]); map_memblk()
158 page++; map_memblk()
203 * unlike synth_alloc the memory block is aligned to the page start
210 int page, psize; search_empty() local
213 page = 0; search_empty()
216 if (page + psize <= blk->first_page) search_empty()
218 page = blk->last_page + 1; search_empty()
220 if (page + psize > emu->max_cache_pages) search_empty()
228 blk->mem.offset = aligned_page_offset(page); /* set aligned offset */ search_empty()
246 dev_err(emu->card->dev, "page is not aligned\n"); is_valid_page()
275 /* not enough pages - try to unmap some blocks */ snd_emu10k1_memblk_map()
298 * page allocation for DMA
306 int page, err, idx; snd_emu10k1_alloc_pages() local
329 for (page = blk->first_page; page <= blk->last_page; page++, idx++) { snd_emu10k1_alloc_pages()
338 "emu: failure page = %d\n", idx); snd_emu10k1_alloc_pages()
342 emu->page_addr_table[page] = addr; snd_emu10k1_alloc_pages()
343 emu->page_ptr_table[page] = NULL; snd_emu10k1_alloc_pages()
360 * release DMA buffer from page table
437 first_page++; /* first page was already allocated */ get_single_page_range()
443 last_page--; /* last page was already allocated */ get_single_page_range()
453 int page; __synth_free_pages() local
455 for (page = first_page; page <= last_page; page++) { __synth_free_pages()
456 free_page((unsigned long)emu->page_ptr_table[page]); __synth_free_pages()
457 emu->page_addr_table[page] = 0; __synth_free_pages()
458 emu->page_ptr_table[page] = NULL; __synth_free_pages()
467 int page, first_page, last_page; synth_alloc_pages() local
472 for (page = first_page; page <= last_page; page++) { synth_alloc_pages()
474 struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 | synth_alloc_pages()
485 __synth_free_pages(emu, first_page, page - 1); synth_alloc_pages()
488 emu->page_addr_table[page] = page_to_phys(p); synth_alloc_pages()
489 emu->page_ptr_table[page] = page_address(p); synth_alloc_pages()
507 static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset) offset_ptr() argument
510 if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages)) offset_ptr()
512 ptr = emu->page_ptr_table[page]; offset_ptr()
515 "access to NULL ptr: page = %d\n", page); offset_ptr()
528 int page, nextofs, end_offset, temp, temp1; snd_emu10k1_synth_bzero() local
534 page = get_aligned_page(offset); snd_emu10k1_synth_bzero()
536 nextofs = aligned_page_offset(page + 1); snd_emu10k1_synth_bzero()
541 ptr = offset_ptr(emu, page + p->first_page, offset); snd_emu10k1_synth_bzero()
545 page++; snd_emu10k1_synth_bzero()
558 int page, nextofs, end_offset, temp, temp1; snd_emu10k1_synth_copy_from_user() local
564 page = get_aligned_page(offset); snd_emu10k1_synth_copy_from_user()
566 nextofs = aligned_page_offset(page + 1); snd_emu10k1_synth_copy_from_user()
571 ptr = offset_ptr(emu, page + p->first_page, offset); snd_emu10k1_synth_copy_from_user()
576 page++; snd_emu10k1_synth_copy_from_user()
/linux-4.1.27/arch/metag/lib/
H A Dclear_page.S3 #include <asm/page.h>
8 !! D1Ar1 - page
/linux-4.1.27/arch/m32r/mm/
H A DMakefile6 obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o
8 obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
/linux-4.1.27/arch/sh/include/asm/
H A Dcacheflush.h14 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
17 * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
19 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
46 extern void flush_dcache_page(struct page *page); cache_noop()
49 struct page *page); cache_noop()
58 extern void __flush_anon_page(struct page *page, unsigned long);
61 struct page *page, unsigned long vmaddr) flush_anon_page()
63 if (boot_cpu_data.dcache.n_aliases && PageAnon(page)) flush_anon_page()
64 __flush_anon_page(page, vmaddr); flush_anon_page()
76 static inline void flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
78 flush_dcache_page(page); flush_kernel_dcache_page()
82 struct page *page, unsigned long vaddr, void *dst, const void *src,
86 struct page *page, unsigned long vaddr, void *dst, const void *src,
96 void *kmap_coherent(struct page *page, unsigned long addr);
60 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
H A Dtlbflush.h9 * - flush_tlb_page(vma, vmaddr) flushes one page
19 unsigned long page);
22 extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
32 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
34 extern void flush_tlb_one(unsigned long asid, unsigned long page);
40 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
41 #define flush_tlb_one(asid, page) local_flush_tlb_one(asid, page)
/linux-4.1.27/fs/proc/
H A Dpage.c12 #include <linux/kernel-page-flags.h>
19 /* /proc/kpagecount - an array exposing page counts
22 * physical page count.
28 struct page *ppage; kpagecount_read()
70 /* /proc/kpageflags - an array exposing page flags
73 * physical page flags.
81 u64 stable_page_flags(struct page *page) stable_page_flags() argument
88 * it differentiates a memory hole from a page with no flags stable_page_flags()
90 if (!page) stable_page_flags()
93 k = page->flags; stable_page_flags()
99 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the stable_page_flags()
102 if (!PageSlab(page) && page_mapped(page)) stable_page_flags()
104 if (PageAnon(page)) stable_page_flags()
106 if (PageKsm(page)) stable_page_flags()
111 * they together define a compound page's start/end pos and order stable_page_flags()
113 if (PageHead(page)) stable_page_flags()
115 if (PageTail(page)) stable_page_flags()
117 if (PageHuge(page)) stable_page_flags()
123 * to make sure a given page is a thp, not a non-huge compound page. stable_page_flags()
125 else if (PageTransCompound(page)) { stable_page_flags()
126 struct page *head = compound_head(page); stable_page_flags()
134 } else if (is_zero_pfn(page_to_pfn(page))) stable_page_flags()
139 * Caveats on high order pages: page->_count will only be set stable_page_flags()
140 * -1 on the head page; SLUB/SLQB do the same for PG_slab; stable_page_flags()
143 if (PageBuddy(page)) stable_page_flags()
146 if (PageBalloon(page)) stable_page_flags()
191 struct page *ppage; kpageflags_read()
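The kpageflags_read() code above exports one 64-bit flags word per page frame number. A small userspace sketch (an assumed example, not part of the tree) that reads the entry for a given PFN:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t flags;
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/kpageflags");
		return 1;
	}
	/* one 8-byte entry per PFN */
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
		perror("pread");
		return 1;
	}
	printf("pfn %lu: flags %#llx\n", pfn, (unsigned long long)flags);
	close(fd);
	return 0;
}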
/linux-4.1.27/arch/parisc/include/asm/
H A Dcacheflush.h41 static inline void flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
43 flush_kernel_dcache_page_addr(page_address(page)); flush_kernel_dcache_page()
63 struct page *page = vmalloc_to_page(cursor); invalidate_kernel_vmap_range() local
65 if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) invalidate_kernel_vmap_range()
66 flush_kernel_dcache_page(page); invalidate_kernel_vmap_range()
75 extern void flush_dcache_page(struct page *page);
82 #define flush_icache_page(vma,page) do { \
83 flush_kernel_dcache_page(page); \
84 flush_kernel_icache_page(page_address(page)); \
92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
99 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
114 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
116 if (PageAnon(page)) { flush_anon_page()
119 flush_dcache_page_asm(page_to_phys(page), vmaddr); flush_anon_page()
132 static inline void *kmap(struct page *page) kmap() argument
135 return page_address(page); kmap()
138 static inline void kunmap(struct page *page) kunmap() argument
140 flush_kernel_dcache_page_addr(page_address(page)); kunmap()
143 static inline void *kmap_atomic(struct page *page) kmap_atomic() argument
146 return page_address(page); kmap_atomic()
155 #define kmap_atomic_prot(page, prot) kmap_atomic(page)
/linux-4.1.27/arch/x86/mm/
H A Dhighmem_32.c6 void *kmap(struct page *page) kmap() argument
9 if (!PageHighMem(page)) kmap()
10 return page_address(page); kmap()
11 return kmap_high(page); kmap()
15 void kunmap(struct page *page) kunmap() argument
19 if (!PageHighMem(page)) kunmap()
21 kunmap_high(page); kunmap()
33 void *kmap_atomic_prot(struct page *page, pgprot_t prot) kmap_atomic_prot() argument
41 if (!PageHighMem(page)) kmap_atomic_prot()
42 return page_address(page); kmap_atomic_prot()
48 set_pte(kmap_pte-idx, mk_pte(page, prot)); kmap_atomic_prot()
55 void *kmap_atomic(struct page *page) kmap_atomic() argument
57 return kmap_atomic_prot(page, kmap_prot); kmap_atomic()
63 * have a struct page associated with it.
88 * is a bad idea also, in case the page changes cacheability __kunmap_atomic()
89 * attributes or becomes a protected page in a hypervisor. __kunmap_atomic()
106 struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
/linux-4.1.27/fs/ext4/
H A Dreadpage.c20 * - encountering a page which has buffers
21 * - encountering a page which has a non-hole after a hole
22 * - encountering a page with non-contiguous blocks
25 * It does handle a page which has holes at the end - that is a common case:
50 * Call ext4_decrypt on every single page, reusing the encryption
63 struct page *page = bv->bv_page; bio_for_each_segment_all() local
65 int ret = ext4_decrypt(ctx, page); bio_for_each_segment_all()
68 SetPageError(page); bio_for_each_segment_all()
70 SetPageUptodate(page); bio_for_each_segment_all()
71 unlock_page(page); bio_for_each_segment_all()
93 * If a page does not map to a contiguous run of blocks then it simply falls
96 * Why is this? If a page's completion depends on a number of different BIOs
98 * status of that page is hard. See end_buffer_async_read() for the details.
119 struct page *page = bv->bv_page; bio_for_each_segment_all() local
122 SetPageUptodate(page); bio_for_each_segment_all()
124 ClearPageUptodate(page); bio_for_each_segment_all()
125 SetPageError(page); bio_for_each_segment_all()
127 unlock_page(page); bio_for_each_segment_all()
134 struct list_head *pages, struct page *page, ext4_mpage_readpages()
164 prefetchw(&page->flags); ext4_mpage_readpages()
166 page = list_entry(pages->prev, struct page, lru); ext4_mpage_readpages()
167 list_del(&page->lru); ext4_mpage_readpages()
168 if (add_to_page_cache_lru(page, mapping, ext4_mpage_readpages()
169 page->index, GFP_KERNEL)) ext4_mpage_readpages()
173 if (page_has_buffers(page)) ext4_mpage_readpages()
176 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); ext4_mpage_readpages()
209 * done with this page. ext4_mpage_readpages()
218 SetPageError(page); ext4_mpage_readpages()
219 zero_user_segment(page, 0, ext4_mpage_readpages()
221 unlock_page(page); ext4_mpage_readpages()
252 zero_user_segment(page, first_hole << blkbits, ext4_mpage_readpages()
255 SetPageUptodate(page); ext4_mpage_readpages()
256 unlock_page(page); ext4_mpage_readpages()
260 SetPageMappedToDisk(page); ext4_mpage_readpages()
263 !PageUptodate(page) && cleancache_get_page(page) == 0) { ext4_mpage_readpages()
264 SetPageUptodate(page); ext4_mpage_readpages()
269 * This page will go to BIO. Do we need to send this ext4_mpage_readpages()
300 if (bio_add_page(bio, page, length, 0) < length) ext4_mpage_readpages()
316 if (!PageUptodate(page)) ext4_mpage_readpages()
317 block_read_full_page(page, ext4_get_block); ext4_mpage_readpages()
319 unlock_page(page); ext4_mpage_readpages()
322 page_cache_release(page); ext4_mpage_readpages()
133 ext4_mpage_readpages(struct address_space *mapping, struct list_head *pages, struct page *page, unsigned nr_pages) ext4_mpage_readpages() argument
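For contrast with the encrypted path above, a ->readpage() for a plain block-mapped file can delegate to the generic mpage helper. This is a hypothetical sketch; my_readpage() and my_get_block() are placeholders, and the 1:1 block mapping is purely for illustration:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mpage.h>

static int my_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* pretend block i lives at block i */
	return 0;
}

static int my_readpage(struct file *file, struct page *page)
{
	/* builds and submits bios covering the page, unlocking it on completion */
	return mpage_readpage(page, my_get_block);
}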
/linux-4.1.27/fs/ext2/
H A Ddir.c67 static inline void ext2_put_page(struct page *page) ext2_put_page() argument
69 kunmap(page); ext2_put_page()
70 page_cache_release(page); ext2_put_page()
79 * Return the offset into page `page_nr' of the last valid
80 * byte in that page, plus one.
93 static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) ext2_commit_chunk() argument
95 struct address_space *mapping = page->mapping; ext2_commit_chunk()
100 block_write_end(NULL, mapping, pos, len, len, page, NULL); ext2_commit_chunk()
108 err = write_one_page(page, 1); ext2_commit_chunk()
112 unlock_page(page); ext2_commit_chunk()
118 static void ext2_check_page(struct page *page, int quiet) ext2_check_page() argument
120 struct inode *dir = page->mapping->host; ext2_check_page()
123 char *kaddr = page_address(page); ext2_check_page()
130 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { ext2_check_page()
155 SetPageChecked(page); ext2_check_page()
184 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, ext2_check_page()
192 "entry in directory #%lu spans the page boundary" ext2_check_page()
194 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, ext2_check_page()
198 SetPageChecked(page); ext2_check_page()
199 SetPageError(page); ext2_check_page()
202 static struct page * ext2_get_page(struct inode *dir, unsigned long n, ext2_get_page()
206 struct page *page = read_mapping_page(mapping, n, NULL); ext2_get_page() local
207 if (!IS_ERR(page)) { ext2_get_page()
208 kmap(page); ext2_get_page()
209 if (!PageChecked(page)) ext2_get_page()
210 ext2_check_page(page, quiet); ext2_get_page()
211 if (PageError(page)) ext2_get_page()
214 return page; ext2_get_page()
217 ext2_put_page(page); ext2_get_page()
237 * p is at least 6 bytes before the end of page
311 struct page *page = ext2_get_page(inode, n, 0); ext2_readdir() local
313 if (IS_ERR(page)) { ext2_readdir()
315 "bad page in #%lu", ext2_readdir()
318 return PTR_ERR(page); ext2_readdir()
320 kaddr = page_address(page); ext2_readdir()
335 ext2_put_page(page); ext2_readdir()
347 ext2_put_page(page); ext2_readdir()
353 ext2_put_page(page); ext2_readdir()
362 * returns the page in which the entry was found (as a parameter - res_page),
367 struct qstr *child, struct page ** res_page) ext2_find_entry()
374 struct page *page = NULL; ext2_find_entry() local
391 page = ext2_get_page(dir, n, dir_has_error); ext2_find_entry()
392 if (!IS_ERR(page)) { ext2_find_entry()
393 kaddr = page_address(page); ext2_find_entry()
400 ext2_put_page(page); ext2_find_entry()
407 ext2_put_page(page); ext2_find_entry()
413 /* next page is past the blocks we've got */ ext2_find_entry()
426 *res_page = page; ext2_find_entry()
431 struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p) ext2_dotdot()
433 struct page *page = ext2_get_page(dir, 0, 0); ext2_dotdot() local
436 if (!IS_ERR(page)) { ext2_dotdot()
437 de = ext2_next_entry((ext2_dirent *) page_address(page)); ext2_dotdot()
438 *p = page; ext2_dotdot()
447 struct page *page; ext2_inode_by_name() local
449 de = ext2_find_entry (dir, child, &page); ext2_inode_by_name()
452 ext2_put_page(page); ext2_inode_by_name()
457 static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len) ext2_prepare_chunk() argument
459 return __block_write_begin(page, pos, len, ext2_get_block); ext2_prepare_chunk()
462 /* Releases the page */ ext2_set_link()
464 struct page *page, struct inode *inode, int update_times) ext2_set_link()
466 loff_t pos = page_offset(page) + ext2_set_link()
467 (char *) de - (char *) page_address(page); ext2_set_link()
471 lock_page(page); ext2_set_link()
472 err = ext2_prepare_chunk(page, pos, len); ext2_set_link()
476 err = ext2_commit_chunk(page, pos, len); ext2_set_link()
477 ext2_put_page(page); ext2_set_link()
495 struct page *page = NULL; ext2_add_link() local
505 * This code plays outside i_size, so it locks the page ext2_add_link()
511 page = ext2_get_page(dir, n, 0); ext2_add_link()
512 err = PTR_ERR(page); ext2_add_link()
513 if (IS_ERR(page)) ext2_add_link()
515 lock_page(page); ext2_add_link()
516 kaddr = page_address(page); ext2_add_link()
546 unlock_page(page); ext2_add_link()
547 ext2_put_page(page); ext2_add_link()
553 pos = page_offset(page) + ext2_add_link()
554 (char*)de - (char*)page_address(page); ext2_add_link()
555 err = ext2_prepare_chunk(page, pos, rec_len); ext2_add_link()
568 err = ext2_commit_chunk(page, pos, rec_len); ext2_add_link()
574 ext2_put_page(page); ext2_add_link()
578 unlock_page(page); ext2_add_link()
584 * previous entry. Page is up-to-date. Releases the page.
586 int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page ) ext2_delete_entry()
588 struct inode *inode = page->mapping->host; ext2_delete_entry()
589 char *kaddr = page_address(page); ext2_delete_entry()
609 from = (char*)pde - (char*)page_address(page); ext2_delete_entry()
610 pos = page_offset(page) + from; ext2_delete_entry()
611 lock_page(page); ext2_delete_entry()
612 err = ext2_prepare_chunk(page, pos, to - from); ext2_delete_entry()
617 err = ext2_commit_chunk(page, pos, to - from); ext2_delete_entry()
622 ext2_put_page(page); ext2_delete_entry()
631 struct page *page = grab_cache_page(inode->i_mapping, 0); ext2_make_empty() local
637 if (!page) ext2_make_empty()
640 err = ext2_prepare_chunk(page, 0, chunk_size); ext2_make_empty()
642 unlock_page(page); ext2_make_empty()
645 kaddr = kmap_atomic(page); ext2_make_empty()
661 err = ext2_commit_chunk(page, 0, chunk_size); ext2_make_empty()
663 page_cache_release(page); ext2_make_empty()
672 struct page *page = NULL; ext2_empty_dir() local
679 page = ext2_get_page(inode, i, dir_has_error); ext2_empty_dir()
681 if (IS_ERR(page)) { ext2_empty_dir()
686 kaddr = page_address(page); ext2_empty_dir()
712 ext2_put_page(page); ext2_empty_dir()
717 ext2_put_page(page); ext2_empty_dir()
463 ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, struct page *page, struct inode *inode, int update_times) ext2_set_link() argument
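A sketch of the page-at-a-time directory walk that the code above implements. scan_dir_pages() is a hypothetical helper that only shows the reference discipline and assumes the static helpers in fs/ext2/dir.c (ext2_get_page(), ext2_put_page()) are in scope:

static int scan_dir_pages(struct inode *dir)
{
	unsigned long n;
	unsigned long npages = (dir->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	for (n = 0; n < npages; n++) {
		struct page *page = ext2_get_page(dir, n, 0);

		if (IS_ERR(page))
			return PTR_ERR(page);
		/* page_address(page) points at the kmap()ed directory data */
		/* ... walk the ext2_dir_entry_2 records here ... */
		ext2_put_page(page);		/* kunmap + page_cache_release */
	}
	return 0;
}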
/linux-4.1.27/fs/hfsplus/
H A Dbitmap.c22 struct page *page; hfsplus_block_allocate() local
36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); hfsplus_block_allocate()
37 if (IS_ERR(page)) { hfsplus_block_allocate()
41 pptr = kmap(page); hfsplus_block_allocate()
76 kunmap(page); hfsplus_block_allocate()
80 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, hfsplus_block_allocate()
82 if (IS_ERR(page)) { hfsplus_block_allocate()
86 curr = pptr = kmap(page); hfsplus_block_allocate()
128 set_page_dirty(page); hfsplus_block_allocate()
129 kunmap(page); hfsplus_block_allocate()
131 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, hfsplus_block_allocate()
133 if (IS_ERR(page)) { hfsplus_block_allocate()
137 pptr = kmap(page); hfsplus_block_allocate()
152 set_page_dirty(page); hfsplus_block_allocate()
153 kunmap(page); hfsplus_block_allocate()
166 struct page *page; hfsplus_block_free() local
184 page = read_mapping_page(mapping, pnr, NULL); hfsplus_block_free()
185 if (IS_ERR(page)) hfsplus_block_free()
187 pptr = kmap(page); hfsplus_block_free()
216 set_page_dirty(page); hfsplus_block_free()
217 kunmap(page); hfsplus_block_free()
218 page = read_mapping_page(mapping, ++pnr, NULL); hfsplus_block_free()
219 if (IS_ERR(page)) hfsplus_block_free()
221 pptr = kmap(page); hfsplus_block_free()
232 set_page_dirty(page); hfsplus_block_free()
233 kunmap(page); hfsplus_block_free()
241 pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page)); hfsplus_block_free()
/linux-4.1.27/drivers/xen/
H A Dballoon.c58 #include <asm/page.h>
71 #include <xen/page.h>
93 /* We increase/decrease in batches which fit in a page */
109 static void scrub_page(struct page *page) scrub_page() argument
112 clear_highpage(page); scrub_page()
116 /* balloon_append: add the given page to the balloon. */ __balloon_append()
117 static void __balloon_append(struct page *page) __balloon_append() argument
120 if (PageHighMem(page)) { __balloon_append()
121 list_add_tail(&page->lru, &ballooned_pages); __balloon_append()
124 list_add(&page->lru, &ballooned_pages); __balloon_append()
129 static void balloon_append(struct page *page) balloon_append() argument
131 __balloon_append(page); balloon_append()
132 adjust_managed_page_count(page, -1); balloon_append()
135 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ balloon_retrieve()
136 static struct page *balloon_retrieve(bool prefer_highmem) balloon_retrieve()
138 struct page *page; balloon_retrieve() local
144 page = list_entry(ballooned_pages.prev, struct page, lru); balloon_retrieve()
146 page = list_entry(ballooned_pages.next, struct page, lru); balloon_retrieve()
147 list_del(&page->lru); balloon_retrieve()
149 if (PageHighMem(page)) balloon_retrieve()
154 adjust_managed_page_count(page, 1); balloon_retrieve()
156 return page; balloon_retrieve()
159 static struct page *balloon_next_page(struct page *page) balloon_next_page() argument
161 struct list_head *next = page->lru.next; balloon_next_page()
164 return list_entry(next, struct page, lru); balloon_next_page()
217 * pages with PG_reserved bit not set; online_pages_range() does not allow page
218 * onlining in whole range if first onlined page does not have PG_reserved
219 * bit set). Real size of added memory is established at page onlining stage.
234 * add_memory() will build page tables for the new memory so reserve_additional_memory()
270 static void xen_online_page(struct page *page) xen_online_page() argument
272 __online_page_set_limits(page); xen_online_page()
276 __balloon_append(page); xen_online_page()
330 struct page *page; increase_reservation() local
349 page = list_first_entry_or_null(&ballooned_pages, struct page, lru); increase_reservation()
351 if (!page) { increase_reservation()
355 frame_list[i] = page_to_pfn(page); increase_reservation()
356 page = balloon_next_page(page); increase_reservation()
366 page = balloon_retrieve(false); increase_reservation()
367 BUG_ON(page == NULL); increase_reservation()
369 pfn = page_to_pfn(page); increase_reservation()
375 /* Link back into the page tables if not highmem. */ increase_reservation()
376 if (!PageHighMem(page)) { increase_reservation()
387 /* Relinquish the page back to the allocator. */ increase_reservation()
388 __free_reserved_page(page); increase_reservation()
400 struct page *page; decrease_reservation() local
421 page = alloc_page(gfp); decrease_reservation()
422 if (page == NULL) { decrease_reservation()
427 scrub_page(page); decrease_reservation()
429 frame_list[i] = page_to_pfn(page); decrease_reservation()
445 page = pfn_to_page(pfn); decrease_reservation()
449 if (!PageHighMem(page)) { decrease_reservation()
459 balloon_append(page); decrease_reservation()
531 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) alloc_xenballooned_pages()
534 struct page *page; alloc_xenballooned_pages() local
537 page = balloon_retrieve(highmem); alloc_xenballooned_pages()
538 if (page && (highmem || !PageHighMem(page))) { alloc_xenballooned_pages()
539 pages[pgno++] = page; alloc_xenballooned_pages()
542 if (page) alloc_xenballooned_pages()
543 balloon_append(page); alloc_xenballooned_pages()
567 void free_xenballooned_pages(int nr_pages, struct page **pages) free_xenballooned_pages()
590 struct page *page; balloon_add_region() local
600 page = pfn_to_page(pfn); balloon_add_region()
604 __balloon_append(page); balloon_add_region()
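A sketch of a caller of the balloon interface shown above (the prototypes match those in the excerpt; the xen/balloon.h header is assumed). borrow_ballooned_pages() is a hypothetical helper that borrows a few unbacked lowmem pages and returns them:

#include <linux/kernel.h>
#include <xen/balloon.h>

static int borrow_ballooned_pages(void)
{
	struct page *pages[4];
	int rc;

	/* request four lowmem page structs whose frames the balloon has given up */
	rc = alloc_xenballooned_pages(ARRAY_SIZE(pages), pages, false);
	if (rc)
		return rc;

	/* ... grant-map foreign frames into these pages here ... */

	free_xenballooned_pages(ARRAY_SIZE(pages), pages);
	return 0;
}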
/linux-4.1.27/arch/tile/mm/
H A Dhomecache.c14 * This code maintains the "home" for each page in the system.
35 #include <asm/page.h>
104 * completed, and start to use the page while it's still dirty
174 void homecache_finv_map_page(struct page *page, int home) homecache_finv_map_page() argument
191 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); homecache_finv_map_page()
202 static void homecache_finv_page_home(struct page *page, int home) homecache_finv_page_home() argument
204 if (!PageHighMem(page) && home == page_home(page)) homecache_finv_page_home()
205 homecache_finv_page_va(page_address(page), home); homecache_finv_page_home()
207 homecache_finv_map_page(page, home); homecache_finv_page_home()
215 static void homecache_finv_page_internal(struct page *page, int force_map) homecache_finv_page_internal() argument
217 int home = page_home(page); homecache_finv_page_internal()
223 homecache_finv_map_page(page, cpu); homecache_finv_page_internal()
226 homecache_finv_map_page(page, home); homecache_finv_page_internal()
228 homecache_finv_page_home(page, home); homecache_finv_page_internal()
230 sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE); homecache_finv_page_internal()
233 void homecache_finv_page(struct page *page) homecache_finv_page() argument
235 homecache_finv_page_internal(page, 0); homecache_finv_page()
273 * non-coherent PTE, but the underlying page is not pte_set_home()
276 * the page to be writable underneath. In this case, pte_set_home()
281 pr_err("non-immutable page incoherently referenced: %#llx\n", pte_set_home()
297 * We could home this page anywhere, since it's immutable, pte_set_home()
347 * of a kernel page once, and require a full-chip cache/TLB flush,
351 int page_home(struct page *page) page_home() argument
353 if (PageHighMem(page)) { page_home()
356 unsigned long kva = (unsigned long)page_address(page); page_home()
362 void homecache_change_page_home(struct page *page, int order, int home) homecache_change_page_home() argument
367 BUG_ON(PageHighMem(page)); homecache_change_page_home()
368 BUG_ON(page_count(page) > 1); homecache_change_page_home()
369 BUG_ON(page_mapcount(page) != 0); homecache_change_page_home()
370 kva = (unsigned long) page_address(page); homecache_change_page_home()
384 struct page *homecache_alloc_pages(gfp_t gfp_mask, homecache_alloc_pages()
387 struct page *page; homecache_alloc_pages() local
389 page = alloc_pages(gfp_mask, order); homecache_alloc_pages()
390 if (page) homecache_alloc_pages()
391 homecache_change_page_home(page, order, home); homecache_alloc_pages()
392 return page; homecache_alloc_pages()
396 struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, homecache_alloc_pages_node()
399 struct page *page; homecache_alloc_pages_node() local
401 page = alloc_pages_node(nid, gfp_mask, order); homecache_alloc_pages_node()
402 if (page) homecache_alloc_pages_node()
403 homecache_change_page_home(page, order, home); homecache_alloc_pages_node()
404 return page; homecache_alloc_pages_node()
407 void __homecache_free_pages(struct page *page, unsigned int order) __homecache_free_pages() argument
409 if (put_page_testzero(page)) { __homecache_free_pages()
410 homecache_change_page_home(page, order, PAGE_HOME_HASH); __homecache_free_pages()
412 free_hot_cold_page(page, false); __homecache_free_pages()
414 init_page_count(page); __homecache_free_pages()
415 __free_pages(page, order); __homecache_free_pages()
H A Dhighmem.c25 void *kmap(struct page *page) kmap() argument
32 if (!PageHighMem(page)) kmap()
33 return page_address(page); kmap()
34 kva = kmap_high(page); kmap()
37 * Rewrite the PTE under the lock. This ensures that the page kmap()
42 set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); kmap()
49 void kunmap(struct page *page) kunmap() argument
53 if (!PageHighMem(page)) kunmap()
55 kunmap_high(page); kunmap()
60 * Describe a single atomic mapping of a page on a given cpu at a
65 struct page *page; member in struct:atomic_mapped_page
83 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
91 * Finally, doing it under the lock lets us safely examine the page
96 static void kmap_atomic_register(struct page *page, int type, kmap_atomic_register() argument
107 amp->page = page; kmap_atomic_register()
113 pteval = mk_pte(page, page_to_kpgprot(page)); kmap_atomic_register()
123 * Remove a page and va, on this cpu, from the list of kmap_atomic pages.
129 static void kmap_atomic_unregister(struct page *page, unsigned long va) kmap_atomic_unregister() argument
136 if (amp->page == page && amp->cpu == cpu && amp->va == va) kmap_atomic_unregister()
155 * Rewrite a default kernel PTE for this page. kmap_atomic_fix_one_kpte()
159 pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page)); kmap_atomic_fix_one_kpte()
174 void kmap_atomic_fix_kpte(struct page *page, int finished) kmap_atomic_fix_kpte() argument
180 if (amp->page == page) kmap_atomic_fix_kpte()
198 void *kmap_atomic_prot(struct page *page, pgprot_t prot) kmap_atomic_prot() argument
210 if (!PageHighMem(page)) kmap_atomic_prot()
211 return page_address(page); kmap_atomic_prot()
219 /* Register that this page is mapped atomically on this cpu. */ kmap_atomic_prot()
220 kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot)); kmap_atomic_prot()
226 void *kmap_atomic(struct page *page) kmap_atomic() argument
229 return kmap_atomic_prot(page, PAGE_NONE); kmap_atomic()
256 /* Must be a lowmem page */ __kunmap_atomic()
266 * This API is supposed to allow us to map memory without a "struct page".
278 struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
/linux-4.1.27/fs/ufs/
H A Ddir.c15 * Migration to usage of "page cache" on May 2006 by
43 static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) ufs_commit_chunk() argument
45 struct address_space *mapping = page->mapping; ufs_commit_chunk()
50 block_write_end(NULL, mapping, pos, len, len, page, NULL); ufs_commit_chunk()
56 err = write_one_page(page, 1); ufs_commit_chunk()
58 unlock_page(page); ufs_commit_chunk()
62 static inline void ufs_put_page(struct page *page) ufs_put_page() argument
64 kunmap(page); ufs_put_page()
65 page_cache_release(page); ufs_put_page()
77 struct page *page; ufs_inode_by_name() local
79 de = ufs_find_entry(dir, qstr, &page); ufs_inode_by_name()
82 ufs_put_page(page); ufs_inode_by_name()
88 /* Releases the page */ ufs_set_link()
90 struct page *page, struct inode *inode) ufs_set_link()
92 loff_t pos = page_offset(page) + ufs_set_link()
93 (char *) de - (char *) page_address(page); ufs_set_link()
97 lock_page(page); ufs_set_link()
98 err = ufs_prepare_chunk(page, pos, len); ufs_set_link()
104 err = ufs_commit_chunk(page, pos, len); ufs_set_link()
105 ufs_put_page(page); ufs_set_link()
111 static void ufs_check_page(struct page *page) ufs_check_page() argument
113 struct inode *dir = page->mapping->host; ufs_check_page()
115 char *kaddr = page_address(page); ufs_check_page()
122 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { ufs_check_page()
148 SetPageChecked(page); ufs_check_page()
176 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, ufs_check_page()
182 "entry in directory #%lu spans the page boundary" ufs_check_page()
184 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs); ufs_check_page()
186 SetPageChecked(page); ufs_check_page()
187 SetPageError(page); ufs_check_page()
190 static struct page *ufs_get_page(struct inode *dir, unsigned long n) ufs_get_page()
193 struct page *page = read_mapping_page(mapping, n, NULL); ufs_get_page() local
194 if (!IS_ERR(page)) { ufs_get_page()
195 kmap(page); ufs_get_page()
196 if (!PageChecked(page)) ufs_get_page()
197 ufs_check_page(page); ufs_get_page()
198 if (PageError(page)) ufs_get_page()
201 return page; ufs_get_page()
204 ufs_put_page(page); ufs_get_page()
209 * Return the offset into page `page_nr' of the last valid
210 * byte in that page, plus one.
230 struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p) ufs_dotdot()
232 struct page *page = ufs_get_page(dir, 0); ufs_dotdot() local
235 if (!IS_ERR(page)) { ufs_dotdot()
237 (struct ufs_dir_entry *)page_address(page)); ufs_dotdot()
238 *p = page; ufs_dotdot()
247 * returns the page in which the entry was found, and the entry itself
252 struct page **res_page) ufs_find_entry()
260 struct page *page = NULL; ufs_find_entry() local
279 page = ufs_get_page(dir, n); ufs_find_entry()
280 if (!IS_ERR(page)) { ufs_find_entry()
281 kaddr = page_address(page); ufs_find_entry()
288 ufs_put_page(page); ufs_find_entry()
295 ufs_put_page(page); ufs_find_entry()
304 *res_page = page; ufs_find_entry()
321 struct page *page = NULL; ufs_add_link() local
333 * This code plays outside i_size, so it locks the page ufs_add_link()
339 page = ufs_get_page(dir, n); ufs_add_link()
340 err = PTR_ERR(page); ufs_add_link()
341 if (IS_ERR(page)) ufs_add_link()
343 lock_page(page); ufs_add_link()
344 kaddr = page_address(page); ufs_add_link()
374 unlock_page(page); ufs_add_link()
375 ufs_put_page(page); ufs_add_link()
381 pos = page_offset(page) + ufs_add_link()
382 (char*)de - (char*)page_address(page); ufs_add_link()
383 err = ufs_prepare_chunk(page, pos, rec_len); ufs_add_link()
400 err = ufs_commit_chunk(page, pos, rec_len); ufs_add_link()
406 ufs_put_page(page); ufs_add_link()
410 unlock_page(page); ufs_add_link()
454 struct page *page = ufs_get_page(inode, n); ufs_readdir() local
456 if (IS_ERR(page)) { ufs_readdir()
458 "bad page in #%lu", ufs_readdir()
463 kaddr = page_address(page); ufs_readdir()
478 ufs_put_page(page); ufs_readdir()
495 ufs_put_page(page); ufs_readdir()
501 ufs_put_page(page); ufs_readdir()
512 struct page * page) ufs_delete_entry()
515 char *kaddr = page_address(page); ufs_delete_entry()
541 from = (char*)pde - (char*)page_address(page); ufs_delete_entry()
543 pos = page_offset(page) + from; ufs_delete_entry()
544 lock_page(page); ufs_delete_entry()
545 err = ufs_prepare_chunk(page, pos, to - from); ufs_delete_entry()
550 err = ufs_commit_chunk(page, pos, to - from); ufs_delete_entry()
554 ufs_put_page(page); ufs_delete_entry()
563 struct page *page = grab_cache_page(mapping, 0); ufs_make_empty() local
569 if (!page) ufs_make_empty()
572 err = ufs_prepare_chunk(page, 0, chunk_size); ufs_make_empty()
574 unlock_page(page); ufs_make_empty()
578 kmap(page); ufs_make_empty()
579 base = (char*)page_address(page); ufs_make_empty()
596 kunmap(page); ufs_make_empty()
598 err = ufs_commit_chunk(page, 0, chunk_size); ufs_make_empty()
600 page_cache_release(page); ufs_make_empty()
610 struct page *page = NULL; ufs_empty_dir() local
616 page = ufs_get_page(inode, i); ufs_empty_dir()
618 if (IS_ERR(page)) ufs_empty_dir()
621 kaddr = page_address(page); ufs_empty_dir()
648 ufs_put_page(page); ufs_empty_dir()
653 ufs_put_page(page); ufs_empty_dir()
89 ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, struct page *page, struct inode *inode) ufs_set_link() argument
511 ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir, struct page * page) ufs_delete_entry() argument
/linux-4.1.27/arch/metag/include/asm/
H A Dhighmem.h44 extern void *kmap_high(struct page *page);
45 extern void kunmap_high(struct page *page);
54 extern void *kmap(struct page *page);
55 extern void kunmap(struct page *page);
56 extern void *kmap_atomic(struct page *page);
59 extern struct page *kmap_atomic_to_page(void *ptr);
/linux-4.1.27/arch/alpha/include/asm/
H A Dpgalloc.h8 * Allocate and free page tables. The xxx_kernel() versions are
9 * used to allocate a kernel page table - this turns on ASN bits
70 struct page *page; pte_alloc_one() local
74 page = virt_to_page(pte); pte_alloc_one()
75 if (!pgtable_page_ctor(page)) { pte_alloc_one()
76 __free_page(page); pte_alloc_one()
79 return page; pte_alloc_one()
83 pte_free(struct mm_struct *mm, pgtable_t page) pte_free() argument
85 pgtable_page_dtor(page); pte_free()
86 __free_page(page); pte_free()
/linux-4.1.27/arch/x86/include/asm/
H A Dagp.h12 * page. This avoids data corruption on some CPUs.
15 #define map_page_into_agp(page) set_pages_uc(page, 1)
16 #define unmap_page_from_agp(page) set_pages_wb(page, 1)
20 * need to be called for each cacheline of the whole page so it may
21 * not be worth it. Would need a page for it.
H A Dhighmem.h60 extern void *kmap_high(struct page *page);
61 extern void kunmap_high(struct page *page);
63 void *kmap(struct page *page);
64 void kunmap(struct page *page);
66 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
67 void *kmap_atomic(struct page *page);
71 struct page *kmap_atomic_to_page(void *ptr);
H A Dpage_32.h24 static inline void clear_page(void *page) clear_page() argument
26 mmx_clear_page(page); clear_page()
36 static inline void clear_page(void *page) clear_page() argument
38 memset(page, 0, PAGE_SIZE); clear_page()
/linux-4.1.27/arch/frv/include/asm/
H A Dcacheflush.h52 extern void flush_dcache_page(struct page *page);
54 static inline void flush_dcache_page(struct page *page) flush_dcache_page() argument
56 unsigned long addr = page_to_phys(page); flush_dcache_page()
61 static inline void flush_page_to_ram(struct page *page) flush_page_to_ram() argument
63 flush_dcache_page(page); flush_page_to_ram()
77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, flush_icache_user_range() argument
87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); flush_icache_page()
96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
99 flush_icache_user_range((vma), (page), (vaddr), (len)); \
102 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dhighmem.h59 extern void *kmap_high(struct page *page);
60 extern void kunmap_high(struct page *page);
62 extern void *kmap(struct page *page);
63 extern void kunmap(struct page *page);
65 extern struct page *kmap_atomic_to_page(void *ptr);
88 /* cache flush page attachment point */ \
116 static inline void *kmap_atomic_primary(struct page *page) kmap_atomic_primary() argument
121 paddr = page_to_phys(page); kmap_atomic_primary()
144 void *kmap_atomic(struct page *page);
/linux-4.1.27/fs/gfs2/
H A Daops.c41 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, gfs2_page_add_databufs() argument
44 struct buffer_head *head = page_buffers(page); gfs2_page_add_databufs()
91 * @page: The page to be written
97 static int gfs2_writepage_common(struct page *page, gfs2_writepage_common() argument
100 struct inode *inode = page->mapping->host; gfs2_writepage_common()
111 /* Is the page fully outside i_size? (truncate in progress) */ gfs2_writepage_common()
113 if (page->index > end_index || (page->index == end_index && !offset)) { gfs2_writepage_common()
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); gfs2_writepage_common()
119 redirty_page_for_writepage(wbc, page); gfs2_writepage_common()
121 unlock_page(page); gfs2_writepage_common()
126 * gfs2_writepage - Write page for writeback mappings
127 * @page: The page
132 static int gfs2_writepage(struct page *page, struct writeback_control *wbc) gfs2_writepage() argument
136 ret = gfs2_writepage_common(page, wbc); gfs2_writepage()
140 return nobh_writepage(page, gfs2_get_block_noalloc, wbc); gfs2_writepage()
145 * @page: The page to write
154 static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) __gfs2_jdata_writepage() argument
156 struct inode *inode = page->mapping->host; __gfs2_jdata_writepage()
160 if (PageChecked(page)) { __gfs2_jdata_writepage()
161 ClearPageChecked(page); __gfs2_jdata_writepage()
162 if (!page_has_buffers(page)) { __gfs2_jdata_writepage()
163 create_empty_buffers(page, inode->i_sb->s_blocksize, __gfs2_jdata_writepage()
166 gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); __gfs2_jdata_writepage()
168 return block_write_full_page(page, gfs2_get_block_noalloc, wbc); __gfs2_jdata_writepage()
172 * gfs2_jdata_writepage - Write complete page
173 * @page: Page to write
179 static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) gfs2_jdata_writepage() argument
181 struct inode *inode = page->mapping->host; gfs2_jdata_writepage()
186 if (PageChecked(page)) { gfs2_jdata_writepage()
194 ret = gfs2_writepage_common(page, wbc); gfs2_jdata_writepage()
196 ret = __gfs2_jdata_writepage(page, wbc); gfs2_jdata_writepage()
202 redirty_page_for_writepage(wbc, page); gfs2_jdata_writepage()
203 unlock_page(page); gfs2_jdata_writepage()
224 * @writepage: The writepage function to call for each page
248 struct page *page = pvec->pages[i]; gfs2_write_jdata_pagevec() local
251 * At this point, the page may be truncated or gfs2_write_jdata_pagevec()
252 * invalidated (changing page->mapping to NULL), or gfs2_write_jdata_pagevec()
254 * mapping. However, page->index will not change gfs2_write_jdata_pagevec()
255 * because we have a reference on the page. gfs2_write_jdata_pagevec()
257 if (page->index > end) { gfs2_write_jdata_pagevec()
266 *done_index = page->index; gfs2_write_jdata_pagevec()
268 lock_page(page); gfs2_write_jdata_pagevec()
270 if (unlikely(page->mapping != mapping)) { gfs2_write_jdata_pagevec()
272 unlock_page(page); gfs2_write_jdata_pagevec()
276 if (!PageDirty(page)) { gfs2_write_jdata_pagevec()
281 if (PageWriteback(page)) { gfs2_write_jdata_pagevec()
283 wait_on_page_writeback(page); gfs2_write_jdata_pagevec()
288 BUG_ON(PageWriteback(page)); gfs2_write_jdata_pagevec()
289 if (!clear_page_dirty_for_io(page)) gfs2_write_jdata_pagevec()
294 ret = __gfs2_jdata_writepage(page, wbc); gfs2_write_jdata_pagevec()
297 unlock_page(page); gfs2_write_jdata_pagevec()
302 * done_index is set past this page, gfs2_write_jdata_pagevec()
310 *done_index = page->index + 1; gfs2_write_jdata_pagevec()
340 * start transactions before we grab page locks. This allows us
402 * We hit the last page and there is more work to be done: wrap gfs2_write_cache_jdata()
441 * stuffed_readpage - Fill in a Linux page with stuffed file data
443 * @page: the page
448 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) stuffed_readpage() argument
457 * asked for a zero page in the case of a stuffed file being extended, stuffed_readpage()
460 if (unlikely(page->index)) { stuffed_readpage()
461 zero_user(page, 0, PAGE_CACHE_SIZE); stuffed_readpage()
462 SetPageUptodate(page); stuffed_readpage()
470 kaddr = kmap_atomic(page); stuffed_readpage()
476 flush_dcache_page(page); stuffed_readpage()
478 SetPageUptodate(page); stuffed_readpage()
486 * @file: The file to read a page for
487 * @page: The page to read
495 static int __gfs2_readpage(void *file, struct page *page) __gfs2_readpage() argument
497 struct gfs2_inode *ip = GFS2_I(page->mapping->host); __gfs2_readpage()
498 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); __gfs2_readpage()
502 error = stuffed_readpage(ip, page); __gfs2_readpage()
503 unlock_page(page); __gfs2_readpage()
505 error = mpage_readpage(page, gfs2_block_map); __gfs2_readpage()
515 * gfs2_readpage - read a page of a file
517 * @page: The page of the file
520 * relock the page in order to get the locking in the right
524 static int gfs2_readpage(struct file *file, struct page *page) gfs2_readpage() argument
526 struct address_space *mapping = page->mapping; gfs2_readpage()
531 unlock_page(page); gfs2_readpage()
537 lock_page(page); gfs2_readpage()
538 if (page->mapping == mapping && !PageUptodate(page)) gfs2_readpage()
539 error = __gfs2_readpage(file, page); gfs2_readpage()
541 unlock_page(page); gfs2_readpage()
546 lock_page(page); gfs2_readpage()
567 struct page *page; gfs2_internal_read() local
574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL); gfs2_internal_read()
575 if (IS_ERR(page)) gfs2_internal_read()
576 return PTR_ERR(page); gfs2_internal_read()
577 p = kmap_atomic(page); gfs2_internal_read()
580 page_cache_release(page); gfs2_internal_read()
595 * the page lock and the glock) and return having done no I/O. Its
633 * @pagep: Pointer to return the page
641 struct page **pagep, void **fsdata) gfs2_write_begin()
652 struct page *page; gfs2_write_begin() local
702 page = grab_cache_page_write_begin(mapping, index, flags); gfs2_write_begin()
703 *pagep = page; gfs2_write_begin()
704 if (unlikely(!page)) gfs2_write_begin()
710 error = gfs2_unstuff_dinode(ip, page); gfs2_write_begin()
713 } else if (!PageUptodate(page)) { gfs2_write_begin()
714 error = stuffed_readpage(ip, page); gfs2_write_begin()
720 error = __block_write_begin(page, from, len, gfs2_block_map); gfs2_write_begin()
725 unlock_page(page); gfs2_write_begin()
726 page_cache_release(page); gfs2_write_begin()
798 * @page: The page
800 * This copies the data from the page into the inode block after
807 struct page *page) gfs2_stuffed_write_end()
817 kaddr = kmap_atomic(page); gfs2_stuffed_write_end()
820 flush_dcache_page(page); gfs2_stuffed_write_end()
823 if (!PageUptodate(page)) gfs2_stuffed_write_end()
824 SetPageUptodate(page); gfs2_stuffed_write_end()
825 unlock_page(page); gfs2_stuffed_write_end()
826 page_cache_release(page); gfs2_stuffed_write_end()
857 * @page: The page that has been written
869 struct page *page, void *fsdata) gfs2_write_end()
871 struct inode *inode = page->mapping->host; gfs2_write_end()
886 unlock_page(page); gfs2_write_end()
887 page_cache_release(page); gfs2_write_end()
892 return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page); gfs2_write_end()
895 gfs2_page_add_databufs(ip, page, from, to); gfs2_write_end()
897 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); gfs2_write_end()
926 * @page: The page to dirty
928 * Returns: 1 if it dirtied the page, or 0 otherwise gfs2_set_page_dirty()
931 static int gfs2_set_page_dirty(struct page *page) gfs2_set_page_dirty() argument
933 SetPageChecked(page); gfs2_set_page_dirty()
934 return __set_page_dirty_buffers(page); gfs2_set_page_dirty()
986 static void gfs2_invalidatepage(struct page *page, unsigned int offset, gfs2_invalidatepage() argument
989 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); gfs2_invalidatepage()
995 BUG_ON(!PageLocked(page)); gfs2_invalidatepage()
997 ClearPageChecked(page); gfs2_invalidatepage()
998 if (!page_has_buffers(page)) gfs2_invalidatepage()
1001 bh = head = page_buffers(page); gfs2_invalidatepage()
1013 try_to_release_page(page, 0); gfs2_invalidatepage()
1055 * (i.e. their page cache entries for this inode). We do not, gfs2_direct_IO()
1106 * gfs2_releasepage - free the metadata associated with a page
1107 * @page: the page that's being released
1110 * Call try_to_free_buffers() if the buffers in this page can be
1116 int gfs2_releasepage(struct page *page, gfp_t gfp_mask) gfs2_releasepage() argument
1118 struct address_space *mapping = page->mapping; gfs2_releasepage()
1123 if (!page_has_buffers(page)) gfs2_releasepage()
1128 head = bh = page_buffers(page); gfs2_releasepage()
1141 head = bh = page_buffers(page); gfs2_releasepage()
1157 return try_to_free_buffers(page); gfs2_releasepage()
805 gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, loff_t pos, unsigned len, unsigned copied, struct page *page) gfs2_stuffed_write_end() argument
867 gfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) gfs2_write_end() argument
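The stuffed_readpage() and write-path excerpts above all use the same low-level idiom for filling a page cache page by hand. Below is a minimal, hypothetical sketch of that idiom (not the gfs2 code itself): page 0 of a "stuffed" (inline-data) file is copied from a caller-supplied buffer, every other page is zero-filled, and the page is then marked up to date. inline_data and inline_len are assumed parameters, with inline_len taken to be at most PAGE_CACHE_SIZE.

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Hypothetical helper; mirrors the pattern of stuffed_readpage() above. */
static int fill_page_from_inline(struct page *page,
                                 const void *inline_data, size_t inline_len)
{
        void *kaddr;

        if (page->index) {
                /* Only page 0 of an inline file carries data; zero the rest. */
                zero_user(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        kaddr = kmap_atomic(page);
        memcpy(kaddr, inline_data, inline_len);
        memset(kaddr + inline_len, 0, PAGE_CACHE_SIZE - inline_len);
        kunmap_atomic(kaddr);

        flush_dcache_page(page);
        SetPageUptodate(page);
        return 0;
}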
/linux-4.1.27/arch/nios2/mm/
H A Dcacheflush.c89 static void flush_aliases(struct address_space *mapping, struct page *page) flush_aliases() argument
95 pgoff = page->index; flush_aliases()
108 page_to_pfn(page)); flush_aliases()
156 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
158 unsigned long start = (unsigned long) page_address(page); flush_icache_page()
176 void __flush_dcache_page(struct address_space *mapping, struct page *page) __flush_dcache_page() argument
180 * page. This ensures that data in the physical page is mutually __flush_dcache_page()
183 unsigned long start = (unsigned long)page_address(page); __flush_dcache_page()
188 void flush_dcache_page(struct page *page) flush_dcache_page() argument
193 * The zero page is never written to, so never has any dirty flush_dcache_page()
196 if (page == ZERO_PAGE(0)) flush_dcache_page()
199 mapping = page_mapping(page); flush_dcache_page()
201 /* Flush this page if there are aliases. */ flush_dcache_page()
203 clear_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
205 __flush_dcache_page(mapping, page); flush_dcache_page()
207 unsigned long start = (unsigned long)page_address(page); flush_dcache_page()
208 flush_aliases(mapping, page); flush_dcache_page()
211 set_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
220 struct page *page; update_mmu_cache() local
227 * The zero page is never written to, so never has any dirty update_mmu_cache()
230 page = pfn_to_page(pfn); update_mmu_cache()
231 if (page == ZERO_PAGE(0)) update_mmu_cache()
234 mapping = page_mapping(page); update_mmu_cache()
235 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) update_mmu_cache()
236 __flush_dcache_page(mapping, page); update_mmu_cache()
240 flush_aliases(mapping, page); update_mmu_cache()
242 flush_icache_page(vma, page); update_mmu_cache()
247 struct page *to) copy_user_page()
256 void clear_user_page(void *addr, unsigned long vaddr, struct page *page) clear_user_page() argument
265 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
269 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); copy_from_user_page()
276 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
280 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); copy_to_user_page()
/linux-4.1.27/drivers/block/
H A Dbrd.c31 * the pages containing the block device's contents. A brd page's ->index is
52 * Look up and return a brd's page for a given sector.
55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) brd_lookup_page()
58 struct page *page; brd_lookup_page() local
61 * The page lifetime is protected by the fact that we have opened the brd_lookup_page()
72 idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ brd_lookup_page()
73 page = radix_tree_lookup(&brd->brd_pages, idx); brd_lookup_page()
76 BUG_ON(page && page->index != idx); brd_lookup_page()
78 return page; brd_lookup_page()
82 * Look up and return a brd's page for a given sector.
83 * If one does not exist, allocate an empty page, and insert that. Then
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) brd_insert_page()
89 struct page *page; brd_insert_page() local
92 page = brd_lookup_page(brd, sector); brd_insert_page()
93 if (page) brd_insert_page()
94 return page; brd_insert_page()
98 * block or filesystem layers from page reclaim. brd_insert_page()
109 page = alloc_page(gfp_flags); brd_insert_page()
110 if (!page) brd_insert_page()
114 __free_page(page); brd_insert_page()
120 page->index = idx; brd_insert_page()
121 if (radix_tree_insert(&brd->brd_pages, idx, page)) { brd_insert_page()
122 __free_page(page); brd_insert_page()
123 page = radix_tree_lookup(&brd->brd_pages, idx); brd_insert_page()
124 BUG_ON(!page); brd_insert_page()
125 BUG_ON(page->index != idx); brd_insert_page()
131 return page; brd_insert_page()
136 struct page *page; brd_free_page() local
141 page = radix_tree_delete(&brd->brd_pages, idx); brd_free_page()
143 if (page) brd_free_page()
144 __free_page(page); brd_free_page()
149 struct page *page; brd_zero_page() local
151 page = brd_lookup_page(brd, sector); brd_zero_page()
152 if (page) brd_zero_page()
153 clear_highpage(page); brd_zero_page()
164 struct page *pages[FREE_BATCH]; brd_free_pages()
236 struct page *page; copy_to_brd() local
242 page = brd_lookup_page(brd, sector); copy_to_brd()
243 BUG_ON(!page); copy_to_brd()
245 dst = kmap_atomic(page); copy_to_brd()
253 page = brd_lookup_page(brd, sector); copy_to_brd()
254 BUG_ON(!page); copy_to_brd()
256 dst = kmap_atomic(page); copy_to_brd()
268 struct page *page; copy_from_brd() local
274 page = brd_lookup_page(brd, sector); copy_from_brd()
275 if (page) { copy_from_brd()
276 src = kmap_atomic(page); copy_from_brd()
286 page = brd_lookup_page(brd, sector); copy_from_brd()
287 if (page) { copy_from_brd()
288 src = kmap_atomic(page); copy_from_brd()
299 static int brd_do_bvec(struct brd_device *brd, struct page *page, brd_do_bvec() argument
312 mem = kmap_atomic(page); brd_do_bvec()
315 flush_dcache_page(page); brd_do_bvec()
317 flush_dcache_page(page); brd_do_bvec()
364 struct page *page, int rw) brd_rw_page()
367 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); brd_rw_page()
368 page_endio(page, rw & WRITE, err); brd_rw_page()
377 struct page *page; brd_direct_access() local
381 page = brd_insert_page(brd, sector); brd_direct_access()
382 if (!page) brd_direct_access()
384 *kaddr = page_address(page); brd_direct_access()
385 *pfn = page_to_pfn(page); brd_direct_access()
388 * TODO: If size > PAGE_SIZE, we could look to see if the next page in brd_direct_access()
389 * the file happens to be mapped to the next page of physical RAM. brd_direct_access()
363 brd_rw_page(struct block_device *bdev, sector_t sector, struct page *page, int rw) brd_rw_page() argument
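brd.c above keeps the ramdisk's backing pages in a radix tree indexed by sector >> PAGE_SECTORS_SHIFT, with a lockless lookup fast path and a preload/insert slow path that tolerates losing an insertion race. A condensed sketch of that lookup-or-insert pattern follows, assuming the caller supplies the tree and its spinlock (the real driver embeds both in struct brd_device, and its lockless lookup relies on device-lifetime guarantees rather than the RCU read lock used here):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Illustrative helper, not the brd.c code itself. */
static struct page *ramdisk_get_page(struct radix_tree_root *tree,
                                     spinlock_t *lock, pgoff_t idx)
{
        struct page *page;

        rcu_read_lock();
        page = radix_tree_lookup(tree, idx);
        rcu_read_unlock();
        if (page)
                return page;

        page = alloc_page(GFP_NOIO | __GFP_ZERO);
        if (!page)
                return NULL;

        if (radix_tree_preload(GFP_NOIO)) {
                __free_page(page);
                return NULL;
        }

        spin_lock(lock);
        page->index = idx;
        if (radix_tree_insert(tree, idx, page)) {
                /* Lost the race: someone else inserted first; reuse theirs. */
                __free_page(page);
                page = radix_tree_lookup(tree, idx);
        }
        spin_unlock(lock);

        radix_tree_preload_end();
        return page;
}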
/linux-4.1.27/arch/metag/mm/
H A Dhighmem.c13 void *kmap(struct page *page) kmap() argument
16 if (!PageHighMem(page)) kmap()
17 return page_address(page); kmap()
18 return kmap_high(page); kmap()
22 void kunmap(struct page *page) kunmap() argument
25 if (!PageHighMem(page)) kunmap()
27 kunmap_high(page); kunmap()
40 void *kmap_atomic(struct page *page) kmap_atomic() argument
48 if (!PageHighMem(page)) kmap_atomic()
49 return page_address(page); kmap_atomic()
57 set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL)); kmap_atomic()
75 * is a bad idea also, in case the page changes cacheability __kunmap_atomic()
76 * attributes or becomes a protected page in a hypervisor. __kunmap_atomic()
90 * have a struct page associated with it.
112 struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
/linux-4.1.27/arch/mn10300/include/asm/
H A Dhighmem.h46 extern unsigned long kmap_high(struct page *page);
47 extern void kunmap_high(struct page *page);
49 static inline unsigned long kmap(struct page *page) kmap() argument
53 if (page < highmem_start_page) kmap()
54 return page_address(page); kmap()
55 return kmap_high(page); kmap()
58 static inline void kunmap(struct page *page) kunmap() argument
62 if (page < highmem_start_page) kunmap()
64 kunmap_high(page); kunmap()
73 static inline void *kmap_atomic(struct page *page) kmap_atomic() argument
79 if (page < highmem_start_page) kmap_atomic()
80 return page_address(page); kmap_atomic()
89 set_pte(kmap_pte - idx, mk_pte(page, kmap_prot)); kmap_atomic()
H A Dpgalloc.h1 /* MN10300 Page and page table/directory allocation
14 #include <asm/page.h>
16 #include <linux/mm.h> /* for struct page */
19 struct page;
21 /* attach a page table to a PMD entry */
26 void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) pmd_populate()
33 * Allocate and free page tables.
40 extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
47 static inline void pte_free(struct mm_struct *mm, struct page *pte) pte_free()
/linux-4.1.27/fs/qnx6/
H A Ddir.c26 static struct page *qnx6_get_page(struct inode *dir, unsigned long n) qnx6_get_page()
29 struct page *page = read_mapping_page(mapping, n, NULL); qnx6_get_page() local
30 if (!IS_ERR(page)) qnx6_get_page()
31 kmap(page); qnx6_get_page()
32 return page; qnx6_get_page()
51 struct page **p) qnx6_longname()
56 /* within page */ qnx6_longname()
59 struct page *page = read_mapping_page(mapping, n, NULL); qnx6_longname() local
60 if (IS_ERR(page)) qnx6_longname()
61 return ERR_CAST(page); qnx6_longname()
62 kmap(*p = page); qnx6_longname()
63 return (struct qnx6_long_filename *)(page_address(page) + offs); qnx6_longname()
74 struct page *page; qnx6_dir_longfilename() local
83 lf = qnx6_longname(s, de, &page); qnx6_dir_longfilename()
94 qnx6_put_page(page); qnx6_dir_longfilename()
107 qnx6_put_page(page); qnx6_dir_longfilename()
111 qnx6_put_page(page); qnx6_dir_longfilename()
132 struct page *page = qnx6_get_page(inode, n); qnx6_readdir() local
137 if (IS_ERR(page)) { qnx6_readdir()
140 return PTR_ERR(page); qnx6_readdir()
142 de = ((struct qnx6_dir_entry *)page_address(page)) + start; qnx6_readdir()
171 qnx6_put_page(page); qnx6_readdir()
184 struct page *page; qnx6_long_match() local
186 struct qnx6_long_filename *lf = qnx6_longname(s, de, &page); qnx6_long_match()
193 qnx6_put_page(page); qnx6_long_match()
197 qnx6_put_page(page); qnx6_long_match()
200 qnx6_put_page(page); qnx6_long_match()
218 struct page **res_page) qnx6_find_entry()
222 struct page *page = NULL; qnx6_find_entry() local
239 page = qnx6_get_page(dir, n); qnx6_find_entry()
240 if (!IS_ERR(page)) { qnx6_find_entry()
244 de = (struct qnx6_dir_entry *)page_address(page); qnx6_find_entry()
263 qnx6_put_page(page); qnx6_find_entry()
272 *res_page = page; qnx6_find_entry()
H A Dnamei.c19 struct page *page; qnx6_lookup() local
27 ino = qnx6_find_entry(len, dir, name, &page); qnx6_lookup()
30 qnx6_put_page(page); qnx6_lookup()
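The qnx6 directory code above reads metadata pages through the page cache with read_mapping_page() and keeps them kmap()ed while they are parsed. A small sketch of the get/put pair that idiom implies (qnx6_put_page() is not shown in the excerpt; the helpers below are illustrative stand-ins, not the filesystem's own code):

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static struct page *dir_get_page(struct address_space *mapping, pgoff_t n)
{
        struct page *page = read_mapping_page(mapping, n, NULL);

        if (!IS_ERR(page))
                kmap(page);             /* caller parses the mapped data */
        return page;
}

static void dir_put_page(struct page *page)
{
        kunmap(page);
        page_cache_release(page);       /* drop the page cache reference */
}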
/linux-4.1.27/drivers/video/fbdev/core/
H A Dfb_defio.c26 static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) fb_deferred_io_page()
29 struct page *page; fb_deferred_io_page() local
32 page = vmalloc_to_page(screen_base + offs); fb_deferred_io_page()
34 page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); fb_deferred_io_page()
36 return page; fb_deferred_io_page()
44 struct page *page; fb_deferred_io_fault() local
51 page = fb_deferred_io_page(info, offset); fb_deferred_io_fault()
52 if (!page) fb_deferred_io_fault()
55 get_page(page); fb_deferred_io_fault()
58 page->mapping = vma->vm_file->f_mapping; fb_deferred_io_fault()
62 BUG_ON(!page->mapping); fb_deferred_io_fault()
63 page->index = vmf->pgoff; fb_deferred_io_fault()
65 vmf->page = page; fb_deferred_io_fault()
97 struct page *page = vmf->page; fb_deferred_io_mkwrite() local
100 struct page *cur; fb_deferred_io_mkwrite()
103 write to the page. we schedule a workqueue. that workqueue fb_deferred_io_mkwrite()
105 deferred framebuffer IO. then if userspace touches a page fb_deferred_io_mkwrite()
110 /* protect against the workqueue changing the page list */ fb_deferred_io_mkwrite()
118 * We want the page to remain locked from ->page_mkwrite until fb_deferred_io_mkwrite()
120 * before the PTE is updated, which would leave the page ignored fb_deferred_io_mkwrite()
122 * Do this by locking the page here and informing the caller fb_deferred_io_mkwrite()
125 lock_page(page); fb_deferred_io_mkwrite()
131 process could start writing to the same page fb_deferred_io_mkwrite()
135 if (unlikely(cur == page)) fb_deferred_io_mkwrite()
137 else if (cur->index > page->index) fb_deferred_io_mkwrite()
141 list_add_tail(&page->lru, &cur->lru); fb_deferred_io_mkwrite()
156 static int fb_deferred_io_set_page_dirty(struct page *page) fb_deferred_io_set_page_dirty() argument
158 if (!PageDirty(page)) fb_deferred_io_set_page_dirty()
159 SetPageDirty(page); fb_deferred_io_set_page_dirty()
183 struct page *cur; fb_deferred_io_work()
229 struct page *page; fb_deferred_io_cleanup() local
237 page = fb_deferred_io_page(info, i); fb_deferred_io_cleanup()
238 page->mapping = NULL; fb_deferred_io_cleanup()
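fb_deferred_io_mkwrite() above queues the faulting page on a per-framebuffer dirty list kept sorted by page->index, skipping pages that are already queued, so the deferred-I/O worker can later walk the list in file order. A stand-alone sketch of that sorted-insert loop (the list head and the locking around it are assumed to be handled by the caller, as the excerpt does around its list walk):

#include <linux/list.h>
#include <linux/mm_types.h>

/* Illustrative helper mirroring the insertion loop in the excerpt. */
static void defio_queue_page(struct list_head *pagelist, struct page *page)
{
        struct page *cur;

        list_for_each_entry(cur, pagelist, lru) {
                if (cur == page)
                        return;                 /* already queued */
                if (cur->index > page->index)
                        break;                  /* insert before 'cur' */
        }
        /* Falls through with &cur->lru == pagelist if the list was walked to
         * the end, so this appends in that case and inserts before 'cur'
         * otherwise - the same trick the excerpt relies on. */
        list_add_tail(&page->lru, &cur->lru);
}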
/linux-4.1.27/arch/ia64/include/asm/
H A Dcacheflush.h9 #include <linux/page-flags.h>
12 #include <asm/page.h>
24 #define flush_icache_page(vma,page) do { } while (0)
29 #define flush_dcache_page(page) \
31 clear_bit(PG_arch_1, &(page)->flags); \
41 #define flush_icache_user_range(vma, page, user_addr, len) \
43 unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
49 flush_icache_user_range(vma, page, vaddr, len); \
51 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dpage.h23 #define RGN_GATE 5 /* Gate page, Kernel text, etc */
27 * PAGE_SHIFT determines the actual kernel page size.
38 # error Unsupported page size!
64 extern void clear_page (void *page);
71 #define clear_user_page(addr, vaddr, page) \
74 flush_dcache_page(page); \
77 #define copy_user_page(to, from, vaddr, page) \
80 flush_dcache_page(page); \
86 struct page *page = alloc_page_vma( \
88 if (page) \
89 flush_dcache_page(page); \
90 page; \
104 extern struct page *vmem_map;
106 # define page_to_pfn(page) ((unsigned long) (page - vmem_map))
123 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
181 typedef struct page *pgtable_t;
204 typedef struct page *pgtable_t;
228 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Drw26.c38 * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
64 * called when the page is truncate from a file, either as a result of
68 * [0, offset] bytes of the page remain valid (this is for a case of not-page
69 * aligned truncate). Lustre leaves partially truncated page in the cache,
72 static void ll_invalidatepage(struct page *vmpage, unsigned int offset, ll_invalidatepage()
77 struct cl_page *page; ll_invalidatepage() local
87 * below because they are run with page locked and all our io is ll_invalidatepage()
88 * happening with locked page too ll_invalidatepage()
96 page = cl_vmpage_page(vmpage, obj); ll_invalidatepage()
97 if (page != NULL) { ll_invalidatepage()
98 lu_ref_add(&page->cp_reference, ll_invalidatepage()
100 cl_page_delete(env, page); ll_invalidatepage()
101 lu_ref_del(&page->cp_reference, ll_invalidatepage()
103 cl_page_put(env, page); ll_invalidatepage()
117 static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) ll_releasepage()
122 struct cl_page *page; ll_releasepage() local
138 /* 1 for page allocator, 1 for cl_page and 1 for page cache */ ll_releasepage()
147 * page refcount by cl_page, so ask kernel to not free ll_releasepage()
148 * this page. */ ll_releasepage()
151 page = cl_vmpage_page(vmpage, obj); ll_releasepage()
152 result = page == NULL; ll_releasepage()
153 if (page != NULL) { ll_releasepage()
154 if (!cl_page_in_use(page)) { ll_releasepage()
156 cl_page_delete(env, page); ll_releasepage()
158 cl_page_put(env, page); ll_releasepage()
164 static int ll_set_page_dirty(struct page *vmpage) ll_set_page_dirty()
167 struct cl_page *page = vvp_vmpage_page_transient(vmpage); ll_set_page_dirty()
172 * XXX should page method be called here? ll_set_page_dirty()
174 LASSERT(&obj->co_cl == page->cp_obj); ll_set_page_dirty()
175 cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type)); ll_set_page_dirty()
177 * XXX cannot do much here, because page is possibly not locked: ll_set_page_dirty()
189 size_t size, struct page ***pages, ll_get_user_pages()
214 /* ll_free_user_pages - tear down page struct array
215 * @pages: array of page struct pointers underlying target buffer */ ll_free_user_pages()
216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) ll_free_user_pages()
240 struct page **pages = pv->ldp_pages; ll_direct_rw_pages()
268 /* check the page type: if the page is a host page, then do ll_direct_rw_pages()
271 struct page *vmpage = cl_page_vmpage(env, clp); ll_direct_rw_pages()
272 struct page *src_page; ll_direct_rw_pages()
273 struct page *dst_page; ll_direct_rw_pages()
286 /* make sure page will be added to the transfer by ll_direct_rw_pages()
292 /* do not issue the page for read, since it ll_direct_rw_pages()
293 * may reread a ra page which has NOT uptodate ll_direct_rw_pages()
304 * Set page clip to tell transfer formation engine ll_direct_rw_pages()
305 * that page has to be sent even if it is beyond KMS. ll_direct_rw_pages()
337 struct page **pages, int page_count) ll_direct_IO_26_seg()
407 struct page **pages; ll_direct_IO_26()
432 * page worth of page pointers = 4MB on i386. */ ll_direct_IO_26()
474 struct page **pagep, void **fsdata) ll_write_begin()
477 struct page *page; ll_write_begin() local
481 page = grab_cache_page_write_begin(mapping, index, flags); ll_write_begin()
482 if (!page) ll_write_begin()
485 *pagep = page; ll_write_begin()
487 rc = ll_prepare_write(file, page, from, from + len); ll_write_begin()
489 unlock_page(page); ll_write_begin()
490 page_cache_release(page); ll_write_begin()
497 struct page *page, void *fsdata) ll_write_end()
502 rc = ll_commit_write(file, page, from, from + copied); ll_write_end()
503 unlock_page(page); ll_write_end()
504 page_cache_release(page); ll_write_end()
511 struct page *newpage, struct page *page, ll_migratepage()
515 /* Always fail page migration until we have a proper implementation */ ll_migratepage()
495 ll_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) ll_write_end() argument
510 ll_migratepage(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode ) ll_migratepage() argument
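ll_write_begin()/ll_write_end() above, like the gfs2 versions earlier in these results, follow the standard ->write_begin contract: hand back a locked, referenced page through *pagep, and drop both the lock and the reference if preparation fails so the caller never sees a half-initialised page. A minimal sketch of that contract, with prepare_for_write() as an assumed placeholder for the filesystem-specific step (ll_prepare_write() in the excerpt):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Assumed filesystem-specific preparation step (placeholder). */
static int prepare_for_write(struct file *file, struct page *page,
                             unsigned from, unsigned to);

static int example_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;
        int rc;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        rc = prepare_for_write(file, page, from, from + len);
        if (rc) {
                /* On failure the caller must not see a locked page. */
                unlock_page(page);
                page_cache_release(page);
        }
        return rc;
}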
/linux-4.1.27/fs/
H A Dmpage.c37 * If a page does not map to a contiguous run of blocks then it simply falls
40 * Why is this? If a page's completion depends on a number of different BIOs
42 * status of that page is hard. See end_buffer_async_read() for the details.
51 struct page *page = bv->bv_page; bio_for_each_segment_all() local
52 page_endio(page, bio_data_dir(bio), err); bio_for_each_segment_all()
90 * the page, which allows readpage to avoid triggering a duplicate call
94 * them. So when the buffer is up to date and the page size == block size,
95 * this marks the page up to date instead of adding new buffers.
98 map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) map_buffer_to_page() argument
100 struct inode *inode = page->mapping->host; map_buffer_to_page()
104 if (!page_has_buffers(page)) { map_buffer_to_page()
107 * the page and the page just needs to be set up to date map_buffer_to_page()
111 SetPageUptodate(page); map_buffer_to_page()
114 create_empty_buffers(page, 1 << inode->i_blkbits, 0); map_buffer_to_page()
116 head = page_buffers(page); map_buffer_to_page()
140 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, do_mpage_readpage() argument
144 struct inode *inode = page->mapping->host; do_mpage_readpage()
160 if (page_has_buffers(page)) do_mpage_readpage()
163 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); do_mpage_readpage()
195 * Then do more get_blocks calls until we are done with this page. do_mpage_readpage()
197 map_bh->b_page = page; do_mpage_readpage()
218 /* some filesystems will copy data into the page during do_mpage_readpage()
221 * we just collected from get_block into the page's buffers do_mpage_readpage()
225 map_buffer_to_page(page, map_bh, page_block); do_mpage_readpage()
250 zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); do_mpage_readpage()
252 SetPageUptodate(page); do_mpage_readpage()
253 unlock_page(page); do_mpage_readpage()
257 SetPageMappedToDisk(page); do_mpage_readpage()
260 if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) && do_mpage_readpage()
261 cleancache_get_page(page) == 0) { do_mpage_readpage()
262 SetPageUptodate(page); do_mpage_readpage()
267 * This page will go to BIO. Do we need to send this BIO off first? do_mpage_readpage()
276 page)) do_mpage_readpage()
287 if (bio_add_page(bio, page, length, 0) < length) { do_mpage_readpage()
305 if (!PageUptodate(page)) do_mpage_readpage()
306 block_read_full_page(page, get_block); do_mpage_readpage()
308 unlock_page(page); do_mpage_readpage()
317 * The page at @pages->prev has the lowest file offset, and reads should be
322 * This function walks the pages and the blocks within each page, building and
327 * - encountering a page which has buffers
328 * - encountering a page which has a non-hole after a hole
329 * - encountering a page with non-contiguous blocks
332 * It does handle a page which has holes at the end - that is a common case:
368 struct page *page = list_entry(pages->prev, struct page, lru); mpage_readpages() local
370 prefetchw(&page->flags); mpage_readpages()
371 list_del(&page->lru); mpage_readpages()
372 if (!add_to_page_cache_lru(page, mapping, mpage_readpages()
373 page->index, GFP_KERNEL)) { mpage_readpages()
374 bio = do_mpage_readpage(bio, page, mpage_readpages()
380 page_cache_release(page); mpage_readpages()
392 int mpage_readpage(struct page *page, get_block_t get_block) mpage_readpage() argument
401 bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, mpage_readpage()
412 * If the page has buffers then they will be used for obtaining the disk
416 * If the page has no buffers (preferred) then the page is mapped here.
418 * If all blocks are found to be contiguous then the page can go into the
423 * just allocate full-size (16-page) BIOs.
437 static void clean_buffers(struct page *page, unsigned first_unmapped) clean_buffers() argument
441 if (!page_has_buffers(page)) clean_buffers()
443 head = page_buffers(page); clean_buffers()
454 * we cannot drop the bh if the page is not uptodate or a concurrent clean_buffers()
458 if (buffer_heads_over_limit && PageUptodate(page)) clean_buffers()
459 try_to_free_buffers(page); clean_buffers()
462 static int __mpage_writepage(struct page *page, struct writeback_control *wbc, __mpage_writepage() argument
467 struct address_space *mapping = page->mapping; __mpage_writepage()
468 struct inode *inode = page->mapping->host; __mpage_writepage()
486 if (page_has_buffers(page)) { __mpage_writepage()
487 struct buffer_head *head = page_buffers(page); __mpage_writepage()
528 * Page has buffers, but they are all unmapped. The page was __mpage_writepage()
537 * The page has no buffers: map it to disk __mpage_writepage()
539 BUG_ON(!PageUptodate(page)); __mpage_writepage()
540 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); __mpage_writepage()
542 map_bh.b_page = page; __mpage_writepage()
573 if (page->index >= end_index) { __mpage_writepage()
575 * The page straddles i_size. It must be zeroed out on each __mpage_writepage()
577 * "A file is mapped in multiples of the page size. For a file __mpage_writepage()
578 * that is not a multiple of the page size, the remaining memory __mpage_writepage()
584 if (page->index > end_index || !offset) __mpage_writepage()
586 zero_user_segment(page, offset, PAGE_CACHE_SIZE); __mpage_writepage()
590 * This page will go to BIO. Do we need to send this BIO off first? __mpage_writepage()
599 page, wbc)) { __mpage_writepage()
600 clean_buffers(page, first_unmapped); __mpage_writepage()
611 * Must try to add the page before marking the buffer clean or __mpage_writepage()
616 if (bio_add_page(bio, page, length, 0) < length) { __mpage_writepage()
621 clean_buffers(page, first_unmapped); __mpage_writepage()
623 BUG_ON(PageWriteback(page)); __mpage_writepage()
624 set_page_writeback(page); __mpage_writepage()
625 unlock_page(page); __mpage_writepage()
642 ret = mapping->a_ops->writepage(page, wbc); __mpage_writepage()
667 * If a page is already under I/O, generic_writepages() skips it, even
703 int mpage_writepage(struct page *page, get_block_t get_block, mpage_writepage() argument
712 int ret = __mpage_writepage(page, wbc, &mpd); mpage_writepage()
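do_mpage_readpage() and __mpage_writepage() above both batch contiguous pages into one bio and send the bio off as soon as bio_add_page() cannot take the next page in full. A simplified sketch of that "add, or submit and retry" step, using the stock block-layer calls (the real mpage code goes through its own mpage_alloc()/mpage_bio_submit() helpers and also tracks the last block of the previous bio; end_io, the vector count of 16, and GFP_KERNEL below are illustrative choices):

#include <linux/bio.h>
#include <linux/fs.h>

/*
 * Queue one page for reading.  Returns the bio to keep using, or NULL on
 * allocation failure.  'end_io' is the completion callback the caller wants.
 */
static struct bio *queue_page_for_read(struct bio *bio, struct block_device *bdev,
                                       sector_t sector, struct page *page,
                                       unsigned int len, bio_end_io_t *end_io)
{
retry:
        if (!bio) {
                bio = bio_alloc(GFP_KERNEL, 16);        /* up to 16 pages per bio */
                if (!bio)
                        return NULL;
                bio->bi_bdev = bdev;
                bio->bi_iter.bi_sector = sector;
                bio->bi_end_io = end_io;
        }
        if (bio_add_page(bio, page, len, 0) < len) {
                /* Current bio is full: submit it and start a fresh one.  A
                 * fresh bio always has room for one page, so this terminates. */
                submit_bio(READ, bio);
                bio = NULL;
                goto retry;
        }
        return bio;
}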
/linux-4.1.27/arch/score/include/asm/
H A Dpage.h7 /* PAGE_SHIFT determines the page size */
27 * PAGE_OFFSET -- the first address of the first page of memory. When not
28 * using MMU this corresponds to the first free page in physical memory (aligned
29 * on a page boundary).
36 #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
44 typedef struct { unsigned long pte; } pte_t; /* page table entry */
47 typedef struct page *pgtable_t;
71 #define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
73 #define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
74 #define page_to_bus(page) (page_to_phys(page))
H A Dcacheflush.h12 unsigned long page, unsigned long pfn);
17 extern void flush_dcache_page(struct page *page);
29 struct page *page) flush_icache_page()
32 void *v = page_address(page); flush_icache_page()
38 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
41 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
28 flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
/linux-4.1.27/arch/nios2/include/asm/
H A Dpage.h5 * MMU support based on asm/page.h from mips which is:
22 * PAGE_SHIFT determines the page size
29 * PAGE_OFFSET -- the first address of the first page of memory.
48 #define clear_page(page) memset((page), 0, PAGE_SIZE)
51 struct page;
53 extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
55 struct page *to);
60 typedef struct page *pgtable_t;
77 extern struct page *mem_map;
86 #define page_to_virt(page) \
87 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
H A Dcacheflush.h16 * This flag is used to indicate that the page pointed to by a pte is clean
31 extern void flush_dcache_page(struct page *page);
34 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
39 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
42 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
/linux-4.1.27/include/asm-generic/
H A Dpage.h4 * Generic page.h implementation, for NOMMU architectures.
9 #error need to provide a real asm/page.h
13 /* PAGE_SHIFT determines the page size */
28 #define free_user_page(page, addr) free_page(addr)
30 #define clear_page(page) memset((page), 0, PAGE_SIZE)
33 #define clear_user_page(page, vaddr, pg) clear_page(page)
51 typedef struct page *pgtable_t;
87 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/linux-4.1.27/fs/cifs/
H A Dfscache.h50 extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
51 extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
52 extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
59 extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
61 static inline void cifs_fscache_invalidate_page(struct page *page, cifs_fscache_invalidate_page() argument
64 if (PageFsCache(page)) cifs_fscache_invalidate_page()
65 __cifs_fscache_invalidate_page(page, inode); cifs_fscache_invalidate_page()
69 struct page *page) cifs_readpage_from_fscache()
72 return __cifs_readpage_from_fscache(inode, page); cifs_readpage_from_fscache()
89 struct page *page) cifs_readpage_to_fscache()
91 if (PageFsCache(page)) cifs_readpage_to_fscache()
92 __cifs_readpage_to_fscache(inode, page); cifs_readpage_to_fscache()
118 static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp) cifs_fscache_release_page() argument
120 return 1; /* May release page */ cifs_fscache_release_page()
123 static inline void cifs_fscache_invalidate_page(struct page *page, cifs_fscache_invalidate_page() argument
126 cifs_readpage_from_fscache(struct inode *inode, struct page *page) cifs_readpage_from_fscache() argument
140 struct page *page) {}
68 cifs_readpage_from_fscache(struct inode *inode, struct page *page) cifs_readpage_from_fscache() argument
88 cifs_readpage_to_fscache(struct inode *inode, struct page *page) cifs_readpage_to_fscache() argument
139 cifs_readpage_to_fscache(struct inode *inode, struct page *page) cifs_readpage_to_fscache() argument
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dipz_pt_fn.c84 u64 page = __pa(queue->queue_pages[i]); ipz_queue_abs_to_offset() local
85 if (addr >= page && addr < page + queue->pagesize) { ipz_queue_abs_to_offset()
86 *q_offset = addr - page + i * queue->pagesize; ipz_queue_abs_to_offset()
99 * outer loop allocates whole kernel pages (page aligned) and
100 * inner loop divides a kernel page into smaller hca queue pages
130 struct ipz_small_queue_page *page; alloc_small_queue_page() local
136 page = list_entry(pd->free[order].next, alloc_small_queue_page()
139 page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL); alloc_small_queue_page()
140 if (!page) alloc_small_queue_page()
143 page->page = get_zeroed_page(GFP_KERNEL); alloc_small_queue_page()
144 if (!page->page) { alloc_small_queue_page()
145 kmem_cache_free(small_qp_cache, page); alloc_small_queue_page()
149 list_add(&page->list, &pd->free[order]); alloc_small_queue_page()
152 bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order); alloc_small_queue_page()
153 __set_bit(bit, page->bitmap); alloc_small_queue_page()
154 page->fill++; alloc_small_queue_page()
156 if (page->fill == IPZ_SPAGE_PER_KPAGE >> order) alloc_small_queue_page()
157 list_move(&page->list, &pd->full[order]); alloc_small_queue_page()
161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9))); alloc_small_queue_page()
162 queue->small_page = page; alloc_small_queue_page()
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page"); alloc_small_queue_page()
175 struct ipz_small_queue_page *page = queue->small_page; free_small_queue_page() local
184 __clear_bit(bit, page->bitmap); free_small_queue_page()
185 page->fill--; free_small_queue_page()
187 if (page->fill == 0) { free_small_queue_page()
188 list_del(&page->list); free_small_queue_page()
192 if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1) free_small_queue_page()
193 /* the page was full until we freed the chunk */ free_small_queue_page()
194 list_move_tail(&page->list, &pd->free[order]); free_small_queue_page()
199 free_page(page->page); free_small_queue_page()
200 kmem_cache_free(small_qp_cache, page); free_small_queue_page()
211 "is greater than kernel page size", pagesize); ipz_queue_ctor()
224 /* allocate queue page pointers */ ipz_queue_ctor()
230 ehca_gen_err("Couldn't allocate queue page list"); ipz_queue_ctor()
/linux-4.1.27/arch/m68k/mm/
H A Dmemory.c18 #include <asm/page.h>
25 struct page instead of separately kmalloced struct. Stolen from
31 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
32 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
40 unsigned long page = ptable & PAGE_MASK; init_pointer_table() local
41 unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE); init_pointer_table()
43 dp = PD_PTABLE(page); init_pointer_table()
54 /* unreserve the page so it's possible to free that page */ init_pointer_table()
70 * table is taken from a page allocated for the purpose. Each get_pointer_table()
71 * page can hold 8 pointer tables. The page is remapped in get_pointer_table()
75 void *page; get_pointer_table() local
78 if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) get_pointer_table()
81 flush_tlb_kernel_page(page); get_pointer_table()
82 nocache_page(page); get_pointer_table()
84 new = PD_PTABLE(page); get_pointer_table()
88 return (pmd_t *)page; get_pointer_table()
104 unsigned long page = (unsigned long)ptable & PAGE_MASK; free_pointer_table() local
105 unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE); free_pointer_table()
107 dp = PD_PTABLE(page); free_pointer_table()
114 /* all tables in page are free, free page */ free_pointer_table()
116 cache_page((void *)page); free_pointer_table()
117 free_page (page); free_pointer_table()
129 /* invalidate page in both caches */ clear040()
140 /* invalidate page in i-cache */ cleari040()
151 /* push page in both caches */
163 /* push and invalidate page in both caches, must disable ints
177 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
179 * Hit every page until there is a page or less to go. Hit the next page,
185 * exactly at the beginning or end of a page boundary. Else, maybe too much
187 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
211 * We need special treatment for the first page, in case it cache_clear()
212 * is not page-aligned. Page align the addresses to work cache_clear()
228 /* a page boundary gets crossed at the end */ cache_clear()
/linux-4.1.27/fs/ubifs/
H A Dfile.c27 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
28 * the page is dirty and is used for optimization purposes - dirty pages are
30 * the budget for this page. The @PG_checked flag is set if full budgeting is
31 * required for the page e.g., when it corresponds to a file hole or it is
35 * information about how the page was budgeted, to make it possible to release
44 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
104 static int do_readpage(struct page *page) do_readpage() argument
110 struct inode *inode = page->mapping->host; do_readpage()
114 inode->i_ino, page->index, i_size, page->flags); do_readpage()
115 ubifs_assert(!PageChecked(page)); do_readpage()
116 ubifs_assert(!PagePrivate(page)); do_readpage()
118 addr = kmap(page); do_readpage()
120 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; do_readpage()
124 SetPageChecked(page); do_readpage()
166 SetPageChecked(page); do_readpage()
170 ubifs_err(c, "cannot read page %lu of inode %lu, error %d", do_readpage()
171 page->index, inode->i_ino, err); do_readpage()
178 SetPageUptodate(page); do_readpage()
179 ClearPageError(page); do_readpage()
180 flush_dcache_page(page); do_readpage()
181 kunmap(page); do_readpage()
186 ClearPageUptodate(page); do_readpage()
187 SetPageError(page); do_readpage()
188 flush_dcache_page(page); do_readpage()
189 kunmap(page); do_readpage()
194 * release_new_page_budget - release budget of a new page.
198 * of one new page of data.
208 * release_existing_page_budget - release budget of an existing page.
212 * of changing one page of data which already exists on the flash media.
222 loff_t pos, unsigned len, struct page **pagep, write_begin_slow()
230 struct page *page; write_begin_slow() local
236 * At the slow path we have to budget before locking the page, because write_begin_slow()
238 * deadlock if we had the page locked. At this point we do not know write_begin_slow()
239 * anything about the page, so assume that this is a new page which is write_begin_slow()
251 page = grab_cache_page_write_begin(mapping, index, flags); write_begin_slow()
252 if (unlikely(!page)) { write_begin_slow()
257 if (!PageUptodate(page)) { write_begin_slow()
259 SetPageChecked(page); write_begin_slow()
261 err = do_readpage(page); write_begin_slow()
263 unlock_page(page); write_begin_slow()
264 page_cache_release(page); write_begin_slow()
270 SetPageUptodate(page); write_begin_slow()
271 ClearPageError(page); write_begin_slow()
274 if (PagePrivate(page)) write_begin_slow()
276 * The page is dirty, which means it was budgeted twice: write_begin_slow()
278 * made the page dirty and set the PG_private flag; write_begin_slow()
282 * So what we have to do is to release the page budget we write_begin_slow()
286 else if (!PageChecked(page)) write_begin_slow()
288 * We are changing a page which already exists on the media. write_begin_slow()
289 * This means that changing the page does not make the amount write_begin_slow()
312 *pagep = page; write_begin_slow()
319 * @page: page to allocate budget for
320 * @ui: UBIFS inode object the page belongs to
321 * @appending: non-zero if the page is appended
325 * this is appending, whether the page is dirty or not, and so on. This
329 static int allocate_budget(struct ubifs_info *c, struct page *page, allocate_budget() argument
334 if (PagePrivate(page)) { allocate_budget()
337 * The page is dirty and we are not appending, which allocate_budget()
345 * The page is dirty and we are appending, so the inode allocate_budget()
356 * The page is dirty, we are appending, the inode is clean, so allocate_budget()
361 if (PageChecked(page)) allocate_budget()
363 * The page corresponds to a hole and does not allocate_budget()
367 * page. allocate_budget()
373 * indexing information, budget for page allocate_budget()
394 * This function is called when a page of data is going to be written. Since
395 * the page of data will not necessarily go to the flash straight away, UBIFS
403 * o a new page is appended - we have to budget for a new page and for
406 * o an existing clean page is changed - we have budget for it; if the page
408 * page; otherwise, we may budget for changing an existing page; the
409 * difference between these cases is that changing an existing page does
412 * o an existing dirty page is changed - no need to budget at all, because
413 * the page budget has been acquired by earlier, when the page has been
427 struct page **pagep, void **fsdata) ubifs_write_begin()
435 struct page *page; ubifs_write_begin() local
444 page = grab_cache_page_write_begin(mapping, index, flags); ubifs_write_begin()
445 if (unlikely(!page)) ubifs_write_begin()
448 if (!PageUptodate(page)) { ubifs_write_begin()
449 /* The page is not loaded from the flash */ ubifs_write_begin()
452 * We change whole page so no need to load it. But we ubifs_write_begin()
453 * do not know whether this page exists on the media or ubifs_write_begin()
456 * to budget a bit more than to read the page from the ubifs_write_begin()
460 SetPageChecked(page); ubifs_write_begin()
463 err = do_readpage(page); ubifs_write_begin()
465 unlock_page(page); ubifs_write_begin()
466 page_cache_release(page); ubifs_write_begin()
471 SetPageUptodate(page); ubifs_write_begin()
472 ClearPageError(page); ubifs_write_begin()
475 err = allocate_budget(c, page, ui, appending); ubifs_write_begin()
479 * If we skipped reading the page because we were going to ubifs_write_begin()
483 ClearPageChecked(page); ubifs_write_begin()
484 ClearPageUptodate(page); ubifs_write_begin()
490 * page locked, because it would deadlock. Unlock and free ubifs_write_begin()
497 unlock_page(page); ubifs_write_begin()
498 page_cache_release(page); ubifs_write_begin()
509 *pagep = page; ubifs_write_begin()
517 * @page: page to cancel budget for
518 * @ui: UBIFS inode object the page belongs to
519 * @appending: non-zero if the page is appended
521 * This is a helper function for a page write operation. It unlocks the
524 static void cancel_budget(struct ubifs_info *c, struct page *page, cancel_budget() argument
532 if (!PagePrivate(page)) { cancel_budget()
533 if (PageChecked(page)) cancel_budget()
542 struct page *page, void *fsdata) ubifs_write_end()
551 inode->i_ino, pos, page->index, len, copied, inode->i_size); ubifs_write_end()
555 * VFS copied less data to the page that it intended and ubifs_write_end()
557 * argument. If the page was not up-to-date, and @len was ubifs_write_end()
560 * means that part of the page contains garbage. So read the ubifs_write_end()
561 * page now. ubifs_write_end()
563 dbg_gen("copied %d instead of %d, read page and repeat", ubifs_write_end()
565 cancel_budget(c, page, ui, appending); ubifs_write_end()
566 ClearPageChecked(page); ubifs_write_end()
572 copied = do_readpage(page); ubifs_write_end()
576 if (!PagePrivate(page)) { ubifs_write_end()
577 SetPagePrivate(page); ubifs_write_end()
579 __set_page_dirty_nobuffers(page); ubifs_write_end()
596 unlock_page(page); ubifs_write_end()
597 page_cache_release(page); ubifs_write_end()
602 * populate_page - copy data nodes into a page for bulk-read.
604 * @page: page
610 static int populate_page(struct ubifs_info *c, struct page *page, populate_page() argument
614 struct inode *inode = page->mapping->host; populate_page()
621 inode->i_ino, page->index, i_size, page->flags); populate_page()
623 addr = zaddr = kmap(page); populate_page()
626 if (!i_size || page->index > end_index) { populate_page()
632 page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; populate_page()
676 if (end_index == page->index) { populate_page()
685 SetPageChecked(page); populate_page()
689 SetPageUptodate(page); populate_page()
690 ClearPageError(page); populate_page()
691 flush_dcache_page(page); populate_page()
692 kunmap(page); populate_page()
697 ClearPageUptodate(page); populate_page()
698 SetPageError(page); populate_page()
699 flush_dcache_page(page); populate_page()
700 kunmap(page); populate_page()
710 * @page1: first page to read
715 struct page *page1) ubifs_do_bulk_read()
738 * This happens when there are multiple blocks per page and the ubifs_do_bulk_read()
739 * blocks for the first page we are looking for, are not ubifs_do_bulk_read()
781 struct page *page; ubifs_do_bulk_read() local
785 page = find_or_create_page(mapping, page_offset, ubifs_do_bulk_read()
787 if (!page) ubifs_do_bulk_read()
789 if (!PageUptodate(page)) ubifs_do_bulk_read()
790 err = populate_page(c, page, bu, &n); ubifs_do_bulk_read()
791 unlock_page(page); ubifs_do_bulk_read()
792 page_cache_release(page); ubifs_do_bulk_read()
815 * @page: page from which to start bulk-read.
822 static int ubifs_bulk_read(struct page *page) ubifs_bulk_read() argument
824 struct inode *inode = page->mapping->host; ubifs_bulk_read()
827 pgoff_t index = page->index, last_page_read = ui->last_page_read; ubifs_bulk_read()
875 page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); ubifs_bulk_read()
876 err = ubifs_do_bulk_read(c, bu, page); ubifs_bulk_read()
888 static int ubifs_readpage(struct file *file, struct page *page) ubifs_readpage() argument
890 if (ubifs_bulk_read(page)) ubifs_readpage()
892 do_readpage(page); ubifs_readpage()
893 unlock_page(page); ubifs_readpage()
897 static int do_writepage(struct page *page, int len) do_writepage() argument
903 struct inode *inode = page->mapping->host; do_writepage()
909 ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT); do_writepage()
914 set_page_writeback(page); do_writepage()
916 addr = kmap(page); do_writepage()
917 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; do_writepage()
932 SetPageError(page); do_writepage()
933 ubifs_err(c, "cannot write page %lu of inode %lu, error %d", do_writepage()
934 page->index, inode->i_ino, err); do_writepage()
938 ubifs_assert(PagePrivate(page)); do_writepage()
939 if (PageChecked(page)) do_writepage()
945 ClearPagePrivate(page); do_writepage()
946 ClearPageChecked(page); do_writepage()
948 kunmap(page); do_writepage()
949 unlock_page(page); do_writepage()
950 end_page_writeback(page); do_writepage()
974 * the page locked, and it locks @ui_mutex. However, write-back does take inode
979 * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
997 * on the page lock and it would not write the truncated inode node to the
1000 static int ubifs_writepage(struct page *page, struct writeback_control *wbc) ubifs_writepage() argument
1002 struct inode *inode = page->mapping->host; ubifs_writepage()
1010 inode->i_ino, page->index, page->flags); ubifs_writepage()
1011 ubifs_assert(PagePrivate(page)); ubifs_writepage()
1013 /* Is the page fully outside @i_size? (truncate in progress) */ ubifs_writepage()
1014 if (page->index > end_index || (page->index == end_index && !len)) { ubifs_writepage()
1023 /* Is the page fully inside @i_size? */ ubifs_writepage()
1024 if (page->index < end_index) { ubifs_writepage()
1025 if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { ubifs_writepage()
1038 return do_writepage(page, PAGE_CACHE_SIZE); ubifs_writepage()
1042 * The page straddles @i_size. It must be zeroed out on each and every ubifs_writepage()
1044 * in multiples of the page size. For a file that is not a multiple of ubifs_writepage()
1045 * the page size, the remaining memory is zeroed when mapped, and ubifs_writepage()
1048 kaddr = kmap_atomic(page); ubifs_writepage()
1050 flush_dcache_page(page); ubifs_writepage()
1059 return do_writepage(page, len); ubifs_writepage()
1062 unlock_page(page); ubifs_writepage()
1143 struct page *page; do_truncation() local
1145 page = find_lock_page(inode->i_mapping, index); do_truncation()
1146 if (page) { do_truncation()
1147 if (PageDirty(page)) { do_truncation()
1151 * out-of-date data because the page is dirty. do_truncation()
1152 * Write the page now, so that do_truncation()
1156 ubifs_assert(PagePrivate(page)); do_truncation()
1158 clear_page_dirty_for_io(page); do_truncation()
1162 err = do_writepage(page, offset); do_truncation()
1163 page_cache_release(page); do_truncation()
1172 * We could 'kmap()' the page and pass the data do_truncation()
1176 unlock_page(page); do_truncation()
1177 page_cache_release(page); do_truncation()
1282 static void ubifs_invalidatepage(struct page *page, unsigned int offset, ubifs_invalidatepage() argument
1285 struct inode *inode = page->mapping->host; ubifs_invalidatepage()
1288 ubifs_assert(PagePrivate(page)); ubifs_invalidatepage()
1290 /* Partial page remains dirty */ ubifs_invalidatepage()
1293 if (PageChecked(page)) ubifs_invalidatepage()
1299 ClearPagePrivate(page); ubifs_invalidatepage()
1300 ClearPageChecked(page); ubifs_invalidatepage()
1410 static int ubifs_set_page_dirty(struct page *page) ubifs_set_page_dirty() argument
1414 ret = __set_page_dirty_nobuffers(page); ubifs_set_page_dirty()
1416 * An attempt to dirty a page without budgeting for it - should not ubifs_set_page_dirty()
1423 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) ubifs_releasepage() argument
1426 * An attempt to release a dirty page without budgeting for it - should ubifs_releasepage()
1429 if (PageWriteback(page)) ubifs_releasepage()
1431 ubifs_assert(PagePrivate(page)); ubifs_releasepage()
1433 ClearPagePrivate(page); ubifs_releasepage()
1434 ClearPageChecked(page); ubifs_releasepage()
1440 * UBIFS must ensure page is budgeted for.
1445 struct page *page = vmf->page; ubifs_vm_page_mkwrite() local
1452 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index, ubifs_vm_page_mkwrite()
1460 * We have not locked @page so far so we may budget for changing the ubifs_vm_page_mkwrite()
1461 * page. Note, we cannot do this after we locked the page, because ubifs_vm_page_mkwrite()
1464 * At the moment we do not know whether the page is dirty or not, so we ubifs_vm_page_mkwrite()
1465 * assume that it is not and budget for a new page. We could look at ubifs_vm_page_mkwrite()
1467 * back and the page state may change by the time we lock it, so this ubifs_vm_page_mkwrite()
1470 * budget for a new page and amend it later on if the page was in fact ubifs_vm_page_mkwrite()
1493 lock_page(page); ubifs_vm_page_mkwrite()
1494 if (unlikely(page->mapping != inode->i_mapping || ubifs_vm_page_mkwrite()
1495 page_offset(page) > i_size_read(inode))) { ubifs_vm_page_mkwrite()
1501 if (PagePrivate(page)) ubifs_vm_page_mkwrite()
1504 if (!PageChecked(page)) ubifs_vm_page_mkwrite()
1506 SetPagePrivate(page); ubifs_vm_page_mkwrite()
1508 __set_page_dirty_nobuffers(page); ubifs_vm_page_mkwrite()
1524 wait_for_stable_page(page); ubifs_vm_page_mkwrite()
1528 unlock_page(page); ubifs_vm_page_mkwrite()
540 ubifs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) ubifs_write_end() argument
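The UBIFS excerpt above budgets flash space before it locks or dirties a page, and the amount depends on whether the page is already dirty (PG_private), whether it covers a hole or new data (PG_checked), and whether the write appends and therefore dirties the inode as well. A schematic sketch of that decision, with struct budget_request invented for illustration in place of struct ubifs_budget_req:

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/string.h>

/* Invented request structure; the real one is struct ubifs_budget_req. */
struct budget_request {
        unsigned int new_page:1;        /* budget for a brand new page */
        unsigned int dirtied_page:1;    /* budget for changing an existing page */
        unsigned int dirtied_ino:1;     /* budget for dirtying the inode */
};

static void fill_page_budget(struct budget_request *req, struct page *page,
                             int appending, int inode_clean)
{
        memset(req, 0, sizeof(*req));

        if (!PagePrivate(page)) {
                /* Clean page: pay for it, fully if it is a hole/new page. */
                if (PageChecked(page))
                        req->new_page = 1;
                else
                        req->dirtied_page = 1;
        }
        /* Appending grows i_size, so a clean inode becomes dirty as well. */
        if (appending && inode_clean)
                req->dirtied_ino = 1;
}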
/linux-4.1.27/arch/arm64/mm/
H A Dflush.c37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
53 * Copy user data from/to a page which is mapped into a different processes
59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
67 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
75 struct page *page = pte_page(pte); __sync_icache_dcache() local
78 if (!page_mapping(page)) __sync_icache_dcache()
81 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { __sync_icache_dcache()
82 __flush_dcache_area(page_address(page), __sync_icache_dcache()
83 PAGE_SIZE << compound_order(page)); __sync_icache_dcache()
91 * This function is called when a page has been modified by the kernel. Mark
95 void flush_dcache_page(struct page *page) flush_dcache_page() argument
97 if (test_bit(PG_dcache_clean, &page->flags)) flush_dcache_page()
98 clear_bit(PG_dcache_clean, &page->flags); flush_dcache_page()
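The arm64 code above (like the nios2, mips, and sh code elsewhere in these results) defers the expensive D-cache flush: flush_dcache_page() merely clears PG_dcache_clean, and the flush is performed later, at most once, when the page is actually mapped into user space. A sketch of that lazy-flag pattern on an architecture that defines PG_dcache_clean; do_flush_range() is a placeholder for the real flush primitive:

#include <linux/mm.h>
#include <linux/page-flags.h>

static void mark_page_dcache_dirty(struct page *page)
{
        /* Cheap: just note that the kernel modified the page. */
        if (test_bit(PG_dcache_clean, &page->flags))
                clear_bit(PG_dcache_clean, &page->flags);
}

static void sync_page_caches(struct page *page,
                             void (*do_flush_range)(void *addr, size_t len))
{
        /* Expensive flush, done at most once until the page is dirtied again. */
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                do_flush_range(page_address(page), PAGE_SIZE);
}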
/linux-4.1.27/arch/mips/mm/
H A Dgup.c38 int write, struct page **pages, int *nr) gup_pte_range()
43 struct page *page; gup_pte_range() local
51 page = pte_page(pte); gup_pte_range()
52 get_page(page); gup_pte_range()
53 SetPageReferenced(page); gup_pte_range()
54 pages[*nr] = page; gup_pte_range()
63 static inline void get_head_page_multiple(struct page *page, int nr) get_head_page_multiple() argument
65 VM_BUG_ON(page != compound_head(page)); get_head_page_multiple()
66 VM_BUG_ON(page_count(page) == 0); get_head_page_multiple()
67 atomic_add(nr, &page->_count); get_head_page_multiple()
68 SetPageReferenced(page); get_head_page_multiple()
72 int write, struct page **pages, int *nr) gup_huge_pmd()
75 struct page *head, *page; gup_huge_pmd() local
86 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); gup_huge_pmd()
88 VM_BUG_ON(compound_head(page) != head); gup_huge_pmd()
89 pages[*nr] = page; gup_huge_pmd()
90 if (PageTail(page)) gup_huge_pmd()
91 get_huge_page_tail(page); gup_huge_pmd()
93 page++; gup_huge_pmd()
102 int write, struct page **pages, int *nr) gup_pmd_range()
138 int write, struct page **pages, int *nr) gup_huge_pud()
141 struct page *head, *page; gup_huge_pud() local
152 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); gup_huge_pud()
154 VM_BUG_ON(compound_head(page) != head); gup_huge_pud()
155 pages[*nr] = page; gup_huge_pud()
156 if (PageTail(page)) gup_huge_pud()
157 get_huge_page_tail(page); gup_huge_pud()
159 page++; gup_huge_pud()
168 int write, struct page **pages, int *nr) gup_pud_range()
197 struct page **pages) __get_user_pages_fast()
228 * So long as we atomically load page table pointers versus teardown, __get_user_pages_fast()
229 * we can follow the address down to the page and take a ref on it. __get_user_pages_fast()
264 struct page **pages) get_user_pages_fast()
H A Dcache.c30 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
82 void __flush_dcache_page(struct page *page) __flush_dcache_page() argument
84 struct address_space *mapping = page_mapping(page); __flush_dcache_page()
88 SetPageDcacheDirty(page); __flush_dcache_page()
97 if (PageHighMem(page)) __flush_dcache_page()
98 addr = (unsigned long)kmap_atomic(page); __flush_dcache_page()
100 addr = (unsigned long)page_address(page); __flush_dcache_page()
104 if (PageHighMem(page)) __flush_dcache_page()
110 void __flush_anon_page(struct page *page, unsigned long vmaddr) __flush_anon_page() argument
112 unsigned long addr = (unsigned long) page_address(page); __flush_anon_page()
115 if (page_mapped(page) && !Page_dcache_dirty(page)) { __flush_anon_page()
118 kaddr = kmap_coherent(page, vmaddr); __flush_anon_page()
130 struct page *page; __update_cache() local
137 page = pfn_to_page(pfn); __update_cache()
138 if (Page_dcache_dirty(page)) { __update_cache()
139 if (PageHighMem(page)) __update_cache()
140 addr = (unsigned long)kmap_atomic(page); __update_cache()
142 addr = (unsigned long)page_address(page); __update_cache()
147 if (PageHighMem(page)) __update_cache()
150 ClearPageDcacheDirty(page); __update_cache()
H A Dhighmem.c13 void *kmap(struct page *page) kmap() argument
18 if (!PageHighMem(page)) kmap()
19 return page_address(page); kmap()
20 addr = kmap_high(page); kmap()
27 void kunmap(struct page *page) kunmap() argument
30 if (!PageHighMem(page)) kunmap()
32 kunmap_high(page); kunmap()
45 void *kmap_atomic(struct page *page) kmap_atomic() argument
52 if (!PageHighMem(page)) kmap_atomic()
53 return page_address(page); kmap_atomic()
61 set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL)); kmap_atomic()
100 * have a struct page associated with it.
118 struct page *kmap_atomic_to_page(void *ptr) kmap_atomic_to_page()
/linux-4.1.27/arch/sh/mm/
H A Dcache.c58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
62 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && copy_to_user_page()
63 test_bit(PG_dcache_clean, &page->flags)) { copy_to_user_page()
64 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); copy_to_user_page()
70 clear_bit(PG_dcache_clean, &page->flags); copy_to_user_page()
74 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_to_user_page()
77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
81 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && copy_from_user_page()
82 test_bit(PG_dcache_clean, &page->flags)) { copy_from_user_page()
83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); copy_from_user_page()
89 clear_bit(PG_dcache_clean, &page->flags); copy_from_user_page()
93 void copy_user_highpage(struct page *to, struct page *from, copy_user_highpage()
116 /* Make sure this page is cleared on other CPU's too before using it */ copy_user_highpage()
121 void clear_user_highpage(struct page *page, unsigned long vaddr) clear_user_highpage() argument
123 void *kaddr = kmap_atomic(page); clear_user_highpage()
137 struct page *page; __update_cache() local
143 page = pfn_to_page(pfn); __update_cache()
145 int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags); __update_cache()
147 __flush_purge_region(page_address(page), PAGE_SIZE); __update_cache()
151 void __flush_anon_page(struct page *page, unsigned long vmaddr) __flush_anon_page() argument
153 unsigned long addr = (unsigned long) page_address(page); __flush_anon_page()
156 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && __flush_anon_page()
157 test_bit(PG_dcache_clean, &page->flags)) { __flush_anon_page()
160 kaddr = kmap_coherent(page, vmaddr); __flush_anon_page()
216 void flush_dcache_page(struct page *page) flush_dcache_page() argument
218 cacheop_on_each_cpu(local_flush_dcache_page, page, 1); flush_dcache_page()
234 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
236 /* Nothing uses the VMA, so just pass the struct page along */ flush_icache_page()
237 cacheop_on_each_cpu(local_flush_icache_page, page, 1); flush_icache_page()
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
H A Dlustre_patchless_compat.h47 #define ll_delete_from_page_cache(page) delete_from_page_cache(page)
50 truncate_complete_page(struct address_space *mapping, struct page *page) truncate_complete_page() argument
52 if (page->mapping != mapping) truncate_complete_page()
55 if (PagePrivate(page)) truncate_complete_page()
56 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); truncate_complete_page()
58 if (TestClearPageDirty(page)) truncate_complete_page()
59 account_page_cleaned(page, mapping); truncate_complete_page()
61 ClearPageMappedToDisk(page); truncate_complete_page()
62 ll_delete_from_page_cache(page); truncate_complete_page()
/linux-4.1.27/arch/cris/include/asm/
H A Dpage.h4 #include <arch/page.h>
7 /* PAGE_SHIFT determines the page size */
12 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
15 #define clear_user_page(page, vaddr, pg) clear_page(page)
29 typedef struct page *pgtable_t;
41 /* for that before indexing into the page table starting at mem_map */
45 /* to index into the page map. our pages all start at physical addr PAGE_OFFSET so
56 /* convert a page (based on mem_map and forward) to a physical address
60 #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
/linux-4.1.27/drivers/target/iscsi/
H A Discsi_target_stat.c68 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_inst()
73 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); iscsi_stat_instance_show_attr_inst()
78 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_min_ver()
80 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); iscsi_stat_instance_show_attr_min_ver()
85 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_max_ver()
87 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); iscsi_stat_instance_show_attr_max_ver()
92 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_portals()
97 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps); iscsi_stat_instance_show_attr_portals()
102 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_nodes()
104 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES); iscsi_stat_instance_show_attr_nodes()
109 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_sessions()
114 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions); iscsi_stat_instance_show_attr_sessions()
119 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_fail_sess()
132 return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count); iscsi_stat_instance_show_attr_fail_sess()
137 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_fail_type()
143 return snprintf(page, PAGE_SIZE, "%u\n", iscsi_stat_instance_show_attr_fail_type()
149 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_fail_rem_name()
155 return snprintf(page, PAGE_SIZE, "%s\n", iscsi_stat_instance_show_attr_fail_rem_name()
162 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_disc_time()
164 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME); iscsi_stat_instance_show_attr_disc_time()
169 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_description()
171 return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR); iscsi_stat_instance_show_attr_description()
176 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_vendor()
178 return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n"); iscsi_stat_instance_show_attr_vendor()
183 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_version()
185 return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION); iscsi_stat_instance_show_attr_version()
238 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_inst()
243 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); iscsi_stat_sess_err_show_attr_inst()
248 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_digest_errors()
254 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors); iscsi_stat_sess_err_show_attr_digest_errors()
259 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_cxn_errors()
265 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors); iscsi_stat_sess_err_show_attr_cxn_errors()
270 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_format_errors()
276 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors); iscsi_stat_sess_err_show_attr_format_errors()
320 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_inst()
325 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); iscsi_stat_tgt_attr_show_attr_inst()
330 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_indx()
332 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); iscsi_stat_tgt_attr_show_attr_indx()
337 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_login_fails()
350 return snprintf(page, PAGE_SIZE, "%u\n", fail_count); iscsi_stat_tgt_attr_show_attr_login_fails()
355 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_last_fail_time()
368 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time); iscsi_stat_tgt_attr_show_attr_last_fail_time()
373 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_last_fail_type()
384 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type); iscsi_stat_tgt_attr_show_attr_last_fail_type()
389 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_fail_intr_name()
401 return snprintf(page, PAGE_SIZE, "%s\n", buf); iscsi_stat_tgt_attr_show_attr_fail_intr_name()
406 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type()
415 ret = snprintf(page, PAGE_SIZE, "ipv6\n"); iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type()
417 ret = snprintf(page, PAGE_SIZE, "ipv4\n"); iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type()
425 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_fail_intr_addr()
433 ret = snprintf(page, PAGE_SIZE, "%s\n", lstat->last_intr_fail_ip_addr); iscsi_stat_tgt_attr_show_attr_fail_intr_addr()
484 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_inst()
489 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); iscsi_stat_login_show_attr_inst()
494 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_indx()
496 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); iscsi_stat_login_show_attr_indx()
501 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_accepts()
509 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts); iscsi_stat_login_show_attr_accepts()
517 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_other_fails()
525 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails); iscsi_stat_login_show_attr_other_fails()
533 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_redirects()
541 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects); iscsi_stat_login_show_attr_redirects()
549 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_authorize_fails()
557 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails); iscsi_stat_login_show_attr_authorize_fails()
565 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_authenticate_fails()
573 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails); iscsi_stat_login_show_attr_authenticate_fails()
581 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_negotiate_fails()
589 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails); iscsi_stat_login_show_attr_negotiate_fails()
641 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_inst()
646 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); iscsi_stat_logout_show_attr_inst()
651 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_indx()
653 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); iscsi_stat_logout_show_attr_indx()
658 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_normal_logouts()
664 return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts); iscsi_stat_logout_show_attr_normal_logouts()
669 struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_abnormal_logouts()
675 return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts); iscsi_stat_logout_show_attr_abnormal_logouts()
720 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_inst()
728 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); iscsi_stat_sess_show_attr_inst()
733 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_node()
747 ret = snprintf(page, PAGE_SIZE, "%u\n", iscsi_stat_sess_show_attr_node()
757 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_indx()
771 ret = snprintf(page, PAGE_SIZE, "%u\n", iscsi_stat_sess_show_attr_indx()
781 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_cmd_pdus()
795 ret = snprintf(page, PAGE_SIZE, "%lu\n", iscsi_stat_sess_show_attr_cmd_pdus()
805 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_rsp_pdus()
819 ret = snprintf(page, PAGE_SIZE, "%lu\n", iscsi_stat_sess_show_attr_rsp_pdus()
829 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_txdata_octs()
843 ret = snprintf(page, PAGE_SIZE, "%lu\n", iscsi_stat_sess_show_attr_txdata_octs()
853 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_rxdata_octs()
867 ret = snprintf(page, PAGE_SIZE, "%lu\n", iscsi_stat_sess_show_attr_rxdata_octs()
877 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_conn_digest_errors()
891 ret = snprintf(page, PAGE_SIZE, "%lu\n", iscsi_stat_sess_show_attr_conn_digest_errors()
901 struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_conn_timeout_errors()
915 ret = snprintf(page, PAGE_SIZE, "%lu\n", iscsi_stat_sess_show_attr_conn_timeout_errors()
67 iscsi_stat_instance_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_inst() argument
77 iscsi_stat_instance_show_attr_min_ver( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_min_ver() argument
84 iscsi_stat_instance_show_attr_max_ver( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_max_ver() argument
91 iscsi_stat_instance_show_attr_portals( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_portals() argument
101 iscsi_stat_instance_show_attr_nodes( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_nodes() argument
108 iscsi_stat_instance_show_attr_sessions( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_sessions() argument
118 iscsi_stat_instance_show_attr_fail_sess( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_fail_sess() argument
136 iscsi_stat_instance_show_attr_fail_type( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_fail_type() argument
148 iscsi_stat_instance_show_attr_fail_rem_name( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_fail_rem_name() argument
161 iscsi_stat_instance_show_attr_disc_time( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_disc_time() argument
168 iscsi_stat_instance_show_attr_description( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_description() argument
175 iscsi_stat_instance_show_attr_vendor( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_vendor() argument
182 iscsi_stat_instance_show_attr_version( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_instance_show_attr_version() argument
237 iscsi_stat_sess_err_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_inst() argument
247 iscsi_stat_sess_err_show_attr_digest_errors( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_digest_errors() argument
258 iscsi_stat_sess_err_show_attr_cxn_errors( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_cxn_errors() argument
269 iscsi_stat_sess_err_show_attr_format_errors( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_sess_err_show_attr_format_errors() argument
319 iscsi_stat_tgt_attr_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_inst() argument
329 iscsi_stat_tgt_attr_show_attr_indx( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_indx() argument
336 iscsi_stat_tgt_attr_show_attr_login_fails( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_login_fails() argument
354 iscsi_stat_tgt_attr_show_attr_last_fail_time( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_last_fail_time() argument
372 iscsi_stat_tgt_attr_show_attr_last_fail_type( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_last_fail_type() argument
388 iscsi_stat_tgt_attr_show_attr_fail_intr_name( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_fail_intr_name() argument
405 iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type() argument
424 iscsi_stat_tgt_attr_show_attr_fail_intr_addr( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_tgt_attr_show_attr_fail_intr_addr() argument
483 iscsi_stat_login_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_inst() argument
493 iscsi_stat_login_show_attr_indx( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_indx() argument
500 iscsi_stat_login_show_attr_accepts( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_accepts() argument
516 iscsi_stat_login_show_attr_other_fails( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_other_fails() argument
532 iscsi_stat_login_show_attr_redirects( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_redirects() argument
548 iscsi_stat_login_show_attr_authorize_fails( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_authorize_fails() argument
564 iscsi_stat_login_show_attr_authenticate_fails( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_authenticate_fails() argument
580 iscsi_stat_login_show_attr_negotiate_fails( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_login_show_attr_negotiate_fails() argument
640 iscsi_stat_logout_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_inst() argument
650 iscsi_stat_logout_show_attr_indx( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_indx() argument
657 iscsi_stat_logout_show_attr_normal_logouts( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_normal_logouts() argument
668 iscsi_stat_logout_show_attr_abnormal_logouts( struct iscsi_wwn_stat_grps *igrps, char *page) iscsi_stat_logout_show_attr_abnormal_logouts() argument
719 iscsi_stat_sess_show_attr_inst( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_inst() argument
732 iscsi_stat_sess_show_attr_node( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_node() argument
756 iscsi_stat_sess_show_attr_indx( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_indx() argument
780 iscsi_stat_sess_show_attr_cmd_pdus( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_cmd_pdus() argument
804 iscsi_stat_sess_show_attr_rsp_pdus( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_rsp_pdus() argument
828 iscsi_stat_sess_show_attr_txdata_octs( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_txdata_octs() argument
852 iscsi_stat_sess_show_attr_rxdata_octs( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_rxdata_octs() argument
876 iscsi_stat_sess_show_attr_conn_digest_errors( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_conn_digest_errors() argument
900 iscsi_stat_sess_show_attr_conn_timeout_errors( struct iscsi_node_stat_grps *igrps, char *page) iscsi_stat_sess_show_attr_conn_timeout_errors() argument
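Every handler in this file follows the same configfs convention: the core hands in a single page-sized buffer, the handler formats into it with snprintf() and returns the byte count. A hedged sketch of that shape with invented names (struct my_stats and foo_count are illustrative, not part of this driver):

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/mm.h>		/* PAGE_SIZE */

	struct my_stats {
		u32 foo_count;
	};

	/* Illustrative only: mirrors the pattern of the show routines above. */
	static ssize_t my_stats_show_attr_foo_count(struct my_stats *stats, char *page)
	{
		/* snprintf() bounds the output to one page. */
		return snprintf(page, PAGE_SIZE, "%u\n", stats->foo_count);
	}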
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Ddoorbell.c47 struct mlx4_ib_user_db_page *page; mlx4_ib_db_map_user() local
52 list_for_each_entry(page, &context->db_page_list, list) mlx4_ib_db_map_user()
53 if (page->user_virt == (virt & PAGE_MASK)) mlx4_ib_db_map_user()
56 page = kmalloc(sizeof *page, GFP_KERNEL); mlx4_ib_db_map_user()
57 if (!page) { mlx4_ib_db_map_user()
62 page->user_virt = (virt & PAGE_MASK); mlx4_ib_db_map_user()
63 page->refcnt = 0; mlx4_ib_db_map_user()
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx4_ib_db_map_user()
66 if (IS_ERR(page->umem)) { mlx4_ib_db_map_user()
67 err = PTR_ERR(page->umem); mlx4_ib_db_map_user()
68 kfree(page); mlx4_ib_db_map_user()
72 list_add(&page->list, &context->db_page_list); mlx4_ib_db_map_user()
75 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); mlx4_ib_db_map_user()
76 db->u.user_page = page; mlx4_ib_db_map_user()
77 ++page->refcnt; mlx4_ib_db_map_user()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Ddoorbell.c49 struct mlx5_ib_user_db_page *page; mlx5_ib_db_map_user() local
54 list_for_each_entry(page, &context->db_page_list, list) mlx5_ib_db_map_user()
55 if (page->user_virt == (virt & PAGE_MASK)) mlx5_ib_db_map_user()
58 page = kmalloc(sizeof(*page), GFP_KERNEL); mlx5_ib_db_map_user()
59 if (!page) { mlx5_ib_db_map_user()
64 page->user_virt = (virt & PAGE_MASK); mlx5_ib_db_map_user()
65 page->refcnt = 0; mlx5_ib_db_map_user()
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx5_ib_db_map_user()
68 if (IS_ERR(page->umem)) { mlx5_ib_db_map_user()
69 err = PTR_ERR(page->umem); mlx5_ib_db_map_user()
70 kfree(page); mlx5_ib_db_map_user()
74 list_add(&page->list, &context->db_page_list); mlx5_ib_db_map_user()
77 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); mlx5_ib_db_map_user()
78 db->u.user_page = page; mlx5_ib_db_map_user()
79 ++page->refcnt; mlx5_ib_db_map_user()
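Both doorbell.c hits implement the same cache: user doorbells are small, so many of them share one page, and the driver keys a per-context list by the page-aligned user address so each page is pinned with ib_umem_get() only once and reference-counted for later users. A hedged, driver-agnostic sketch of the lookup step; struct db_page and the list head are invented for illustration:

	#include <linux/list.h>
	#include <linux/mm.h>		/* PAGE_MASK */

	/* Hypothetical cache entry mirroring mlx4/mlx5's user_db_page. */
	struct db_page {
		struct list_head list;
		unsigned long	 user_virt;	/* page-aligned user address */
		int		 refcnt;
	};

	static struct db_page *find_db_page(struct list_head *db_page_list,
					    unsigned long virt)
	{
		struct db_page *page;

		list_for_each_entry(page, db_page_list, list)
			if (page->user_virt == (virt & PAGE_MASK)) {
				++page->refcnt;	/* reuse the already-pinned page */
				return page;
			}

		return NULL;	/* caller allocates, pins the page, sets refcnt to 1 */
	}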
/linux-4.1.27/arch/x86/um/
H A Dmem_64.c2 #include <asm/page.h>
/linux-4.1.27/arch/s390/kernel/vdso32/
H A Dvdso32_wrapper.S3 #include <asm/page.h>
/linux-4.1.27/arch/s390/kernel/vdso64/
H A Dvdso64_wrapper.S3 #include <asm/page.h>
/linux-4.1.27/arch/powerpc/kernel/vdso32/
H A Dvdso32_wrapper.S2 #include <asm/page.h>
/linux-4.1.27/arch/powerpc/kernel/vdso64/
H A Dvdso64_wrapper.S2 #include <asm/page.h>
/linux-4.1.27/arch/c6x/include/asm/
H A Dcacheflush.h19 #include <asm/page.h>
33 #define flush_dcache_page(page) do {} while (0)
46 #define flush_icache_page(vma, page) \
49 L1D_cache_block_writeback_invalidate(page_address(page), \
50 (unsigned long) page_address(page) + PAGE_SIZE)); \
51 L1P_cache_block_invalidate(page_address(page), \
52 (unsigned long) page_address(page) + PAGE_SIZE)); \
56 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
62 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dpage.h9 #include <asm-generic/page.h>
/linux-4.1.27/fs/hfs/
H A Dbnode.c20 struct page *page; hfs_bnode_read() local
23 page = node->page[0]; hfs_bnode_read()
25 memcpy(buf, kmap(page) + off, len); hfs_bnode_read()
26 kunmap(page); hfs_bnode_read()
62 struct page *page; hfs_bnode_write() local
65 page = node->page[0]; hfs_bnode_write()
67 memcpy(kmap(page) + off, buf, len); hfs_bnode_write()
68 kunmap(page); hfs_bnode_write()
69 set_page_dirty(page); hfs_bnode_write()
87 struct page *page; hfs_bnode_clear() local
90 page = node->page[0]; hfs_bnode_clear()
92 memset(kmap(page) + off, 0, len); hfs_bnode_clear()
93 kunmap(page); hfs_bnode_clear()
94 set_page_dirty(page); hfs_bnode_clear()
101 struct page *src_page, *dst_page; hfs_bnode_copy()
109 src_page = src_node->page[0]; hfs_bnode_copy()
110 dst_page = dst_node->page[0]; hfs_bnode_copy()
120 struct page *page; hfs_bnode_move() local
128 page = node->page[0]; hfs_bnode_move()
129 ptr = kmap(page); hfs_bnode_move()
131 kunmap(page); hfs_bnode_move()
132 set_page_dirty(page); hfs_bnode_move()
242 struct page *page; __hfs_bnode_create() local
253 sizeof(struct page *); __hfs_bnode_create()
284 page = read_mapping_page(mapping, block++, NULL); __hfs_bnode_create()
285 if (IS_ERR(page)) __hfs_bnode_create()
287 if (PageError(page)) { __hfs_bnode_create()
288 page_cache_release(page); __hfs_bnode_create()
291 node->page[i] = page; __hfs_bnode_create()
341 desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset); hfs_bnode_find()
347 kunmap(node->page[0]); hfs_bnode_find()
403 if (node->page[i]) hfs_bnode_free()
404 page_cache_release(node->page[i]); hfs_bnode_free()
411 struct page **pagep; hfs_bnode_create()
430 pagep = node->page; hfs_bnode_create()
470 if (!node->page[i]) hfs_bnode_put()
472 mark_page_accessed(node->page[i]); hfs_bnode_put()
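hfs_bnode_read/write/clear above all reduce to one sequence: map the node's backing page, memcpy or memset, unmap, and mark the page dirty when it was modified so writeback will persist it. A minimal sketch of that sequence, assuming the range stays inside a single page; the helper is illustrative, not from this file:

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Copy 'len' bytes into a page-cache page at byte offset 'off'. */
	static void write_into_page(struct page *page, unsigned int off,
				    const void *buf, unsigned int len)
	{
		void *kaddr = kmap(page);	/* sleepable mapping of the page */

		memcpy(kaddr + off, buf, len);
		kunmap(page);
		set_page_dirty(page);		/* schedule the change for writeback */
	}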
/linux-4.1.27/arch/mn10300/mm/
H A Dcache-flush-icache.c18 * flush_icache_page - Flush a page from the dcache and invalidate the icache
19 * @vma: The VMA the page is part of.
20 * @page: The page to be flushed.
22 * Write a page back from the dcache and invalidate the icache so that we can
25 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
27 unsigned long start = page_to_phys(page); flush_icache_page()
42 * single page
43 * @start: The starting virtual address of the page part.
44 * @end: The ending virtual address of the page part.
46 * Flush the dcache and invalidate the icache for part of a single page, as
47 * determined by the virtual addresses given. The page must be in the paged
53 struct page *page; flush_icache_page_range() local
59 /* work out how much of the page to flush */ flush_icache_page_range()
63 /* get the physical address the page is mapped to from the page flush_icache_page_range()
86 page = pte_page(pte); flush_icache_page_range()
87 if (!page) flush_icache_page_range()
90 addr = page_to_phys(page); flush_icache_page_range()
124 * require page tables, so we just map such addresses flush_icache_range()
139 /* the first and last bytes are on the same page */ flush_icache_range()
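These helpers exist because the MN10300 instruction cache does not snoop data-cache writes: after the kernel stores instructions into memory it must write the D-cache back and invalidate the I-cache before executing them. A hedged sketch of the usual call after patching code; the helper and its arguments are illustrative:

	#include <linux/types.h>
	#include <asm/cacheflush.h>

	/* Call after writing 'size' bytes of executable code at 'code'. */
	static void sync_icache_after_patch(void *code, size_t size)
	{
		unsigned long start = (unsigned long)code;

		flush_icache_range(start, start + size); /* write back D$, invalidate I$ */
	}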
/linux-4.1.27/drivers/hwmon/pmbus/
H A Dpmbus.c31 * Find sensor groups and status registers on each page.
36 int page; pmbus_find_sensor_groups() local
38 /* Sensors detected on page 0 only */ pmbus_find_sensor_groups()
75 for (page = 0; page < info->pages; page++) { pmbus_find_sensor_groups()
76 if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) { pmbus_find_sensor_groups()
77 info->func[page] |= PMBUS_HAVE_VOUT; pmbus_find_sensor_groups()
78 if (pmbus_check_byte_register(client, page, pmbus_find_sensor_groups()
80 info->func[page] |= PMBUS_HAVE_STATUS_VOUT; pmbus_find_sensor_groups()
82 if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) { pmbus_find_sensor_groups()
83 info->func[page] |= PMBUS_HAVE_IOUT; pmbus_find_sensor_groups()
86 info->func[page] |= PMBUS_HAVE_STATUS_IOUT; pmbus_find_sensor_groups()
88 if (pmbus_check_word_register(client, page, PMBUS_READ_POUT)) pmbus_find_sensor_groups()
89 info->func[page] |= PMBUS_HAVE_POUT; pmbus_find_sensor_groups()
104 * keep setting the page number until it fails or until the pmbus_identify()
109 int page; pmbus_identify() local
111 for (page = 1; page < PMBUS_PAGES; page++) { pmbus_identify()
112 if (pmbus_set_page(client, page) < 0) pmbus_identify()
116 info->pages = page; pmbus_identify()
H A Dltc2978.c105 static int ltc2978_read_word_data_common(struct i2c_client *client, int page, ltc2978_read_word_data_common() argument
114 ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_PEAK); ltc2978_read_word_data_common()
122 ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK); ltc2978_read_word_data_common()
128 if (ret > data->vout_max[page]) ltc2978_read_word_data_common()
129 data->vout_max[page] = ret; ltc2978_read_word_data_common()
130 ret = data->vout_max[page]; ltc2978_read_word_data_common()
134 ret = pmbus_read_word_data(client, page, ltc2978_read_word_data_common()
138 > lin11_to_val(data->temp_max[page])) ltc2978_read_word_data_common()
139 data->temp_max[page] = ret; ltc2978_read_word_data_common()
140 ret = data->temp_max[page]; ltc2978_read_word_data_common()
155 static int ltc2978_read_word_data(struct i2c_client *client, int page, int reg) ltc2978_read_word_data() argument
163 ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_MIN); ltc2978_read_word_data()
171 ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_MIN); ltc2978_read_word_data()
179 if (data->vout_max[page] && ret > data->vout_max[page]) ltc2978_read_word_data()
180 ret = data->vout_max[page]; ltc2978_read_word_data()
181 if (ret < data->vout_min[page]) ltc2978_read_word_data()
182 data->vout_min[page] = ret; ltc2978_read_word_data()
183 ret = data->vout_min[page]; ltc2978_read_word_data()
187 ret = pmbus_read_word_data(client, page, ltc2978_read_word_data()
191 < lin11_to_val(data->temp_min[page])) ltc2978_read_word_data()
192 data->temp_min[page] = ret; ltc2978_read_word_data()
193 ret = data->temp_min[page]; ltc2978_read_word_data()
203 ret = ltc2978_read_word_data_common(client, page, reg); ltc2978_read_word_data()
209 static int ltc2974_read_word_data(struct i2c_client *client, int page, int reg) ltc2974_read_word_data() argument
217 ret = pmbus_read_word_data(client, page, LTC2974_MFR_IOUT_PEAK); ltc2974_read_word_data()
220 > lin11_to_val(data->iout_max[page])) ltc2974_read_word_data()
221 data->iout_max[page] = ret; ltc2974_read_word_data()
222 ret = data->iout_max[page]; ltc2974_read_word_data()
226 ret = pmbus_read_word_data(client, page, LTC2974_MFR_IOUT_MIN); ltc2974_read_word_data()
229 < lin11_to_val(data->iout_min[page])) ltc2974_read_word_data()
230 data->iout_min[page] = ret; ltc2974_read_word_data()
231 ret = data->iout_min[page]; ltc2974_read_word_data()
238 ret = ltc2978_read_word_data(client, page, reg); ltc2974_read_word_data()
244 static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg) ltc3880_read_word_data() argument
252 ret = pmbus_read_word_data(client, page, LTC3880_MFR_IOUT_PEAK); ltc3880_read_word_data()
255 > lin11_to_val(data->iout_max[page])) ltc3880_read_word_data()
256 data->iout_max[page] = ret; ltc3880_read_word_data()
257 ret = data->iout_max[page]; ltc3880_read_word_data()
261 ret = pmbus_read_word_data(client, page, ltc3880_read_word_data()
279 ret = ltc2978_read_word_data_common(client, page, reg); ltc3880_read_word_data()
285 static int ltc3883_read_word_data(struct i2c_client *client, int page, int reg) ltc3883_read_word_data() argument
293 ret = pmbus_read_word_data(client, page, LTC3883_MFR_IIN_PEAK); ltc3883_read_word_data()
305 ret = ltc3880_read_word_data(client, page, reg); ltc3883_read_word_data()
311 static int ltc2978_clear_peaks(struct i2c_client *client, int page, ltc2978_clear_peaks() argument
319 ret = pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); ltc2978_clear_peaks()
324 static int ltc2978_write_word_data(struct i2c_client *client, int page, ltc2978_write_word_data() argument
334 ret = ltc2978_clear_peaks(client, page, data->id); ltc2978_write_word_data()
337 data->iout_max[page] = 0x7c00; ltc2978_write_word_data()
338 data->iout_min[page] = 0xfbff; ltc2978_write_word_data()
339 ret = ltc2978_clear_peaks(client, page, data->id); ltc2978_write_word_data()
343 ret = ltc2978_clear_peaks(client, page, data->id); ltc2978_write_word_data()
346 data->vout_min[page] = 0xffff; ltc2978_write_word_data()
347 data->vout_max[page] = 0; ltc2978_write_word_data()
348 ret = ltc2978_clear_peaks(client, page, data->id); ltc2978_write_word_data()
353 ret = ltc2978_clear_peaks(client, page, data->id); ltc2978_write_word_data()
356 data->temp_min[page] = 0x7bff; ltc2978_write_word_data()
357 data->temp_max[page] = 0x7c00; ltc2978_write_word_data()
358 ret = ltc2978_clear_peaks(client, page, data->id); ltc2978_write_word_data()
/linux-4.1.27/arch/x86/mm/kmemcheck/
H A Dshadow.c5 #include <asm/page.h>
21 struct page *page; kmemcheck_shadow_lookup() local
30 page = virt_to_page(address); kmemcheck_shadow_lookup()
31 if (!page->shadow) kmemcheck_shadow_lookup()
33 return page->shadow + (address & (PAGE_SIZE - 1)); kmemcheck_shadow_lookup()
41 unsigned long page = addr & PAGE_MASK; mark_shadow() local
46 /* If the memory range crosses a page boundary, stop there. */ mark_shadow()
47 if (page == last_page) mark_shadow()
50 first_n = page + PAGE_SIZE - addr; mark_shadow()
59 /* Do full-page memset()s. */ mark_shadow()
69 /* Do the remaining page, if any. */ mark_shadow()
102 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) kmemcheck_mark_unallocated_pages()
110 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) kmemcheck_mark_uninitialized_pages()
118 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) kmemcheck_mark_initialized_pages()
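The shadow lookup and mark_shadow() above both rely on splitting an address into its page and its offset within the page. A worked userspace example of that arithmetic, assuming the usual x86 PAGE_SHIFT of 12 (so PAGE_SIZE is 4096 and PAGE_MASK is ~0xfffUL); the constants are redefined locally only for illustration:

	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumption: 4 KiB pages, as on x86 */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long addr   = 0x12345678UL;
		unsigned long page   = addr & PAGE_MASK;	/* 0x12345000, start of the page */
		unsigned long offset = addr & (PAGE_SIZE - 1);	/* 0x678, position inside the page */

		printf("page=%#lx offset=%#lx\n", page, offset);
		return 0;
	}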
/linux-4.1.27/include/uapi/asm-generic/
H A Dmman-common.h9 #define PROT_READ 0x1 /* page can be read */
10 #define PROT_WRITE 0x2 /* page can be written */
11 #define PROT_EXEC 0x4 /* page can be executed */
12 #define PROT_SEM 0x8 /* page may be used for atomic ops */
13 #define PROT_NONE 0x0 /* page can not be accessed */
33 #define MADV_RANDOM 1 /* expect random page references */
34 #define MADV_SEQUENTIAL 2 /* expect sequential page references */
42 #define MADV_HWPOISON 100 /* poison a page for testing */
43 #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
59 * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
64 * When 0 use the default page size.
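The comment above documents that, with MAP_HUGETLB, bits [26:31] of the mmap flags carry log2 of the requested huge page size, with 0 meaning the default size. A hedged userspace sketch of asking for 2 MiB pages under that encoding (MAP_HUGE_SHIFT is 26 per the comment; the mapping length and error handling are illustrative, and the call only succeeds when a hugetlb pool is configured):

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stdio.h>

	#ifndef MAP_HUGE_SHIFT
	#define MAP_HUGE_SHIFT 26			/* bits [26:31] of the flags */
	#endif
	#define MAP_HUGE_2MB   (21 << MAP_HUGE_SHIFT)	/* log2(2 MiB) == 21 */

	int main(void)
	{
		size_t len = 2UL << 20;			/* one 2 MiB huge page */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
			       -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		munmap(p, len);
		return 0;
	}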
/linux-4.1.27/arch/openrisc/include/asm/
H A Dpage.h23 /* PAGE_SHIFT determines the page size */
44 #define free_user_page(page, addr) free_page(addr)
46 #define clear_page(page) memset((page), 0, PAGE_SIZE)
49 #define clear_user_page(page, vaddr, pg) clear_page(page)
64 typedef struct page *pgtable_t;
87 #define page_to_virt(page) \
88 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/linux-4.1.27/drivers/target/
H A Dtarget_core_stat.c77 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_inst()
83 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); target_stat_scsi_dev_show_attr_inst()
88 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_indx()
93 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); target_stat_scsi_dev_show_attr_indx()
98 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_role()
100 return snprintf(page, PAGE_SIZE, "Target\n"); target_stat_scsi_dev_show_attr_role()
105 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_ports()
110 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); target_stat_scsi_dev_show_attr_ports()
154 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_inst()
160 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); target_stat_scsi_tgt_dev_show_attr_inst()
165 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_indx()
170 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); target_stat_scsi_tgt_dev_show_attr_indx()
175 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_num_lus()
177 return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); target_stat_scsi_tgt_dev_show_attr_num_lus()
182 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_status()
188 return snprintf(page, PAGE_SIZE, "activated"); target_stat_scsi_tgt_dev_show_attr_status()
190 return snprintf(page, PAGE_SIZE, "deactivated"); target_stat_scsi_tgt_dev_show_attr_status()
195 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_non_access_lus()
206 return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); target_stat_scsi_tgt_dev_show_attr_non_access_lus()
211 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_resets()
216 return snprintf(page, PAGE_SIZE, "%lu\n", target_stat_scsi_tgt_dev_show_attr_resets()
262 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_inst()
268 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); target_stat_scsi_lu_show_attr_inst()
273 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_dev()
278 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); target_stat_scsi_lu_show_attr_dev()
283 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_indx()
285 return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); target_stat_scsi_lu_show_attr_indx()
290 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_lun()
293 return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); target_stat_scsi_lu_show_attr_lun()
298 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_lu_name()
304 return snprintf(page, PAGE_SIZE, "%s\n", target_stat_scsi_lu_show_attr_lu_name()
311 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_vend()
323 return snprintf(page, PAGE_SIZE, "%s\n", str); target_stat_scsi_lu_show_attr_vend()
328 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_prod()
340 return snprintf(page, PAGE_SIZE, "%s\n", str); target_stat_scsi_lu_show_attr_prod()
345 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_rev()
357 return snprintf(page, PAGE_SIZE, "%s\n", str); target_stat_scsi_lu_show_attr_rev()
362 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_dev_type()
368 return snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_lu_show_attr_dev_type()
374 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_status()
380 return snprintf(page, PAGE_SIZE, "%s\n", target_stat_scsi_lu_show_attr_status()
386 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_state_bit()
389 return snprintf(page, PAGE_SIZE, "exposed\n"); target_stat_scsi_lu_show_attr_state_bit()
394 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_num_cmds()
400 return snprintf(page, PAGE_SIZE, "%lu\n", target_stat_scsi_lu_show_attr_num_cmds()
406 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_read_mbytes()
412 return snprintf(page, PAGE_SIZE, "%lu\n", target_stat_scsi_lu_show_attr_read_mbytes()
418 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_write_mbytes()
424 return snprintf(page, PAGE_SIZE, "%lu\n", target_stat_scsi_lu_show_attr_write_mbytes()
430 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_resets()
436 return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets)); target_stat_scsi_lu_show_attr_resets()
441 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_full_stat()
444 return snprintf(page, PAGE_SIZE, "%u\n", 0); target_stat_scsi_lu_show_attr_full_stat()
449 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_hs_num_cmds()
452 return snprintf(page, PAGE_SIZE, "%u\n", 0); target_stat_scsi_lu_show_attr_hs_num_cmds()
457 struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_creation_time()
463 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - target_stat_scsi_lu_show_attr_creation_time()
543 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_inst()
558 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); target_stat_scsi_port_show_attr_inst()
565 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_dev()
578 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); target_stat_scsi_port_show_attr_dev()
585 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_indx()
597 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); target_stat_scsi_port_show_attr_indx()
604 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_role()
620 ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); target_stat_scsi_port_show_attr_role()
627 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_busy_count()
640 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); target_stat_scsi_port_show_attr_busy_count()
686 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_inst()
701 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); target_stat_scsi_tgt_port_show_attr_inst()
708 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_dev()
721 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); target_stat_scsi_tgt_port_show_attr_dev()
728 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_indx()
740 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); target_stat_scsi_tgt_port_show_attr_indx()
747 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_name()
762 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", target_stat_scsi_tgt_port_show_attr_name()
770 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_port_index()
785 ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", target_stat_scsi_tgt_port_show_attr_port_index()
794 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_in_cmds()
807 ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); target_stat_scsi_tgt_port_show_attr_in_cmds()
814 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_write_mbytes()
827 ret = snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_tgt_port_show_attr_write_mbytes()
835 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_read_mbytes()
848 ret = snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_tgt_port_show_attr_read_mbytes()
856 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_hs_in_cmds()
870 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); target_stat_scsi_tgt_port_show_attr_hs_in_cmds()
922 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_inst()
938 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); target_stat_scsi_transport_show_attr_inst()
945 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_device()
960 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", target_stat_scsi_transport_show_attr_device()
968 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_indx()
982 ret = snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_transport_show_attr_indx()
990 struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_dev_name()
1008 ret = snprintf(page, PAGE_SIZE, "%s+%s\n", target_stat_scsi_transport_show_attr_dev_name()
1079 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_inst()
1096 ret = snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_auth_intr_show_attr_inst()
1104 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_dev()
1121 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); target_stat_scsi_auth_intr_show_attr_dev()
1128 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_port()
1145 ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); target_stat_scsi_auth_intr_show_attr_port()
1152 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_indx()
1167 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); target_stat_scsi_auth_intr_show_attr_indx()
1174 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_dev_or_port()
1189 ret = snprintf(page, PAGE_SIZE, "%u\n", 1); target_stat_scsi_auth_intr_show_attr_dev_or_port()
1196 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_intr_name()
1211 ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); target_stat_scsi_auth_intr_show_attr_intr_name()
1218 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_map_indx()
1233 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); target_stat_scsi_auth_intr_show_attr_map_indx()
1240 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_att_count()
1255 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); target_stat_scsi_auth_intr_show_attr_att_count()
1262 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_num_cmds()
1277 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); target_stat_scsi_auth_intr_show_attr_num_cmds()
1284 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_read_mbytes()
1299 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); target_stat_scsi_auth_intr_show_attr_read_mbytes()
1306 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_write_mbytes()
1321 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); target_stat_scsi_auth_intr_show_attr_write_mbytes()
1328 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_hs_num_cmds()
1343 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); target_stat_scsi_auth_intr_show_attr_hs_num_cmds()
1350 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_creation_time()
1365 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - target_stat_scsi_auth_intr_show_attr_creation_time()
1373 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_row_status()
1388 ret = snprintf(page, PAGE_SIZE, "Ready\n"); target_stat_scsi_auth_intr_show_attr_row_status()
1445 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_inst()
1462 ret = snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_att_intr_port_show_attr_inst()
1470 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_dev()
1487 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); target_stat_scsi_att_intr_port_show_attr_dev()
1494 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_port()
1511 ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); target_stat_scsi_att_intr_port_show_attr_port()
1518 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_indx()
1536 ret = snprintf(page, PAGE_SIZE, "%u\n", target_stat_scsi_att_intr_port_show_attr_indx()
1544 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_port_auth_indx()
1559 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); target_stat_scsi_att_intr_port_show_attr_port_auth_indx()
1566 struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_port_ident()
1589 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); target_stat_scsi_att_intr_port_show_attr_port_ident()
76 target_stat_scsi_dev_show_attr_inst( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_inst() argument
87 target_stat_scsi_dev_show_attr_indx( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_indx() argument
97 target_stat_scsi_dev_show_attr_role( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_role() argument
104 target_stat_scsi_dev_show_attr_ports( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_dev_show_attr_ports() argument
153 target_stat_scsi_tgt_dev_show_attr_inst( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_inst() argument
164 target_stat_scsi_tgt_dev_show_attr_indx( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_indx() argument
174 target_stat_scsi_tgt_dev_show_attr_num_lus( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_num_lus() argument
181 target_stat_scsi_tgt_dev_show_attr_status( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_status() argument
194 target_stat_scsi_tgt_dev_show_attr_non_access_lus( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_non_access_lus() argument
210 target_stat_scsi_tgt_dev_show_attr_resets( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_tgt_dev_show_attr_resets() argument
261 target_stat_scsi_lu_show_attr_inst( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_inst() argument
272 target_stat_scsi_lu_show_attr_dev( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_dev() argument
282 target_stat_scsi_lu_show_attr_indx( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_indx() argument
289 target_stat_scsi_lu_show_attr_lun( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_lun() argument
297 target_stat_scsi_lu_show_attr_lu_name( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_lu_name() argument
310 target_stat_scsi_lu_show_attr_vend( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_vend() argument
327 target_stat_scsi_lu_show_attr_prod( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_prod() argument
344 target_stat_scsi_lu_show_attr_rev( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_rev() argument
361 target_stat_scsi_lu_show_attr_dev_type( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_dev_type() argument
373 target_stat_scsi_lu_show_attr_status( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_status() argument
385 target_stat_scsi_lu_show_attr_state_bit( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_state_bit() argument
393 target_stat_scsi_lu_show_attr_num_cmds( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_num_cmds() argument
405 target_stat_scsi_lu_show_attr_read_mbytes( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_read_mbytes() argument
417 target_stat_scsi_lu_show_attr_write_mbytes( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_write_mbytes() argument
429 target_stat_scsi_lu_show_attr_resets( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_resets() argument
440 target_stat_scsi_lu_show_attr_full_stat( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_full_stat() argument
448 target_stat_scsi_lu_show_attr_hs_num_cmds( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_hs_num_cmds() argument
456 target_stat_scsi_lu_show_attr_creation_time( struct se_dev_stat_grps *sgrps, char *page) target_stat_scsi_lu_show_attr_creation_time() argument
542 target_stat_scsi_port_show_attr_inst( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_inst() argument
564 target_stat_scsi_port_show_attr_dev( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_dev() argument
584 target_stat_scsi_port_show_attr_indx( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_indx() argument
603 target_stat_scsi_port_show_attr_role( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_role() argument
626 target_stat_scsi_port_show_attr_busy_count( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_port_show_attr_busy_count() argument
685 target_stat_scsi_tgt_port_show_attr_inst( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_inst() argument
707 target_stat_scsi_tgt_port_show_attr_dev( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_dev() argument
727 target_stat_scsi_tgt_port_show_attr_indx( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_indx() argument
746 target_stat_scsi_tgt_port_show_attr_name( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_name() argument
769 target_stat_scsi_tgt_port_show_attr_port_index( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_port_index() argument
793 target_stat_scsi_tgt_port_show_attr_in_cmds( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_in_cmds() argument
813 target_stat_scsi_tgt_port_show_attr_write_mbytes( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_write_mbytes() argument
834 target_stat_scsi_tgt_port_show_attr_read_mbytes( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_read_mbytes() argument
855 target_stat_scsi_tgt_port_show_attr_hs_in_cmds( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_tgt_port_show_attr_hs_in_cmds() argument
921 target_stat_scsi_transport_show_attr_inst( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_inst() argument
944 target_stat_scsi_transport_show_attr_device( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_device() argument
967 target_stat_scsi_transport_show_attr_indx( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_indx() argument
989 target_stat_scsi_transport_show_attr_dev_name( struct se_port_stat_grps *pgrps, char *page) target_stat_scsi_transport_show_attr_dev_name() argument
1078 target_stat_scsi_auth_intr_show_attr_inst( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_inst() argument
1103 target_stat_scsi_auth_intr_show_attr_dev( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_dev() argument
1127 target_stat_scsi_auth_intr_show_attr_port( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_port() argument
1151 target_stat_scsi_auth_intr_show_attr_indx( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_indx() argument
1173 target_stat_scsi_auth_intr_show_attr_dev_or_port( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_dev_or_port() argument
1195 target_stat_scsi_auth_intr_show_attr_intr_name( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_intr_name() argument
1217 target_stat_scsi_auth_intr_show_attr_map_indx( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_map_indx() argument
1239 target_stat_scsi_auth_intr_show_attr_att_count( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_att_count() argument
1261 target_stat_scsi_auth_intr_show_attr_num_cmds( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_num_cmds() argument
1283 target_stat_scsi_auth_intr_show_attr_read_mbytes( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_read_mbytes() argument
1305 target_stat_scsi_auth_intr_show_attr_write_mbytes( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_write_mbytes() argument
1327 target_stat_scsi_auth_intr_show_attr_hs_num_cmds( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_hs_num_cmds() argument
1349 target_stat_scsi_auth_intr_show_attr_creation_time( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_creation_time() argument
1372 target_stat_scsi_auth_intr_show_attr_row_status( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_auth_intr_show_attr_row_status() argument
1444 target_stat_scsi_att_intr_port_show_attr_inst( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_inst() argument
1469 target_stat_scsi_att_intr_port_show_attr_dev( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_dev() argument
1493 target_stat_scsi_att_intr_port_show_attr_port( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_port() argument
1517 target_stat_scsi_att_intr_port_show_attr_indx( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_indx() argument
1543 target_stat_scsi_att_intr_port_show_attr_port_auth_indx( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_port_auth_indx() argument
1565 target_stat_scsi_att_intr_port_show_attr_port_ident( struct se_ml_stat_grps *lgrps, char *page) target_stat_scsi_att_intr_port_show_attr_port_ident() argument
/linux-4.1.27/tools/virtio/linux/
H A Dscatterlist.h19 * sg_assign_page - Assign a given page to an SG entry
21 * @page: The page
24 * Assign page to sg entry. Also see sg_set_page(), the most commonly used
28 static inline void sg_assign_page(struct scatterlist *sg, struct page *page) sg_assign_page() argument
36 BUG_ON((unsigned long) page & 0x03); sg_assign_page()
41 sg->page_link = page_link | (unsigned long) page; sg_assign_page()
45 * sg_set_page - Set sg entry to point at given page
47 * @page: The page
49 * @offset: Offset into page
52 * Use this function to set an sg entry pointing at a page, never assign
53 * the page directly. We encode sg table information in the lower bits
54 * of the page pointer. See sg_page() for looking up the page belonging
58 static inline void sg_set_page(struct scatterlist *sg, struct page *page, sg_set_page() argument
61 sg_assign_page(sg, page); sg_set_page()
66 static inline struct page *sg_page(struct scatterlist *sg) sg_page()
72 return (struct page *)((sg)->page_link & ~0x3); sg_page()
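The comments above explain the trick: chain/end markers live in the two low bits of sg->page_link, which is why entries must be set through sg_set_page() rather than by assigning the page pointer directly, and why sg_page() masks those bits off. A hedged sketch of filling a small table with the normal in-kernel scatterlist API (the page allocation and sizes are illustrative assumptions, and the caller would still have to free the pages):

	#include <linux/scatterlist.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	/* Build a two-entry scatterlist over two freshly allocated pages. */
	static int build_sg_example(struct scatterlist sg[2])
	{
		struct page *a = alloc_page(GFP_KERNEL);
		struct page *b = alloc_page(GFP_KERNEL);

		if (!a || !b) {
			if (a)
				__free_page(a);
			if (b)
				__free_page(b);
			return -ENOMEM;
		}

		sg_init_table(sg, 2);			/* zero entries, mark the last one */
		sg_set_page(&sg[0], a, PAGE_SIZE, 0);	/* never touch sg->page_link directly */
		sg_set_page(&sg[1], b, PAGE_SIZE, 0);

		/* sg_page() strips the low marker bits to recover the page pointer. */
		return sg_page(&sg[0]) == a ? 0 : -EINVAL;
	}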
/linux-4.1.27/block/
H A Dblk-sysfs.c23 queue_var_show(unsigned long var, char *page) queue_var_show() argument
25 return sprintf(page, "%lu\n", var); queue_var_show()
29 queue_var_store(unsigned long *var, const char *page, size_t count) queue_var_store() argument
34 err = kstrtoul(page, 10, &v); queue_var_store()
43 static ssize_t queue_requests_show(struct request_queue *q, char *page) queue_requests_show() argument
45 return queue_var_show(q->nr_requests, (page)); queue_requests_show()
49 queue_requests_store(struct request_queue *q, const char *page, size_t count) queue_requests_store() argument
57 ret = queue_var_store(&nr, page, count); queue_requests_store()
75 static ssize_t queue_ra_show(struct request_queue *q, char *page) queue_ra_show() argument
80 return queue_var_show(ra_kb, (page)); queue_ra_show()
84 queue_ra_store(struct request_queue *q, const char *page, size_t count) queue_ra_store() argument
87 ssize_t ret = queue_var_store(&ra_kb, page, count); queue_ra_store()
97 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) queue_max_sectors_show() argument
101 return queue_var_show(max_sectors_kb, (page)); queue_max_sectors_show()
104 static ssize_t queue_max_segments_show(struct request_queue *q, char *page) queue_max_segments_show() argument
106 return queue_var_show(queue_max_segments(q), (page)); queue_max_segments_show()
109 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) queue_max_integrity_segments_show() argument
111 return queue_var_show(q->limits.max_integrity_segments, (page)); queue_max_integrity_segments_show()
114 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) queue_max_segment_size_show() argument
117 return queue_var_show(queue_max_segment_size(q), (page)); queue_max_segment_size_show()
119 return queue_var_show(PAGE_CACHE_SIZE, (page)); queue_max_segment_size_show()
122 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) queue_logical_block_size_show() argument
124 return queue_var_show(queue_logical_block_size(q), page); queue_logical_block_size_show()
127 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) queue_physical_block_size_show() argument
129 return queue_var_show(queue_physical_block_size(q), page); queue_physical_block_size_show()
132 static ssize_t queue_io_min_show(struct request_queue *q, char *page) queue_io_min_show() argument
134 return queue_var_show(queue_io_min(q), page); queue_io_min_show()
137 static ssize_t queue_io_opt_show(struct request_queue *q, char *page) queue_io_opt_show() argument
139 return queue_var_show(queue_io_opt(q), page); queue_io_opt_show()
142 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) queue_discard_granularity_show() argument
144 return queue_var_show(q->limits.discard_granularity, page); queue_discard_granularity_show()
147 static ssize_t queue_discard_max_show(struct request_queue *q, char *page) queue_discard_max_show() argument
149 return sprintf(page, "%llu\n", queue_discard_max_show()
153 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) queue_discard_zeroes_data_show() argument
155 return queue_var_show(queue_discard_zeroes_data(q), page); queue_discard_zeroes_data_show()
158 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) queue_write_same_max_show() argument
160 return sprintf(page, "%llu\n", queue_write_same_max_show()
166 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) queue_max_sectors_store() argument
171 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); queue_max_sectors_store()
186 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) queue_max_hw_sectors_show() argument
190 return queue_var_show(max_hw_sectors_kb, (page)); queue_max_hw_sectors_show()
195 queue_show_##name(struct request_queue *q, char *page) \
199 return queue_var_show(neg ? !bit : bit, page); \
202 queue_store_##name(struct request_queue *q, const char *page, size_t count) \
206 ret = queue_var_store(&val, page, count); \
226 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) queue_nomerges_show() argument
229 blk_queue_noxmerges(q), page); queue_nomerges_show()
232 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, queue_nomerges_store() argument
236 ssize_t ret = queue_var_store(&nm, page, count); queue_nomerges_store()
253 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) queue_rq_affinity_show() argument
258 return queue_var_show(set << force, page); queue_rq_affinity_show()
262 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) queue_rq_affinity_store() argument
268 ret = queue_var_store(&val, page, count); queue_rq_affinity_store()
436 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) queue_attr_show() argument
450 res = entry->show(q, page); queue_attr_show()
457 const char *page, size_t length) queue_attr_store()
472 res = entry->store(q, page, length); queue_attr_store()
456 queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) queue_attr_store() argument
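The show/store pairs above all follow the same pattern; the sketch below shows it with a hypothetical "example_kb" attribute (not an in-tree one), reusing the queue_var_show()/queue_var_store() helpers from the top of the file. The value shown and the body of the store path are stand-ins.

/*
 * Sketch of the blk-sysfs show/store pattern: show() formats the
 * current value into the page buffer, store() parses user input with
 * queue_var_store() and applies it under the queue lock.
 */
static ssize_t queue_example_kb_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);    /* stand-in value */
}

static ssize_t queue_example_kb_store(struct request_queue *q,
                                      const char *page, size_t count)
{
        unsigned long val;
        ssize_t ret = queue_var_store(&val, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        /* ... apply "val" to the queue limits here ... */
        spin_unlock_irq(q->queue_lock);

        return ret;
}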
/linux-4.1.27/arch/unicore32/include/asm/
page.h
2 * linux/arch/unicore32/include/asm/page.h
15 /* PAGE_SHIFT determines the page size */
22 struct page;
25 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
28 #define clear_user_page(page, vaddr, pg) clear_page(page)
67 typedef struct page *pgtable_t;
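As a quick illustration of the macros above (a sketch only, not code from this header): on unicore32 clear_page() and clear_user_page() both reduce to zeroing one PAGE_SIZE region, with the user virtual address and struct page arguments unused.

/*
 * Sketch: both macros expand to a PAGE_SIZE memset of the kernel
 * mapping; no cache-aliasing work is needed on this architecture.
 */
static inline void example_zero_page(void *kaddr, unsigned long uvaddr,
                                     struct page *pg)
{
        clear_page(kaddr);                      /* memset(kaddr, 0, PAGE_SIZE) */
        clear_user_page(kaddr, uvaddr, pg);     /* same memset; uvaddr, pg unused */
}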
