
Searched refs:page (Results 1 – 200 of 2517) sorted by relevance


/linux-4.1.27/include/linux/
page-flags.h
141 static inline int Page##uname(const struct page *page) \
142 { return test_bit(PG_##lname, &page->flags); }
145 static inline void SetPage##uname(struct page *page) \
146 { set_bit(PG_##lname, &page->flags); }
149 static inline void ClearPage##uname(struct page *page) \
150 { clear_bit(PG_##lname, &page->flags); }
153 static inline void __SetPage##uname(struct page *page) \
154 { __set_bit(PG_##lname, &page->flags); }
157 static inline void __ClearPage##uname(struct page *page) \
158 { __clear_bit(PG_##lname, &page->flags); }
[all …]
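
The ## token-pasting templates above stamp out one accessor set per page flag. As a minimal sketch of the expansion (illustrative, not verbatim 4.1 source), an invocation like PAGEFLAG(Dirty, dirty) would produce:

/* Sketch: expansion of PAGEFLAG(Dirty, dirty) per the templates above. */
static inline int PageDirty(const struct page *page)
	{ return test_bit(PG_dirty, &page->flags); }
static inline void SetPageDirty(struct page *page)
	{ set_bit(PG_dirty, &page->flags); }
static inline void ClearPageDirty(struct page *page)
	{ clear_bit(PG_dirty, &page->flags); }

The __SetPage/__ClearPage variants use the non-atomic __set_bit()/__clear_bit(), so they are only safe while no other context can touch page->flags concurrently.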
balloon_compaction.h
63 int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
64 struct page *page, enum migrate_mode mode);
67 extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
68 extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
79 extern bool balloon_page_isolate(struct page *page);
80 extern void balloon_page_putback(struct page *page);
81 extern int balloon_page_migrate(struct page *newpage,
82 struct page *page, enum migrate_mode mode);
87 static inline bool __is_movable_balloon_page(struct page *page) in __is_movable_balloon_page() argument
89 return PageBalloon(page); in __is_movable_balloon_page()
[all …]
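
A balloon driver inflates by enqueueing freshly allocated pages and deflates by dequeueing them again. A hedged pairing sketch of the API above (loosely how a driver such as virtio_balloon drives it; the error path is hypothetical):

/* Sketch: one inflate step and one deflate step. */
struct page *page;

page = balloon_page_enqueue(b_dev_info);	/* inflate: allocate + track */
if (!page)
	return -ENOMEM;				/* allocation failed: stop inflating */
/* ... report the page to the hypervisor ... */

page = balloon_page_dequeue(b_dev_info);	/* deflate: untrack */
if (page)
	__free_page(page);			/* hand it back to the kernel */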
pagemap.h
95 #define page_cache_get(page) get_page(page) argument
96 #define page_cache_release(page) put_page(page) argument
97 void release_pages(struct page **pages, int nr, bool cold);
143 static inline int page_cache_get_speculative(struct page *page) in page_cache_get_speculative() argument
160 VM_BUG_ON_PAGE(page_count(page) == 0, page); in page_cache_get_speculative()
161 atomic_inc(&page->_count); in page_cache_get_speculative()
164 if (unlikely(!get_page_unless_zero(page))) { in page_cache_get_speculative()
173 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_get_speculative()
181 static inline int page_cache_add_speculative(struct page *page, int count) in page_cache_add_speculative() argument
189 VM_BUG_ON_PAGE(page_count(page) == 0, page); in page_cache_add_speculative()
[all …]
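
page_cache_get_speculative() only takes a reference when the count is already non-zero, so a lockless lookup must recheck its slot afterwards in case the page was freed and reused. A hedged sketch of that pattern (simplified from what find_get_entry() does in this tree; the real code uses radix_tree_lookup_slot() and a retry loop):

rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, index);
if (page && !page_cache_get_speculative(page))
	page = NULL;				/* refcount hit zero: being freed */
if (page && page != radix_tree_lookup(&mapping->page_tree, index)) {
	page_cache_release(page);		/* raced with removal or reuse */
	page = NULL;
}
rcu_read_unlock();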
mm.h
83 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) argument
227 struct page *cow_page; /* Handler may choose to COW */
228 struct page *page; /* ->fault handlers should return a member
296 struct page *(*find_special_page)(struct vm_area_struct *vma,
303 #define page_private(page) ((page)->private) argument
304 #define set_page_private(page, v) ((page)->private = (v)) argument
307 static inline void set_freepage_migratetype(struct page *page, int migratetype) in set_freepage_migratetype() argument
309 page->index = migratetype; in set_freepage_migratetype()
313 static inline int get_freepage_migratetype(struct page *page) in get_freepage_migratetype() argument
315 return page->index; in get_freepage_migratetype()
[all …]
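
nth_page() goes through page_to_pfn()/pfn_to_page() rather than raw pointer arithmetic, which keeps it correct on configurations where the mem_map is not virtually contiguous. A small hypothetical usage sketch:

/* Sketch: visit every page of an nr_pages-long physically contiguous buffer. */
unsigned long i;

for (i = 0; i < nr_pages; i++) {
	struct page *p = nth_page(first_page, i);
	clear_highpage(p);	/* example per-page operation */
}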
swap.h
252 void *workingset_eviction(struct address_space *mapping, struct page *page);
254 void workingset_activation(struct page *page);
299 extern void lru_cache_add(struct page *);
300 extern void lru_cache_add_anon(struct page *page);
301 extern void lru_cache_add_file(struct page *page);
302 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
304 extern void activate_page(struct page *);
305 extern void mark_page_accessed(struct page *);
309 extern void rotate_reclaimable_page(struct page *page);
310 extern void deactivate_file_page(struct page *page);
[all …]
highmem.h
14 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma… in flush_anon_page() argument
20 static inline void flush_kernel_dcache_page(struct page *page) in flush_kernel_dcache_page() argument
42 struct page *kmap_to_page(void *addr);
48 static inline struct page *kmap_to_page(void *addr) in kmap_to_page()
56 static inline void *kmap(struct page *page) in kmap() argument
59 return page_address(page); in kmap()
62 static inline void kunmap(struct page *page) in kunmap() argument
66 static inline void *kmap_atomic(struct page *page) in kmap_atomic() argument
69 return page_address(page); in kmap_atomic()
71 #define kmap_atomic_prot(page, prot) kmap_atomic(page) argument
[all …]
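
With CONFIG_HIGHMEM off these stubs collapse to page_address(), but callers are written against the general contract: map, touch, unmap. A usage sketch (src and len are hypothetical):

void *addr = kmap(page);	/* may sleep */
memset(addr, 0, PAGE_SIZE);
kunmap(page);

addr = kmap_atomic(page);	/* may not sleep; unmap takes the address, not the page */
memcpy(addr, src, len);
kunmap_atomic(addr);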
hugetlb_cgroup.h
29 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) in hugetlb_cgroup_from_page() argument
31 VM_BUG_ON_PAGE(!PageHuge(page), page); in hugetlb_cgroup_from_page()
33 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) in hugetlb_cgroup_from_page()
35 return (struct hugetlb_cgroup *)page[2].lru.next; in hugetlb_cgroup_from_page()
39 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) in set_hugetlb_cgroup() argument
41 VM_BUG_ON_PAGE(!PageHuge(page), page); in set_hugetlb_cgroup()
43 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) in set_hugetlb_cgroup()
45 page[2].lru.next = (void *)h_cg; in set_hugetlb_cgroup()
60 struct page *page);
62 struct page *page);
[all …]
rmap.h
145 struct anon_vma *page_get_anon_vma(struct page *page);
150 void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
151 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
152 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
154 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
155 void page_add_file_rmap(struct page *);
156 void page_remove_rmap(struct page *);
158 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
160 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
163 static inline void page_dup_rmap(struct page *page) in page_dup_rmap() argument
[all …]
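
These add/remove calls keep the reverse map in step with the page tables. A hedged sketch of where they sit in an anonymous fault, loosely following do_anonymous_page() in this tree (charging, LRU and locking details elided):

page = alloc_zeroed_user_highpage_movable(vma, address);
entry = mk_pte(page, vma->vm_page_prot);
page_add_new_anon_rmap(page, vma, address);	/* brand-new page: no other mappers */
set_pte_at(mm, address, page_table, entry);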
migrate.h
8 typedef struct page *new_page_t(struct page *page, unsigned long private,
10 typedef void free_page_t(struct page *page, unsigned long private);
33 struct page *, struct page *, enum migrate_mode);
39 extern void migrate_page_copy(struct page *newpage, struct page *page);
41 struct page *newpage, struct page *page);
43 struct page *newpage, struct page *page,
57 static inline void migrate_page_copy(struct page *newpage, in migrate_page_copy()
58 struct page *page) {} in migrate_page_copy() argument
61 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
70 extern int migrate_misplaced_page(struct page *page,
[all …]
mm_inline.h
20 static inline int page_is_file_cache(struct page *page) in page_is_file_cache() argument
22 return !PageSwapBacked(page); in page_is_file_cache()
25 static __always_inline void add_page_to_lru_list(struct page *page, in add_page_to_lru_list() argument
28 int nr_pages = hpage_nr_pages(page); in add_page_to_lru_list()
30 list_add(&page->lru, &lruvec->lists[lru]); in add_page_to_lru_list()
34 static __always_inline void del_page_from_lru_list(struct page *page, in del_page_from_lru_list() argument
37 int nr_pages = hpage_nr_pages(page); in del_page_from_lru_list()
39 list_del(&page->lru); in del_page_from_lru_list()
51 static inline enum lru_list page_lru_base_type(struct page *page) in page_lru_base_type() argument
53 if (page_is_file_cache(page)) in page_lru_base_type()
[all …]
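
page_lru_base_type() only picks the file/anon pair of lists; the page_lru() helper elsewhere in this header then adds the active offset. A hedged recap of how the pieces compose (unevictable case omitted):

enum lru_list lru = page_lru_base_type(page);	/* LRU_INACTIVE_FILE or _ANON */
if (PageActive(page))
	lru += LRU_ACTIVE;
add_page_to_lru_list(page, lruvec, lru);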
ksm.h
38 static inline struct stable_node *page_stable_node(struct page *page) in page_stable_node() argument
40 return PageKsm(page) ? page_rmapping(page) : NULL; in page_stable_node()
43 static inline void set_page_stable_node(struct page *page, in set_page_stable_node() argument
46 page->mapping = (void *)stable_node + in set_page_stable_node()
61 struct page *ksm_might_need_to_copy(struct page *page,
64 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
65 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
85 static inline struct page *ksm_might_need_to_copy(struct page *page, in ksm_might_need_to_copy() argument
88 return page; in ksm_might_need_to_copy()
91 static inline int page_referenced_ksm(struct page *page, in page_referenced_ksm() argument
[all …]
page-isolation.h
9 static inline bool is_migrate_isolate_page(struct page *page) in is_migrate_isolate_page() argument
11 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; in is_migrate_isolate_page()
22 static inline bool is_migrate_isolate_page(struct page *page) in is_migrate_isolate_page() argument
32 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
34 void set_pageblock_migratetype(struct page *page, int migratetype);
35 int move_freepages_block(struct zone *zone, struct page *page,
38 struct page *start_page, struct page *end_page,
71 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
72 void unset_migratetype_isolate(struct page *page, unsigned migratetype);
73 struct page *alloc_migrate_target(struct page *page, unsigned long private,
fscache.h
39 #define PageFsCache(page) PagePrivate2((page)) argument
40 #define SetPageFsCache(page) SetPagePrivate2((page)) argument
41 #define ClearPageFsCache(page) ClearPagePrivate2((page)) argument
42 #define TestSetPageFsCache(page) TestSetPagePrivate2((page)) argument
43 #define TestClearPageFsCache(page) TestClearPagePrivate2((page)) argument
53 typedef void (*fscache_rw_complete_t)(struct page *page,
145 struct page *page);
228 struct page *,
239 extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
240 extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
[all …]
pageblock-flags.h
66 struct page;
68 unsigned long get_pfnblock_flags_mask(struct page *page,
73 void set_pfnblock_flags_mask(struct page *page,
80 #define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ argument
81 get_pfnblock_flags_mask(page, page_to_pfn(page), \
84 #define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ argument
85 set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
90 #define get_pageblock_skip(page) \ argument
91 get_pageblock_flags_group(page, PB_migrate_skip, \
93 #define clear_pageblock_skip(page) \ argument
[all …]
memcontrol.h
28 struct page;
76 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
78 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
80 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
81 void mem_cgroup_uncharge(struct page *page);
84 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
88 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
94 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
162 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
192 void mem_cgroup_split_huge_fixup(struct page *head);
[all …]
cleancache.h
32 pgoff_t, struct page *);
34 pgoff_t, struct page *);
43 extern int __cleancache_get_page(struct page *);
44 extern void __cleancache_put_page(struct page *);
45 extern void __cleancache_invalidate_page(struct address_space *, struct page *);
51 static inline bool cleancache_fs_enabled(struct page *page) in cleancache_fs_enabled() argument
53 return page->mapping->host->i_sb->cleancache_poolid >= 0; in cleancache_fs_enabled()
90 static inline int cleancache_get_page(struct page *page) in cleancache_get_page() argument
94 if (cleancache_enabled && cleancache_fs_enabled(page)) in cleancache_get_page()
95 ret = __cleancache_get_page(page); in cleancache_get_page()
[all …]
page_owner.h
8 extern void __reset_page_owner(struct page *page, unsigned int order);
9 extern void __set_page_owner(struct page *page,
12 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
17 __reset_page_owner(page, order); in reset_page_owner()
20 static inline void set_page_owner(struct page *page, in set_page_owner() argument
26 __set_page_owner(page, order, gfp_mask); in set_page_owner()
29 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument
32 static inline void set_page_owner(struct page *page, in set_page_owner() argument
kmemcheck.h
11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12 void kmemcheck_free_shadow(struct page *page, int order);
17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
20 void kmemcheck_show_pages(struct page *p, unsigned int n);
21 void kmemcheck_hide_pages(struct page *p, unsigned int n);
23 bool kmemcheck_page_is_tracked(struct page *p);
30 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
31 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32 void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) in kmemcheck_alloc_shadow() argument
[all …]
buffer_head.h
48 struct page;
65 struct page *b_page; /* the page this bh is mapped to */
137 #define page_buffers(page) \ argument
139 BUG_ON(!PagePrivate(page)); \
140 ((struct buffer_head *)page_private(page)); \
142 #define page_has_buffers(page) PagePrivate(page) argument
144 void buffer_check_dirty_writeback(struct page *page,
155 struct page *page, unsigned long offset);
156 int try_to_free_buffers(struct page *);
157 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
[all …]
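
page_buffers() is only valid when page_has_buffers() says so, and the buffer ring is circular via b_this_page. The canonical walk, sketched (the same shape appears in the nilfs2 file.c hit further down):

if (page_has_buffers(page)) {
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		/* ... inspect or update bh ... */
		bh = bh->b_this_page;
	} while (bh != head);
}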
hugetlb.h
63 struct page **, struct vm_area_struct **,
66 unsigned long, unsigned long, struct page *);
70 struct page *ref_page);
73 struct page *ref_page);
84 int dequeue_hwpoisoned_huge_page(struct page *page);
85 bool isolate_huge_page(struct page *page, struct list_head *list);
86 void putback_active_hugepage(struct page *page);
87 void free_huge_page(struct page *page);
103 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
105 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
[all …]
kasan.h
7 struct page;
38 void kasan_alloc_pages(struct page *page, unsigned int order);
39 void kasan_free_pages(struct page *page, unsigned int order);
41 void kasan_poison_slab(struct page *page);
64 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} in kasan_alloc_pages() argument
65 static inline void kasan_free_pages(struct page *page, unsigned int order) {} in kasan_free_pages() argument
67 static inline void kasan_poison_slab(struct page *page) {} in kasan_poison_slab() argument
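
The empty stubs compile away when KASAN is disabled; when enabled, the allocator brackets page lifetime with these hooks. A hedged placement sketch (the real call sites live in mm/page_alloc.c):

page = alloc_pages(gfp, order);
if (page)
	kasan_alloc_pages(page, order);	/* unpoison freshly allocated pages */
/* ... use the pages ... */
kasan_free_pages(page, order);		/* poison before handing back */
__free_pages(page, order);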
huge_mm.h
18 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
54 extern pmd_t *page_check_address_pmd(struct page *page,
96 extern int split_huge_page_to_list(struct page *page, struct list_head *list);
97 static inline int split_huge_page(struct page *page) in split_huge_page() argument
99 return split_huge_page_to_list(page, NULL); in split_huge_page()
150 static inline int hpage_nr_pages(struct page *page) in hpage_nr_pages() argument
152 if (unlikely(PageTransHuge(page))) in hpage_nr_pages()
160 extern struct page *huge_zero_page;
162 static inline bool is_huge_zero_page(struct page *page) in is_huge_zero_page() argument
164 return ACCESS_ONCE(huge_zero_page) == page; in is_huge_zero_page()
[all …]
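
split_huge_page() is the list-less wrapper shown at lines 96-99. Callers that cannot handle a transparent huge page demote it first; a hedged usage sketch (the caller must hold the page lock, and 0 means success in this tree):

if (PageTransHuge(page) && split_huge_page(page))
	goto out;	/* split failed: page is still compound */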
frontswap.h
10 int (*store)(unsigned, pgoff_t, struct page *);
11 int (*load)(unsigned, pgoff_t, struct page *);
27 extern int __frontswap_store(struct page *page);
28 extern int __frontswap_load(struct page *page);
71 static inline int frontswap_store(struct page *page) in frontswap_store() argument
76 ret = __frontswap_store(page); in frontswap_store()
80 static inline int frontswap_load(struct page *page) in frontswap_load() argument
85 ret = __frontswap_load(page); in frontswap_load()
quicklist.h
17 void *page; member
39 p = q->page; in quicklist_alloc()
41 q->page = p[0]; in quicklist_alloc()
56 struct page *page) in __quicklist_free() argument
61 *(void **)p = q->page; in __quicklist_free()
62 q->page = p; in __quicklist_free()
73 struct page *page) in quicklist_free_page() argument
75 __quicklist_free(nr, dtor, page_address(page), page); in quicklist_free_page()
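
Lines 39-41 and 61-62 above are the two halves of an intrusive freelist: the first word of each free page stores the pointer to the next one. Restated as a sketch of the free path:

void **p = page_address(page);	/* reuse the free page's own memory */
*p = q->page;			/* old list head becomes our next link */
q->page = p;			/* this page becomes the new head */

Allocation pops the same way: p = q->page; q->page = p[0];.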
/linux-4.1.27/mm/
swap.c
52 static void __page_cache_release(struct page *page) in __page_cache_release() argument
54 if (PageLRU(page)) { in __page_cache_release()
55 struct zone *zone = page_zone(page); in __page_cache_release()
60 lruvec = mem_cgroup_page_lruvec(page, zone); in __page_cache_release()
61 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
62 __ClearPageLRU(page); in __page_cache_release()
63 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
66 mem_cgroup_uncharge(page); in __page_cache_release()
69 static void __put_single_page(struct page *page) in __put_single_page() argument
71 __page_cache_release(page); in __put_single_page()
[all …]
filemap.c
112 struct page *page, void *shadow) in page_cache_tree_delete() argument
120 VM_BUG_ON(!PageLocked(page)); in page_cache_tree_delete()
122 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); in page_cache_tree_delete()
144 index = page->index; in page_cache_tree_delete()
179 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
181 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
183 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
189 if (PageUptodate(page) && PageMappedToDisk(page)) in __delete_from_page_cache()
190 cleancache_put_page(page); in __delete_from_page_cache()
192 cleancache_invalidate_page(mapping, page); in __delete_from_page_cache()
[all …]
balloon_compaction.c
23 struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) in balloon_page_enqueue()
26 struct page *page = alloc_page(balloon_mapping_gfp_mask() | in balloon_page_enqueue() local
28 if (!page) in balloon_page_enqueue()
36 BUG_ON(!trylock_page(page)); in balloon_page_enqueue()
38 balloon_page_insert(b_dev_info, page); in balloon_page_enqueue()
41 unlock_page(page); in balloon_page_enqueue()
42 return page; in balloon_page_enqueue()
57 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) in balloon_page_dequeue()
59 struct page *page, *tmp; in balloon_page_dequeue() local
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue()
[all …]
truncate.c
81 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
84 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
86 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
92 (*invalidatepage)(page, offset, length); in do_invalidatepage()
106 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument
108 if (page->mapping != mapping) in truncate_complete_page()
111 if (page_has_private(page)) in truncate_complete_page()
112 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page()
119 if (TestClearPageDirty(page)) in truncate_complete_page()
120 account_page_cleaned(page, mapping); in truncate_complete_page()
[all …]
internal.h
20 static inline void set_page_count(struct page *page, int v) in set_page_count() argument
22 atomic_set(&page->_count, v); in set_page_count()
43 static inline void set_page_refcounted(struct page *page) in set_page_refcounted() argument
45 VM_BUG_ON_PAGE(PageTail(page), page); in set_page_refcounted()
46 VM_BUG_ON_PAGE(atomic_read(&page->_count), page); in set_page_refcounted()
47 set_page_count(page, 1); in set_page_refcounted()
50 static inline void __get_page_tail_foll(struct page *page, in __get_page_tail_foll() argument
64 VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page); in __get_page_tail_foll()
66 atomic_inc(&page->first_page->_count); in __get_page_tail_foll()
67 get_huge_page_tail(page); in __get_page_tail_foll()
[all …]
page_isolation.c
12 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages) in set_migratetype_isolate() argument
20 zone = page_zone(page); in set_migratetype_isolate()
24 pfn = page_to_pfn(page); in set_migratetype_isolate()
48 if (!has_unmovable_pages(zone, page, arg.pages_found, in set_migratetype_isolate()
60 int migratetype = get_pageblock_migratetype(page); in set_migratetype_isolate()
62 set_pageblock_migratetype(page, MIGRATE_ISOLATE); in set_migratetype_isolate()
64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); in set_migratetype_isolate()
75 void unset_migratetype_isolate(struct page *page, unsigned migratetype) in unset_migratetype_isolate() argument
79 struct page *isolated_page = NULL; in unset_migratetype_isolate()
82 struct page *buddy; in unset_migratetype_isolate()
[all …]
migrate.c
84 struct page *page; in putback_movable_pages() local
85 struct page *page2; in putback_movable_pages()
87 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
88 if (unlikely(PageHuge(page))) { in putback_movable_pages()
89 putback_active_hugepage(page); in putback_movable_pages()
92 list_del(&page->lru); in putback_movable_pages()
93 dec_zone_page_state(page, NR_ISOLATED_ANON + in putback_movable_pages()
94 page_is_file_cache(page)); in putback_movable_pages()
95 if (unlikely(isolated_balloon_page(page))) in putback_movable_pages()
96 balloon_page_putback(page); in putback_movable_pages()
[all …]
swap_state.c
79 int __add_to_swap_cache(struct page *page, swp_entry_t entry) in __add_to_swap_cache() argument
84 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_swap_cache()
85 VM_BUG_ON_PAGE(PageSwapCache(page), page); in __add_to_swap_cache()
86 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in __add_to_swap_cache()
88 page_cache_get(page); in __add_to_swap_cache()
89 SetPageSwapCache(page); in __add_to_swap_cache()
90 set_page_private(page, entry.val); in __add_to_swap_cache()
95 entry.val, page); in __add_to_swap_cache()
98 __inc_zone_page_state(page, NR_FILE_PAGES); in __add_to_swap_cache()
110 set_page_private(page, 0UL); in __add_to_swap_cache()
[all …]
rmap.c
453 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
459 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
462 if (!page_mapped(page)) in page_get_anon_vma()
478 if (!page_mapped(page)) { in page_get_anon_vma()
496 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
503 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
506 if (!page_mapped(page)) in page_lock_anon_vma_read()
517 if (!page_mapped(page)) { in page_lock_anon_vma_read()
530 if (!page_mapped(page)) { in page_lock_anon_vma_read()
567 __vma_address(struct page *page, struct vm_area_struct *vma) in __vma_address() argument
[all …]
page_io.c
28 struct page *page, bio_end_io_t end_io) in get_swap_bio() argument
34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio()
36 bio->bi_io_vec[0].bv_page = page; in get_swap_bio()
49 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_write() local
52 SetPageError(page); in end_swap_bio_write()
61 set_page_dirty(page); in end_swap_bio_write()
66 ClearPageReclaim(page); in end_swap_bio_write()
68 end_page_writeback(page); in end_swap_bio_write()
75 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_read() local
78 SetPageError(page); in end_swap_bio_read()
[all …]
mlock.c
57 void clear_page_mlock(struct page *page) in clear_page_mlock() argument
59 if (!TestClearPageMlocked(page)) in clear_page_mlock()
62 mod_zone_page_state(page_zone(page), NR_MLOCK, in clear_page_mlock()
63 -hpage_nr_pages(page)); in clear_page_mlock()
65 if (!isolate_lru_page(page)) { in clear_page_mlock()
66 putback_lru_page(page); in clear_page_mlock()
71 if (PageUnevictable(page)) in clear_page_mlock()
80 void mlock_vma_page(struct page *page) in mlock_vma_page() argument
83 BUG_ON(!PageLocked(page)); in mlock_vma_page()
85 if (!TestSetPageMlocked(page)) { in mlock_vma_page()
[all …]
debug-pagealloc.c
32 static inline void set_page_poison(struct page *page) in set_page_poison() argument
36 page_ext = lookup_page_ext(page); in set_page_poison()
40 static inline void clear_page_poison(struct page *page) in clear_page_poison() argument
44 page_ext = lookup_page_ext(page); in clear_page_poison()
48 static inline bool page_poison(struct page *page) in page_poison() argument
52 page_ext = lookup_page_ext(page); in page_poison()
56 static void poison_page(struct page *page) in poison_page() argument
58 void *addr = kmap_atomic(page); in poison_page()
60 set_page_poison(page); in poison_page()
65 static void poison_pages(struct page *page, int n) in poison_pages() argument
[all …]
dmapool.c
73 struct dma_page *page; in show_pools() local
89 list_for_each_entry(page, &pool->page_list, page_list) { in show_pools()
91 blocks += page->in_use; in show_pools()
206 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) in pool_initialise_page() argument
217 *(int *)(page->vaddr + offset) = next; in pool_initialise_page()
224 struct dma_page *page; in pool_alloc_page() local
226 page = kmalloc(sizeof(*page), mem_flags); in pool_alloc_page()
227 if (!page) in pool_alloc_page()
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, in pool_alloc_page()
230 &page->dma, mem_flags); in pool_alloc_page()
[all …]
slub.c
229 struct page *page, const void *object) in check_valid_pointer() argument
236 base = page_address(page); in check_valid_pointer()
237 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
339 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
341 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
344 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
346 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
349 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) in set_page_slub_counters() argument
351 struct page tmp; in set_page_slub_counters()
359 page->frozen = tmp.frozen; in set_page_slub_counters()
[all …]
highmem.c
57 static inline unsigned int get_pkmap_color(struct page *page) in get_pkmap_color() argument
154 struct page *kmap_to_page(void *vaddr) in kmap_to_page()
175 struct page *page; in flush_all_zero_pkmaps() local
197 page = pte_page(pkmap_page_table[i]); in flush_all_zero_pkmaps()
200 set_page_address(page, NULL); in flush_all_zero_pkmaps()
217 static inline unsigned long map_new_virtual(struct page *page) in map_new_virtual() argument
222 unsigned int color = get_pkmap_color(page); in map_new_virtual()
254 if (page_address(page)) in map_new_virtual()
255 return (unsigned long)page_address(page); in map_new_virtual()
263 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); in map_new_virtual()
[all …]
zsmalloc.c
226 struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
402 static int is_first_page(struct page *page) in is_first_page() argument
404 return PagePrivate(page); in is_first_page()
407 static int is_last_page(struct page *page) in is_last_page() argument
409 return PagePrivate2(page); in is_last_page()
412 static void get_zspage_mapping(struct page *page, unsigned int *class_idx, in get_zspage_mapping() argument
416 BUG_ON(!is_first_page(page)); in get_zspage_mapping()
418 m = (unsigned long)page->mapping; in get_zspage_mapping()
423 static void set_zspage_mapping(struct page *page, unsigned int class_idx, in set_zspage_mapping() argument
427 BUG_ON(!is_first_page(page)); in set_zspage_mapping()
[all …]
ksm.c
365 struct page *page; in break_ksm() local
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
371 if (IS_ERR_OR_NULL(page)) in break_ksm()
373 if (PageKsm(page)) in break_ksm()
378 put_page(page); in break_ksm()
444 static struct page *page_trans_compound_anon(struct page *page) in page_trans_compound_anon() argument
446 if (PageTransCompound(page)) { in page_trans_compound_anon()
447 struct page *head = compound_head(page); in page_trans_compound_anon()
458 static struct page *get_mergeable_page(struct rmap_item *rmap_item) in get_mergeable_page()
463 struct page *page; in get_mergeable_page() local
[all …]
page_alloc.c
168 static void __free_pages_ok(struct page *page, unsigned int order);
238 void set_pageblock_migratetype(struct page *page, int migratetype) in set_pageblock_migratetype() argument
244 set_pageblock_flags_group(page, (unsigned long)migratetype, in set_pageblock_migratetype()
249 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
253 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
272 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
274 if (!pfn_valid_within(page_to_pfn(page))) in page_is_consistent()
276 if (zone != page_zone(page)) in page_is_consistent()
284 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
286 if (page_outside_zone_boundaries(zone, page)) in bad_range()
[all …]
memory-failure.c
79 static int hwpoison_filter_dev(struct page *p) in hwpoison_filter_dev()
109 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags()
134 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task()
157 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task()
160 int hwpoison_filter(struct page *p) in hwpoison_filter()
177 int hwpoison_filter(struct page *p) in hwpoison_filter()
191 unsigned long pfn, struct page *page, int flags) in kill_proc() argument
205 si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; in kill_proc()
230 void shake_page(struct page *p, int access) in shake_page()
289 static void add_to_kill(struct task_struct *tsk, struct page *p, in add_to_kill()
[all …]
page_owner.c
49 void __reset_page_owner(struct page *page, unsigned int order) in __reset_page_owner() argument
55 page_ext = lookup_page_ext(page + i); in __reset_page_owner()
60 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) in __set_page_owner() argument
62 struct page_ext *page_ext = lookup_page_ext(page); in __set_page_owner()
81 struct page *page, struct page_ext *page_ext) in print_page_owner() argument
103 pageblock_mt = get_pfnblock_migratetype(page, pfn); in print_page_owner()
111 PageLocked(page) ? "K" : " ", in print_page_owner()
112 PageError(page) ? "E" : " ", in print_page_owner()
113 PageReferenced(page) ? "R" : " ", in print_page_owner()
114 PageUptodate(page) ? "U" : " ", in print_page_owner()
[all …]
vmscan.c
109 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
115 struct page *prev; \
129 struct page *prev; \
445 static inline int is_page_cache_freeable(struct page *page) in is_page_cache_freeable() argument
452 return page_count(page) - page_has_private(page) == 2; in is_page_cache_freeable()
480 struct page *page, int error) in handle_write_error() argument
482 lock_page(page); in handle_write_error()
483 if (page_mapping(page) == mapping) in handle_write_error()
485 unlock_page(page); in handle_write_error()
504 static pageout_t pageout(struct page *page, struct address_space *mapping, in pageout() argument
[all …]
hugetlb.c
589 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
591 int nid = page_to_nid(page); in enqueue_huge_page()
592 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
597 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) in dequeue_huge_page_node()
599 struct page *page; in dequeue_huge_page_node() local
601 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node()
602 if (!is_migrate_isolate_page(page)) in dequeue_huge_page_node()
608 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node()
610 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node()
611 set_page_refcounted(page); in dequeue_huge_page_node()
[all …]
huge_memory.c
172 struct page *huge_zero_page __read_mostly;
179 static struct page *get_huge_zero_page(void) in get_huge_zero_page()
181 struct page *zero_page; in get_huge_zero_page()
226 struct page *zero_page = xchg(&huge_zero_page, NULL); in shrink_huge_zero_page_scan()
709 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) in mk_huge_pmd() argument
712 entry = mk_pmd(page, prot); in mk_huge_pmd()
720 struct page *page, gfp_t gfp) in __do_huge_pmd_anonymous_page() argument
726 VM_BUG_ON_PAGE(!PageCompound(page), page); in __do_huge_pmd_anonymous_page()
728 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) in __do_huge_pmd_anonymous_page()
733 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
[all …]
gup.c
19 static struct page *no_page_table(struct vm_area_struct *vma, in no_page_table()
35 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte()
39 struct page *page; in follow_page_pte() local
74 page = vm_normal_page(vma, address, pte); in follow_page_pte()
75 if (unlikely(!page)) { in follow_page_pte()
79 page = pte_page(pte); in follow_page_pte()
83 get_page_foll(page); in follow_page_pte()
86 !pte_dirty(pte) && !PageDirty(page)) in follow_page_pte()
87 set_page_dirty(page); in follow_page_pte()
93 mark_page_accessed(page); in follow_page_pte()
[all …]
readahead.c
35 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
45 struct page *page) in read_cache_pages_invalidate_page() argument
47 if (page_has_private(page)) { in read_cache_pages_invalidate_page()
48 if (!trylock_page(page)) in read_cache_pages_invalidate_page()
50 page->mapping = mapping; in read_cache_pages_invalidate_page()
51 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in read_cache_pages_invalidate_page()
52 page->mapping = NULL; in read_cache_pages_invalidate_page()
53 unlock_page(page); in read_cache_pages_invalidate_page()
55 page_cache_release(page); in read_cache_pages_invalidate_page()
64 struct page *victim; in read_cache_pages_invalidate_pages()
[all …]
kmemcheck.c
8 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) in kmemcheck_alloc_shadow() argument
10 struct page *shadow; in kmemcheck_alloc_shadow()
29 page[i].shadow = page_address(&shadow[i]); in kmemcheck_alloc_shadow()
36 kmemcheck_hide_pages(page, pages); in kmemcheck_alloc_shadow()
39 void kmemcheck_free_shadow(struct page *page, int order) in kmemcheck_free_shadow() argument
41 struct page *shadow; in kmemcheck_free_shadow()
45 if (!kmemcheck_page_is_tracked(page)) in kmemcheck_free_shadow()
50 kmemcheck_show_pages(page, pages); in kmemcheck_free_shadow()
52 shadow = virt_to_page(page[0].shadow); in kmemcheck_free_shadow()
55 page[i].shadow = NULL; in kmemcheck_free_shadow()
[all …]
shmem.c
119 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
120 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
123 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
126 struct page **pagep, enum sgp_type sgp, int *fault_type) in shmem_getpage()
292 static int shmem_add_to_page_cache(struct page *page, in shmem_add_to_page_cache() argument
298 VM_BUG_ON_PAGE(!PageLocked(page), page); in shmem_add_to_page_cache()
299 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in shmem_add_to_page_cache()
301 page_cache_get(page); in shmem_add_to_page_cache()
302 page->mapping = mapping; in shmem_add_to_page_cache()
303 page->index = index; in shmem_add_to_page_cache()
[all …]
compaction.c
55 struct page *page, *next; in release_freepages() local
58 list_for_each_entry_safe(page, next, freelist, lru) { in release_freepages()
59 unsigned long pfn = page_to_pfn(page); in release_freepages()
60 list_del(&page->lru); in release_freepages()
61 __free_page(page); in release_freepages()
71 struct page *page; in map_pages() local
73 list_for_each_entry(page, list, lru) { in map_pages()
74 arch_alloc_page(page, 0); in map_pages()
75 kernel_map_pages(page, 1, 1); in map_pages()
76 kasan_alloc_pages(page, 0); in map_pages()
[all …]
page-writeback.c
1883 struct page *page = pvec.pages[i]; in write_cache_pages() local
1892 if (page->index > end) { in write_cache_pages()
1901 done_index = page->index; in write_cache_pages()
1903 lock_page(page); in write_cache_pages()
1913 if (unlikely(page->mapping != mapping)) { in write_cache_pages()
1915 unlock_page(page); in write_cache_pages()
1919 if (!PageDirty(page)) { in write_cache_pages()
1924 if (PageWriteback(page)) { in write_cache_pages()
1926 wait_on_page_writeback(page); in write_cache_pages()
1931 BUG_ON(PageWriteback(page)); in write_cache_pages()
[all …]
swapfile.c
99 struct page *page; in __try_to_reclaim_swap() local
102 page = find_get_page(swap_address_space(entry), entry.val); in __try_to_reclaim_swap()
103 if (!page) in __try_to_reclaim_swap()
112 if (trylock_page(page)) { in __try_to_reclaim_swap()
113 ret = try_to_free_swap(page); in __try_to_reclaim_swap()
114 unlock_page(page); in __try_to_reclaim_swap()
116 page_cache_release(page); in __try_to_reclaim_swap()
862 int page_swapcount(struct page *page) in page_swapcount() argument
868 entry.val = page_private(page); in page_swapcount()
883 int reuse_swap_page(struct page *page) in reuse_swap_page() argument
[all …]
cleancache.c
177 int __cleancache_get_page(struct page *page) in __cleancache_get_page() argument
188 VM_BUG_ON_PAGE(!PageLocked(page), page); in __cleancache_get_page()
189 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_get_page()
193 if (cleancache_get_key(page->mapping->host, &key) < 0) in __cleancache_get_page()
196 ret = cleancache_ops->get_page(pool_id, key, page->index, page); in __cleancache_get_page()
216 void __cleancache_put_page(struct page *page) in __cleancache_put_page() argument
226 VM_BUG_ON_PAGE(!PageLocked(page), page); in __cleancache_put_page()
227 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_put_page()
229 cleancache_get_key(page->mapping->host, &key) >= 0) { in __cleancache_put_page()
230 cleancache_ops->put_page(pool_id, key, page->index, page); in __cleancache_put_page()
[all …]
sparse.c
45 int page_to_nid(const struct page *page) in page_to_nid() argument
47 return section_to_node_table[page_to_section(page)]; in page_to_nid()
209 return nr_pages * sizeof(struct page); in node_memmap_size_bytes()
217 static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum) in sparse_encode_mem_map()
225 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum) in sparse_decode_mem_map()
229 return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum); in sparse_decode_mem_map()
233 unsigned long pnum, struct page *mem_map, in sparse_init_one_section()
372 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) in sparse_mem_map_populate()
374 struct page *map; in sparse_mem_map_populate()
377 map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION); in sparse_mem_map_populate()
[all …]
memory_hotplug.c
47 static void generic_online_page(struct page *page);
158 void get_page_bootmem(unsigned long info, struct page *page, in get_page_bootmem() argument
161 page->lru.next = (struct list_head *) type; in get_page_bootmem()
162 SetPagePrivate(page); in get_page_bootmem()
163 set_page_private(page, info); in get_page_bootmem()
164 atomic_inc(&page->_count); in get_page_bootmem()
167 void put_page_bootmem(struct page *page) in put_page_bootmem() argument
171 type = (unsigned long) page->lru.next; in put_page_bootmem()
175 if (atomic_dec_return(&page->_count) == 1) { in put_page_bootmem()
176 ClearPagePrivate(page); in put_page_bootmem()
[all …]
slab.c
397 static void set_obj_status(struct page *page, int idx, int val) in set_obj_status() argument
401 struct kmem_cache *cachep = page->slab_cache; in set_obj_status()
404 status = (char *)page->freelist + freelist_size; in set_obj_status()
408 static inline unsigned int get_obj_status(struct page *page, int idx) in get_obj_status() argument
412 struct kmem_cache *cachep = page->slab_cache; in get_obj_status()
415 status = (char *)page->freelist + freelist_size; in get_obj_status()
421 static inline void set_obj_status(struct page *page, int idx, int val) {} in set_obj_status() argument
436 struct page *page = virt_to_head_page(obj); in virt_to_cache() local
437 return page->slab_cache; in virt_to_cache()
440 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, in index_to_obj() argument
[all …]
percpu-vm.c
13 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, in pcpu_chunk_page()
33 static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc) in pcpu_get_pages()
35 static struct page **pages; in pcpu_get_pages()
56 struct page **pages, int page_start, int page_end) in pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages() local
65 if (page) in pcpu_free_pages()
66 __free_page(page); in pcpu_free_pages()
83 struct page **pages, int page_start, int page_end) in pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
152 struct page **pages, int page_start, int page_end) in pcpu_unmap_pages()
[all …]
/linux-4.1.27/fs/9p/
cache.h
44 extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
45 extern void __v9fs_fscache_invalidate_page(struct page *page);
47 struct page *page);
52 extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
54 struct page *page);
56 static inline int v9fs_fscache_release_page(struct page *page, in v9fs_fscache_release_page() argument
59 return __v9fs_fscache_release_page(page, gfp); in v9fs_fscache_release_page()
62 static inline void v9fs_fscache_invalidate_page(struct page *page) in v9fs_fscache_invalidate_page() argument
64 __v9fs_fscache_invalidate_page(page); in v9fs_fscache_invalidate_page()
68 struct page *page) in v9fs_readpage_from_fscache() argument
[all …]
vfs_addr.c
52 static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) in v9fs_fid_readpage() argument
54 struct inode *inode = page->mapping->host; in v9fs_fid_readpage()
55 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage()
61 BUG_ON(!PageLocked(page)); in v9fs_fid_readpage()
63 retval = v9fs_readpage_from_fscache(inode, page); in v9fs_fid_readpage()
69 retval = p9_client_read(fid, page_offset(page), &to, &err); in v9fs_fid_readpage()
71 v9fs_uncache_page(inode, page); in v9fs_fid_readpage()
76 zero_user(page, retval, PAGE_SIZE - retval); in v9fs_fid_readpage()
77 flush_dcache_page(page); in v9fs_fid_readpage()
78 SetPageUptodate(page); in v9fs_fid_readpage()
[all …]
cache.c
280 int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) in __v9fs_fscache_release_page() argument
282 struct inode *inode = page->mapping->host; in __v9fs_fscache_release_page()
287 return fscache_maybe_release_page(v9inode->fscache, page, gfp); in __v9fs_fscache_release_page()
290 void __v9fs_fscache_invalidate_page(struct page *page) in __v9fs_fscache_invalidate_page() argument
292 struct inode *inode = page->mapping->host; in __v9fs_fscache_invalidate_page()
297 if (PageFsCache(page)) { in __v9fs_fscache_invalidate_page()
298 fscache_wait_on_page_write(v9inode->fscache, page); in __v9fs_fscache_invalidate_page()
299 BUG_ON(!PageLocked(page)); in __v9fs_fscache_invalidate_page()
300 fscache_uncache_page(v9inode->fscache, page); in __v9fs_fscache_invalidate_page()
304 static void v9fs_vfs_readpage_complete(struct page *page, void *data, in v9fs_vfs_readpage_complete() argument
[all …]
/linux-4.1.27/fs/jfs/
jfs_metapage.c
61 unlock_page(mp->page); in __lock_metapage()
63 lock_page(mp->page); in __lock_metapage()
92 #define mp_anchor(page) ((struct meta_anchor *)page_private(page)) argument
94 static inline struct metapage *page_to_mp(struct page *page, int offset) in page_to_mp() argument
96 if (!PagePrivate(page)) in page_to_mp()
98 return mp_anchor(page)->mp[offset >> L2PSIZE]; in page_to_mp()
101 static inline int insert_metapage(struct page *page, struct metapage *mp) in insert_metapage() argument
107 if (PagePrivate(page)) in insert_metapage()
108 a = mp_anchor(page); in insert_metapage()
113 set_page_private(page, (unsigned long)a); in insert_metapage()
[all …]
/linux-4.1.27/fs/sysv/
dir.c
30 static inline void dir_put_page(struct page *page) in dir_put_page() argument
32 kunmap(page); in dir_put_page()
33 page_cache_release(page); in dir_put_page()
41 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) in dir_commit_chunk() argument
43 struct address_space *mapping = page->mapping; in dir_commit_chunk()
47 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk()
53 err = write_one_page(page, 1); in dir_commit_chunk()
55 unlock_page(page); in dir_commit_chunk()
59 static struct page * dir_get_page(struct inode *dir, unsigned long n) in dir_get_page()
62 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() local
[all …]
/linux-4.1.27/fs/ceph/
addr.c
58 static inline struct ceph_snap_context *page_snap_context(struct page *page) in page_snap_context() argument
60 if (PagePrivate(page)) in page_snap_context()
61 return (void *)page->private; in page_snap_context()
69 static int ceph_set_page_dirty(struct page *page) in ceph_set_page_dirty() argument
71 struct address_space *mapping = page->mapping; in ceph_set_page_dirty()
78 return !TestSetPageDirty(page); in ceph_set_page_dirty()
80 if (PageDirty(page)) { in ceph_set_page_dirty()
82 mapping->host, page, page->index); in ceph_set_page_dirty()
83 BUG_ON(!PagePrivate(page)); in ceph_set_page_dirty()
106 mapping->host, page, page->index, in ceph_set_page_dirty()
[all …]
cache.h
42 int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
47 void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
48 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
63 struct page *page) in ceph_fscache_uncache_page() argument
66 return fscache_uncache_page(ci->fscache, page); in ceph_fscache_uncache_page()
69 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) in ceph_release_fscache_page() argument
71 struct inode* inode = page->mapping->host; in ceph_release_fscache_page()
73 return fscache_maybe_release_page(ci->fscache, page, gfp); in ceph_release_fscache_page()
77 struct page *page) in ceph_fscache_readpage_cancel() argument
80 if (fscache_cookie_valid(ci->fscache) && PageFsCache(page)) in ceph_fscache_readpage_cancel()
[all …]
/linux-4.1.27/fs/afs/
file.c
21 static int afs_readpage(struct file *file, struct page *page);
22 static void afs_invalidatepage(struct page *page, unsigned int offset,
24 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
25 static int afs_launder_page(struct page *page);
108 static void afs_file_readpage_read_complete(struct page *page, in afs_file_readpage_read_complete() argument
112 _enter("%p,%p,%d", page, data, error); in afs_file_readpage_read_complete()
117 SetPageUptodate(page); in afs_file_readpage_read_complete()
118 unlock_page(page); in afs_file_readpage_read_complete()
125 int afs_page_filler(void *data, struct page *page) in afs_page_filler() argument
127 struct inode *inode = page->mapping->host; in afs_page_filler()
[all …]
write.c
20 struct page *page);
25 int afs_set_page_dirty(struct page *page) in afs_set_page_dirty() argument
28 return __set_page_dirty_nobuffers(page); in afs_set_page_dirty()
87 loff_t pos, struct page *page) in afs_fill_page() argument
101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page); in afs_fill_page()
120 struct page **pagep, void **fsdata) in afs_write_begin()
124 struct page *page; in afs_write_begin() local
146 page = grab_cache_page_write_begin(mapping, index, flags); in afs_write_begin()
147 if (!page) { in afs_write_begin()
151 *pagep = page; in afs_write_begin()
[all …]
/linux-4.1.27/sound/pci/trident/
trident_memory.c
37 #define __set_tlb_bus(trident,page,ptr,addr) \ argument
38 do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
39 (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
40 #define __tlb_to_ptr(trident,page) \ argument
41 (void*)((trident)->tlb.shadow_entries[page])
42 #define __tlb_to_addr(trident,page) \ argument
43 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
50 #define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr) argument
52 #define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silen… argument
56 #define aligned_page_offset(page) ((page) << 12) argument
[all …]
/linux-4.1.27/fs/logfs/
file.c
14 struct page **pagep, void **fsdata) in logfs_write_begin()
17 struct page *page; in logfs_write_begin() local
20 page = grab_cache_page_write_begin(mapping, index, flags); in logfs_write_begin()
21 if (!page) in logfs_write_begin()
23 *pagep = page; in logfs_write_begin()
25 if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) in logfs_write_begin()
32 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); in logfs_write_begin()
35 return logfs_readpage_nolock(page); in logfs_write_begin()
39 loff_t pos, unsigned len, unsigned copied, struct page *page, in logfs_write_end() argument
43 pgoff_t index = page->index; in logfs_write_end()
[all …]
readwrite.c
205 static void prelock_page(struct super_block *sb, struct page *page, int lock) in prelock_page() argument
209 BUG_ON(!PageLocked(page)); in prelock_page()
211 BUG_ON(PagePreLocked(page)); in prelock_page()
212 SetPagePreLocked(page); in prelock_page()
215 if (PagePreLocked(page)) in prelock_page()
218 SetPagePreLocked(page); in prelock_page()
222 static void preunlock_page(struct super_block *sb, struct page *page, int lock) in preunlock_page() argument
226 BUG_ON(!PageLocked(page)); in preunlock_page()
228 ClearPagePreLocked(page); in preunlock_page()
231 BUG_ON(!PagePreLocked(page)); in preunlock_page()
[all …]
segment.c
52 static struct page *get_mapping_page(struct super_block *sb, pgoff_t index, in get_mapping_page()
58 struct page *page; in get_mapping_page() local
62 page = read_cache_page(mapping, index, filler, sb); in get_mapping_page()
64 page = find_or_create_page(mapping, index, GFP_NOFS); in get_mapping_page()
65 if (page) in get_mapping_page()
66 unlock_page(page); in get_mapping_page()
68 return page; in get_mapping_page()
75 struct page *page; in __logfs_buf_write() local
84 page = get_mapping_page(area->a_sb, index, use_filler); in __logfs_buf_write()
85 if (IS_ERR(page)) in __logfs_buf_write()
[all …]
/linux-4.1.27/drivers/staging/android/ion/
ion_page_pool.c
29 struct page *page = alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages() local
31 if (!page) in ion_page_pool_alloc_pages()
33 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, in ion_page_pool_alloc_pages()
35 return page; in ion_page_pool_alloc_pages()
39 struct page *page) in ion_page_pool_free_pages() argument
41 __free_pages(page, pool->order); in ion_page_pool_free_pages()
44 static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) in ion_page_pool_add() argument
47 if (PageHighMem(page)) { in ion_page_pool_add()
48 list_add_tail(&page->lru, &pool->high_items); in ion_page_pool_add()
51 list_add_tail(&page->lru, &pool->low_items); in ion_page_pool_add()
[all …]
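
The pool keeps previously freed pages on its high/low lists so a later allocation can skip both the buddy allocator and the cache sync. A hedged sketch of the recycle-or-allocate pattern these functions implement (ion_page_pool_remove() is an assumed list-pop helper, not shown in the hits above):

struct page *page = ion_page_pool_remove(pool, true);	/* try a cached page first */
if (!page)
	page = ion_page_pool_alloc_pages(pool);		/* fresh pages + dma sync */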
ion_system_heap.c
55 static struct page *alloc_buffer_page(struct ion_system_heap *heap, in alloc_buffer_page()
61 struct page *page; in alloc_buffer_page() local
64 page = ion_page_pool_alloc(pool); in alloc_buffer_page()
70 page = alloc_pages(gfp_flags | __GFP_COMP, order); in alloc_buffer_page()
71 if (!page) in alloc_buffer_page()
73 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, in alloc_buffer_page()
77 return page; in alloc_buffer_page()
81 struct ion_buffer *buffer, struct page *page) in free_buffer_page() argument
83 unsigned int order = compound_order(page); in free_buffer_page()
89 ion_page_pool_free(pool, page); in free_buffer_page()
[all …]
/linux-4.1.27/fs/ecryptfs/
mmap.c
47 struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index) in ecryptfs_get_locked_page()
49 struct page *page = read_mapping_page(inode->i_mapping, index, NULL); in ecryptfs_get_locked_page() local
50 if (!IS_ERR(page)) in ecryptfs_get_locked_page()
51 lock_page(page); in ecryptfs_get_locked_page()
52 return page; in ecryptfs_get_locked_page()
65 static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) in ecryptfs_writepage() argument
69 rc = ecryptfs_encrypt_page(page); in ecryptfs_writepage()
72 "page (upper index [0x%.16lx])\n", page->index); in ecryptfs_writepage()
73 ClearPageUptodate(page); in ecryptfs_writepage()
76 SetPageUptodate(page); in ecryptfs_writepage()
[all …]
/linux-4.1.27/arch/mips/include/asm/
cacheflush.h
39 #define Page_dcache_dirty(page) \ argument
40 test_bit(PG_dcache_dirty, &(page)->flags)
41 #define SetPageDcacheDirty(page) \ argument
42 set_bit(PG_dcache_dirty, &(page)->flags)
43 #define ClearPageDcacheDirty(page) \ argument
44 clear_bit(PG_dcache_dirty, &(page)->flags)
52 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
53 extern void __flush_dcache_page(struct page *page);
56 static inline void flush_dcache_page(struct page *page) in flush_dcache_page() argument
59 __flush_dcache_page(page); in flush_dcache_page()
[all …]
highmem.h
44 extern void * kmap_high(struct page *page);
45 extern void kunmap_high(struct page *page);
47 extern void *kmap(struct page *page);
48 extern void kunmap(struct page *page);
49 extern void *kmap_atomic(struct page *page);
52 extern struct page *kmap_atomic_to_page(void *ptr);
/linux-4.1.27/arch/avr32/mm/
dma-coherent.c
40 static struct page *__dma_alloc(struct device *dev, size_t size, in __dma_alloc()
43 struct page *page, *free, *end; in __dma_alloc() local
56 page = alloc_pages(gfp, order); in __dma_alloc()
57 if (!page) in __dma_alloc()
59 split_page(page, order); in __dma_alloc()
70 invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size); in __dma_alloc()
72 *handle = page_to_bus(page); in __dma_alloc()
73 free = page + (size >> PAGE_SHIFT); in __dma_alloc()
74 end = page + (1 << order); in __dma_alloc()
84 return page; in __dma_alloc()
[all …]
/linux-4.1.27/fs/nilfs2/
page.c
43 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, in __nilfs_get_page_block() argument
50 if (!page_has_buffers(page)) in __nilfs_get_page_block()
51 create_empty_buffers(page, 1 << blkbits, b_state); in __nilfs_get_page_block()
54 bh = nilfs_page_get_nth_block(page, block - first_block); in __nilfs_get_page_block()
68 struct page *page; in nilfs_grab_buffer() local
71 page = grab_cache_page(mapping, index); in nilfs_grab_buffer()
72 if (unlikely(!page)) in nilfs_grab_buffer()
75 bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); in nilfs_grab_buffer()
77 unlock_page(page); in nilfs_grab_buffer()
78 page_cache_release(page); in nilfs_grab_buffer()
[all …]
dir.c
58 static inline void nilfs_put_page(struct page *page) in nilfs_put_page() argument
60 kunmap(page); in nilfs_put_page()
61 page_cache_release(page); in nilfs_put_page()
83 static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to) in nilfs_prepare_chunk() argument
85 loff_t pos = page_offset(page) + from; in nilfs_prepare_chunk()
86 return __block_write_begin(page, pos, to - from, nilfs_get_block); in nilfs_prepare_chunk()
89 static void nilfs_commit_chunk(struct page *page, in nilfs_commit_chunk() argument
94 loff_t pos = page_offset(page) + from; in nilfs_commit_chunk()
99 nr_dirty = nilfs_page_count_clean_buffers(page, from, to); in nilfs_commit_chunk()
100 copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); in nilfs_commit_chunk()
[all …]
file.c
61 struct page *page = vmf->page; in nilfs_page_mkwrite() local
70 lock_page(page); in nilfs_page_mkwrite()
71 if (page->mapping != inode->i_mapping || in nilfs_page_mkwrite()
72 page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { in nilfs_page_mkwrite()
73 unlock_page(page); in nilfs_page_mkwrite()
81 if (PageMappedToDisk(page)) in nilfs_page_mkwrite()
84 if (page_has_buffers(page)) { in nilfs_page_mkwrite()
88 bh = head = page_buffers(page); in nilfs_page_mkwrite()
97 SetPageMappedToDisk(page); in nilfs_page_mkwrite()
101 unlock_page(page); in nilfs_page_mkwrite()
[all …]
/linux-4.1.27/fs/squashfs/
file_direct.c
23 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
24 int pages, struct page **page);
27 int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) in squashfs_readpage_block()
38 struct page **page; in squashfs_readpage_block() local
47 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); in squashfs_readpage_block()
48 if (page == NULL) in squashfs_readpage_block()
55 actor = squashfs_page_actor_init_special(page, pages, 0); in squashfs_readpage_block()
61 page[i] = (n == target_page->index) ? target_page : in squashfs_readpage_block()
64 if (page[i] == NULL) { in squashfs_readpage_block()
69 if (PageUptodate(page[i])) { in squashfs_readpage_block()
[all …]
/linux-4.1.27/fs/exofs/
Ddir.c41 static inline void exofs_put_page(struct page *page) in exofs_put_page() argument
43 kunmap(page); in exofs_put_page()
44 page_cache_release(page); in exofs_put_page()
63 static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len) in exofs_commit_chunk() argument
65 struct address_space *mapping = page->mapping; in exofs_commit_chunk()
71 if (!PageUptodate(page)) in exofs_commit_chunk()
72 SetPageUptodate(page); in exofs_commit_chunk()
78 set_page_dirty(page); in exofs_commit_chunk()
81 err = write_one_page(page, 1); in exofs_commit_chunk()
83 unlock_page(page); in exofs_commit_chunk()
[all …]
Dinode.c55 struct page **pages;
63 struct page *that_locked_page;
113 pcol->pages = kmalloc(pages * sizeof(struct page *), in pcol_try_alloc()
137 static int pcol_add_page(struct page_collect *pcol, struct page *page, in pcol_add_page() argument
143 pcol->pages[pcol->nr_pages++] = page; in pcol_add_page()
149 static int update_read_page(struct page *page, int ret) in update_read_page() argument
154 SetPageUptodate(page); in update_read_page()
155 if (PageError(page)) in update_read_page()
156 ClearPageError(page); in update_read_page()
164 clear_highpage(page); in update_read_page()
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
Dcl_page.c54 # define PASSERT(env, page, expr) \ argument
57 CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
62 # define PINVRNT(env, page, exp) \ argument
63 ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
76 static struct cl_page *cl_page_top_trusted(struct cl_page *page) in cl_page_top_trusted() argument
78 while (page->cp_parent != NULL) in cl_page_top_trusted()
79 page = page->cp_parent; in cl_page_top_trusted()
80 return page; in cl_page_top_trusted()
94 static void cl_page_get_trust(struct cl_page *page) in cl_page_get_trust() argument
96 LASSERT(atomic_read(&page->cp_ref) > 0); in cl_page_get_trust()
[all …]
Dcl_io.c689 cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page) in cl_io_slice_page() argument
693 slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type); in cl_io_slice_page()
701 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io) in cl_page_in_io() argument
708 idx = page->cp_index; in cl_page_in_io()
718 start = cl_offset(page->cp_obj, idx); in cl_page_in_io()
719 end = cl_offset(page->cp_obj, idx + 1); in cl_page_in_io()
739 struct cl_page *page) in cl_io_read_page() argument
746 LINVRNT(cl_page_is_owned(page, io)); in cl_io_read_page()
748 LINVRNT(cl_page_in_io(page, io)); in cl_io_read_page()
768 slice = cl_io_slice_page(scan, page); in cl_io_read_page()
[all …]
/linux-4.1.27/include/trace/events/
Dpagemap.h18 #define trace_pagemap_flags(page) ( \ argument
19 (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
20 (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \
21 (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \
22 (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \
23 (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
24 (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \
30 struct page *page,
34 TP_ARGS(page, lru),
37 __field(struct page *, page )
[all …]
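
trace_pagemap_flags() above packs six page predicates into one word so the LRU-insertion tracepoint can log page state cheaply; the ternary on line 19 picks between the anonymous and file bits. Consumer-side decoding is plain bit tests, sketched here with only the PAGEMAP_* names taken from the header excerpted above:

    #include <linux/printk.h>

    /* Illustrative decode; relies on the PAGEMAP_* constants defined in
     * the trace header above. */
    static void describe_pagemap_flags(unsigned long flags)
    {
            pr_debug("lru-insert: %s%s%s%s\n",
                     (flags & PAGEMAP_ANONYMOUS)  ? "anon "      : "file ",
                     (flags & PAGEMAP_MAPPED)     ? "mapped "    : "",
                     (flags & PAGEMAP_SWAPCACHE)  ? "swapcache " : "",
                     (flags & PAGEMAP_SWAPBACKED) ? "swapbacked" : "");
    }
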
Dcma.h12 TP_PROTO(unsigned long pfn, const struct page *page,
15 TP_ARGS(pfn, page, count, align),
19 __field(const struct page *, page)
26 __entry->page = page;
33 __entry->page,
40 TP_PROTO(unsigned long pfn, const struct page *page,
43 TP_ARGS(pfn, page, count),
47 __field(const struct page *, page)
53 __entry->page = page;
59 __entry->page,
Dfilemap.h16 TP_PROTO(struct page *page),
18 TP_ARGS(page),
28 __entry->pfn = page_to_pfn(page);
29 __entry->i_ino = page->mapping->host->i_ino;
30 __entry->index = page->index;
31 if (page->mapping->host->i_sb)
32 __entry->s_dev = page->mapping->host->i_sb->s_dev;
34 __entry->s_dev = page->mapping->host->i_rdev;
46 TP_PROTO(struct page *page),
47 TP_ARGS(page)
[all …]
Dkmem.h163 TP_PROTO(struct page *page, unsigned int order),
165 TP_ARGS(page, order),
185 __entry->pfn = page_to_pfn(page);
197 TP_PROTO(struct page *page, int cold),
199 TP_ARGS(page, cold),
207 __entry->pfn = page_to_pfn(page);
219 TP_PROTO(struct page *page, unsigned int order,
222 TP_ARGS(page, order, gfp_flags, migratetype),
232 __entry->pfn = page ? page_to_pfn(page) : -1UL;
248 TP_PROTO(struct page *page, unsigned int order, int migratetype),
[all …]
/linux-4.1.27/arch/arm/mm/
Dflush.c116 void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr, in __flush_ptrace_access() argument
128 flush_pfn_alias(page_to_pfn(page), uaddr); in __flush_ptrace_access()
137 flush_icache_alias(page_to_pfn(page), uaddr, len); in __flush_ptrace_access()
147 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument
155 __flush_ptrace_access(page, uaddr, kaddr, len, flags); in flush_ptrace_access()
158 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, in flush_uprobe_xol_access() argument
163 __flush_ptrace_access(page, uaddr, kaddr, len, flags); in flush_uprobe_xol_access()
173 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
181 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page()
187 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
[all …]
Dhighmem.c37 void *kmap(struct page *page) in kmap() argument
40 if (!PageHighMem(page)) in kmap()
41 return page_address(page); in kmap()
42 return kmap_high(page); in kmap()
46 void kunmap(struct page *page) in kunmap() argument
49 if (!PageHighMem(page)) in kunmap()
51 kunmap_high(page); in kunmap()
55 void *kmap_atomic(struct page *page) in kmap_atomic() argument
63 if (!PageHighMem(page)) in kmap_atomic()
64 return page_address(page); in kmap_atomic()
[all …]
Ddma-mapping.c56 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
58 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
75 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, in arm_dma_map_page() argument
80 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page()
81 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_dma_map_page()
84 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, in arm_coherent_dma_map_page() argument
88 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_coherent_dma_map_page()
118 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_cpu() local
119 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu()
126 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_device() local
[all …]
/linux-4.1.27/fs/minix/
Ddir.c28 static inline void dir_put_page(struct page *page) in dir_put_page() argument
30 kunmap(page); in dir_put_page()
31 page_cache_release(page); in dir_put_page()
53 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) in dir_commit_chunk() argument
55 struct address_space *mapping = page->mapping; in dir_commit_chunk()
58 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk()
65 err = write_one_page(page, 1); in dir_commit_chunk()
67 unlock_page(page); in dir_commit_chunk()
71 static struct page * dir_get_page(struct inode *dir, unsigned long n) in dir_get_page()
74 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() local
[all …]
/linux-4.1.27/arch/xtensa/mm/
Dcache.c63 static inline void kmap_invalidate_coherent(struct page *page, in kmap_invalidate_coherent() argument
66 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { in kmap_invalidate_coherent()
69 if (!PageHighMem(page)) { in kmap_invalidate_coherent()
70 kvaddr = (unsigned long)page_to_virt(page); in kmap_invalidate_coherent()
75 (page_to_phys(page) & DCACHE_ALIAS_MASK); in kmap_invalidate_coherent()
78 page_to_phys(page)); in kmap_invalidate_coherent()
83 static inline void *coherent_kvaddr(struct page *page, unsigned long base, in coherent_kvaddr() argument
86 if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { in coherent_kvaddr()
87 *paddr = page_to_phys(page); in coherent_kvaddr()
91 return page_to_virt(page); in coherent_kvaddr()
[all …]
/linux-4.1.27/fs/ubifs/
Dfile.c104 static int do_readpage(struct page *page) in do_readpage() argument
110 struct inode *inode = page->mapping->host; in do_readpage()
114 inode->i_ino, page->index, i_size, page->flags); in do_readpage()
115 ubifs_assert(!PageChecked(page)); in do_readpage()
116 ubifs_assert(!PagePrivate(page)); in do_readpage()
118 addr = kmap(page); in do_readpage()
120 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in do_readpage()
124 SetPageChecked(page); in do_readpage()
166 SetPageChecked(page); in do_readpage()
171 page->index, inode->i_ino, err); in do_readpage()
[all …]
/linux-4.1.27/drivers/xen/
Dballoon.c109 static void scrub_page(struct page *page) in scrub_page() argument
112 clear_highpage(page); in scrub_page()
117 static void __balloon_append(struct page *page) in __balloon_append() argument
120 if (PageHighMem(page)) { in __balloon_append()
121 list_add_tail(&page->lru, &ballooned_pages); in __balloon_append()
124 list_add(&page->lru, &ballooned_pages); in __balloon_append()
129 static void balloon_append(struct page *page) in balloon_append() argument
131 __balloon_append(page); in balloon_append()
132 adjust_managed_page_count(page, -1); in balloon_append()
136 static struct page *balloon_retrieve(bool prefer_highmem) in balloon_retrieve()
[all …]
/linux-4.1.27/fs/ext2/
Ddir.c67 static inline void ext2_put_page(struct page *page) in ext2_put_page() argument
69 kunmap(page); in ext2_put_page()
70 page_cache_release(page); in ext2_put_page()
93 static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) in ext2_commit_chunk() argument
95 struct address_space *mapping = page->mapping; in ext2_commit_chunk()
100 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ext2_commit_chunk()
108 err = write_one_page(page, 1); in ext2_commit_chunk()
112 unlock_page(page); in ext2_commit_chunk()
118 static void ext2_check_page(struct page *page, int quiet) in ext2_check_page() argument
120 struct inode *dir = page->mapping->host; in ext2_check_page()
[all …]
/linux-4.1.27/fs/ufs/
Ddir.c43 static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) in ufs_commit_chunk() argument
45 struct address_space *mapping = page->mapping; in ufs_commit_chunk()
50 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ufs_commit_chunk()
56 err = write_one_page(page, 1); in ufs_commit_chunk()
58 unlock_page(page); in ufs_commit_chunk()
62 static inline void ufs_put_page(struct page *page) in ufs_put_page() argument
64 kunmap(page); in ufs_put_page()
65 page_cache_release(page); in ufs_put_page()
77 struct page *page; in ufs_inode_by_name() local
79 de = ufs_find_entry(dir, qstr, &page); in ufs_inode_by_name()
[all …]
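
ufs_commit_chunk() above is the same directory-page write-back idiom that the exofs, minix, and ext2 excerpts elsewhere in these results use: push the chunk through block_write_end(), grow i_size if the directory got longer, then either flush synchronously via write_one_page() for IS_DIRSYNC() directories or just unlock and let writeback catch up. A condensed sketch of the shared shape (the function name is hypothetical):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>
    #include <linux/pagemap.h>

    static int example_commit_chunk(struct page *page, loff_t pos, unsigned len)
    {
            struct address_space *mapping = page->mapping;
            struct inode *dir = mapping->host;
            int err = 0;

            block_write_end(NULL, mapping, pos, len, len, page, NULL);
            if (pos + len > dir->i_size) {
                    i_size_write(dir, pos + len);
                    mark_inode_dirty(dir);
            }
            if (IS_DIRSYNC(dir))
                    err = write_one_page(page, 1);  /* writes and unlocks the page */
            else
                    unlock_page(page);              /* async: writeback picks it up */
            return err;
    }
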
/linux-4.1.27/Documentation/trace/
Dring-buffer-design.txt29 reader_page - A page outside the ring buffer used solely (for the most part)
32 head_page - a pointer to the page that the reader will use next
34 tail_page - a pointer to the page that will be written to next
36 commit_page - a pointer to the page with the last finished non-nested write.
93 At initialization a reader page is allocated for the reader that is not
97 to the same page.
99 The reader page is initialized to have its next pointer pointing to
100 the head page, and its previous pointer pointing to a page before
101 the head page.
103 The reader has its own page to use. At start up time, this page is
[all …]
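
The four pointers above (plus the out-of-ring reader page) carry the whole hand-off protocol of the ftrace ring buffer: writers advance tail_page, completed writes advance commit_page, and a reader never copies data out of the ring; it swaps its spare page in for the head page. A toy rendering of that swap, illustrative only (the real structures are richer and the real protocol is lockless, built on cmpxchg):

    struct toy_page {
            struct toy_page *next, *prev;
            char data[4096];
    };

    struct toy_ring {
            struct toy_page *head;          /* next page the reader will take */
            struct toy_page *tail;          /* next page to be written */
            struct toy_page *commit;        /* last finished non-nested write */
            struct toy_page *reader;        /* lives outside the ring proper */
    };

    /* Zero-copy hand-off: splice the spare reader page into the ring where
     * head sat, and walk away with the old head page. */
    static void toy_swap_reader(struct toy_ring *rb)
    {
            struct toy_page *head = rb->head;
            struct toy_page *spare = rb->reader;

            spare->next = head->next;
            spare->prev = head->prev;
            head->prev->next = spare;
            head->next->prev = spare;

            rb->head = spare->next;         /* ring continues after the splice */
            rb->reader = head;              /* old head now belongs to the reader */
    }
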
/linux-4.1.27/net/ceph/
Dpagelist.c11 struct page *page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_unmap_tail() local
12 kunmap(page); in ceph_pagelist_unmap_tail()
23 struct page *page = list_first_entry(&pl->head, struct page, in ceph_pagelist_release() local
25 list_del(&page->lru); in ceph_pagelist_release()
26 __free_page(page); in ceph_pagelist_release()
35 struct page *page; in ceph_pagelist_addpage() local
38 page = __page_cache_alloc(GFP_NOFS); in ceph_pagelist_addpage()
40 page = list_first_entry(&pl->free_list, struct page, lru); in ceph_pagelist_addpage()
41 list_del(&page->lru); in ceph_pagelist_addpage()
44 if (!page) in ceph_pagelist_addpage()
[all …]
/linux-4.1.27/fs/f2fs/
Dinline.c33 void read_inline_data(struct page *page, struct page *ipage) in read_inline_data() argument
37 if (PageUptodate(page)) in read_inline_data()
40 f2fs_bug_on(F2FS_P_SB(page), page->index); in read_inline_data()
42 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); in read_inline_data()
46 dst_addr = kmap_atomic(page); in read_inline_data()
48 flush_dcache_page(page); in read_inline_data()
50 SetPageUptodate(page); in read_inline_data()
53 bool truncate_inline_inode(struct page *ipage, u64 from) in truncate_inline_inode()
68 int f2fs_read_inline_data(struct inode *inode, struct page *page) in f2fs_read_inline_data() argument
70 struct page *ipage; in f2fs_read_inline_data()
[all …]
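
read_inline_data() above rehydrates a small file from its inode page into a normal page-cache page: zero everything past MAX_INLINE_DATA, copy through a kmap_atomic() mapping, flush_dcache_page() for cache coherency, and only then SetPageUptodate(). The copy the excerpt elides presumably looks like this (inline_data_addr() is an f2fs helper assumed from context):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_inline_to_page(struct page *ipage, struct page *page)
    {
            void *src_addr = inline_data_addr(ipage);   /* f2fs helper, assumed */
            void *dst_addr = kmap_atomic(page);

            memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
            flush_dcache_page(page);        /* before uptodate, per the excerpt */
            kunmap_atomic(dst_addr);
            SetPageUptodate(page);
    }
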
Ddir.c91 static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, in find_in_block()
93 struct page **res_page) in find_in_block()
159 f2fs_hash_t namehash, struct page **res_page) in find_in_level()
164 struct page *dentry_page; in find_in_level()
210 struct qstr *child, struct page **res_page) in f2fs_find_entry()
241 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) in f2fs_parent_dir()
243 struct page *page; in f2fs_parent_dir() local
250 page = get_lock_data_page(dir, 0); in f2fs_parent_dir()
251 if (IS_ERR(page)) in f2fs_parent_dir()
254 dentry_blk = kmap(page); in f2fs_parent_dir()
[all …]
Dnode.h213 static inline void fill_node_footer(struct page *page, nid_t nid, in fill_node_footer() argument
216 struct f2fs_node *rn = F2FS_NODE(page); in fill_node_footer()
232 static inline void copy_node_footer(struct page *dst, struct page *src) in copy_node_footer()
239 static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) in fill_node_footer_blkaddr() argument
241 struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); in fill_node_footer_blkaddr()
242 struct f2fs_node *rn = F2FS_NODE(page); in fill_node_footer_blkaddr()
248 static inline nid_t ino_of_node(struct page *node_page) in ino_of_node()
254 static inline nid_t nid_of_node(struct page *node_page) in nid_of_node()
260 static inline unsigned int ofs_of_node(struct page *node_page) in ofs_of_node()
267 static inline unsigned long long cpver_of_node(struct page *node_page) in cpver_of_node()
[all …]
Dnode.c79 static void clear_node_page_dirty(struct page *page) in clear_node_page_dirty() argument
81 struct address_space *mapping = page->mapping; in clear_node_page_dirty()
84 if (PageDirty(page)) { in clear_node_page_dirty()
87 page_index(page), in clear_node_page_dirty()
91 clear_page_dirty_for_io(page); in clear_node_page_dirty()
94 ClearPageUptodate(page); in clear_node_page_dirty()
97 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page()
103 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_page()
105 struct page *src_page; in get_next_nat_page()
106 struct page *dst_page; in get_next_nat_page()
[all …]
Drecovery.c70 static int recover_dentry(struct inode *inode, struct page *ipage) in recover_dentry()
76 struct page *page; in recover_dentry() local
95 de = f2fs_find_entry(dir, &name, &page); in recover_dentry()
113 f2fs_delete_entry(de, page, dir, einode); in recover_dentry()
131 f2fs_dentry_kunmap(dir, page); in recover_dentry()
132 f2fs_put_page(page, 0); in recover_dentry()
143 static void recover_inode(struct inode *inode, struct page *page) in recover_inode() argument
145 struct f2fs_inode *raw = F2FS_INODE(page); in recover_inode()
157 ino_of_node(page), F2FS_INODE(page)->i_name); in recover_inode()
164 struct page *page = NULL; in find_fsync_dnodes() local
[all …]
Ddata.c37 struct page *page = bvec->bv_page; in f2fs_read_end_io() local
40 SetPageUptodate(page); in f2fs_read_end_io()
42 ClearPageUptodate(page); in f2fs_read_end_io()
43 SetPageError(page); in f2fs_read_end_io()
45 unlock_page(page); in f2fs_read_end_io()
57 struct page *page = bvec->bv_page; in f2fs_write_end_io() local
60 set_page_dirty(page); in f2fs_write_end_io()
61 set_bit(AS_EIO, &page->mapping->flags); in f2fs_write_end_io()
64 end_page_writeback(page); in f2fs_write_end_io()
136 int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, in f2fs_submit_page_bio() argument
[all …]
Dcheckpoint.c32 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in grab_meta_page()
35 struct page *page = NULL; in grab_meta_page() local
37 page = grab_cache_page(mapping, index); in grab_meta_page()
38 if (!page) { in grab_meta_page()
42 f2fs_wait_on_page_writeback(page, META); in grab_meta_page()
43 SetPageUptodate(page); in grab_meta_page()
44 return page; in grab_meta_page()
50 struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in get_meta_page()
53 struct page *page; in get_meta_page() local
60 page = grab_cache_page(mapping, index); in get_meta_page()
[all …]
/linux-4.1.27/arch/parisc/include/asm/
Dcacheflush.h41 static inline void flush_kernel_dcache_page(struct page *page) in flush_kernel_dcache_page() argument
43 flush_kernel_dcache_page_addr(page_address(page)); in flush_kernel_dcache_page()
63 struct page *page = vmalloc_to_page(cursor); in invalidate_kernel_vmap_range() local
65 if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) in invalidate_kernel_vmap_range()
66 flush_kernel_dcache_page(page); in invalidate_kernel_vmap_range()
75 extern void flush_dcache_page(struct page *page);
82 #define flush_icache_page(vma,page) do { \ argument
83 flush_kernel_dcache_page(page); \
84 flush_kernel_icache_page(page_address(page)); \
92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
[all …]
/linux-4.1.27/arch/tile/mm/
Dhighmem.c25 void *kmap(struct page *page) in kmap() argument
32 if (!PageHighMem(page)) in kmap()
33 return page_address(page); in kmap()
34 kva = kmap_high(page); in kmap()
42 set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); in kmap()
49 void kunmap(struct page *page) in kunmap() argument
53 if (!PageHighMem(page)) in kunmap()
55 kunmap_high(page); in kunmap()
65 struct page *page; member
96 static void kmap_atomic_register(struct page *page, int type, in kmap_atomic_register() argument
[all …]
Dhomecache.c174 void homecache_finv_map_page(struct page *page, int home) in homecache_finv_map_page() argument
191 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); in homecache_finv_map_page()
202 static void homecache_finv_page_home(struct page *page, int home) in homecache_finv_page_home() argument
204 if (!PageHighMem(page) && home == page_home(page)) in homecache_finv_page_home()
205 homecache_finv_page_va(page_address(page), home); in homecache_finv_page_home()
207 homecache_finv_map_page(page, home); in homecache_finv_page_home()
215 static void homecache_finv_page_internal(struct page *page, int force_map) in homecache_finv_page_internal() argument
217 int home = page_home(page); in homecache_finv_page_internal()
223 homecache_finv_map_page(page, cpu); in homecache_finv_page_internal()
226 homecache_finv_map_page(page, home); in homecache_finv_page_internal()
[all …]
/linux-4.1.27/fs/
Dbuffer.c83 void buffer_check_dirty_writeback(struct page *page, in buffer_check_dirty_writeback() argument
90 BUG_ON(!PageLocked(page)); in buffer_check_dirty_writeback()
92 if (!page_has_buffers(page)) in buffer_check_dirty_writeback()
95 if (PageWriteback(page)) in buffer_check_dirty_writeback()
98 head = page_buffers(page); in buffer_check_dirty_writeback()
124 __clear_page_buffers(struct page *page) in __clear_page_buffers() argument
126 ClearPagePrivate(page); in __clear_page_buffers()
127 set_page_private(page, 0); in __clear_page_buffers()
128 page_cache_release(page); in __clear_page_buffers()
206 struct page *page; in __find_get_block_slow() local
[all …]
Dmpage.c51 struct page *page = bv->bv_page; in mpage_end_io() local
52 page_endio(page, bio_data_dir(bio), err); in mpage_end_io()
98 map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) in map_buffer_to_page() argument
100 struct inode *inode = page->mapping->host; in map_buffer_to_page()
104 if (!page_has_buffers(page)) { in map_buffer_to_page()
111 SetPageUptodate(page); in map_buffer_to_page()
114 create_empty_buffers(page, 1 << inode->i_blkbits, 0); in map_buffer_to_page()
116 head = page_buffers(page); in map_buffer_to_page()
140 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, in do_mpage_readpage() argument
144 struct inode *inode = page->mapping->host; in do_mpage_readpage()
[all …]
/linux-4.1.27/fs/nfs/
Dfscache.h83 extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
84 extern int nfs_fscache_release_page(struct page *, gfp_t);
87 struct inode *, struct page *);
91 extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
97 struct page *page) in nfs_fscache_wait_on_page_write() argument
99 if (PageFsCache(page)) in nfs_fscache_wait_on_page_write()
100 fscache_wait_on_page_write(nfsi->fscache, page); in nfs_fscache_wait_on_page_write()
107 static inline void nfs_fscache_invalidate_page(struct page *page, in nfs_fscache_invalidate_page() argument
110 if (PageFsCache(page)) in nfs_fscache_invalidate_page()
111 __nfs_fscache_invalidate_page(page, inode); in nfs_fscache_invalidate_page()
[all …]
Dsymlink.c29 static int nfs_symlink_filler(struct inode *inode, struct page *page) in nfs_symlink_filler() argument
33 error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); in nfs_symlink_filler()
36 SetPageUptodate(page); in nfs_symlink_filler()
37 unlock_page(page); in nfs_symlink_filler()
41 SetPageError(page); in nfs_symlink_filler()
42 unlock_page(page); in nfs_symlink_filler()
49 struct page *page; in nfs_follow_link() local
55 page = read_cache_page(&inode->i_data, 0, in nfs_follow_link()
57 if (IS_ERR(page)) { in nfs_follow_link()
58 err = page; in nfs_follow_link()
[all …]
Dfile.c324 static int nfs_want_read_modify_write(struct file *file, struct page *page, in nfs_want_read_modify_write() argument
327 unsigned int pglen = nfs_page_length(page); in nfs_want_read_modify_write()
332 if (!PageUptodate(page)) in nfs_want_read_modify_write()
338 !PageUptodate(page) && /* Uptodate? */ in nfs_want_read_modify_write()
339 !PagePrivate(page) && /* i/o request already? */ in nfs_want_read_modify_write()
356 struct page **pagep, void **fsdata) in nfs_write_begin()
360 struct page *page; in nfs_write_begin() local
380 page = grab_cache_page_write_begin(mapping, index, flags); in nfs_write_begin()
381 if (!page) in nfs_write_begin()
383 *pagep = page; in nfs_write_begin()
[all …]
Dfscache.c260 int nfs_fscache_release_page(struct page *page, gfp_t gfp) in nfs_fscache_release_page() argument
262 if (PageFsCache(page)) { in nfs_fscache_release_page()
263 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host); in nfs_fscache_release_page()
267 cookie, page, NFS_I(page->mapping->host)); in nfs_fscache_release_page()
269 if (!fscache_maybe_release_page(cookie, page, gfp)) in nfs_fscache_release_page()
272 nfs_inc_fscache_stats(page->mapping->host, in nfs_fscache_release_page()
283 void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode) in __nfs_fscache_invalidate_page() argument
290 cookie, page, NFS_I(inode)); in __nfs_fscache_invalidate_page()
292 fscache_wait_on_page_write(cookie, page); in __nfs_fscache_invalidate_page()
294 BUG_ON(!PageLocked(page)); in __nfs_fscache_invalidate_page()
[all …]
Dread.c47 int nfs_return_empty_page(struct page *page) in nfs_return_empty_page() argument
49 zero_user(page, 0, PAGE_CACHE_SIZE); in nfs_return_empty_page()
50 SetPageUptodate(page); in nfs_return_empty_page()
51 unlock_page(page); in nfs_return_empty_page()
89 struct page *page) in nfs_readpage_async() argument
96 len = nfs_page_length(page); in nfs_readpage_async()
98 return nfs_return_empty_page(page); in nfs_readpage_async()
99 new = nfs_create_request(ctx, page, NULL, 0, len); in nfs_readpage_async()
101 unlock_page(page); in nfs_readpage_async()
105 zero_user_segment(page, len, PAGE_CACHE_SIZE); in nfs_readpage_async()
[all …]
Dwrite.c54 struct page *page);
108 nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) in nfs_page_find_head_request_locked() argument
112 if (PagePrivate(page)) in nfs_page_find_head_request_locked()
113 req = (struct nfs_page *)page_private(page); in nfs_page_find_head_request_locked()
114 else if (unlikely(PageSwapCache(page))) in nfs_page_find_head_request_locked()
116 page); in nfs_page_find_head_request_locked()
131 static struct nfs_page *nfs_page_find_head_request(struct page *page) in nfs_page_find_head_request() argument
133 struct inode *inode = page_file_mapping(page)->host; in nfs_page_find_head_request()
137 req = nfs_page_find_head_request_locked(NFS_I(inode), page); in nfs_page_find_head_request()
143 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) in nfs_grow_file() argument
[all …]
/linux-4.1.27/arch/tile/include/asm/
Dhighmem.h53 void *kmap_high(struct page *page);
54 void kunmap_high(struct page *page);
55 void *kmap(struct page *page);
56 void kunmap(struct page *page);
57 void *kmap_fix_kpte(struct page *page, int finished);
60 #define kmap_prot page_to_kpgprot(page)
62 void *kmap_atomic(struct page *page);
66 struct page *kmap_atomic_to_page(void *ptr);
67 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
68 void kmap_atomic_fix_kpte(struct page *page, int finished);
Dhomecache.h23 struct page;
68 extern void homecache_change_page_home(struct page *, int order, int home);
76 extern void homecache_finv_page(struct page *);
83 extern void homecache_finv_map_page(struct page *, int home);
92 extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
94 extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
105 void __homecache_free_pages(struct page *, unsigned int order);
107 #define __homecache_free_page(page) __homecache_free_pages((page), 0) argument
108 #define homecache_free_page(page) homecache_free_pages((page), 0) argument
115 extern int page_home(struct page *);
/linux-4.1.27/fs/gfs2/
Daops.c41 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, in gfs2_page_add_databufs() argument
44 struct buffer_head *head = page_buffers(page); in gfs2_page_add_databufs()
97 static int gfs2_writepage_common(struct page *page, in gfs2_writepage_common() argument
100 struct inode *inode = page->mapping->host; in gfs2_writepage_common()
113 if (page->index > end_index || (page->index == end_index && !offset)) { in gfs2_writepage_common()
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in gfs2_writepage_common()
119 redirty_page_for_writepage(wbc, page); in gfs2_writepage_common()
121 unlock_page(page); in gfs2_writepage_common()
132 static int gfs2_writepage(struct page *page, struct writeback_control *wbc) in gfs2_writepage() argument
136 ret = gfs2_writepage_common(page, wbc); in gfs2_writepage()
[all …]
/linux-4.1.27/arch/microblaze/include/asm/
Dhighmem.h53 extern void *kmap_high(struct page *page);
54 extern void kunmap_high(struct page *page);
55 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
58 static inline void *kmap(struct page *page) in kmap() argument
61 if (!PageHighMem(page)) in kmap()
62 return page_address(page); in kmap()
63 return kmap_high(page); in kmap()
66 static inline void kunmap(struct page *page) in kunmap() argument
69 if (!PageHighMem(page)) in kunmap()
71 kunmap_high(page); in kunmap()
[all …]
/linux-4.1.27/fs/cifs/
Dfscache.h50 extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
51 extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
52 extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
59 extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
61 static inline void cifs_fscache_invalidate_page(struct page *page, in cifs_fscache_invalidate_page() argument
64 if (PageFsCache(page)) in cifs_fscache_invalidate_page()
65 __cifs_fscache_invalidate_page(page, inode); in cifs_fscache_invalidate_page()
69 struct page *page) in cifs_readpage_from_fscache() argument
72 return __cifs_readpage_from_fscache(inode, page); in cifs_readpage_from_fscache()
89 struct page *page) in cifs_readpage_to_fscache() argument
[all …]
Dfscache.c128 int cifs_fscache_release_page(struct page *page, gfp_t gfp) in cifs_fscache_release_page() argument
130 if (PageFsCache(page)) { in cifs_fscache_release_page()
131 struct inode *inode = page->mapping->host; in cifs_fscache_release_page()
135 __func__, page, cifsi->fscache); in cifs_fscache_release_page()
136 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) in cifs_fscache_release_page()
143 static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, in cifs_readpage_from_fscache_complete() argument
146 cifs_dbg(FYI, "%s: (0x%p/%d)\n", __func__, page, error); in cifs_readpage_from_fscache_complete()
148 SetPageUptodate(page); in cifs_readpage_from_fscache_complete()
149 unlock_page(page); in cifs_readpage_from_fscache_complete()
155 int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) in __cifs_readpage_from_fscache() argument
[all …]
/linux-4.1.27/arch/powerpc/include/asm/
Dhighmem.h61 extern void *kmap_high(struct page *page);
62 extern void kunmap_high(struct page *page);
63 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
66 static inline void *kmap(struct page *page) in kmap() argument
69 if (!PageHighMem(page)) in kmap()
70 return page_address(page); in kmap()
71 return kmap_high(page); in kmap()
74 static inline void kunmap(struct page *page) in kunmap() argument
77 if (!PageHighMem(page)) in kunmap()
79 kunmap_high(page); in kunmap()
[all …]
/linux-4.1.27/fs/ntfs/
Daops.h40 static inline void ntfs_unmap_page(struct page *page) in ntfs_unmap_page() argument
42 kunmap(page); in ntfs_unmap_page()
43 page_cache_release(page); in ntfs_unmap_page()
86 static inline struct page *ntfs_map_page(struct address_space *mapping, in ntfs_map_page()
89 struct page *page = read_mapping_page(mapping, index, NULL); in ntfs_map_page() local
91 if (!IS_ERR(page)) { in ntfs_map_page()
92 kmap(page); in ntfs_map_page()
93 if (!PageError(page)) in ntfs_map_page()
94 return page; in ntfs_map_page()
95 ntfs_unmap_page(page); in ntfs_map_page()
[all …]
Dcompress.c100 static void zero_partial_compressed_page(struct page *page, in zero_partial_compressed_page() argument
103 u8 *kp = page_address(page); in zero_partial_compressed_page()
107 if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { in zero_partial_compressed_page()
123 static inline void handle_bounds_compressed_page(struct page *page, in handle_bounds_compressed_page() argument
126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && in handle_bounds_compressed_page()
128 zero_partial_compressed_page(page, initialized_size); in handle_bounds_compressed_page()
169 static int ntfs_decompress(struct page *dest_pages[], int *dest_index, in ntfs_decompress()
185 struct page *dp; /* Current destination page being worked on. */ in ntfs_decompress()
482 int ntfs_read_compressed_block(struct page *page) in ntfs_read_compressed_block() argument
486 struct address_space *mapping = page->mapping; in ntfs_read_compressed_block()
[all …]
Daops.c62 struct page *page; in ntfs_end_buffer_async_read() local
67 page = bh->b_page; in ntfs_end_buffer_async_read()
68 vi = page->mapping->host; in ntfs_end_buffer_async_read()
77 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + in ntfs_end_buffer_async_read()
96 kaddr = kmap_atomic(page); in ntfs_end_buffer_async_read()
99 flush_dcache_page(page); in ntfs_end_buffer_async_read()
105 SetPageError(page); in ntfs_end_buffer_async_read()
109 first = page_buffers(page); in ntfs_end_buffer_async_read()
137 if (likely(page_uptodate && !PageError(page))) in ntfs_end_buffer_async_read()
138 SetPageUptodate(page); in ntfs_end_buffer_async_read()
[all …]
Dbitmap.c53 struct page *page; in __ntfs_bitmap_set_bits_in_run() local
75 page = ntfs_map_page(mapping, index); in __ntfs_bitmap_set_bits_in_run()
76 if (IS_ERR(page)) { in __ntfs_bitmap_set_bits_in_run()
79 "%li), aborting.", PTR_ERR(page)); in __ntfs_bitmap_set_bits_in_run()
80 return PTR_ERR(page); in __ntfs_bitmap_set_bits_in_run()
82 kaddr = page_address(page); in __ntfs_bitmap_set_bits_in_run()
124 flush_dcache_page(page); in __ntfs_bitmap_set_bits_in_run()
125 set_page_dirty(page); in __ntfs_bitmap_set_bits_in_run()
126 ntfs_unmap_page(page); in __ntfs_bitmap_set_bits_in_run()
127 page = ntfs_map_page(mapping, ++index); in __ntfs_bitmap_set_bits_in_run()
[all …]
/linux-4.1.27/arch/frv/mm/
Dpgalloc.c33 struct page *page; in pte_alloc_one() local
36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); in pte_alloc_one()
38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); in pte_alloc_one()
40 if (!page) in pte_alloc_one()
43 clear_highpage(page); in pte_alloc_one()
44 if (!pgtable_page_ctor(page)) { in pte_alloc_one()
45 __free_page(page); in pte_alloc_one()
48 flush_dcache_page(page); in pte_alloc_one()
49 return page; in pte_alloc_one()
87 struct page *pgd_list;
[all …]
Dhighmem.c14 void *kmap(struct page *page) in kmap() argument
17 if (!PageHighMem(page)) in kmap()
18 return page_address(page); in kmap()
19 return kmap_high(page); in kmap()
24 void kunmap(struct page *page) in kunmap() argument
28 if (!PageHighMem(page)) in kunmap()
30 kunmap_high(page); in kunmap()
35 struct page *kmap_atomic_to_page(void *ptr) in kmap_atomic_to_page()
40 void *kmap_atomic(struct page *page) in kmap_atomic() argument
47 paddr = page_to_phys(page); in kmap_atomic()
/linux-4.1.27/sound/pci/emu10k1/
Dmemory.c36 #define __set_ptb_entry(emu,page,addr) \ argument
37 (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
45 #define aligned_page_offset(page) ((page) << PAGE_SHIFT) argument
50 #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr) argument
52 #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr) argument
55 static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) in set_ptb_entry() argument
58 page *= UNIT_PAGES; in set_ptb_entry()
59 for (i = 0; i < UNIT_PAGES; i++, page++) { in set_ptb_entry()
60 __set_ptb_entry(emu, page, addr); in set_ptb_entry()
64 static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page) in set_silent_ptb() argument
[all …]
/linux-4.1.27/drivers/block/
Dbrd.c55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page()
58 struct page *page; in brd_lookup_page() local
73 page = radix_tree_lookup(&brd->brd_pages, idx); in brd_lookup_page()
76 BUG_ON(page && page->index != idx); in brd_lookup_page()
78 return page; in brd_lookup_page()
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) in brd_insert_page()
89 struct page *page; in brd_insert_page() local
92 page = brd_lookup_page(brd, sector); in brd_insert_page()
93 if (page) in brd_insert_page()
94 return page; in brd_insert_page()
[all …]
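
brd_lookup_page()/brd_insert_page() above back the ramdisk with lazily allocated pages indexed by a radix tree; the index computation the excerpt elides is presumably the sector number scaled down to pages. A sketch of the lookup, assuming the usual 512-byte-sector scaling (the shift macro here is an assumption, not brd's own name):

    #include <linux/radix-tree.h>
    #include <linux/blkdev.h>

    #define EX_PAGE_SECTORS_SHIFT   (PAGE_SHIFT - 9)    /* 512-byte sectors assumed */

    static struct page *ex_lookup_page(struct radix_tree_root *pages, sector_t sector)
    {
            struct page *page;
            pgoff_t idx = sector >> EX_PAGE_SECTORS_SHIFT;  /* one page = 2^shift sectors */

            rcu_read_lock();                        /* radix tree lookup is RCU-safe */
            page = radix_tree_lookup(pages, idx);
            rcu_read_unlock();
            return page;                            /* NULL: hole, reads as zeroes */
    }
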
/linux-4.1.27/arch/xtensa/include/asm/
Dhighmem.h32 static inline int get_pkmap_color(struct page *page) in get_pkmap_color() argument
34 return DCACHE_ALIAS(page_to_phys(page)); in get_pkmap_color()
66 void *kmap_high(struct page *page);
67 void kunmap_high(struct page *page);
69 static inline void *kmap(struct page *page) in kmap() argument
72 if (!PageHighMem(page)) in kmap()
73 return page_address(page); in kmap()
74 return kmap_high(page); in kmap()
77 static inline void kunmap(struct page *page) in kunmap() argument
80 if (!PageHighMem(page)) in kunmap()
[all …]
/linux-4.1.27/fs/ext4/
Dreadpage.c63 struct page *page = bv->bv_page; in completion_pages() local
65 int ret = ext4_decrypt(ctx, page); in completion_pages()
68 SetPageError(page); in completion_pages()
70 SetPageUptodate(page); in completion_pages()
71 unlock_page(page); in completion_pages()
119 struct page *page = bv->bv_page; in mpage_end_io() local
122 SetPageUptodate(page); in mpage_end_io()
124 ClearPageUptodate(page); in mpage_end_io()
125 SetPageError(page); in mpage_end_io()
127 unlock_page(page); in mpage_end_io()
[all …]
/linux-4.1.27/drivers/video/fbdev/core/
Dfb_defio.c26 static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) in fb_deferred_io_page()
29 struct page *page; in fb_deferred_io_page() local
32 page = vmalloc_to_page(screen_base + offs); in fb_deferred_io_page()
34 page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); in fb_deferred_io_page()
36 return page; in fb_deferred_io_page()
44 struct page *page; in fb_deferred_io_fault() local
51 page = fb_deferred_io_page(info, offset); in fb_deferred_io_fault()
52 if (!page) in fb_deferred_io_fault()
55 get_page(page); in fb_deferred_io_fault()
58 page->mapping = vma->vm_file->f_mapping; in fb_deferred_io_fault()
[all …]
/linux-4.1.27/arch/sparc/include/asm/
Dcacheflush_64.h25 #define flush_cache_page(vma, page, pfn) \ argument
37 void flush_dcache_page_impl(struct page *page);
39 void smp_flush_dcache_page_impl(struct page *page, int cpu);
40 void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
42 #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page) argument
43 #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page) argument
48 void flush_dcache_page(struct page *page);
53 void flush_ptrace_access(struct vm_area_struct *, struct page *,
57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
[all …]
Dhighmem.h52 void *kmap_high(struct page *page);
53 void kunmap_high(struct page *page);
55 static inline void *kmap(struct page *page) in kmap() argument
58 if (!PageHighMem(page)) in kmap()
59 return page_address(page); in kmap()
60 return kmap_high(page); in kmap()
63 static inline void kunmap(struct page *page) in kunmap() argument
66 if (!PageHighMem(page)) in kunmap()
68 kunmap_high(page); in kunmap()
71 void *kmap_atomic(struct page *page);
/linux-4.1.27/arch/frv/include/asm/
Dcacheflush.h52 extern void flush_dcache_page(struct page *page);
54 static inline void flush_dcache_page(struct page *page) in flush_dcache_page() argument
56 unsigned long addr = page_to_phys(page); in flush_dcache_page()
61 static inline void flush_page_to_ram(struct page *page) in flush_page_to_ram() argument
63 flush_dcache_page(page); in flush_page_to_ram()
77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument
87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument
89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); in flush_icache_page()
96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
[all …]
Dhighmem.h59 extern void *kmap_high(struct page *page);
60 extern void kunmap_high(struct page *page);
62 extern void *kmap(struct page *page);
63 extern void kunmap(struct page *page);
65 extern struct page *kmap_atomic_to_page(void *ptr);
116 static inline void *kmap_atomic_primary(struct page *page) in kmap_atomic_primary() argument
121 paddr = page_to_phys(page); in kmap_atomic_primary()
144 void *kmap_atomic(struct page *page);
/linux-4.1.27/fs/qnx6/
Ddir.c26 static struct page *qnx6_get_page(struct inode *dir, unsigned long n) in qnx6_get_page()
29 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_get_page() local
30 if (!IS_ERR(page)) in qnx6_get_page()
31 kmap(page); in qnx6_get_page()
32 return page; in qnx6_get_page()
51 struct page **p) in qnx6_longname()
59 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_longname() local
60 if (IS_ERR(page)) in qnx6_longname()
61 return ERR_CAST(page); in qnx6_longname()
62 kmap(*p = page); in qnx6_longname()
[all …]
/linux-4.1.27/arch/mn10300/include/asm/
Dhighmem.h46 extern unsigned long kmap_high(struct page *page);
47 extern void kunmap_high(struct page *page);
49 static inline unsigned long kmap(struct page *page) in kmap() argument
53 if (page < highmem_start_page) in kmap()
54 return page_address(page); in kmap()
55 return kmap_high(page); in kmap()
58 static inline void kunmap(struct page *page) in kunmap() argument
62 if (page < highmem_start_page) in kunmap()
64 kunmap_high(page); in kunmap()
73 static inline void *kmap_atomic(struct page *page) in kmap_atomic() argument
[all …]
/linux-4.1.27/arch/nios2/mm/
Dcacheflush.c89 static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument
95 pgoff = page->index; in flush_aliases()
108 page_to_pfn(page)); in flush_aliases()
156 void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument
158 unsigned long start = (unsigned long) page_address(page); in flush_icache_page()
176 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
183 unsigned long start = (unsigned long)page_address(page); in __flush_dcache_page()
188 void flush_dcache_page(struct page *page) in flush_dcache_page() argument
196 if (page == ZERO_PAGE(0)) in flush_dcache_page()
199 mapping = page_mapping(page); in flush_dcache_page()
[all …]
/linux-4.1.27/arch/m68k/include/asm/
Dmotorola_pgalloc.h32 struct page *page; in pte_alloc_one() local
35 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); in pte_alloc_one()
36 if(!page) in pte_alloc_one()
38 if (!pgtable_page_ctor(page)) { in pte_alloc_one()
39 __free_page(page); in pte_alloc_one()
43 pte = kmap(page); in pte_alloc_one()
47 kunmap(page); in pte_alloc_one()
48 return page; in pte_alloc_one()
51 static inline void pte_free(struct mm_struct *mm, pgtable_t page) in pte_free() argument
53 pgtable_page_dtor(page); in pte_free()
[all …]
Dmcf_pgalloc.h17 unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); in pte_alloc_one_kernel() local
19 if (!page) in pte_alloc_one_kernel()
22 memset((void *)page, 0, PAGE_SIZE); in pte_alloc_one_kernel()
23 return (pte_t *) (page); in pte_alloc_one_kernel()
36 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ argument
37 (unsigned long)(page_address(page)))
43 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, in __pte_free_tlb() argument
46 __free_page(page); in __pte_free_tlb()
51 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one()
54 struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); in pte_alloc_one() local
[all …]
Dsun3_pgalloc.h25 static inline void pte_free(struct mm_struct *mm, pgtable_t page) in pte_free() argument
27 pgtable_page_dtor(page); in pte_free()
28 __free_page(page); in pte_free()
40 unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT); in pte_alloc_one_kernel() local
42 if (!page) in pte_alloc_one_kernel()
45 memset((void *)page, 0, PAGE_SIZE); in pte_alloc_one_kernel()
46 return (pte_t *) (page); in pte_alloc_one_kernel()
52 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); in pte_alloc_one() local
54 if (page == NULL) in pte_alloc_one()
57 clear_highpage(page); in pte_alloc_one()
[all …]
/linux-4.1.27/arch/x86/mm/
Dgup.c72 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range()
84 struct page *page; in gup_pte_range() local
97 page = pte_page(pte); in gup_pte_range()
98 get_page(page); in gup_pte_range()
99 SetPageReferenced(page); in gup_pte_range()
100 pages[*nr] = page; in gup_pte_range()
109 static inline void get_head_page_multiple(struct page *page, int nr) in get_head_page_multiple() argument
111 VM_BUG_ON_PAGE(page != compound_head(page), page); in get_head_page_multiple()
112 VM_BUG_ON_PAGE(page_count(page) == 0, page); in get_head_page_multiple()
113 atomic_add(nr, &page->_count); in get_head_page_multiple()
[all …]
Dhighmem_32.c6 void *kmap(struct page *page) in kmap() argument
9 if (!PageHighMem(page)) in kmap()
10 return page_address(page); in kmap()
11 return kmap_high(page); in kmap()
15 void kunmap(struct page *page) in kunmap() argument
19 if (!PageHighMem(page)) in kunmap()
21 kunmap_high(page); in kunmap()
33 void *kmap_atomic_prot(struct page *page, pgprot_t prot) in kmap_atomic_prot() argument
41 if (!PageHighMem(page)) in kmap_atomic_prot()
42 return page_address(page); in kmap_atomic_prot()
[all …]
/linux-4.1.27/arch/arm/include/asm/
Dhighmem.h23 extern void *kmap_high(struct page *page);
24 extern void kunmap_high(struct page *page);
53 extern void *kmap_high_get(struct page *page);
55 static inline void *kmap_high_get(struct page *page) in kmap_high_get() argument
66 extern void *kmap(struct page *page);
67 extern void kunmap(struct page *page);
68 extern void *kmap_atomic(struct page *page);
71 extern struct page *kmap_atomic_to_page(const void *ptr);
Dpage.h110 struct page;
114 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
115 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
130 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
131 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
135 #define clear_user_highpage(page,vaddr) \ argument
136 __cpu_clear_user_highpage(page, vaddr)
142 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) argument
157 typedef struct page *pgtable_t;
/linux-4.1.27/arch/mips/mm/
Dgup.c38 int write, struct page **pages, int *nr) in gup_pte_range()
43 struct page *page; in gup_pte_range() local
51 page = pte_page(pte); in gup_pte_range()
52 get_page(page); in gup_pte_range()
53 SetPageReferenced(page); in gup_pte_range()
54 pages[*nr] = page; in gup_pte_range()
63 static inline void get_head_page_multiple(struct page *page, int nr) in get_head_page_multiple() argument
65 VM_BUG_ON(page != compound_head(page)); in get_head_page_multiple()
66 VM_BUG_ON(page_count(page) == 0); in get_head_page_multiple()
67 atomic_add(nr, &page->_count); in get_head_page_multiple()
[all …]
Dinit.c64 struct page *page; in setup_zero_pages() local
75 page = virt_to_page((void *)empty_zero_page); in setup_zero_pages()
76 split_page(page, order); in setup_zero_pages()
77 for (i = 0; i < (1 << order); i++, page++) in setup_zero_pages()
78 mark_page_reserved(page); in setup_zero_pages()
83 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) in __kmap_pgprot() argument
91 BUG_ON(Page_dcache_dirty(page)); in __kmap_pgprot()
97 pte = mk_pte(page, prot); in __kmap_pgprot()
126 void *kmap_coherent(struct page *page, unsigned long addr) in kmap_coherent() argument
128 return __kmap_pgprot(page, addr, PAGE_KERNEL); in kmap_coherent()
[all …]
Dcache.c30 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
82 void __flush_dcache_page(struct page *page) in __flush_dcache_page() argument
84 struct address_space *mapping = page_mapping(page); in __flush_dcache_page()
88 SetPageDcacheDirty(page); in __flush_dcache_page()
97 if (PageHighMem(page)) in __flush_dcache_page()
98 addr = (unsigned long)kmap_atomic(page); in __flush_dcache_page()
100 addr = (unsigned long)page_address(page); in __flush_dcache_page()
104 if (PageHighMem(page)) in __flush_dcache_page()
110 void __flush_anon_page(struct page *page, unsigned long vmaddr) in __flush_anon_page() argument
112 unsigned long addr = (unsigned long) page_address(page); in __flush_anon_page()
[all …]
Dhighmem.c13 void *kmap(struct page *page) in kmap() argument
18 if (!PageHighMem(page)) in kmap()
19 return page_address(page); in kmap()
20 addr = kmap_high(page); in kmap()
27 void kunmap(struct page *page) in kunmap() argument
30 if (!PageHighMem(page)) in kunmap()
32 kunmap_high(page); in kunmap()
45 void *kmap_atomic(struct page *page) in kmap_atomic() argument
52 if (!PageHighMem(page)) in kmap_atomic()
53 return page_address(page); in kmap_atomic()
[all …]
/linux-4.1.27/drivers/hwmon/pmbus/
Dltc2978.c105 static int ltc2978_read_word_data_common(struct i2c_client *client, int page, in ltc2978_read_word_data_common() argument
114 ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_PEAK); in ltc2978_read_word_data_common()
122 ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK); in ltc2978_read_word_data_common()
128 if (ret > data->vout_max[page]) in ltc2978_read_word_data_common()
129 data->vout_max[page] = ret; in ltc2978_read_word_data_common()
130 ret = data->vout_max[page]; in ltc2978_read_word_data_common()
134 ret = pmbus_read_word_data(client, page, in ltc2978_read_word_data_common()
138 > lin11_to_val(data->temp_max[page])) in ltc2978_read_word_data_common()
139 data->temp_max[page] = ret; in ltc2978_read_word_data_common()
140 ret = data->temp_max[page]; in ltc2978_read_word_data_common()
[all …]
Dpmbus.c36 int page; in pmbus_find_sensor_groups() local
75 for (page = 0; page < info->pages; page++) { in pmbus_find_sensor_groups()
76 if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) { in pmbus_find_sensor_groups()
77 info->func[page] |= PMBUS_HAVE_VOUT; in pmbus_find_sensor_groups()
78 if (pmbus_check_byte_register(client, page, in pmbus_find_sensor_groups()
80 info->func[page] |= PMBUS_HAVE_STATUS_VOUT; in pmbus_find_sensor_groups()
82 if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) { in pmbus_find_sensor_groups()
83 info->func[page] |= PMBUS_HAVE_IOUT; in pmbus_find_sensor_groups()
86 info->func[page] |= PMBUS_HAVE_STATUS_IOUT; in pmbus_find_sensor_groups()
88 if (pmbus_check_word_register(client, page, PMBUS_READ_POUT)) in pmbus_find_sensor_groups()
[all …]
/linux-4.1.27/drivers/staging/rtl8188eu/core/
Drtw_debug.c25 int proc_get_drv_version(char *page, char **start, in proc_get_drv_version() argument
31 len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION); in proc_get_drv_version()
37 int proc_get_write_reg(char *page, char **start, in proc_get_write_reg() argument
86 int proc_get_read_reg(char *page, char **start, in proc_get_read_reg() argument
102 …len += snprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(p… in proc_get_read_reg()
105 …len += snprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16… in proc_get_read_reg()
108 …len += snprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32… in proc_get_read_reg()
111 len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len); in proc_get_read_reg()
146 int proc_get_fwstate(char *page, char **start, in proc_get_fwstate() argument
156 len += snprintf(page + len, count - len, "fwstate=0x%x\n", get_fwstate(pmlmepriv)); in proc_get_fwstate()
[all …]
/linux-4.1.27/arch/sh/include/asm/
Dcacheflush.h46 extern void flush_dcache_page(struct page *page);
49 struct page *page);
58 extern void __flush_anon_page(struct page *page, unsigned long);
61 struct page *page, unsigned long vmaddr) in flush_anon_page() argument
63 if (boot_cpu_data.dcache.n_aliases && PageAnon(page)) in flush_anon_page()
64 __flush_anon_page(page, vmaddr); in flush_anon_page()
76 static inline void flush_kernel_dcache_page(struct page *page) in flush_kernel_dcache_page() argument
78 flush_dcache_page(page); in flush_kernel_dcache_page()
82 struct page *page, unsigned long vaddr, void *dst, const void *src,
86 struct page *page, unsigned long vaddr, void *dst, const void *src,
[all …]
/linux-4.1.27/arch/s390/mm/
Dpage-states.c54 static inline void set_page_unstable(struct page *page, int order) in set_page_unstable() argument
61 : "a" (page_to_phys(page + i)), in set_page_unstable()
65 void arch_free_page(struct page *page, int order) in arch_free_page() argument
69 set_page_unstable(page, order); in arch_free_page()
72 static inline void set_page_stable(struct page *page, int order) in set_page_stable() argument
79 : "a" (page_to_phys(page + i)), in set_page_stable()
83 void arch_alloc_page(struct page *page, int order) in arch_alloc_page() argument
87 set_page_stable(page, order); in arch_alloc_page()
94 struct page *page; in arch_set_page_states() local
105 page = list_entry(l, struct page, lru); in arch_set_page_states()
[all …]
Dpgtable.c36 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); in crst_table_alloc() local
38 if (!page) in crst_table_alloc()
40 return (unsigned long *) page_to_phys(page); in crst_table_alloc()
150 struct page *page; in gmap_alloc() local
179 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); in gmap_alloc()
180 if (!page) in gmap_alloc()
182 page->index = 0; in gmap_alloc()
183 list_add(&page->lru, &gmap->crst_list); in gmap_alloc()
184 table = (unsigned long *) page_to_phys(page); in gmap_alloc()
240 struct page *page, *next; in gmap_free() local
[all …]
/linux-4.1.27/drivers/target/iscsi/
Discsi_target_stat.c68 struct iscsi_wwn_stat_grps *igrps, char *page) in iscsi_stat_instance_show_attr_inst() argument
73 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); in iscsi_stat_instance_show_attr_inst()
78 struct iscsi_wwn_stat_grps *igrps, char *page) in iscsi_stat_instance_show_attr_min_ver() argument
80 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); in iscsi_stat_instance_show_attr_min_ver()
85 struct iscsi_wwn_stat_grps *igrps, char *page) in iscsi_stat_instance_show_attr_max_ver() argument
87 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); in iscsi_stat_instance_show_attr_max_ver()
92 struct iscsi_wwn_stat_grps *igrps, char *page) in iscsi_stat_instance_show_attr_portals() argument
97 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps); in iscsi_stat_instance_show_attr_portals()
102 struct iscsi_wwn_stat_grps *igrps, char *page) in iscsi_stat_instance_show_attr_nodes() argument
104 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES); in iscsi_stat_instance_show_attr_nodes()
[all …]
/linux-4.1.27/drivers/infiniband/hw/ehca/
Dipz_pt_fn.c84 u64 page = __pa(queue->queue_pages[i]); in ipz_queue_abs_to_offset() local
85 if (addr >= page && addr < page + queue->pagesize) { in ipz_queue_abs_to_offset()
86 *q_offset = addr - page + i * queue->pagesize; in ipz_queue_abs_to_offset()
130 struct ipz_small_queue_page *page; in alloc_small_queue_page() local
136 page = list_entry(pd->free[order].next, in alloc_small_queue_page()
139 page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL); in alloc_small_queue_page()
140 if (!page) in alloc_small_queue_page()
143 page->page = get_zeroed_page(GFP_KERNEL); in alloc_small_queue_page()
144 if (!page->page) { in alloc_small_queue_page()
145 kmem_cache_free(small_qp_cache, page); in alloc_small_queue_page()
[all …]
/linux-4.1.27/arch/unicore32/mm/
Dflush.c35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument
53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
58 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page()
61 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
68 __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE); in __flush_dcache_page()
75 void flush_dcache_page(struct page *page) in flush_dcache_page() argument
83 if (page == ZERO_PAGE(0)) in flush_dcache_page()
86 mapping = page_mapping(page); in flush_dcache_page()
89 clear_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
91 __flush_dcache_page(mapping, page); in flush_dcache_page()
[all …]
/linux-4.1.27/fs/fscache/
Dpage.c23 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page) in __fscache_check_page_write() argument
28 val = radix_tree_lookup(&cookie->stores, page->index); in __fscache_check_page_write()
38 void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page) in __fscache_wait_on_page_write() argument
42 wait_event(*wq, !__fscache_check_page_write(cookie, page)); in __fscache_wait_on_page_write()
51 bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page) in release_page_wait_timeout() argument
55 return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), in release_page_wait_timeout()
64 struct page *page, in __fscache_maybe_release_page() argument
67 struct page *xpage; in __fscache_maybe_release_page()
70 _enter("%p,%p,%x", cookie, page, gfp); in __fscache_maybe_release_page()
74 val = radix_tree_lookup(&cookie->stores, page->index); in __fscache_maybe_release_page()
[all …]
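The stores tree used above is an ordinary radix tree keyed by page->index. Outside fscache the same primitives look like this; a minimal sketch, where my_tree and both helpers are hypothetical names, not fscache code:

/* Minimal radix-tree usage; the pattern behind cookie->stores above. */
#include <linux/radix-tree.h>
#include <linux/types.h>

static RADIX_TREE(my_tree, GFP_KERNEL);         /* hypothetical tree */

static int remember_item(pgoff_t index, void *item)
{
        return radix_tree_insert(&my_tree, index, item);
}

static void *lookup_item(pgoff_t index)
{
        /* lookups are RCU-safe; writers need their own exclusion */
        return radix_tree_lookup(&my_tree, index);
}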
/linux-4.1.27/arch/x86/include/asm/
Dhighmem.h60 extern void *kmap_high(struct page *page);
61 extern void kunmap_high(struct page *page);
63 void *kmap(struct page *page);
64 void kunmap(struct page *page);
66 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
67 void *kmap_atomic(struct page *page);
71 struct page *kmap_atomic_to_page(void *ptr);
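The declarations above are the whole highmem mapping API on x86. A minimal usage sketch (the helper name is hypothetical): zeroing a page that may lack a permanent kernel mapping:

/* Hypothetical helper: zero a possibly-highmem page via kmap_atomic(). */
#include <linux/highmem.h>
#include <linux/string.h>

static void zero_page_example(struct page *page)
{
        void *addr = kmap_atomic(page); /* short-lived, non-sleeping mapping */

        memset(addr, 0, PAGE_SIZE);
        kunmap_atomic(addr);            /* unmap before anything that sleeps */
}

kmap() would also work here but may sleep; for short copies kmap_atomic() is the usual choice, and the kernel's own clear_highpage() is essentially this helper.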
Dcacheflush.h53 int set_pages_array_uc(struct page **pages, int addrinarray);
54 int set_pages_array_wc(struct page **pages, int addrinarray);
55 int set_pages_array_wb(struct page **pages, int addrinarray);
77 int set_pages_uc(struct page *page, int numpages);
78 int set_pages_wb(struct page *page, int numpages);
79 int set_pages_x(struct page *page, int numpages);
80 int set_pages_nx(struct page *page, int numpages);
81 int set_pages_ro(struct page *page, int numpages);
82 int set_pages_rw(struct page *page, int numpages);
/linux-4.1.27/drivers/infiniband/hw/mlx5/
Ddoorbell.c49 struct mlx5_ib_user_db_page *page; in mlx5_ib_db_map_user() local
54 list_for_each_entry(page, &context->db_page_list, list) in mlx5_ib_db_map_user()
55 if (page->user_virt == (virt & PAGE_MASK)) in mlx5_ib_db_map_user()
58 page = kmalloc(sizeof(*page), GFP_KERNEL); in mlx5_ib_db_map_user()
59 if (!page) { in mlx5_ib_db_map_user()
64 page->user_virt = (virt & PAGE_MASK); in mlx5_ib_db_map_user()
65 page->refcnt = 0; in mlx5_ib_db_map_user()
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx5_ib_db_map_user()
68 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
69 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
[all …]
/linux-4.1.27/drivers/infiniband/hw/mlx4/
Ddoorbell.c47 struct mlx4_ib_user_db_page *page; in mlx4_ib_db_map_user() local
52 list_for_each_entry(page, &context->db_page_list, list) in mlx4_ib_db_map_user()
53 if (page->user_virt == (virt & PAGE_MASK)) in mlx4_ib_db_map_user()
56 page = kmalloc(sizeof *page, GFP_KERNEL); in mlx4_ib_db_map_user()
57 if (!page) { in mlx4_ib_db_map_user()
62 page->user_virt = (virt & PAGE_MASK); in mlx4_ib_db_map_user()
63 page->refcnt = 0; in mlx4_ib_db_map_user()
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx4_ib_db_map_user()
66 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
67 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
[all …]
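mlx4 and mlx5 share the same find-or-create scheme for user doorbell pages: scan db_page_list under a mutex for an entry whose user_virt matches the page-aligned address, otherwise pin a fresh page with ib_umem_get(), and bump refcnt either way so several doorbells can share one user page. A condensed sketch of that pattern; the struct and function names here are illustrative, not the drivers' own:

/* Condensed form of the mlx4/mlx5 doorbell-page lookup above. */
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>

struct user_db_page {
        struct list_head  list;
        unsigned long     user_virt;    /* page-aligned user address */
        int               refcnt;
        struct ib_umem   *umem;         /* pinned user memory */
};

/* Caller is expected to hold the context's db_page_mutex. */
static struct user_db_page *db_page_get(struct list_head *db_list,
                                        struct ib_ucontext *ctx,
                                        unsigned long virt)
{
        struct user_db_page *page;
        long err;

        list_for_each_entry(page, db_list, list)
                if (page->user_virt == (virt & PAGE_MASK))
                        goto found;     /* reuse: one pin per user page */

        page = kmalloc(sizeof(*page), GFP_KERNEL);
        if (!page)
                return ERR_PTR(-ENOMEM);

        page->user_virt = virt & PAGE_MASK;
        page->refcnt = 0;
        page->umem = ib_umem_get(ctx, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
        if (IS_ERR(page->umem)) {
                err = PTR_ERR(page->umem);
                kfree(page);
                return ERR_PTR(err);
        }
        list_add(&page->list, db_list);
found:
        ++page->refcnt;
        return page;
}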
/linux-4.1.27/fs/hfsplus/
Dbitmap.c22 struct page *page; in hfsplus_block_allocate() local
36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate()
37 if (IS_ERR(page)) { in hfsplus_block_allocate()
41 pptr = kmap(page); in hfsplus_block_allocate()
76 kunmap(page); in hfsplus_block_allocate()
80 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, in hfsplus_block_allocate()
82 if (IS_ERR(page)) { in hfsplus_block_allocate()
86 curr = pptr = kmap(page); in hfsplus_block_allocate()
128 set_page_dirty(page); in hfsplus_block_allocate()
129 kunmap(page); in hfsplus_block_allocate()
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
Dlustre_patchless_compat.h47 #define ll_delete_from_page_cache(page) delete_from_page_cache(page) argument
50 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument
52 if (page->mapping != mapping) in truncate_complete_page()
55 if (PagePrivate(page)) in truncate_complete_page()
56 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page()
58 if (TestClearPageDirty(page)) in truncate_complete_page()
59 account_page_cleaned(page, mapping); in truncate_complete_page()
61 ClearPageMappedToDisk(page); in truncate_complete_page()
62 ll_delete_from_page_cache(page); in truncate_complete_page()
/linux-4.1.27/drivers/infiniband/hw/mthca/
Dmthca_memfree.c61 } page[0]; member
109 struct page *page; in mthca_alloc_icm_pages() local
115 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in mthca_alloc_icm_pages()
116 if (!page) in mthca_alloc_icm_pages()
119 sg_set_page(mem, page, PAGE_SIZE << order, 0); in mthca_alloc_icm_pages()
283 struct page *page = NULL; in mthca_table_find() local
309 page = sg_page(&chunk->mem[i]); in mthca_table_find()
318 return page ? lowmem_page_address(page) + offset : NULL; in mthca_table_find()
439 static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) in mthca_uarc_virt() argument
443 page * MTHCA_ICM_PAGE_SIZE; in mthca_uarc_virt()
[all …]
/linux-4.1.27/drivers/staging/rtl8188eu/include/
Drtw_debug.h120 int proc_get_drv_version(char *page, char **start,
124 int proc_get_write_reg(char *page, char **start,
130 int proc_get_read_reg(char *page, char **start,
137 int proc_get_fwstate(char *page, char **start,
140 int proc_get_sec_info(char *page, char **start,
143 int proc_get_mlmext_state(char *page, char **start,
147 int proc_get_qos_option(char *page, char **start,
150 int proc_get_ht_option(char *page, char **start,
153 int proc_get_rf_info(char *page, char **start,
156 int proc_get_ap_info(char *page, char **start,
[all …]
/linux-4.1.27/arch/arm64/mm/
Dflush.c37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument
59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
67 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page()
75 struct page *page = pte_page(pte); in __sync_icache_dcache() local
78 if (!page_mapping(page)) in __sync_icache_dcache()
81 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { in __sync_icache_dcache()
82 __flush_dcache_area(page_address(page), in __sync_icache_dcache()
83 PAGE_SIZE << compound_order(page)); in __sync_icache_dcache()
95 void flush_dcache_page(struct page *page) in flush_dcache_page() argument
97 if (test_bit(PG_dcache_clean, &page->flags)) in flush_dcache_page()
[all …]
/linux-4.1.27/block/
Dblk-sysfs.c23 queue_var_show(unsigned long var, char *page) in queue_var_show() argument
25 return sprintf(page, "%lu\n", var); in queue_var_show()
29 queue_var_store(unsigned long *var, const char *page, size_t count) in queue_var_store() argument
34 err = kstrtoul(page, 10, &v); in queue_var_store()
43 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
45 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
49 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
57 ret = queue_var_store(&nr, page, count); in queue_requests_store()
75 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
80 return queue_var_show(ra_kb, (page)); in queue_ra_show()
[all …]
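queue_var_show() and queue_var_store() are thin sprintf()/kstrtoul() wrappers, and every attribute in blk-sysfs.c is a show/store pair built on them. A sketch of one more such pair, building on the helpers shown above; queue_foo_* is hypothetical:

/* Hypothetical queue attribute pair in the style of blk-sysfs.c. */
static ssize_t queue_foo_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);    /* emits "%lu\n" */
}

static ssize_t queue_foo_store(struct request_queue *q, const char *page,
                               size_t count)
{
        unsigned long val;
        ssize_t ret = queue_var_store(&val, page, count);

        if (ret < 0)
                return ret;
        /* ... validate val and apply it under the queue's locking ... */
        return count;
}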
Dblk-mq-sysfs.c33 char *page) in blk_mq_sysfs_show() argument
50 res = entry->show(ctx, page); in blk_mq_sysfs_show()
56 const char *page, size_t length) in blk_mq_sysfs_store() argument
73 res = entry->store(ctx, page, length); in blk_mq_sysfs_store()
79 struct attribute *attr, char *page) in blk_mq_hw_sysfs_show() argument
96 res = entry->show(hctx, page); in blk_mq_hw_sysfs_show()
102 struct attribute *attr, const char *page, in blk_mq_hw_sysfs_store() argument
120 res = entry->store(hctx, page, length); in blk_mq_hw_sysfs_store()
125 static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) in blk_mq_sysfs_dispatched_show() argument
127 return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1], in blk_mq_sysfs_dispatched_show()
[all …]
/linux-4.1.27/arch/sh/mm/
Dcache.c58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
62 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && in copy_to_user_page()
63 test_bit(PG_dcache_clean, &page->flags)) { in copy_to_user_page()
64 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); in copy_to_user_page()
70 clear_bit(PG_dcache_clean, &page->flags); in copy_to_user_page()
74 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page()
77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument
81 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && in copy_from_user_page()
82 test_bit(PG_dcache_clean, &page->flags)) { in copy_from_user_page()
83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); in copy_from_user_page()
[all …]
/linux-4.1.27/fs/udf/
Dfile.c41 static void __udf_adinicb_readpage(struct page *page) in __udf_adinicb_readpage() argument
43 struct inode *inode = page->mapping->host; in __udf_adinicb_readpage()
47 kaddr = kmap(page); in __udf_adinicb_readpage()
50 flush_dcache_page(page); in __udf_adinicb_readpage()
51 SetPageUptodate(page); in __udf_adinicb_readpage()
52 kunmap(page); in __udf_adinicb_readpage()
55 static int udf_adinicb_readpage(struct file *file, struct page *page) in udf_adinicb_readpage() argument
57 BUG_ON(!PageLocked(page)); in udf_adinicb_readpage()
58 __udf_adinicb_readpage(page); in udf_adinicb_readpage()
59 unlock_page(page); in udf_adinicb_readpage()
[all …]
/linux-4.1.27/Documentation/vm/
Dpagemap.txt5 userspace programs to examine the page tables and related information by
11 physical frame each virtual page is mapped to. It contains one 64-bit
12 value for each virtual page, containing the following data (from
15 * Bits 0-54 page frame number (PFN) if present
20 * Bit 61 page is file-page or shared-anon
21 * Bit 62 page swapped
22 * Bit 63 page present
24 If the page is not present but in swap, then the PFN contains an
25 encoding of the swap file number and the page's offset into the
35 times each page is mapped, indexed by PFN.
[all …]
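The layout above translates directly into userspace code: seek to (vaddr / page_size) * 8 in /proc/<pid>/pagemap, read one 64-bit entry, and mask the fields out. A minimal sketch assuming the bit layout quoted here; note that kernels of this vintage report a zero PFN to processes without CAP_SYS_ADMIN:

/* Print the PFN (or swap state) backing one virtual address. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        long psize = sysconf(_SC_PAGESIZE);
        uint64_t entry, vaddr;
        int fd;

        if (argc < 2)
                return 1;
        vaddr = strtoull(argv[1], NULL, 0);

        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0 || pread(fd, &entry, 8, (vaddr / psize) * 8) != 8)
                return 1;

        if (entry & (1ULL << 63))       /* bit 63: page present */
                printf("pfn 0x%llx\n",
                       (unsigned long long)(entry & ((1ULL << 55) - 1)));
        else if (entry & (1ULL << 62))  /* bit 62: page swapped */
                printf("swapped\n");
        return 0;
}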
Dpage_migration9 The main intent of page migration is to reduce the latency of memory access
22 which provides an interface similar to other numa functionality for page
25 proc(5) man page.
31 manual page migration support. Automatic page migration may be implemented
67 Calling isolate_lru_page increases the references to the page
68 so that it cannot vanish while the page migration occurs.
70 the page.
74 how to allocate the correct new page given the old page.
78 the new page for each page that is considered for
84 migrate_pages() does several passes over its list of pages. A page is moved
[all …]
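The userspace entry point to this machinery is the numa syscall family the text refers to; move_pages(2) both queries and migrates individual pages. A minimal sketch moving one page of the calling process to node 0, assuming a NUMA-enabled kernel and linking with -lnuma:

/* Migrate one page of this process to NUMA node 0. */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        long psize = sysconf(_SC_PAGESIZE);
        void *pages[1];
        int nodes[1] = { 0 };
        int status[1];

        if (posix_memalign(&pages[0], psize, psize))
                return 1;
        *(volatile char *)pages[0] = 1;         /* fault the page in first */

        /* pid 0 means the calling process */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
                perror("move_pages");
        else
                printf("page is now on node %d\n", status[0]);
        return 0;
}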
Dpage_owner.txt1 page owner: Tracking who allocated each page
6 page owner is for tracking who allocated each page.
9 and order of pages is stored into dedicated storage for each page.
13 Although we already have tracepoints for tracing page allocation/free,
14 using it for analyzing who allocates each page is rather complex. We need
20 page owner can also be used for various purposes. For example, accurate
22 each page. It is already implemented and activated if page owner is
25 page owner is disabled by default. So, if you'd like to use it, you need
27 with page owner and page owner is disabled at runtime due to no enabling
30 memory overhead. And page owner inserts just two unlikely branches into
[all …]
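In practice, assuming CONFIG_PAGE_OWNER plus the page_owner=on boot option the text describes, the records come back out through debugfs. A minimal sketch that streams them to stdout (the dump is normally post-processed, e.g. sorted by allocation count):

/* Stream page_owner records; needs debugfs mounted and root. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/page_owner", "r");
        char line[256];

        if (!f) {
                perror("page_owner");   /* not enabled, or not root */
                return 1;
        }
        while (fgets(line, sizeof(line), f))    /* one record per block */
                fputs(line, stdout);
        fclose(f);
        return 0;
}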
Dsplit_page_table_lock1 Split page table lock
4 Originally, mm->page_table_lock spinlock protected all page tables of the
5 mm_struct. But this approach leads to poor page fault scalability of
7 scalability, split page table lock was introduced.
9 With split page table lock we have a separate per-table lock to serialize
29 Split page table lock for PTE tables is enabled compile-time if
33 Split page table lock for PMD tables is enabled, if it's enabled for PTE
36 Hugetlb and split page table lock
39 Hugetlb can support several page sizes. We use split lock only for PMD
44 takes pmd split lock for PMD_SIZE page, mm->page_table_lock
[all …]
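Kernel code normally takes the split PTE lock through pte_offset_map_lock(), which returns the mapped PTE together with whichever lock covers that table. A sketch of the canonical pattern with the 4.1-era helpers; the function itself is hypothetical:

/* Canonical split-PTL pattern around one user PTE. */
#include <linux/mm.h>

static int touch_one_pte(struct mm_struct *mm, pmd_t *pmd,
                         unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (pte_present(*pte)) {
                /* ... examine or update the PTE under the lock ... */
        }
        pte_unmap_unlock(pte, ptl);
        return 0;
}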
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Drw26.c72 static void ll_invalidatepage(struct page *vmpage, unsigned int offset, in ll_invalidatepage()
77 struct cl_page *page; in ll_invalidatepage() local
96 page = cl_vmpage_page(vmpage, obj); in ll_invalidatepage()
97 if (page != NULL) { in ll_invalidatepage()
98 lu_ref_add(&page->cp_reference, in ll_invalidatepage()
100 cl_page_delete(env, page); in ll_invalidatepage()
101 lu_ref_del(&page->cp_reference, in ll_invalidatepage()
103 cl_page_put(env, page); in ll_invalidatepage()
117 static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) in ll_releasepage()
122 struct cl_page *page; in ll_releasepage() local
[all …]
/linux-4.1.27/arch/microblaze/mm/
Dconsistent.c67 struct page *page, *end; in consistent_alloc() local
129 page = virt_to_page(vaddr); in consistent_alloc()
130 end = page + (1 << order); in consistent_alloc()
132 split_page(page, order); in consistent_alloc()
140 SetPageReserved(page); in consistent_alloc()
141 page++; in consistent_alloc()
145 while (page < end) { in consistent_alloc()
146 __free_page(page); in consistent_alloc()
147 page++; in consistent_alloc()
183 struct page *page; in consistent_free() local
[all …]
/linux-4.1.27/arch/metag/include/asm/
Dhighmem.h44 extern void *kmap_high(struct page *page);
45 extern void kunmap_high(struct page *page);
54 extern void *kmap(struct page *page);
55 extern void kunmap(struct page *page);
56 extern void *kmap_atomic(struct page *page);
59 extern struct page *kmap_atomic_to_page(void *ptr);
/linux-4.1.27/drivers/target/
Dtarget_core_stat.c77 struct se_dev_stat_grps *sgrps, char *page) in target_stat_scsi_dev_show_attr_inst() argument
83 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); in target_stat_scsi_dev_show_attr_inst()
88 struct se_dev_stat_grps *sgrps, char *page) in target_stat_scsi_dev_show_attr_indx() argument
93 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); in target_stat_scsi_dev_show_attr_indx()
98 struct se_dev_stat_grps *sgrps, char *page) in target_stat_scsi_dev_show_attr_role() argument
100 return snprintf(page, PAGE_SIZE, "Target\n"); in target_stat_scsi_dev_show_attr_role()
105 struct se_dev_stat_grps *sgrps, char *page) in target_stat_scsi_dev_show_attr_ports() argument
110 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); in target_stat_scsi_dev_show_attr_ports()
154 struct se_dev_stat_grps *sgrps, char *page) in target_stat_scsi_tgt_dev_show_attr_inst() argument
160 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); in target_stat_scsi_tgt_dev_show_attr_inst()
[all …]
/linux-4.1.27/arch/metag/mm/
Dhighmem.c13 void *kmap(struct page *page) in kmap() argument
16 if (!PageHighMem(page)) in kmap()
17 return page_address(page); in kmap()
18 return kmap_high(page); in kmap()
22 void kunmap(struct page *page) in kunmap() argument
25 if (!PageHighMem(page)) in kunmap()
27 kunmap_high(page); in kunmap()
40 void *kmap_atomic(struct page *page) in kmap_atomic() argument
48 if (!PageHighMem(page)) in kmap_atomic()
49 return page_address(page); in kmap_atomic()
[all …]
/linux-4.1.27/fs/btrfs/
Dextent_io.c1390 struct page *page; in extent_range_clear_dirty_for_io() local
1393 page = find_get_page(inode->i_mapping, index); in extent_range_clear_dirty_for_io()
1394 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_clear_dirty_for_io()
1395 clear_page_dirty_for_io(page); in extent_range_clear_dirty_for_io()
1396 page_cache_release(page); in extent_range_clear_dirty_for_io()
1406 struct page *page; in extent_range_redirty_for_io() local
1409 page = find_get_page(inode->i_mapping, index); in extent_range_redirty_for_io()
1410 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_redirty_for_io()
1411 __set_page_dirty_nobuffers(page); in extent_range_redirty_for_io()
1412 account_page_redirty(page); in extent_range_redirty_for_io()
[all …]
Dextent_io.h68 int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
71 int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
72 int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
74 int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
77 int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
79 struct page *page, u64 start, u64 end,
81 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
158 struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
178 struct page *page,
186 struct extent_io_tree *tree, struct page *page,
[all …]
Dcompression.c50 struct page **compressed_pages;
84 static int btrfs_decompress_biovec(int type, struct page **pages_in,
111 struct page *page; in check_compressed_csum() local
121 page = cb->compressed_pages[i]; in check_compressed_csum()
124 kaddr = kmap_atomic(page); in check_compressed_csum()
159 struct page *page; in end_compressed_bio_read() local
194 page = cb->compressed_pages[index]; in end_compressed_bio_read()
195 page->mapping = NULL; in end_compressed_bio_read()
196 page_cache_release(page); in end_compressed_bio_read()
232 struct page *pages[16]; in end_compressed_writeback()
[all …]
/linux-4.1.27/arch/powerpc/mm/
Ddma-noncoherent.c157 struct page *page; in __dma_alloc_coherent() local
198 page = alloc_pages(gfp, order); in __dma_alloc_coherent()
199 if (!page) in __dma_alloc_coherent()
207 unsigned long kaddr = (unsigned long)page_address(page); in __dma_alloc_coherent()
208 memset(page_address(page), 0, size); in __dma_alloc_coherent()
219 struct page *end = page + (1 << order); in __dma_alloc_coherent()
221 split_page(page, order); in __dma_alloc_coherent()
226 *handle = page_to_phys(page); in __dma_alloc_coherent()
229 SetPageReserved(page); in __dma_alloc_coherent()
230 map_page(vaddr, page_to_phys(page), in __dma_alloc_coherent()
[all …]
/linux-4.1.27/arch/cris/arch-v32/drivers/
Daxisflashmap.c312 static char page[PAGESIZE]; in init_axis_flash() local
367 page); in init_axis_flash()
377 page[i] & 255, page[i+1] & 255, in init_axis_flash()
378 page[i+2] & 255, page[i+3] & 255, in init_axis_flash()
379 page[i+4] & 255, page[i+5] & 255, in init_axis_flash()
380 page[i+6] & 255, page[i+7] & 255, in init_axis_flash()
381 page[i+8] & 255, page[i+9] & 255, in init_axis_flash()
382 page[i+10] & 255, page[i+11] & 255, in init_axis_flash()
383 page[i+12] & 255, page[i+13] & 255, in init_axis_flash()
384 page[i+14] & 255, page[i+15] & 255); in init_axis_flash()
[all …]
/linux-4.1.27/fs/xfs/
Dxfs_aops.c41 struct page *page, in xfs_count_page_state() argument
49 bh = head = page_buffers(page); in xfs_count_page_state()
411 struct page *page, in xfs_start_page_writeback() argument
415 ASSERT(PageLocked(page)); in xfs_start_page_writeback()
416 ASSERT(!PageWriteback(page)); in xfs_start_page_writeback()
426 clear_page_dirty_for_io(page); in xfs_start_page_writeback()
427 set_page_writeback(page); in xfs_start_page_writeback()
429 set_page_writeback_keepwrite(page); in xfs_start_page_writeback()
431 unlock_page(page); in xfs_start_page_writeback()
435 end_page_writeback(page); in xfs_start_page_writeback()
[all …]
/linux-4.1.27/fs/coda/
Dsymlink.c23 static int coda_symlink_filler(struct file *file, struct page *page) in coda_symlink_filler() argument
25 struct inode *inode = page->mapping->host; in coda_symlink_filler()
29 char *p = kmap(page); in coda_symlink_filler()
36 SetPageUptodate(page); in coda_symlink_filler()
37 kunmap(page); in coda_symlink_filler()
38 unlock_page(page); in coda_symlink_filler()
42 SetPageError(page); in coda_symlink_filler()
43 kunmap(page); in coda_symlink_filler()
44 unlock_page(page); in coda_symlink_filler()
/linux-4.1.27/kernel/power/
Dsnapshot.c41 static int swsusp_page_is_free(struct page *);
42 static void swsusp_set_page_forbidden(struct page *);
43 static void swsusp_unset_page_forbidden(struct page *);
121 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page()
123 struct page *page; in alloc_image_page() local
125 page = alloc_page(gfp_mask); in alloc_image_page()
126 if (page) { in alloc_image_page()
127 swsusp_set_page_forbidden(page); in alloc_image_page()
128 swsusp_set_page_free(page); in alloc_image_page()
130 return page; in alloc_image_page()
[all …]
Dblock_io.c29 struct page *page, struct bio **bio_chain) in submit() argument
39 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in submit()
46 lock_page(page); in submit()
51 wait_on_page_locked(page); in submit()
57 get_page(page); /* These pages are freed later */ in submit()
90 struct page *page; in hib_wait_on_bio_chain() local
93 page = bio->bi_io_vec[0].bv_page; in hib_wait_on_bio_chain()
94 wait_on_page_locked(page); in hib_wait_on_bio_chain()
95 if (!PageUptodate(page) || PageError(page)) in hib_wait_on_bio_chain()
97 put_page(page); in hib_wait_on_bio_chain()
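submit() above builds a one-segment bio per page and chains the completions; done synchronously with the 4.1-era block API, the same read looks roughly like this (a sketch assuming a valid bdev and sector; the function name is hypothetical):

/* Synchronously read one page from a block device (4.1-era bio API). */
#include <linux/bio.h>
#include <linux/blkdev.h>

static int read_one_page(struct block_device *bdev, sector_t sector,
                         struct page *page)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int err;

        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EIO;
        }

        err = submit_bio_wait(READ, bio);       /* blocks until completion */
        bio_put(bio);
        return err;
}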
/linux-4.1.27/fs/hfs/
Dbnode.c20 struct page *page; in hfs_bnode_read() local
23 page = node->page[0]; in hfs_bnode_read()
25 memcpy(buf, kmap(page) + off, len); in hfs_bnode_read()
26 kunmap(page); in hfs_bnode_read()
62 struct page *page; in hfs_bnode_write() local
65 page = node->page[0]; in hfs_bnode_write()
67 memcpy(kmap(page) + off, buf, len); in hfs_bnode_write()
68 kunmap(page); in hfs_bnode_write()
69 set_page_dirty(page); in hfs_bnode_write()
87 struct page *page; in hfs_bnode_clear() local
[all …]
/linux-4.1.27/arch/m68k/mm/
Dmemory.c31 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) argument
32 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
40 unsigned long page = ptable & PAGE_MASK; in init_pointer_table() local
41 unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE); in init_pointer_table()
43 dp = PD_PTABLE(page); in init_pointer_table()
75 void *page; in get_pointer_table() local
78 if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) in get_pointer_table()
81 flush_tlb_kernel_page(page); in get_pointer_table()
82 nocache_page(page); in get_pointer_table()
84 new = PD_PTABLE(page); in get_pointer_table()
[all …]
/linux-4.1.27/arch/mn10300/mm/
Dpgtable.c72 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one()
74 struct page *pte; in pte_alloc_one()
106 struct page *pgd_list;
110 struct page *page = virt_to_page(pgd); in pgd_list_add() local
111 page->index = (unsigned long) pgd_list; in pgd_list_add()
113 set_page_private(pgd_list, (unsigned long) &page->index); in pgd_list_add()
114 pgd_list = page; in pgd_list_add()
115 set_page_private(page, (unsigned long) &pgd_list); in pgd_list_add()
120 struct page *next, **pprev, *page = virt_to_page(pgd); in pgd_list_del() local
121 next = (struct page *) page->index; in pgd_list_del()
[all …]
/linux-4.1.27/arch/cris/arch-v10/mm/
Dinit.c70 IO_STATE(R_MMU_KSEG, seg_e, page ) | in paging_init()
71 IO_STATE(R_MMU_KSEG, seg_d, page ) | in paging_init()
72 IO_STATE(R_MMU_KSEG, seg_c, page ) | in paging_init()
77 IO_STATE(R_MMU_KSEG, seg_a, page ) | in paging_init()
81 IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */ in paging_init()
84 IO_STATE(R_MMU_KSEG, seg_4, page ) | /* user area */ in paging_init()
85 IO_STATE(R_MMU_KSEG, seg_3, page ) | /* user area */ in paging_init()
86 IO_STATE(R_MMU_KSEG, seg_2, page ) | /* user area */ in paging_init()
87 IO_STATE(R_MMU_KSEG, seg_1, page ) | /* user area */ in paging_init()
88 IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */ in paging_init()
[all …]
