/linux-4.4.14/include/linux/

D | page-flags.h |
  140  static inline int Page##uname(const struct page *page) \
  141  { return test_bit(PG_##lname, &page->flags); }
  144  static inline void SetPage##uname(struct page *page) \
  145  { set_bit(PG_##lname, &page->flags); }
  148  static inline void ClearPage##uname(struct page *page) \
  149  { clear_bit(PG_##lname, &page->flags); }
  152  static inline void __SetPage##uname(struct page *page) \
  153  { __set_bit(PG_##lname, &page->flags); }
  156  static inline void __ClearPage##uname(struct page *page) \
  157  { __clear_bit(PG_##lname, &page->flags); }
  [all …]
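
The Page##uname/SetPage##uname templates above stamp out one accessor family per page flag via token pasting. Below is a minimal userspace sketch of that pattern; the struct, the PG_ enum, and the shift-based bit helpers are simplified stand-ins (the kernel's test_bit()/set_bit() are atomic bitops on page->flags), so treat it as an illustration of the macro machinery, not the kernel's actual expansion.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types; illustrative only. */
    struct page { unsigned long flags; };
    enum { PG_locked, PG_dirty };

    #define TESTPAGEFLAG(uname, lname)                      \
    static inline int Page##uname(const struct page *page)  \
    { return (page->flags >> PG_##lname) & 1UL; }

    #define SETPAGEFLAG(uname, lname)                       \
    static inline void SetPage##uname(struct page *page)    \
    { page->flags |= 1UL << PG_##lname; }

    TESTPAGEFLAG(Dirty, dirty)   /* generates PageDirty()    */
    SETPAGEFLAG(Dirty, dirty)    /* generates SetPageDirty() */

    int main(void)
    {
        struct page p = { 0 };

        SetPageDirty(&p);
        printf("dirty=%d\n", PageDirty(&p));
        return 0;
    }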
D | balloon_compaction.h |
   63  int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
   64  struct page *page, enum migrate_mode mode);
   67  extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
   68  extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
   79  extern bool balloon_page_isolate(struct page *page);
   80  extern void balloon_page_putback(struct page *page);
   81  extern int balloon_page_migrate(struct page *newpage,
   82  struct page *page, enum migrate_mode mode);
   87  static inline bool __is_movable_balloon_page(struct page *page)
   89  return PageBalloon(page);
  [all …]

D | page_idle.h |
   11  static inline bool page_is_young(struct page *page)
   13  return PageYoung(page);
   16  static inline void set_page_young(struct page *page)
   18  SetPageYoung(page);
   21  static inline bool test_and_clear_page_young(struct page *page)
   23  return TestClearPageYoung(page);
   26  static inline bool page_is_idle(struct page *page)
   28  return PageIdle(page);
   31  static inline void set_page_idle(struct page *page)
   33  SetPageIdle(page);
  [all …]

D | pagemap.h |
  102  #define page_cache_get(page)		get_page(page)
  103  #define page_cache_release(page)	put_page(page)
  104  void release_pages(struct page **pages, int nr, bool cold);
  150  static inline int page_cache_get_speculative(struct page *page)
  167  VM_BUG_ON_PAGE(page_count(page) == 0, page);
  168  atomic_inc(&page->_count);
  171  if (unlikely(!get_page_unless_zero(page))) {
  180  VM_BUG_ON_PAGE(PageTail(page), page);
  188  static inline int page_cache_add_speculative(struct page *page, int count)
  196  VM_BUG_ON_PAGE(page_count(page) == 0, page);
  [all …]
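
page_cache_get_speculative() above leans on get_page_unless_zero(): a lockless (RCU) lookup can race with the final put, so a reference is taken only while the count is still nonzero and can never resurrect a page that has already hit zero. A self-contained C11 sketch of that compare-and-swap loop, with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch of the get_page_unless_zero() idea: refuse to raise a
     * reference count that has already dropped to zero. */
    static bool get_unless_zero(atomic_int *count)
    {
        int old = atomic_load(count);

        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return true;   /* reference taken */
            /* the failed CAS reloaded 'old'; retry */
        }
        return false;          /* object already on its way to being freed */
    }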
D | mm.h |
   85  #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
  235  struct page *cow_page;		/* Handler may choose to COW */
  236  struct page *page;		/* ->fault handlers should return a
  307  struct page *(*find_special_page)(struct vm_area_struct *vma,
  314  #define page_private(page)		((page)->private)
  315  #define set_page_private(page, v)	((page)->private = (v))
  340  static inline int put_page_testzero(struct page *page)
  342  VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
  343  return atomic_dec_and_test(&page->_count);
  352  static inline int get_page_unless_zero(struct page *page)
  [all …]

D | highmem.h |
   14  static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma…
   20  static inline void flush_kernel_dcache_page(struct page *page)
   42  struct page *kmap_to_page(void *addr);
   48  static inline struct page *kmap_to_page(void *addr)
   56  static inline void *kmap(struct page *page)
   59  return page_address(page);
   62  static inline void kunmap(struct page *page)
   66  static inline void *kmap_atomic(struct page *page)
   70  return page_address(page);
   72  #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
  [all …]
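
The kmap()/kmap_atomic() pairs above map a highmem page into kernel address space, and collapse to plain page_address() when there is no highmem, as the stubs show. A short kernel-style sketch of the usual usage discipline; the helper name is hypothetical:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy a few bytes out of a (possibly highmem) page.
     * kunmap_atomic() must be passed the address kmap_atomic()
     * returned, and the mapping must not be held across a sleep. */
    static void peek_page(struct page *page, void *buf,
                          size_t off, size_t len)
    {
        char *kaddr = kmap_atomic(page);

        memcpy(buf, kaddr + off, len);
        kunmap_atomic(kaddr);
    }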
D | swap.h |
  252  void *workingset_eviction(struct address_space *mapping, struct page *page);
  254  void workingset_activation(struct page *page);
  299  extern void lru_cache_add(struct page *);
  300  extern void lru_cache_add_anon(struct page *page);
  301  extern void lru_cache_add_file(struct page *page);
  302  extern void lru_add_page_tail(struct page *page, struct page *page_tail,
  304  extern void activate_page(struct page *);
  305  extern void mark_page_accessed(struct page *);
  309  extern void rotate_reclaimable_page(struct page *page);
  310  extern void deactivate_file_page(struct page *page);
  [all …]

D | hugetlb_cgroup.h |
   29  static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
   31  VM_BUG_ON_PAGE(!PageHuge(page), page);
   33  if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
   35  return (struct hugetlb_cgroup *)page[2].private;
   39  int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
   41  VM_BUG_ON_PAGE(!PageHuge(page), page);
   43  if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
   45  page[2].private = (unsigned long)h_cg;
   58  struct page *page);
   60  struct page *page);
  [all …]

D | rmap.h |
  148  struct anon_vma *page_get_anon_vma(struct page *page);
  153  void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  154  void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  155  void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
  157  void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  158  void page_add_file_rmap(struct page *);
  159  void page_remove_rmap(struct page *);
  161  void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
  163  void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
  166  static inline void page_dup_rmap(struct page *page)
  [all …]

D | migrate.h |
    8  typedef struct page *new_page_t(struct page *page, unsigned long private,
   10  typedef void free_page_t(struct page *page, unsigned long private);
   33  struct page *, struct page *, enum migrate_mode);
   39  extern void migrate_page_copy(struct page *newpage, struct page *page);
   41  struct page *newpage, struct page *page);
   43  struct page *newpage, struct page *page,
   57  static inline void migrate_page_copy(struct page *newpage,
   58  struct page *page) {}
   61  struct page *newpage, struct page *page)
   70  extern int migrate_misplaced_page(struct page *page,
  [all …]

D | mm_inline.h |
   20  static inline int page_is_file_cache(struct page *page)
   22  return !PageSwapBacked(page);
   25  static __always_inline void add_page_to_lru_list(struct page *page,
   28  int nr_pages = hpage_nr_pages(page);
   30  list_add(&page->lru, &lruvec->lists[lru]);
   34  static __always_inline void del_page_from_lru_list(struct page *page,
   37  int nr_pages = hpage_nr_pages(page);
   39  list_del(&page->lru);
   51  static inline enum lru_list page_lru_base_type(struct page *page)
   53  if (page_is_file_cache(page))
  [all …]

D | page_owner.h |
    8  extern void __reset_page_owner(struct page *page, unsigned int order);
    9  extern void __set_page_owner(struct page *page,
   11  extern gfp_t __get_page_owner_gfp(struct page *page);
   13  static inline void reset_page_owner(struct page *page, unsigned int order)
   18  __reset_page_owner(page, order);
   21  static inline void set_page_owner(struct page *page,
   27  __set_page_owner(page, order, gfp_mask);
   30  static inline gfp_t get_page_owner_gfp(struct page *page)
   35  return __get_page_owner_gfp(page);
   38  static inline void reset_page_owner(struct page *page, unsigned int order)
  [all …]
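
The page_owner wrappers above follow a common kernel shape: an out-of-line double-underscore function does the expensive work, while a tiny inline wrapper bails out early when the feature is disabled (page_owner gates on a flag set during boot). A userspace sketch of that shape, with hypothetical names:

    #include <stdbool.h>

    /* Hypothetical example of the inline-wrapper-plus-__worker
     * pattern. The disabled case costs only one predictable branch;
     * the real work stays out of line. */
    extern bool tracker_enabled;
    void __track_alloc(void *obj, unsigned int order);

    static inline void track_alloc(void *obj, unsigned int order)
    {
        if (!tracker_enabled)
            return;
        __track_alloc(obj, order);
    }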
D | ksm.h |
   38  static inline struct stable_node *page_stable_node(struct page *page)
   40  return PageKsm(page) ? page_rmapping(page) : NULL;
   43  static inline void set_page_stable_node(struct page *page,
   46  page->mapping = (void *)stable_node +
   61  struct page *ksm_might_need_to_copy(struct page *page,
   64  int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
   65  void ksm_migrate_page(struct page *newpage, struct page *oldpage);
   85  static inline struct page *ksm_might_need_to_copy(struct page *page,
   88  return page;
   91  static inline int page_referenced_ksm(struct page *page,
  [all …]

D | fscache.h |
   39  #define PageFsCache(page)		PagePrivate2((page))
   40  #define SetPageFsCache(page)		SetPagePrivate2((page))
   41  #define ClearPageFsCache(page)		ClearPagePrivate2((page))
   42  #define TestSetPageFsCache(page)	TestSetPagePrivate2((page))
   43  #define TestClearPageFsCache(page)	TestClearPagePrivate2((page))
   53  typedef void (*fscache_rw_complete_t)(struct page *page,
  145  struct page *page);
  228  struct page *,
  239  extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
  240  extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
  [all …]

D | pageblock-flags.h |
   66  struct page;
   68  unsigned long get_pfnblock_flags_mask(struct page *page,
   73  void set_pfnblock_flags_mask(struct page *page,
   80  #define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
   81  get_pfnblock_flags_mask(page, page_to_pfn(page), \
   84  #define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
   85  set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
   90  #define get_pageblock_skip(page) \
   91  get_pageblock_flags_group(page, PB_migrate_skip, \
   93  #define clear_pageblock_skip(page) \
  [all …]
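
get_pfnblock_flags_mask()/set_pfnblock_flags_mask() above keep a few bits of state per pageblock (the migratetype plus the compaction skip bit) in a packed bitmap. A self-contained sketch of reading and writing a masked bit range within a single word; it deliberately ignores the kernel's atomic cmpxchg loop and multi-word bitmap, so it is illustrative only:

    /* Read a small value stored at 'shift' under 'mask'. */
    static inline unsigned long get_bits(unsigned long word,
                                         unsigned int shift,
                                         unsigned long mask)
    {
        return (word >> shift) & mask;
    }

    /* Return 'word' with the masked range replaced by 'value'. */
    static inline unsigned long set_bits(unsigned long word,
                                         unsigned long value,
                                         unsigned int shift,
                                         unsigned long mask)
    {
        word &= ~(mask << shift);
        return word | ((value & mask) << shift);
    }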
D | page-isolation.h |
    9  static inline bool is_migrate_isolate_page(struct page *page)
   11  return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
   22  static inline bool is_migrate_isolate_page(struct page *page)
   32  bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
   34  void set_pageblock_migratetype(struct page *page, int migratetype);
   35  int move_freepages_block(struct zone *zone, struct page *page,
   38  struct page *start_page, struct page *end_page,
   68  struct page *alloc_migrate_target(struct page *page, unsigned long private,

D | cleancache.h |
   32  pgoff_t, struct page *);
   34  pgoff_t, struct page *);
   43  extern int __cleancache_get_page(struct page *);
   44  extern void __cleancache_put_page(struct page *);
   45  extern void __cleancache_invalidate_page(struct address_space *, struct page *);
   51  static inline bool cleancache_fs_enabled(struct page *page)
   53  return page->mapping->host->i_sb->cleancache_poolid >= 0;
   90  static inline int cleancache_get_page(struct page *page)
   94  if (cleancache_enabled && cleancache_fs_enabled(page))
   95  ret = __cleancache_get_page(page);
  [all …]

D | buffer_head.h |
   48  struct page;
   65  struct page *b_page;		/* the page this bh is mapped to */
  137  #define page_buffers(page) \
  139  BUG_ON(!PagePrivate(page)); \
  140  ((struct buffer_head *)page_private(page)); \
  142  #define page_has_buffers(page)	PagePrivate(page)
  144  void buffer_check_dirty_writeback(struct page *page,
  155  struct page *page, unsigned long offset);
  156  int try_to_free_buffers(struct page *);
  157  struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
  [all …]
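
page_buffers() above returns the head of a circular list of buffer_heads linked through b_this_page. A kernel-style sketch of the canonical walk; the function name is hypothetical, and the caller must already hold the page lock and know page_has_buffers() is true:

    #include <linux/buffer_head.h>

    /* Walk every buffer_head attached to a page. The buffers form a
     * circular ring through b_this_page, so iterate until we return
     * to the head. */
    static int count_dirty_buffers(struct page *page)
    {
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int n = 0;

        do {
            if (buffer_dirty(bh))
                n++;
            bh = bh->b_this_page;
        } while (bh != head);

        return n;
    }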
D | kmemcheck.h |
   11  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
   12  void kmemcheck_free_shadow(struct page *page, int order);
   17  void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
   20  void kmemcheck_show_pages(struct page *p, unsigned int n);
   21  void kmemcheck_hide_pages(struct page *p, unsigned int n);
   23  bool kmemcheck_page_is_tracked(struct page *p);
   30  void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
   31  void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
   32  void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
   93  kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
  [all …]

D | memcontrol.h |
   33  struct page;
  296  int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
  298  void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
  300  void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
  301  void mem_cgroup_uncharge(struct page *page);
  304  void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
  307  struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
  347  struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
  348  ino_t page_cgroup_ino(struct page *page);
  433  struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
  [all …]
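
The try/commit/cancel triple above is two-phase charging: a caller reserves the memcg charge before the page becomes visible anywhere, then either publishes it or backs out. A sketch modeled on the 4.4-era callers visible further down (huge_memory.c and userfaultfd.c both call mem_cgroup_try_charge(page, mm, gfp, &memcg)); finish_setup() is a hypothetical stand-in, and the final 'false' is assumed to be the lrucare flag for a page not yet on an LRU, so verify against the real header before reuse:

    #include <linux/memcontrol.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Hypothetical stand-in for whatever work must succeed between
     * reserving and committing the charge. */
    static int finish_setup(struct page *page) { return 0; }

    static int charge_new_page(struct page *page, struct mm_struct *mm)
    {
        struct mem_cgroup *memcg;

        if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
            return -ENOMEM;           /* nothing to undo yet */

        if (finish_setup(page)) {
            mem_cgroup_cancel_charge(page, memcg);  /* back out */
            return -EFAULT;
        }

        mem_cgroup_commit_charge(page, memcg, false);  /* publish */
        return 0;
    }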
D | hugetlb.h |
   66  struct page **, struct vm_area_struct **,
   69  unsigned long, unsigned long, struct page *);
   73  struct page *ref_page);
   76  struct page *ref_page);
   88  int dequeue_hwpoisoned_huge_page(struct page *page);
   89  bool isolate_huge_page(struct page *page, struct list_head *list);
   90  void putback_active_hugepage(struct page *page);
   91  void free_huge_page(struct page *page);
  113  struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
  115  struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
  [all …]

D | huge_mm.h |
   18  extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
   56  extern pmd_t *page_check_address_pmd(struct page *page,
   98  extern int split_huge_page_to_list(struct page *page, struct list_head *list);
   99  static inline int split_huge_page(struct page *page)
  101  return split_huge_page_to_list(page, NULL);
  143  static inline int hpage_nr_pages(struct page *page)
  145  if (unlikely(PageTransHuge(page)))
  153  extern struct page *huge_zero_page;
  155  static inline bool is_huge_zero_page(struct page *page)
  157  return ACCESS_ONCE(huge_zero_page) == page;
  [all …]

D | swapops.h |
  104  static inline swp_entry_t make_migration_entry(struct page *page, int write)
  106  BUG_ON(!PageLocked(page));
  108  page_to_pfn(page));
  122  static inline struct page *migration_entry_to_page(swp_entry_t entry)
  124  struct page *p = pfn_to_page(swp_offset(entry));
  146  #define make_migration_entry(page, write) swp_entry(0, 0)
  173  static inline swp_entry_t make_hwpoison_entry(struct page *page)
  175  BUG_ON(!PageLocked(page));
  176  return swp_entry(SWP_HWPOISON, page_to_pfn(page));
  184  static inline bool test_set_page_hwpoison(struct page *page)
  [all …]
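
make_migration_entry() above packs a page's pfn into a swp_entry_t, the same type-plus-offset encoding used for real swap slots, which is why migration_entry_to_page() can recover the page with pfn_to_page(swp_offset(entry)). A self-contained sketch of the encoding; the field width and names are illustrative, not the kernel's layout:

    /* Pack a small 'type' and a large 'offset' into one word, in the
     * spirit of swp_entry()/swp_type()/swp_offset(). */
    #define DEMO_TYPE_BITS 5

    typedef struct { unsigned long val; } demo_entry_t;

    static inline demo_entry_t demo_entry(unsigned long type,
                                          unsigned long offset)
    {
        demo_entry_t e = { (offset << DEMO_TYPE_BITS) | type };
        return e;
    }

    static inline unsigned long demo_type(demo_entry_t e)
    {
        return e.val & ((1UL << DEMO_TYPE_BITS) - 1);
    }

    static inline unsigned long demo_offset(demo_entry_t e)
    {
        return e.val >> DEMO_TYPE_BITS;
    }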
D | frontswap.h |
   10  int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
   11  int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
   27  extern int __frontswap_store(struct page *page);
   28  extern int __frontswap_load(struct page *page);
   71  static inline int frontswap_store(struct page *page)
   76  ret = __frontswap_store(page);
   80  static inline int frontswap_load(struct page *page)
   85  ret = __frontswap_load(page);

D | kasan.h |
    7  struct page;
   46  void kasan_alloc_pages(struct page *page, unsigned int order);
   47  void kasan_free_pages(struct page *page, unsigned int order);
   49  void kasan_poison_slab(struct page *page);
   72  static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
   73  static inline void kasan_free_pages(struct page *page, unsigned int order) {}
   75  static inline void kasan_poison_slab(struct page *page) {}

D | quicklist.h |
   17  void *page;
   39  p = q->page;
   41  q->page = p[0];
   56  struct page *page)
   61  *(void **)p = q->page;
   62  q->page = p;
   73  struct page *page)
   75  __quicklist_free(nr, dtor, page_address(page), page);
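
The quicklist fragments above show an intrusive freelist: the first word of each free page stores the pointer to the next free page (q->page = p[0] pops; *(void **)p = q->page pushes), so the list needs no side storage. A userspace sketch of the same technique with illustrative names:

    #include <stddef.h>

    /* Intrusive freelist: the next pointer lives inside the free
     * object itself, costing zero extra memory. */
    struct freelist { void *head; };

    static void fl_push(struct freelist *fl, void *obj)
    {
        *(void **)obj = fl->head;  /* chain old head inside the object */
        fl->head = obj;
    }

    static void *fl_pop(struct freelist *fl)
    {
        void **p = fl->head;

        if (!p)
            return NULL;
        fl->head = p[0];           /* next pointer stored in the object */
        return p;
    }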
/linux-4.4.14/mm/

D | swap.c |
   53  static void __page_cache_release(struct page *page)
   55  if (PageLRU(page)) {
   56  struct zone *zone = page_zone(page);
   61  lruvec = mem_cgroup_page_lruvec(page, zone);
   62  VM_BUG_ON_PAGE(!PageLRU(page), page);
   63  __ClearPageLRU(page);
   64  del_page_from_lru_list(page, lruvec, page_off_lru(page));
   67  mem_cgroup_uncharge(page);
   70  static void __put_single_page(struct page *page)
   72  __page_cache_release(page);
  [all …]

D | balloon_compaction.c |
   23  struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
   26  struct page *page = alloc_page(balloon_mapping_gfp_mask() |
   28  if (!page)
   36  BUG_ON(!trylock_page(page));
   38  balloon_page_insert(b_dev_info, page);
   41  unlock_page(page);
   42  return page;
   57  struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
   59  struct page *page, *tmp;
   65  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
  [all …]

D | filemap.c |
  113  struct page *page, void *shadow)
  121  VM_BUG_ON(!PageLocked(page));
  123  __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
  145  index = page->index;
  181  void __delete_from_page_cache(struct page *page, void *shadow,
  184  struct address_space *mapping = page->mapping;
  186  trace_mm_filemap_delete_from_page_cache(page);
  192  if (PageUptodate(page) && PageMappedToDisk(page))
  193  cleancache_put_page(page);
  195  cleancache_invalidate_page(mapping, page);
  [all …]

D | truncate.c |
   81  void do_invalidatepage(struct page *page, unsigned int offset,
   84  void (*invalidatepage)(struct page *, unsigned int, unsigned int);
   86  invalidatepage = page->mapping->a_ops->invalidatepage;
   92  (*invalidatepage)(page, offset, length);
  106  truncate_complete_page(struct address_space *mapping, struct page *page)
  108  if (page->mapping != mapping)
  111  if (page_has_private(page))
  112  do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
  119  cancel_dirty_page(page);
  120  ClearPageMappedToDisk(page);
  [all …]

D | migrate.c |
   85  struct page *page;
   86  struct page *page2;
   88  list_for_each_entry_safe(page, page2, l, lru) {
   89  if (unlikely(PageHuge(page))) {
   90  putback_active_hugepage(page);
   93  list_del(&page->lru);
   94  dec_zone_page_state(page, NR_ISOLATED_ANON +
   95  page_is_file_cache(page));
   96  if (unlikely(isolated_balloon_page(page)))
   97  balloon_page_putback(page);
  [all …]

D | swap_state.c |
   79  int __add_to_swap_cache(struct page *page, swp_entry_t entry)
   84  VM_BUG_ON_PAGE(!PageLocked(page), page);
   85  VM_BUG_ON_PAGE(PageSwapCache(page), page);
   86  VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
   88  page_cache_get(page);
   89  SetPageSwapCache(page);
   90  set_page_private(page, entry.val);
   95  entry.val, page);
   98  __inc_zone_page_state(page, NR_FILE_PAGES);
  110  set_page_private(page, 0UL);
  [all …]

D | internal.h |
   39  static inline void set_page_count(struct page *page, int v)
   41  atomic_set(&page->_count, v);
   62  static inline void set_page_refcounted(struct page *page)
   64  VM_BUG_ON_PAGE(PageTail(page), page);
   65  VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
   66  set_page_count(page, 1);
   69  static inline void __get_page_tail_foll(struct page *page,
   83  VM_BUG_ON_PAGE(atomic_read(&compound_head(page)->_count) <= 0, page);
   85  atomic_inc(&compound_head(page)->_count);
   86  get_huge_page_tail(page);
  [all …]

D | page_isolation.c |
   12  static int set_migratetype_isolate(struct page *page,
   21  zone = page_zone(page);
   25  pfn = page_to_pfn(page);
   49  if (!has_unmovable_pages(zone, page, arg.pages_found,
   61  int migratetype = get_pageblock_migratetype(page);
   63  set_pageblock_migratetype(page, MIGRATE_ISOLATE);
   65  nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
   76  static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
   80  struct page *isolated_page = NULL;
   83  struct page *buddy;
  [all …]

D | rmap.c |
  458  struct anon_vma *page_get_anon_vma(struct page *page)
  464  anon_mapping = (unsigned long)READ_ONCE(page->mapping);
  467  if (!page_mapped(page))
  483  if (!page_mapped(page)) {
  501  struct anon_vma *page_lock_anon_vma_read(struct page *page)
  508  anon_mapping = (unsigned long)READ_ONCE(page->mapping);
  511  if (!page_mapped(page))
  522  if (!page_mapped(page)) {
  535  if (!page_mapped(page)) {
  572  __vma_address(struct page *page, struct vm_area_struct *vma)
  [all …]

D | page_io.c |
   28  struct page *page, bio_end_io_t end_io)
   34  bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
   38  bio_add_page(bio, page, PAGE_SIZE, 0);
   46  struct page *page = bio->bi_io_vec[0].bv_page;
   49  SetPageError(page);
   58  set_page_dirty(page);
   63  ClearPageReclaim(page);
   65  end_page_writeback(page);
   71  struct page *page = bio->bi_io_vec[0].bv_page;
   74  SetPageError(page);
  [all …]

D | page_idle.c |
   30  static struct page *page_idle_get_page(unsigned long pfn)
   32  struct page *page;
   38  page = pfn_to_page(pfn);
   39  if (!page || !PageLRU(page) ||
   40  !get_page_unless_zero(page))
   43  zone = page_zone(page);
   45  if (unlikely(!PageLRU(page))) {
   46  put_page(page);
   47  page = NULL;
   50  return page;
  [all …]

D | mlock.c |
   57  void clear_page_mlock(struct page *page)
   59  if (!TestClearPageMlocked(page))
   62  mod_zone_page_state(page_zone(page), NR_MLOCK,
   63  -hpage_nr_pages(page));
   65  if (!isolate_lru_page(page)) {
   66  putback_lru_page(page);
   71  if (PageUnevictable(page))
   80  void mlock_vma_page(struct page *page)
   83  BUG_ON(!PageLocked(page));
   85  if (!TestSetPageMlocked(page)) {
  [all …]

D | debug-pagealloc.c |
   32  static inline void set_page_poison(struct page *page)
   36  page_ext = lookup_page_ext(page);
   40  static inline void clear_page_poison(struct page *page)
   44  page_ext = lookup_page_ext(page);
   48  static inline bool page_poison(struct page *page)
   52  page_ext = lookup_page_ext(page);
   56  static void poison_page(struct page *page)
   58  void *addr = kmap_atomic(page);
   60  set_page_poison(page);
   65  static void poison_pages(struct page *page, int n)
  [all …]

D | memory-failure.c |
   82  static int hwpoison_filter_dev(struct page *p)
  112  static int hwpoison_filter_flags(struct page *p)
  137  static int hwpoison_filter_task(struct page *p)
  148  static int hwpoison_filter_task(struct page *p) { return 0; }
  151  int hwpoison_filter(struct page *p)
  168  int hwpoison_filter(struct page *p)
  182  unsigned long pfn, struct page *page, int flags)
  196  si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
  221  void shake_page(struct page *p, int access)
  280  static void add_to_kill(struct task_struct *tsk, struct page *p,
  [all …]

D | page_alloc.c |
  136  static inline int get_pcppage_migratetype(struct page *page)
  138  return page->index;
  141  static inline void set_pcppage_migratetype(struct page *page, int migratetype)
  143  page->index = migratetype;
  187  static void __free_pages_ok(struct page *page, unsigned int order);
  232  static void free_compound_page(struct page *page);
  338  void set_pageblock_migratetype(struct page *page, int migratetype)
  344  set_pageblock_flags_group(page, (unsigned long)migratetype,
  349  static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  353  unsigned long pfn = page_to_pfn(page);
  [all …]

D | highmem.c |
   57  static inline unsigned int get_pkmap_color(struct page *page)
  154  struct page *kmap_to_page(void *vaddr)
  175  struct page *page;
  197  page = pte_page(pkmap_page_table[i]);
  200  set_page_address(page, NULL);
  217  static inline unsigned long map_new_virtual(struct page *page)
  222  unsigned int color = get_pkmap_color(page);
  254  if (page_address(page))
  255  return (unsigned long)page_address(page);
  263  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
  [all …]

D | zsmalloc.c |
  208  struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
  407  static int is_first_page(struct page *page)
  409  return PagePrivate(page);
  412  static int is_last_page(struct page *page)
  414  return PagePrivate2(page);
  417  static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
  421  BUG_ON(!is_first_page(page));
  423  m = (unsigned long)page->mapping;
  428  static void set_zspage_mapping(struct page *page, unsigned int class_idx,
  432  BUG_ON(!is_first_page(page));
  [all …]

D | dmapool.c |
   73  struct dma_page *page;
   89  list_for_each_entry(page, &pool->page_list, page_list) {
   91  blocks += page->in_use;
  206  static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
  217  *(int *)(page->vaddr + offset) = next;
  224  struct dma_page *page;
  226  page = kmalloc(sizeof(*page), mem_flags);
  227  if (!page)
  229  page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
  230  &page->dma, mem_flags);
  [all …]
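
pool_alloc_page() above shows dma_pool building its slabs from dma_alloc_coherent() and threading a freelist index through the unused blocks. From the consumer side, the lifecycle is create/alloc/free/destroy; a sketch with illustrative sizes (64-byte blocks, 8-byte alignment, no boundary constraint):

    #include <linux/dmapool.h>
    #include <linux/device.h>

    /* Typical dma_pool lifecycle for small, aligned DMA buffers.
     * The pool hands back both a CPU address and the matching
     * bus address. */
    static int demo_dma(struct device *dev)
    {
        struct dma_pool *pool;
        dma_addr_t handle;
        void *vaddr;

        pool = dma_pool_create("demo", dev, 64, 8, 0);
        if (!pool)
            return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
        if (!vaddr) {
            dma_pool_destroy(pool);
            return -ENOMEM;
        }

        /* ... program 'handle' into the device, use 'vaddr' ... */

        dma_pool_free(pool, vaddr, handle);
        dma_pool_destroy(pool);
        return 0;
    }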
D | ksm.c |
  365  struct page *page;
  370  page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
  371  if (IS_ERR_OR_NULL(page))
  373  if (PageKsm(page))
  378  put_page(page);
  444  static struct page *page_trans_compound_anon(struct page *page)
  446  if (PageTransCompound(page)) {
  447  struct page *head = compound_head(page);
  458  static struct page *get_mergeable_page(struct rmap_item *rmap_item)
  463  struct page *page;
  [all …]

D | slub.c |
  229  struct page *page, const void *object)
  236  base = page_address(page);
  237  if (object < base || object >= base + page->objects * s->size ||
  339  static __always_inline void slab_lock(struct page *page)
  341  bit_spin_lock(PG_locked, &page->flags);
  344  static __always_inline void slab_unlock(struct page *page)
  346  __bit_spin_unlock(PG_locked, &page->flags);
  349  static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
  351  struct page tmp;
  359  page->frozen = tmp.frozen;
  [all …]

D | page_owner.c |
   49  void __reset_page_owner(struct page *page, unsigned int order)
   55  page_ext = lookup_page_ext(page + i);
   60  void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
   62  struct page_ext *page_ext = lookup_page_ext(page);
   79  gfp_t __get_page_owner_gfp(struct page *page)
   81  struct page_ext *page_ext = lookup_page_ext(page);
   88  struct page *page, struct page_ext *page_ext)
  110  pageblock_mt = get_pfnblock_migratetype(page, pfn);
  118  PageLocked(page) ? "K" : " ",
  119  PageError(page) ? "E" : " ",
  [all …]

D | vmscan.c |
  109  #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  115  struct page *prev; \
  129  struct page *prev; \
  476  static inline int is_page_cache_freeable(struct page *page)
  483  return page_count(page) - page_has_private(page) == 2;
  510  struct page *page, int error)
  512  lock_page(page);
  513  if (page_mapping(page) == mapping)
  515  unlock_page(page);
  534  static pageout_t pageout(struct page *page, struct address_space *mapping,
  [all …]
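
lru_to_page() above is just list_entry(), i.e. container_of(): given a pointer to the list_head embedded in a struct page, subtract the member's offset to recover the containing page. A self-contained userspace sketch of the same pointer arithmetic, with illustrative names:

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; };
    struct item { int value; struct node link; };

    /* container_of() in miniature: step back from the embedded
     * member to the start of the enclosing struct. */
    #define demo_entry_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct item it = { .value = 42 };
        struct node *n = &it.link;

        printf("%d\n", demo_entry_of(n, struct item, link)->value);
        return 0;
    }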
D | huge_memory.c |
  174  struct page *huge_zero_page __read_mostly;
  176  struct page *get_huge_zero_page(void)
  178  struct page *zero_page;
  223  struct page *zero_page = xchg(&huge_zero_page, NULL);
  706  static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
  709  entry = mk_pmd(page, prot);
  717  struct page *page, gfp_t gfp,
  725  VM_BUG_ON_PAGE(!PageCompound(page), page);
  727  if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
  728  put_page(page);
  [all …]

D | hugetlb.c |
  838  static void enqueue_huge_page(struct hstate *h, struct page *page)
  840  int nid = page_to_nid(page);
  841  list_move(&page->lru, &h->hugepage_freelists[nid]);
  846  static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
  848  struct page *page;
  850  list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
  851  if (!is_migrate_isolate_page(page))
  857  if (&h->hugepage_freelists[nid] == &page->lru)
  859  list_move(&page->lru, &h->hugepage_activelist);
  860  set_page_refcounted(page);
  [all …]

D | gup.c |
   21  static struct page *no_page_table(struct vm_area_struct *vma,
   61  static struct page *follow_page_pte(struct vm_area_struct *vma,
   65  struct page *page;
  100  page = vm_normal_page(vma, address, pte);
  101  if (unlikely(!page)) {
  104  page = ERR_PTR(-EFAULT);
  109  page = pte_page(pte);
  114  page = ERR_PTR(ret);
  120  get_page_foll(page);
  123  !pte_dirty(pte) && !PageDirty(page))
  [all …]

D | readahead.c |
   35  #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
   45  struct page *page)
   47  if (page_has_private(page)) {
   48  if (!trylock_page(page))
   50  page->mapping = mapping;
   51  do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
   52  page->mapping = NULL;
   53  unlock_page(page);
   55  page_cache_release(page);
   64  struct page *victim;
  [all …]

D | kmemcheck.c |
    8  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
   10  struct page *shadow;
   29  page[i].shadow = page_address(&shadow[i]);
   36  kmemcheck_hide_pages(page, pages);
   39  void kmemcheck_free_shadow(struct page *page, int order)
   41  struct page *shadow;
   45  if (!kmemcheck_page_is_tracked(page))
   50  kmemcheck_show_pages(page, pages);
   52  shadow = virt_to_page(page[0].shadow);
   55  page[i].shadow = NULL;
  [all …]

D | compaction.c |
   44  struct page *page, *next;
   47  list_for_each_entry_safe(page, next, freelist, lru) {
   48  unsigned long pfn = page_to_pfn(page);
   49  list_del(&page->lru);
   50  __free_page(page);
   60  struct page *page;
   62  list_for_each_entry(page, list, lru) {
   63  arch_alloc_page(page, 0);
   64  kernel_map_pages(page, 1, 1);
   65  kasan_alloc_pages(page, 0);
  [all …]

D | shmem.c |
  121  static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
  122  static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  125  struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
  128  struct page **pagep, enum sgp_type sgp, int *fault_type)
  294  static int shmem_add_to_page_cache(struct page *page,
  300  VM_BUG_ON_PAGE(!PageLocked(page), page);
  301  VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
  303  page_cache_get(page);
  304  page->mapping = mapping;
  305  page->index = index;
  [all …]

D | swapfile.c |
   99  struct page *page;
  102  page = find_get_page(swap_address_space(entry), entry.val);
  103  if (!page)
  112  if (trylock_page(page)) {
  113  ret = try_to_free_swap(page);
  114  unlock_page(page);
  116  page_cache_release(page);
  862  int page_swapcount(struct page *page)
  868  entry.val = page_private(page);
  885  struct page *page;
  [all …]

D | cleancache.c |
  177  int __cleancache_get_page(struct page *page)
  188  VM_BUG_ON_PAGE(!PageLocked(page), page);
  189  pool_id = page->mapping->host->i_sb->cleancache_poolid;
  193  if (cleancache_get_key(page->mapping->host, &key) < 0)
  196  ret = cleancache_ops->get_page(pool_id, key, page->index, page);
  216  void __cleancache_put_page(struct page *page)
  226  VM_BUG_ON_PAGE(!PageLocked(page), page);
  227  pool_id = page->mapping->host->i_sb->cleancache_poolid;
  229  cleancache_get_key(page->mapping->host, &key) >= 0) {
  230  cleancache_ops->put_page(pool_id, key, page->index, page);
  [all …]

D | sparse.c |
   45  int page_to_nid(const struct page *page)
   47  return section_to_node_table[page_to_section(page)];
  209  return nr_pages * sizeof(struct page);
  217  static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
  225  struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
  229  return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
  233  unsigned long pnum, struct page *mem_map,
  372  struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
  374  struct page *map;
  377  map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
  [all …]

D | userfaultfd.c |
   25  struct page **pagep)
   32  struct page *page;
   36  page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
   37  if (!page)
   40  page_kaddr = kmap_atomic(page);
   49  *pagep = page;
   54  page = *pagep;
   63  __SetPageUptodate(page);
   66  if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg))
   69  _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
  [all …]

D | memory_hotplug.c |
   47  static void generic_online_page(struct page *page);
  158  void get_page_bootmem(unsigned long info, struct page *page,
  161  page->lru.next = (struct list_head *) type;
  162  SetPagePrivate(page);
  163  set_page_private(page, info);
  164  atomic_inc(&page->_count);
  167  void put_page_bootmem(struct page *page)
  171  type = (unsigned long) page->lru.next;
  175  if (atomic_dec_return(&page->_count) == 1) {
  176  ClearPagePrivate(page);
  [all …]

D | page-writeback.c |
  2193  struct page *page = pvec.pages[i];
  2202  if (page->index > end) {
  2211  done_index = page->index;
  2213  lock_page(page);
  2223  if (unlikely(page->mapping != mapping)) {
  2225  unlock_page(page);
  2229  if (!PageDirty(page)) {
  2234  if (PageWriteback(page)) {
  2236  wait_on_page_writeback(page);
  2241  BUG_ON(PageWriteback(page));
  [all …]

D | slab.c |
  398  static void set_obj_status(struct page *page, int idx, int val)
  402  struct kmem_cache *cachep = page->slab_cache;
  405  status = (char *)page->freelist + freelist_size;
  409  static inline unsigned int get_obj_status(struct page *page, int idx)
  413  struct kmem_cache *cachep = page->slab_cache;
  416  status = (char *)page->freelist + freelist_size;
  422  static inline void set_obj_status(struct page *page, int idx, int val) {}
  437  struct page *page = virt_to_head_page(obj);
  438  return page->slab_cache;
  441  static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
  [all …]
/linux-4.4.14/fs/9p/

D | cache.h |
   45  extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
   46  extern void __v9fs_fscache_invalidate_page(struct page *page);
   48  struct page *page);
   53  extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
   55  struct page *page);
   57  static inline int v9fs_fscache_release_page(struct page *page,
   60  return __v9fs_fscache_release_page(page, gfp);
   63  static inline void v9fs_fscache_invalidate_page(struct page *page)
   65  __v9fs_fscache_invalidate_page(page);
   69  struct page *page)
  [all …]

D | vfs_addr.c |
   52  static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
   54  struct inode *inode = page->mapping->host;
   55  struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
   61  BUG_ON(!PageLocked(page));
   63  retval = v9fs_readpage_from_fscache(inode, page);
   69  retval = p9_client_read(fid, page_offset(page), &to, &err);
   71  v9fs_uncache_page(inode, page);
   76  zero_user(page, retval, PAGE_SIZE - retval);
   77  flush_dcache_page(page);
   78  SetPageUptodate(page);
  [all …]

D | cache.c |
  280  int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
  282  struct inode *inode = page->mapping->host;
  287  return fscache_maybe_release_page(v9inode->fscache, page, gfp);
  290  void __v9fs_fscache_invalidate_page(struct page *page)
  292  struct inode *inode = page->mapping->host;
  297  if (PageFsCache(page)) {
  298  fscache_wait_on_page_write(v9inode->fscache, page);
  299  BUG_ON(!PageLocked(page));
  300  fscache_uncache_page(v9inode->fscache, page);
  304  static void v9fs_vfs_readpage_complete(struct page *page, void *data,
  [all …]
/linux-4.4.14/fs/jfs/

D | jfs_metapage.c |
   61  unlock_page(mp->page);
   63  lock_page(mp->page);
   92  #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
   94  static inline struct metapage *page_to_mp(struct page *page, int offset)
   96  if (!PagePrivate(page))
   98  return mp_anchor(page)->mp[offset >> L2PSIZE];
  101  static inline int insert_metapage(struct page *page, struct metapage *mp)
  107  if (PagePrivate(page))
  108  a = mp_anchor(page);
  113  set_page_private(page, (unsigned long)a);
  [all …]

/linux-4.4.14/fs/sysv/

D | dir.c |
   30  static inline void dir_put_page(struct page *page)
   32  kunmap(page);
   33  page_cache_release(page);
   36  static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
   38  struct address_space *mapping = page->mapping;
   42  block_write_end(NULL, mapping, pos, len, len, page, NULL);
   48  err = write_one_page(page, 1);
   50  unlock_page(page);
   54  static struct page * dir_get_page(struct inode *dir, unsigned long n)
   57  struct page *page = read_mapping_page(mapping, n, NULL);
  [all …]

/linux-4.4.14/fs/afs/

D | file.c |
   21  static int afs_readpage(struct file *file, struct page *page);
   22  static void afs_invalidatepage(struct page *page, unsigned int offset,
   24  static int afs_releasepage(struct page *page, gfp_t gfp_flags);
   25  static int afs_launder_page(struct page *page);
  108  static void afs_file_readpage_read_complete(struct page *page,
  112  _enter("%p,%p,%d", page, data, error);
  117  SetPageUptodate(page);
  118  unlock_page(page);
  125  int afs_page_filler(void *data, struct page *page)
  127  struct inode *inode = page->mapping->host;
  [all …]

D | write.c |
   20  struct page *page);
   25  int afs_set_page_dirty(struct page *page)
   28  return __set_page_dirty_nobuffers(page);
   87  loff_t pos, struct page *page)
  101  ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
  120  struct page **pagep, void **fsdata)
  124  struct page *page;
  146  page = grab_cache_page_write_begin(mapping, index, flags);
  147  if (!page) {
  151  *pagep = page;
  [all …]

/linux-4.4.14/drivers/staging/android/ion/

D | ion_page_pool.c |
   29  struct page *page = alloc_pages(pool->gfp_mask, pool->order);
   31  if (!page)
   33  ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
   35  return page;
   39  struct page *page)
   41  __free_pages(page, pool->order);
   44  static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
   47  if (PageHighMem(page)) {
   48  list_add_tail(&page->lru, &pool->high_items);
   51  list_add_tail(&page->lru, &pool->low_items);
  [all …]

D | ion_system_heap.c |
   55  static struct page *alloc_buffer_page(struct ion_system_heap *heap,
   61  struct page *page;
   64  page = ion_page_pool_alloc(pool);
   70  page = alloc_pages(gfp_flags | __GFP_COMP, order);
   71  if (!page)
   73  ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
   77  return page;
   81  struct ion_buffer *buffer, struct page *page)
   83  unsigned int order = compound_order(page);
   89  ion_page_pool_free(pool, page);
  [all …]

/linux-4.4.14/sound/pci/trident/

D | trident_memory.c |
   37  #define __set_tlb_bus(trident,page,ptr,addr) \
   38  do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
   39  (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
   40  #define __tlb_to_ptr(trident,page) \
   41  (void*)((trident)->tlb.shadow_entries[page])
   42  #define __tlb_to_addr(trident,page) \
   43  (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
   50  #define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
   52  #define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silen…
   56  #define aligned_page_offset(page) ((page) << 12)
  [all …]

/linux-4.4.14/fs/logfs/

D | file.c |
   14  struct page **pagep, void **fsdata)
   17  struct page *page;
   20  page = grab_cache_page_write_begin(mapping, index, flags);
   21  if (!page)
   23  *pagep = page;
   25  if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
   32  zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
   35  return logfs_readpage_nolock(page);
   39  loff_t pos, unsigned len, unsigned copied, struct page *page,
   43  pgoff_t index = page->index;
  [all …]

D | readwrite.c |
  205  static void prelock_page(struct super_block *sb, struct page *page, int lock)
  209  BUG_ON(!PageLocked(page));
  211  BUG_ON(PagePreLocked(page));
  212  SetPagePreLocked(page);
  215  if (PagePreLocked(page))
  218  SetPagePreLocked(page);
  222  static void preunlock_page(struct super_block *sb, struct page *page, int lock)
  226  BUG_ON(!PageLocked(page));
  228  ClearPagePreLocked(page);
  231  BUG_ON(!PagePreLocked(page));
  [all …]

D | segment.c |
   52  static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
   58  struct page *page;
   62  page = read_cache_page(mapping, index, filler, sb);
   64  page = find_or_create_page(mapping, index, GFP_NOFS);
   65  if (page)
   66  unlock_page(page);
   68  return page;
   75  struct page *page;
   84  page = get_mapping_page(area->a_sb, index, use_filler);
   85  if (IS_ERR(page))
  [all …]

/linux-4.4.14/fs/ceph/

D | addr.c |
   58  static inline struct ceph_snap_context *page_snap_context(struct page *page)
   60  if (PagePrivate(page))
   61  return (void *)page->private;
   69  static int ceph_set_page_dirty(struct page *page)
   71  struct address_space *mapping = page->mapping;
   78  return !TestSetPageDirty(page);
   80  if (PageDirty(page)) {
   82  mapping->host, page, page->index);
   83  BUG_ON(!PagePrivate(page));
  110  mapping->host, page, page->index,
  [all …]

D | cache.h |
   42  int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
   47  void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
   48  void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
   63  struct page *page)
   66  return fscache_uncache_page(ci->fscache, page);
   69  static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
   71  struct inode* inode = page->mapping->host;
   73  return fscache_maybe_release_page(ci->fscache, page, gfp);
   77  struct page *page)
   80  if (fscache_cookie_valid(ci->fscache) && PageFsCache(page))
  [all …]

/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/

D | cl_page.c |
   54  # define PASSERT(env, page, expr) \
   57  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
   62  # define PINVRNT(env, page, exp) \
   63  ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
   70  static struct cl_page *cl_page_top_trusted(struct cl_page *page)
   72  while (page->cp_parent != NULL)
   73  page = page->cp_parent;
   74  return page;
   88  static void cl_page_get_trust(struct cl_page *page)
   90  LASSERT(atomic_read(&page->cp_ref) > 0);
  [all …]

/linux-4.4.14/fs/ecryptfs/

D | mmap.c |
   47  struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
   49  struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
   50  if (!IS_ERR(page))
   51  lock_page(page);
   52  return page;
   65  static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
   69  rc = ecryptfs_encrypt_page(page);
   72  "page (upper index [0x%.16lx])\n", page->index);
   73  ClearPageUptodate(page);
   76  SetPageUptodate(page);
  [all …]

/linux-4.4.14/arch/mips/include/asm/

D | cacheflush.h |
   39  #define Page_dcache_dirty(page) \
   40  test_bit(PG_dcache_dirty, &(page)->flags)
   41  #define SetPageDcacheDirty(page) \
   42  set_bit(PG_dcache_dirty, &(page)->flags)
   43  #define ClearPageDcacheDirty(page) \
   44  clear_bit(PG_dcache_dirty, &(page)->flags)
   52  extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
   53  extern void __flush_dcache_page(struct page *page);
   56  static inline void flush_dcache_page(struct page *page)
   59  __flush_dcache_page(page);
  [all …]

D | highmem.h |
   44  extern void * kmap_high(struct page *page);
   45  extern void kunmap_high(struct page *page);
   47  extern void *kmap(struct page *page);
   48  extern void kunmap(struct page *page);
   49  extern void *kmap_atomic(struct page *page);

/linux-4.4.14/arch/avr32/mm/

D | dma-coherent.c |
   40  static struct page *__dma_alloc(struct device *dev, size_t size,
   43  struct page *page, *free, *end;
   56  page = alloc_pages(gfp, order);
   57  if (!page)
   59  split_page(page, order);
   70  invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);
   72  *handle = page_to_bus(page);
   73  free = page + (size >> PAGE_SHIFT);
   74  end = page + (1 << order);
   84  return page;
  [all …]

/linux-4.4.14/fs/nilfs2/

D | page.c |
   43  __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
   50  if (!page_has_buffers(page))
   51  create_empty_buffers(page, 1 << blkbits, b_state);
   54  bh = nilfs_page_get_nth_block(page, block - first_block);
   68  struct page *page;
   71  page = grab_cache_page(mapping, index);
   72  if (unlikely(!page))
   75  bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
   77  unlock_page(page);
   78  page_cache_release(page);
  [all …]

D | dir.c |
   58  static inline void nilfs_put_page(struct page *page)
   60  kunmap(page);
   61  page_cache_release(page);
   78  static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to)
   80  loff_t pos = page_offset(page) + from;
   81  return __block_write_begin(page, pos, to - from, nilfs_get_block);
   84  static void nilfs_commit_chunk(struct page *page,
   89  loff_t pos = page_offset(page) + from;
   94  nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
   95  copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
  [all …]

/linux-4.4.14/fs/minix/

D | dir.c |
   28  static inline void dir_put_page(struct page *page)
   30  kunmap(page);
   31  page_cache_release(page);
   48  static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
   50  struct address_space *mapping = page->mapping;
   53  block_write_end(NULL, mapping, pos, len, len, page, NULL);
   60  err = write_one_page(page, 1);
   62  unlock_page(page);
   66  static struct page * dir_get_page(struct inode *dir, unsigned long n)
   69  struct page *page = read_mapping_page(mapping, n, NULL);
  [all …]

/linux-4.4.14/fs/exofs/

D | dir.c |
   41  static inline void exofs_put_page(struct page *page)
   43  kunmap(page);
   44  page_cache_release(page);
   57  static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len)
   59  struct address_space *mapping = page->mapping;
   65  if (!PageUptodate(page))
   66  SetPageUptodate(page);
   72  set_page_dirty(page);
   75  err = write_one_page(page, 1);
   77  unlock_page(page);
  [all …]
|
D | inode.c | 55 struct page **pages; 63 struct page *that_locked_page; 113 pcol->pages = kmalloc(pages * sizeof(struct page *), in pcol_try_alloc() 137 static int pcol_add_page(struct page_collect *pcol, struct page *page, in pcol_add_page() argument 143 pcol->pages[pcol->nr_pages++] = page; in pcol_add_page() 149 static int update_read_page(struct page *page, int ret) in update_read_page() argument 154 SetPageUptodate(page); in update_read_page() 155 if (PageError(page)) in update_read_page() 156 ClearPageError(page); in update_read_page() 164 clear_highpage(page); in update_read_page() [all …]
|
/linux-4.4.14/fs/squashfs/ |
D | file_direct.c | 23 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, 24 int pages, struct page **page); 27 int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) in squashfs_readpage_block() 38 struct page **page; in squashfs_readpage_block() local 47 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); in squashfs_readpage_block() 48 if (page == NULL) in squashfs_readpage_block() 55 actor = squashfs_page_actor_init_special(page, pages, 0); in squashfs_readpage_block() 61 page[i] = (n == target_page->index) ? target_page : in squashfs_readpage_block() 64 if (page[i] == NULL) { in squashfs_readpage_block() 69 if (PageUptodate(page[i])) { in squashfs_readpage_block() [all …]
|
/linux-4.4.14/include/trace/events/ |
D | pagemap.h | 18 #define trace_pagemap_flags(page) ( \ argument 19 (PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \ 20 (page_mapped(page) ? PAGEMAP_MAPPED : 0) | \ 21 (PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \ 22 (PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \ 23 (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \ 24 (page_has_private(page) ? PAGEMAP_BUFFERS : 0) \ 30 struct page *page, 34 TP_ARGS(page, lru), 37 __field(struct page *, page ) [all …]
|
D | cma.h | 12 TP_PROTO(unsigned long pfn, const struct page *page, 15 TP_ARGS(pfn, page, count, align), 19 __field(const struct page *, page) 26 __entry->page = page; 33 __entry->page, 40 TP_PROTO(unsigned long pfn, const struct page *page, 43 TP_ARGS(pfn, page, count), 47 __field(const struct page *, page) 53 __entry->page = page; 59 __entry->page,
|
D | filemap.h | 16 TP_PROTO(struct page *page), 18 TP_ARGS(page), 28 __entry->pfn = page_to_pfn(page); 29 __entry->i_ino = page->mapping->host->i_ino; 30 __entry->index = page->index; 31 if (page->mapping->host->i_sb) 32 __entry->s_dev = page->mapping->host->i_sb->s_dev; 34 __entry->s_dev = page->mapping->host->i_rdev; 46 TP_PROTO(struct page *page), 47 TP_ARGS(page) [all …]
|
D | kmem.h | 163 TP_PROTO(struct page *page, unsigned int order), 165 TP_ARGS(page, order), 185 __entry->pfn = page_to_pfn(page); 197 TP_PROTO(struct page *page, int cold), 199 TP_ARGS(page, cold), 207 __entry->pfn = page_to_pfn(page); 219 TP_PROTO(struct page *page, unsigned int order, 222 TP_ARGS(page, order, gfp_flags, migratetype), 232 __entry->pfn = page ? page_to_pfn(page) : -1UL; 248 TP_PROTO(struct page *page, unsigned int order, int migratetype), [all …]
|
/linux-4.4.14/arch/arm/mm/ |
D | flush.c | 131 void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr, in __flush_ptrace_access() argument 143 flush_pfn_alias(page_to_pfn(page), uaddr); in __flush_ptrace_access() 152 flush_icache_alias(page_to_pfn(page), uaddr, len); in __flush_ptrace_access() 162 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument 170 __flush_ptrace_access(page, uaddr, kaddr, len, flags); in flush_ptrace_access() 173 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, in flush_uprobe_xol_access() argument 178 __flush_ptrace_access(page, uaddr, kaddr, len, flags); in flush_uprobe_xol_access() 188 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 196 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page() 202 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument [all …]
|
D | dma-mapping.c | 57 static void __dma_page_cpu_to_dev(struct page *, unsigned long, 59 static void __dma_page_dev_to_cpu(struct page *, unsigned long, 76 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, in arm_dma_map_page() argument 81 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page() 82 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_dma_map_page() 85 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, in arm_coherent_dma_map_page() argument 89 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_coherent_dma_map_page() 119 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_cpu() local 120 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu() 127 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_device() local [all …]
|
D | highmem.c | 37 void *kmap(struct page *page) in kmap() argument 40 if (!PageHighMem(page)) in kmap() 41 return page_address(page); in kmap() 42 return kmap_high(page); in kmap() 46 void kunmap(struct page *page) in kunmap() argument 49 if (!PageHighMem(page)) in kunmap() 51 kunmap_high(page); in kunmap() 55 void *kmap_atomic(struct page *page) in kmap_atomic() argument 64 if (!PageHighMem(page)) in kmap_atomic() 65 return page_address(page); in kmap_atomic() [all …]
|
/linux-4.4.14/arch/xtensa/mm/ |
D | cache.c | 63 static inline void kmap_invalidate_coherent(struct page *page, in kmap_invalidate_coherent() argument 66 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { in kmap_invalidate_coherent() 69 if (!PageHighMem(page)) { in kmap_invalidate_coherent() 70 kvaddr = (unsigned long)page_to_virt(page); in kmap_invalidate_coherent() 75 (page_to_phys(page) & DCACHE_ALIAS_MASK); in kmap_invalidate_coherent() 78 page_to_phys(page)); in kmap_invalidate_coherent() 83 static inline void *coherent_kvaddr(struct page *page, unsigned long base, in coherent_kvaddr() argument 86 if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { in coherent_kvaddr() 87 *paddr = page_to_phys(page); in coherent_kvaddr() 91 return page_to_virt(page); in coherent_kvaddr() [all …]
|
/linux-4.4.14/fs/ubifs/ |
D | file.c | 103 static int do_readpage(struct page *page) in do_readpage() argument 109 struct inode *inode = page->mapping->host; in do_readpage() 113 inode->i_ino, page->index, i_size, page->flags); in do_readpage() 114 ubifs_assert(!PageChecked(page)); in do_readpage() 115 ubifs_assert(!PagePrivate(page)); in do_readpage() 117 addr = kmap(page); in do_readpage() 119 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in do_readpage() 123 SetPageChecked(page); in do_readpage() 165 SetPageChecked(page); in do_readpage() 170 page->index, inode->i_ino, err); in do_readpage() [all …]
|
/linux-4.4.14/fs/ext2/ |
D | dir.c | 67 static inline void ext2_put_page(struct page *page) in ext2_put_page() argument 69 kunmap(page); in ext2_put_page() 70 page_cache_release(page); in ext2_put_page() 88 static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) in ext2_commit_chunk() argument 90 struct address_space *mapping = page->mapping; in ext2_commit_chunk() 95 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ext2_commit_chunk() 103 err = write_one_page(page, 1); in ext2_commit_chunk() 107 unlock_page(page); in ext2_commit_chunk() 113 static void ext2_check_page(struct page *page, int quiet) in ext2_check_page() argument 115 struct inode *dir = page->mapping->host; in ext2_check_page() [all …]
|
/linux-4.4.14/fs/ufs/ |
D | dir.c | 43 static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) in ufs_commit_chunk() argument 45 struct address_space *mapping = page->mapping; in ufs_commit_chunk() 50 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ufs_commit_chunk() 56 err = write_one_page(page, 1); in ufs_commit_chunk() 58 unlock_page(page); in ufs_commit_chunk() 62 static inline void ufs_put_page(struct page *page) in ufs_put_page() argument 64 kunmap(page); in ufs_put_page() 65 page_cache_release(page); in ufs_put_page() 72 struct page *page; in ufs_inode_by_name() local 74 de = ufs_find_entry(dir, qstr, &page); in ufs_inode_by_name() [all …]
|
/linux-4.4.14/Documentation/trace/ |
D | ring-buffer-design.txt | 29 reader_page - A page outside the ring buffer used solely (for the most part) 32 head_page - a pointer to the page that the reader will use next 34 tail_page - a pointer to the page that will be written to next 36 commit_page - a pointer to the page with the last finished non-nested write. 93 At initialization a reader page is allocated for the reader that is not 97 to the same page. 99 The reader page is initialized to have its next pointer pointing to 100 the head page, and its previous pointer pointing to a page before 101 the head page. 103 The reader has its own page to use. At start up time, this page is [all …]
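The design text above hinges on one trick: the reader owns a spare page outside the ring and swaps it for the head page, so readers never block writers. A compact userspace sketch of that swap on a doubly linked ring (illustrative only; the kernel performs this locklessly with cmpxchg):

    #include <stddef.h>

    struct rb_page {
            struct rb_page *next, *prev;
            size_t len;                     /* bytes committed on this page */
    };

    struct ring {
            struct rb_page *head;           /* next page the reader takes */
            struct rb_page *reader;         /* spare page owned by the reader */
    };

    /* Swap the reader's spare page into the ring in place of the head
     * page; the old head becomes the reader's private copy to consume. */
    static struct rb_page *reader_swap(struct ring *r)
    {
            struct rb_page *head = r->head, *spare = r->reader;

            if (head->len == 0)
                    return NULL;            /* nothing committed yet */

            spare->next = head->next;
            spare->prev = head->prev;
            head->prev->next = spare;       /* splice spare into the ring */
            head->next->prev = spare;

            r->head = spare->next;          /* head pointer moves forward */
            r->reader = head;               /* reader now owns the old head */
            head->next = head->prev = NULL;
            return head;
    }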
|
/linux-4.4.14/net/ceph/ |
D | pagelist.c | 11 struct page *page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_unmap_tail() local 12 kunmap(page); in ceph_pagelist_unmap_tail() 23 struct page *page = list_first_entry(&pl->head, struct page, in ceph_pagelist_release() local 25 list_del(&page->lru); in ceph_pagelist_release() 26 __free_page(page); in ceph_pagelist_release() 35 struct page *page; in ceph_pagelist_addpage() local 38 page = __page_cache_alloc(GFP_NOFS); in ceph_pagelist_addpage() 40 page = list_first_entry(&pl->free_list, struct page, lru); in ceph_pagelist_addpage() 41 list_del(&page->lru); in ceph_pagelist_addpage() 44 if (!page) in ceph_pagelist_addpage() [all …]
|
/linux-4.4.14/fs/f2fs/ |
D | data.c | 45 struct page *page = bvec->bv_page; in f2fs_read_end_io() local 48 SetPageUptodate(page); in f2fs_read_end_io() 50 ClearPageUptodate(page); in f2fs_read_end_io() 51 SetPageError(page); in f2fs_read_end_io() 53 unlock_page(page); in f2fs_read_end_io() 65 struct page *page = bvec->bv_page; in f2fs_write_end_io() local 67 f2fs_restore_and_release_control_page(&page); in f2fs_write_end_io() 70 set_page_dirty(page); in f2fs_write_end_io() 71 set_bit(AS_EIO, &page->mapping->flags); in f2fs_write_end_io() 74 end_page_writeback(page); in f2fs_write_end_io() [all …]
|
D | inline.c | 48 void read_inline_data(struct page *page, struct page *ipage) in read_inline_data() argument 52 if (PageUptodate(page)) in read_inline_data() 55 f2fs_bug_on(F2FS_P_SB(page), page->index); in read_inline_data() 57 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); in read_inline_data() 61 dst_addr = kmap_atomic(page); in read_inline_data() 63 flush_dcache_page(page); in read_inline_data() 65 SetPageUptodate(page); in read_inline_data() 68 bool truncate_inline_inode(struct page *ipage, u64 from) in truncate_inline_inode() 83 int f2fs_read_inline_data(struct inode *inode, struct page *page) in f2fs_read_inline_data() argument 85 struct page *ipage; in f2fs_read_inline_data() [all …]
|
D | node.h | 215 static inline void fill_node_footer(struct page *page, nid_t nid, in fill_node_footer() argument 218 struct f2fs_node *rn = F2FS_NODE(page); in fill_node_footer() 234 static inline void copy_node_footer(struct page *dst, struct page *src) in copy_node_footer() 241 static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) in fill_node_footer_blkaddr() argument 243 struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); in fill_node_footer_blkaddr() 244 struct f2fs_node *rn = F2FS_NODE(page); in fill_node_footer_blkaddr() 250 static inline nid_t ino_of_node(struct page *node_page) in ino_of_node() 256 static inline nid_t nid_of_node(struct page *node_page) in nid_of_node() 262 static inline unsigned int ofs_of_node(struct page *node_page) in ofs_of_node() 269 static inline unsigned long long cpver_of_node(struct page *node_page) in cpver_of_node() [all …]
|
D | dir.c | 79 static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, in find_in_block() 83 struct page **res_page) in find_in_block() 161 struct page **res_page) in find_in_level() 167 struct page *dentry_page; in find_in_level() 217 struct qstr *child, struct page **res_page) in f2fs_find_entry() 252 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) in f2fs_parent_dir() 254 struct page *page; in f2fs_parent_dir() local 261 page = get_lock_data_page(dir, 0, false); in f2fs_parent_dir() 262 if (IS_ERR(page)) in f2fs_parent_dir() 265 dentry_blk = kmap(page); in f2fs_parent_dir() [all …]
|
D | node.c | 79 static void clear_node_page_dirty(struct page *page) in clear_node_page_dirty() argument 81 struct address_space *mapping = page->mapping; in clear_node_page_dirty() 84 if (PageDirty(page)) { in clear_node_page_dirty() 87 page_index(page), in clear_node_page_dirty() 91 clear_page_dirty_for_io(page); in clear_node_page_dirty() 94 ClearPageUptodate(page); in clear_node_page_dirty() 97 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page() 103 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_page() 105 struct page *src_page; in get_next_nat_page() 106 struct page *dst_page; in get_next_nat_page() [all …]
|
D | recovery.c | 70 static int recover_dentry(struct inode *inode, struct page *ipage) in recover_dentry() 76 struct page *page; in recover_dentry() local 100 de = f2fs_find_entry(dir, &name, &page); in recover_dentry() 118 f2fs_delete_entry(de, page, dir, einode); in recover_dentry() 136 f2fs_dentry_kunmap(dir, page); in recover_dentry() 137 f2fs_put_page(page, 0); in recover_dentry() 148 static void recover_inode(struct inode *inode, struct page *page) in recover_inode() argument 150 struct f2fs_inode *raw = F2FS_INODE(page); in recover_inode() 165 name = F2FS_INODE(page)->i_name; in recover_inode() 168 ino_of_node(page), name); in recover_inode() [all …]
|
D | checkpoint.c | 32 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in grab_meta_page() 35 struct page *page = NULL; in grab_meta_page() local 37 page = grab_cache_page(mapping, index); in grab_meta_page() 38 if (!page) { in grab_meta_page() 42 f2fs_wait_on_page_writeback(page, META); in grab_meta_page() 43 SetPageUptodate(page); in grab_meta_page() 44 return page; in grab_meta_page() 50 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, in __get_meta_page() 54 struct page *page; in __get_meta_page() local 66 page = grab_cache_page(mapping, index); in __get_meta_page() [all …]
|
D | crypto.c | 158 struct page *page = bv->bv_page; in completion_pages() local 159 int ret = f2fs_decrypt(ctx, page); in completion_pages() 163 SetPageError(page); in completion_pages() 165 SetPageUptodate(page); in completion_pages() 166 unlock_page(page); in completion_pages() 273 void f2fs_restore_and_release_control_page(struct page **page) in f2fs_restore_and_release_control_page() argument 276 struct page *bounce_page; in f2fs_restore_and_release_control_page() 279 if ((*page)->mapping) in f2fs_restore_and_release_control_page() 283 bounce_page = *page; in f2fs_restore_and_release_control_page() 287 *page = ctx->w.control_page; in f2fs_restore_and_release_control_page() [all …]
|
/linux-4.4.14/arch/parisc/include/asm/ |
D | cacheflush.h | 41 static inline void flush_kernel_dcache_page(struct page *page) in flush_kernel_dcache_page() argument 43 flush_kernel_dcache_page_addr(page_address(page)); in flush_kernel_dcache_page() 63 struct page *page = vmalloc_to_page(cursor); in invalidate_kernel_vmap_range() local 65 if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) in invalidate_kernel_vmap_range() 66 flush_kernel_dcache_page(page); in invalidate_kernel_vmap_range() 75 extern void flush_dcache_page(struct page *page); 82 #define flush_icache_page(vma,page) do { \ argument 83 flush_kernel_dcache_page(page); \ 84 flush_kernel_icache_page(page_address(page)); \ 92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument [all …]
|
/linux-4.4.14/arch/tile/mm/ |
D | highmem.c | 25 void *kmap(struct page *page) in kmap() argument 32 if (!PageHighMem(page)) in kmap() 33 return page_address(page); in kmap() 34 kva = kmap_high(page); in kmap() 42 set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); in kmap() 49 void kunmap(struct page *page) in kunmap() argument 53 if (!PageHighMem(page)) in kunmap() 55 kunmap_high(page); in kunmap() 65 struct page *page; member 96 static void kmap_atomic_register(struct page *page, int type, in kmap_atomic_register() argument [all …]
|
D | homecache.c | 174 void homecache_finv_map_page(struct page *page, int home) in homecache_finv_map_page() argument 191 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); in homecache_finv_map_page() 202 static void homecache_finv_page_home(struct page *page, int home) in homecache_finv_page_home() argument 204 if (!PageHighMem(page) && home == page_home(page)) in homecache_finv_page_home() 205 homecache_finv_page_va(page_address(page), home); in homecache_finv_page_home() 207 homecache_finv_map_page(page, home); in homecache_finv_page_home() 215 static void homecache_finv_page_internal(struct page *page, int force_map) in homecache_finv_page_internal() argument 217 int home = page_home(page); in homecache_finv_page_internal() 223 homecache_finv_map_page(page, cpu); in homecache_finv_page_internal() 226 homecache_finv_map_page(page, home); in homecache_finv_page_internal() [all …]
|
/linux-4.4.14/drivers/xen/ |
D | balloon.c | 162 static void scrub_page(struct page *page) in scrub_page() argument 165 clear_highpage(page); in scrub_page() 170 static void __balloon_append(struct page *page) in __balloon_append() argument 173 if (PageHighMem(page)) { in __balloon_append() 174 list_add_tail(&page->lru, &ballooned_pages); in __balloon_append() 177 list_add(&page->lru, &ballooned_pages); in __balloon_append() 183 static void balloon_append(struct page *page) in balloon_append() argument 185 __balloon_append(page); in balloon_append() 186 adjust_managed_page_count(page, -1); in balloon_append() 190 static struct page *balloon_retrieve(bool require_lowmem) in balloon_retrieve() [all …]
|
/linux-4.4.14/fs/ |
D | buffer.c | 87 void buffer_check_dirty_writeback(struct page *page, in buffer_check_dirty_writeback() argument 94 BUG_ON(!PageLocked(page)); in buffer_check_dirty_writeback() 96 if (!page_has_buffers(page)) in buffer_check_dirty_writeback() 99 if (PageWriteback(page)) in buffer_check_dirty_writeback() 102 head = page_buffers(page); in buffer_check_dirty_writeback() 128 __clear_page_buffers(struct page *page) in __clear_page_buffers() argument 130 ClearPagePrivate(page); in __clear_page_buffers() 131 set_page_private(page, 0); in __clear_page_buffers() 132 page_cache_release(page); in __clear_page_buffers() 210 struct page *page; in __find_get_block_slow() local [all …]
|
D | mpage.c | 51 struct page *page = bv->bv_page; in mpage_end_io() local 52 page_endio(page, bio_data_dir(bio), bio->bi_error); in mpage_end_io() 98 map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) in map_buffer_to_page() argument 100 struct inode *inode = page->mapping->host; in map_buffer_to_page() 104 if (!page_has_buffers(page)) { in map_buffer_to_page() 111 SetPageUptodate(page); in map_buffer_to_page() 114 create_empty_buffers(page, 1 << inode->i_blkbits, 0); in map_buffer_to_page() 116 head = page_buffers(page); in map_buffer_to_page() 140 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, in do_mpage_readpage() argument 145 struct inode *inode = page->mapping->host; in do_mpage_readpage() [all …]
|
/linux-4.4.14/fs/nfs/ |
D | fscache.h | 83 extern void __nfs_fscache_invalidate_page(struct page *, struct inode *); 84 extern int nfs_fscache_release_page(struct page *, gfp_t); 87 struct inode *, struct page *); 91 extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int); 97 struct page *page) in nfs_fscache_wait_on_page_write() argument 99 if (PageFsCache(page)) in nfs_fscache_wait_on_page_write() 100 fscache_wait_on_page_write(nfsi->fscache, page); in nfs_fscache_wait_on_page_write() 107 static inline void nfs_fscache_invalidate_page(struct page *page, in nfs_fscache_invalidate_page() argument 110 if (PageFsCache(page)) in nfs_fscache_invalidate_page() 111 __nfs_fscache_invalidate_page(page, inode); in nfs_fscache_invalidate_page() [all …]
|
D | symlink.c | 28 static int nfs_symlink_filler(struct inode *inode, struct page *page) in nfs_symlink_filler() argument 32 error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); in nfs_symlink_filler() 35 SetPageUptodate(page); in nfs_symlink_filler() 36 unlock_page(page); in nfs_symlink_filler() 40 SetPageError(page); in nfs_symlink_filler() 41 unlock_page(page); in nfs_symlink_filler() 48 struct page *page; in nfs_follow_link() local 54 page = read_cache_page(&inode->i_data, 0, in nfs_follow_link() 56 if (IS_ERR(page)) in nfs_follow_link() 57 return ERR_CAST(page); in nfs_follow_link() [all …]
|
D | file.c | 317 static int nfs_want_read_modify_write(struct file *file, struct page *page, in nfs_want_read_modify_write() argument 320 unsigned int pglen = nfs_page_length(page); in nfs_want_read_modify_write() 325 if (!PageUptodate(page)) in nfs_want_read_modify_write() 331 !PageUptodate(page) && /* Uptodate? */ in nfs_want_read_modify_write() 332 !PagePrivate(page) && /* i/o request already? */ in nfs_want_read_modify_write() 349 struct page **pagep, void **fsdata) in nfs_write_begin() 353 struct page *page; in nfs_write_begin() local 373 page = grab_cache_page_write_begin(mapping, index, flags); in nfs_write_begin() 374 if (!page) in nfs_write_begin() 376 *pagep = page; in nfs_write_begin() [all …]
|
D | fscache.c | 260 int nfs_fscache_release_page(struct page *page, gfp_t gfp) in nfs_fscache_release_page() argument 262 if (PageFsCache(page)) { in nfs_fscache_release_page() 263 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host); in nfs_fscache_release_page() 267 cookie, page, NFS_I(page->mapping->host)); in nfs_fscache_release_page() 269 if (!fscache_maybe_release_page(cookie, page, gfp)) in nfs_fscache_release_page() 272 nfs_inc_fscache_stats(page->mapping->host, in nfs_fscache_release_page() 283 void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode) in __nfs_fscache_invalidate_page() argument 290 cookie, page, NFS_I(inode)); in __nfs_fscache_invalidate_page() 292 fscache_wait_on_page_write(cookie, page); in __nfs_fscache_invalidate_page() 294 BUG_ON(!PageLocked(page)); in __nfs_fscache_invalidate_page() [all …]
|
D | read.c | 47 int nfs_return_empty_page(struct page *page) in nfs_return_empty_page() argument 49 zero_user(page, 0, PAGE_CACHE_SIZE); in nfs_return_empty_page() 50 SetPageUptodate(page); in nfs_return_empty_page() 51 unlock_page(page); in nfs_return_empty_page() 89 struct page *page) in nfs_readpage_async() argument 96 len = nfs_page_length(page); in nfs_readpage_async() 98 return nfs_return_empty_page(page); in nfs_readpage_async() 99 new = nfs_create_request(ctx, page, NULL, 0, len); in nfs_readpage_async() 101 unlock_page(page); in nfs_readpage_async() 105 zero_user_segment(page, len, PAGE_CACHE_SIZE); in nfs_readpage_async() [all …]
|
/linux-4.4.14/arch/microblaze/include/asm/ |
D | highmem.h | 53 extern void *kmap_high(struct page *page); 54 extern void kunmap_high(struct page *page); 55 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); 58 static inline void *kmap(struct page *page) in kmap() argument 61 if (!PageHighMem(page)) in kmap() 62 return page_address(page); in kmap() 63 return kmap_high(page); in kmap() 66 static inline void kunmap(struct page *page) in kunmap() argument 69 if (!PageHighMem(page)) in kunmap() 71 kunmap_high(page); in kunmap() [all …]
|
/linux-4.4.14/fs/gfs2/ |
D | aops.c | 41 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, in gfs2_page_add_databufs() argument 44 struct buffer_head *head = page_buffers(page); in gfs2_page_add_databufs() 97 static int gfs2_writepage_common(struct page *page, in gfs2_writepage_common() argument 100 struct inode *inode = page->mapping->host; in gfs2_writepage_common() 113 if (page->index > end_index || (page->index == end_index && !offset)) { in gfs2_writepage_common() 114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in gfs2_writepage_common() 119 redirty_page_for_writepage(wbc, page); in gfs2_writepage_common() 121 unlock_page(page); in gfs2_writepage_common() 132 static int gfs2_writepage(struct page *page, struct writeback_control *wbc) in gfs2_writepage() argument 136 ret = gfs2_writepage_common(page, wbc); in gfs2_writepage() [all …]
|
/linux-4.4.14/arch/tile/include/asm/ |
D | highmem.h | 53 void *kmap_high(struct page *page); 54 void kunmap_high(struct page *page); 55 void *kmap(struct page *page); 56 void kunmap(struct page *page); 57 void *kmap_fix_kpte(struct page *page, int finished); 60 #define kmap_prot page_to_kpgprot(page) 62 void *kmap_atomic(struct page *page); 66 void *kmap_atomic_prot(struct page *page, pgprot_t prot); 67 void kmap_atomic_fix_kpte(struct page *page, int finished);
|
D | homecache.h | 23 struct page; 68 extern void homecache_change_page_home(struct page *, int order, int home); 76 extern void homecache_finv_page(struct page *); 83 extern void homecache_finv_map_page(struct page *, int home); 92 extern struct page *homecache_alloc_pages(gfp_t gfp_mask, 94 extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, 105 void __homecache_free_pages(struct page *, unsigned int order); 107 #define __homecache_free_page(page) __homecache_free_pages((page), 0) argument 108 #define homecache_free_page(page) homecache_free_pages((page), 0) argument 115 extern int page_home(struct page *);
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | highmem.h | 61 extern void *kmap_high(struct page *page); 62 extern void kunmap_high(struct page *page); 63 extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); 66 static inline void *kmap(struct page *page) in kmap() argument 69 if (!PageHighMem(page)) in kmap() 70 return page_address(page); in kmap() 71 return kmap_high(page); in kmap() 74 static inline void kunmap(struct page *page) in kunmap() argument 77 if (!PageHighMem(page)) in kunmap() 79 kunmap_high(page); in kunmap() [all …]
|
/linux-4.4.14/fs/cifs/ |
D | fscache.h | 50 extern void __cifs_fscache_invalidate_page(struct page *, struct inode *); 51 extern int cifs_fscache_release_page(struct page *page, gfp_t gfp); 52 extern int __cifs_readpage_from_fscache(struct inode *, struct page *); 59 extern void __cifs_readpage_to_fscache(struct inode *, struct page *); 61 static inline void cifs_fscache_invalidate_page(struct page *page, in cifs_fscache_invalidate_page() argument 64 if (PageFsCache(page)) in cifs_fscache_invalidate_page() 65 __cifs_fscache_invalidate_page(page, inode); in cifs_fscache_invalidate_page() 69 struct page *page) in cifs_readpage_from_fscache() argument 72 return __cifs_readpage_from_fscache(inode, page); in cifs_readpage_from_fscache() 89 struct page *page) in cifs_readpage_to_fscache() argument [all …]
|
D | fscache.c | 128 int cifs_fscache_release_page(struct page *page, gfp_t gfp) in cifs_fscache_release_page() argument 130 if (PageFsCache(page)) { in cifs_fscache_release_page() 131 struct inode *inode = page->mapping->host; in cifs_fscache_release_page() 135 __func__, page, cifsi->fscache); in cifs_fscache_release_page() 136 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) in cifs_fscache_release_page() 143 static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, in cifs_readpage_from_fscache_complete() argument 146 cifs_dbg(FYI, "%s: (0x%p/%d)\n", __func__, page, error); in cifs_readpage_from_fscache_complete() 148 SetPageUptodate(page); in cifs_readpage_from_fscache_complete() 149 unlock_page(page); in cifs_readpage_from_fscache_complete() 155 int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) in __cifs_readpage_from_fscache() argument [all …]
|
/linux-4.4.14/fs/ntfs/ |
D | aops.h | 40 static inline void ntfs_unmap_page(struct page *page) in ntfs_unmap_page() argument 42 kunmap(page); in ntfs_unmap_page() 43 page_cache_release(page); in ntfs_unmap_page() 86 static inline struct page *ntfs_map_page(struct address_space *mapping, in ntfs_map_page() 89 struct page *page = read_mapping_page(mapping, index, NULL); in ntfs_map_page() local 91 if (!IS_ERR(page)) { in ntfs_map_page() 92 kmap(page); in ntfs_map_page() 93 if (!PageError(page)) in ntfs_map_page() 94 return page; in ntfs_map_page() 95 ntfs_unmap_page(page); in ntfs_map_page() [all …]
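ntfs_map_page() above couples read_mapping_page() with kmap(), and ntfs_unmap_page() undoes both, so every successful map must be balanced. A hedged kernel-context sketch of a caller (the function name is invented):

    /* Sketch: read one byte through the ntfs_map_page()/ntfs_unmap_page()
     * pairing shown above; on success the page comes back kmapped. */
    static int peek_first_byte(struct address_space *mapping, u8 *out)
    {
            struct page *page = ntfs_map_page(mapping, 0);

            if (IS_ERR(page))
                    return PTR_ERR(page);   /* read failed or page had errors */
            *out = *(u8 *)page_address(page);
            ntfs_unmap_page(page);          /* kunmap + page_cache_release */
            return 0;
    }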
|
D | compress.c | 100 static void zero_partial_compressed_page(struct page *page, in zero_partial_compressed_page() argument 103 u8 *kp = page_address(page); in zero_partial_compressed_page() 107 if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { in zero_partial_compressed_page() 123 static inline void handle_bounds_compressed_page(struct page *page, in handle_bounds_compressed_page() argument 126 if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && in handle_bounds_compressed_page() 128 zero_partial_compressed_page(page, initialized_size); in handle_bounds_compressed_page() 169 static int ntfs_decompress(struct page *dest_pages[], int *dest_index, in ntfs_decompress() 185 struct page *dp; /* Current destination page being worked on. */ in ntfs_decompress() 482 int ntfs_read_compressed_block(struct page *page) in ntfs_read_compressed_block() argument 486 struct address_space *mapping = page->mapping; in ntfs_read_compressed_block() [all …]
|
D | aops.c | 62 struct page *page; in ntfs_end_buffer_async_read() local 67 page = bh->b_page; in ntfs_end_buffer_async_read() 68 vi = page->mapping->host; in ntfs_end_buffer_async_read() 77 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + in ntfs_end_buffer_async_read() 96 kaddr = kmap_atomic(page); in ntfs_end_buffer_async_read() 99 flush_dcache_page(page); in ntfs_end_buffer_async_read() 105 SetPageError(page); in ntfs_end_buffer_async_read() 109 first = page_buffers(page); in ntfs_end_buffer_async_read() 137 if (likely(page_uptodate && !PageError(page))) in ntfs_end_buffer_async_read() 138 SetPageUptodate(page); in ntfs_end_buffer_async_read() [all …]
|
D | bitmap.c | 53 struct page *page; in __ntfs_bitmap_set_bits_in_run() local 75 page = ntfs_map_page(mapping, index); in __ntfs_bitmap_set_bits_in_run() 76 if (IS_ERR(page)) { in __ntfs_bitmap_set_bits_in_run() 79 "%li), aborting.", PTR_ERR(page)); in __ntfs_bitmap_set_bits_in_run() 80 return PTR_ERR(page); in __ntfs_bitmap_set_bits_in_run() 82 kaddr = page_address(page); in __ntfs_bitmap_set_bits_in_run() 124 flush_dcache_page(page); in __ntfs_bitmap_set_bits_in_run() 125 set_page_dirty(page); in __ntfs_bitmap_set_bits_in_run() 126 ntfs_unmap_page(page); in __ntfs_bitmap_set_bits_in_run() 127 page = ntfs_map_page(mapping, ++index); in __ntfs_bitmap_set_bits_in_run() [all …]
|
/linux-4.4.14/arch/frv/mm/ |
D | pgalloc.c | 33 struct page *page; in pte_alloc_one() local 36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); in pte_alloc_one() 38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); in pte_alloc_one() 40 if (!page) in pte_alloc_one() 43 clear_highpage(page); in pte_alloc_one() 44 if (!pgtable_page_ctor(page)) { in pte_alloc_one() 45 __free_page(page); in pte_alloc_one() 48 flush_dcache_page(page); in pte_alloc_one() 49 return page; in pte_alloc_one() 87 struct page *pgd_list; [all …]
|
D | highmem.c | 14 void *kmap(struct page *page) in kmap() argument 17 if (!PageHighMem(page)) in kmap() 18 return page_address(page); in kmap() 19 return kmap_high(page); in kmap() 24 void kunmap(struct page *page) in kunmap() argument 28 if (!PageHighMem(page)) in kunmap() 30 kunmap_high(page); in kunmap() 35 void *kmap_atomic(struct page *page) in kmap_atomic() argument 43 paddr = page_to_phys(page); in kmap_atomic()
|
/linux-4.4.14/drivers/target/iscsi/ |
D | iscsi_target_stat.c | 60 char *page) in iscsi_stat_instance_inst_show() argument 62 return snprintf(page, PAGE_SIZE, "%u\n", in iscsi_stat_instance_inst_show() 67 char *page) in iscsi_stat_instance_min_ver_show() argument 69 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); in iscsi_stat_instance_min_ver_show() 73 char *page) in iscsi_stat_instance_max_ver_show() argument 75 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); in iscsi_stat_instance_max_ver_show() 79 char *page) in iscsi_stat_instance_portals_show() argument 81 return snprintf(page, PAGE_SIZE, "%u\n", in iscsi_stat_instance_portals_show() 86 char *page) in iscsi_stat_instance_nodes_show() argument 88 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES); in iscsi_stat_instance_nodes_show() [all …]
|
/linux-4.4.14/sound/pci/emu10k1/ |
D | memory.c | 36 #define __set_ptb_entry(emu,page,addr) \ argument 37 (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page))) 45 #define aligned_page_offset(page) ((page) << PAGE_SHIFT) argument 50 #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr) argument 52 #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr) argument 55 static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) in set_ptb_entry() argument 58 page *= UNIT_PAGES; in set_ptb_entry() 59 for (i = 0; i < UNIT_PAGES; i++, page++) { in set_ptb_entry() 60 __set_ptb_entry(emu, page, addr); in set_ptb_entry() 64 static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page) in set_silent_ptb() argument [all …]
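__set_ptb_entry() above packs one page-table word as (addr << address_mode) | page: with the 4 KiB audio page implied by aligned_page_offset(), the low 12 bits of the address are zero, so the page index rides in the low bits without colliding. A standalone sketch of the encoding (values illustrative; the address_mode semantics are assumed from the macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the macro: entry = (addr << address_mode) | page. */
    static uint32_t ptb_entry(uint32_t addr, unsigned address_mode,
                              unsigned page)
    {
            return (addr << address_mode) | page;
    }

    int main(void)
    {
            uint32_t addr = 0x00345000;     /* 4 KiB-aligned DMA address */

            /* address_mode 0 vs 1, same page index in the low bits */
            printf("0x%08x 0x%08x\n",
                   ptb_entry(addr, 0, 7), ptb_entry(addr, 1, 7));
            return 0;
    }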
|
/linux-4.4.14/arch/xtensa/include/asm/ |
D | highmem.h | 32 static inline int get_pkmap_color(struct page *page) in get_pkmap_color() argument 34 return DCACHE_ALIAS(page_to_phys(page)); in get_pkmap_color() 66 void *kmap_high(struct page *page); 67 void kunmap_high(struct page *page); 69 static inline void *kmap(struct page *page) in kmap() argument 72 if (!PageHighMem(page)) in kmap() 73 return page_address(page); in kmap() 74 return kmap_high(page); in kmap() 77 static inline void kunmap(struct page *page) in kunmap() argument 80 if (!PageHighMem(page)) in kunmap() [all …]
|
/linux-4.4.14/fs/ext4/ |
D | readpage.c | 63 struct page *page = bv->bv_page; in completion_pages() local 65 int ret = ext4_decrypt(page); in completion_pages() 68 SetPageError(page); in completion_pages() 70 SetPageUptodate(page); in completion_pages() 71 unlock_page(page); in completion_pages() 119 struct page *page = bv->bv_page; in mpage_end_io() local 122 SetPageUptodate(page); in mpage_end_io() 124 ClearPageUptodate(page); in mpage_end_io() 125 SetPageError(page); in mpage_end_io() 127 unlock_page(page); in mpage_end_io() [all …]
|
/linux-4.4.14/arch/nios2/mm/ |
D | cacheflush.c | 73 static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument 79 pgoff = page->index; in flush_aliases() 92 page_to_pfn(page)); in flush_aliases() 140 void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument 142 unsigned long start = (unsigned long) page_address(page); in flush_icache_page() 160 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 167 unsigned long start = (unsigned long)page_address(page); in __flush_dcache_page() 172 void flush_dcache_page(struct page *page) in flush_dcache_page() argument 180 if (page == ZERO_PAGE(0)) in flush_dcache_page() 183 mapping = page_mapping(page); in flush_dcache_page() [all …]
|
/linux-4.4.14/drivers/video/fbdev/core/ |
D | fb_defio.c | 26 static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) in fb_deferred_io_page() 29 struct page *page; in fb_deferred_io_page() local 32 page = vmalloc_to_page(screen_base + offs); in fb_deferred_io_page() 34 page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); in fb_deferred_io_page() 36 return page; in fb_deferred_io_page() 44 struct page *page; in fb_deferred_io_fault() local 51 page = fb_deferred_io_page(info, offset); in fb_deferred_io_fault() 52 if (!page) in fb_deferred_io_fault() 55 get_page(page); in fb_deferred_io_fault() 58 page->mapping = vma->vm_file->f_mapping; in fb_deferred_io_fault() [all …]
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | cacheflush_64.h | 25 #define flush_cache_page(vma, page, pfn) \ argument 37 void flush_dcache_page_impl(struct page *page); 39 void smp_flush_dcache_page_impl(struct page *page, int cpu); 40 void flush_dcache_page_all(struct mm_struct *mm, struct page *page); 42 #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page) argument 43 #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page) argument 48 void flush_dcache_page(struct page *page); 53 void flush_ptrace_access(struct vm_area_struct *, struct page *, 57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \ [all …]
|
D | highmem.h | 52 void *kmap_high(struct page *page); 53 void kunmap_high(struct page *page); 55 static inline void *kmap(struct page *page) in kmap() argument 58 if (!PageHighMem(page)) in kmap() 59 return page_address(page); in kmap() 60 return kmap_high(page); in kmap() 63 static inline void kunmap(struct page *page) in kunmap() argument 66 if (!PageHighMem(page)) in kunmap() 68 kunmap_high(page); in kunmap() 71 void *kmap_atomic(struct page *page);
|
/linux-4.4.14/arch/frv/include/asm/ |
D | cacheflush.h | 52 extern void flush_dcache_page(struct page *page); 54 static inline void flush_dcache_page(struct page *page) in flush_dcache_page() argument 56 unsigned long addr = page_to_phys(page); in flush_dcache_page() 61 static inline void flush_page_to_ram(struct page *page) in flush_page_to_ram() argument 63 flush_dcache_page(page); in flush_page_to_ram() 77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument 87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument 89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); in flush_icache_page() 96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument [all …]
|
D | highmem.h | 59 extern void *kmap_high(struct page *page); 60 extern void kunmap_high(struct page *page); 62 extern void *kmap(struct page *page); 63 extern void kunmap(struct page *page); 114 static inline void *kmap_atomic_primary(struct page *page) in kmap_atomic_primary() argument 119 paddr = page_to_phys(page); in kmap_atomic_primary() 142 void *kmap_atomic(struct page *page);
|
/linux-4.4.14/arch/x86/mm/ |
D | gup.c | 72 unsigned long end, int write, struct page **pages, int *nr) in gup_pte_range() 84 struct page *page; in gup_pte_range() local 97 page = pte_page(pte); in gup_pte_range() 98 get_page(page); in gup_pte_range() 99 SetPageReferenced(page); in gup_pte_range() 100 pages[*nr] = page; in gup_pte_range() 109 static inline void get_head_page_multiple(struct page *page, int nr) in get_head_page_multiple() argument 111 VM_BUG_ON_PAGE(page != compound_head(page), page); in get_head_page_multiple() 112 VM_BUG_ON_PAGE(page_count(page) == 0, page); in get_head_page_multiple() 113 atomic_add(nr, &page->_count); in get_head_page_multiple() [all …]
|
D | highmem_32.c | 6 void *kmap(struct page *page) in kmap() argument 9 if (!PageHighMem(page)) in kmap() 10 return page_address(page); in kmap() 11 return kmap_high(page); in kmap() 15 void kunmap(struct page *page) in kunmap() argument 19 if (!PageHighMem(page)) in kunmap() 21 kunmap_high(page); in kunmap() 33 void *kmap_atomic_prot(struct page *page, pgprot_t prot) in kmap_atomic_prot() argument 41 if (!PageHighMem(page)) in kmap_atomic_prot() 42 return page_address(page); in kmap_atomic_prot() [all …]
|
/linux-4.4.14/drivers/staging/rtl8188eu/core/ |
D | rtw_debug.c | 25 int proc_get_drv_version(char *page, char **start, in proc_get_drv_version() argument 31 len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION); in proc_get_drv_version() 37 int proc_get_write_reg(char *page, char **start, in proc_get_write_reg() argument 86 int proc_get_read_reg(char *page, char **start, in proc_get_read_reg() argument 102 …len += snprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(p… in proc_get_read_reg() 105 …len += snprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16… in proc_get_read_reg() 108 …len += snprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32… in proc_get_read_reg() 111 len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len); in proc_get_read_reg() 146 int proc_get_fwstate(char *page, char **start, in proc_get_fwstate() argument 156 len += snprintf(page + len, count - len, "fwstate=0x%x\n", get_fwstate(pmlmepriv)); in proc_get_fwstate() [all …]
|
/linux-4.4.14/drivers/target/ |
D | target_core_stat.c | 64 static ssize_t target_stat_inst_show(struct config_item *item, char *page) in target_stat_inst_show() argument 68 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); in target_stat_inst_show() 71 static ssize_t target_stat_indx_show(struct config_item *item, char *page) in target_stat_indx_show() argument 73 return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index); in target_stat_indx_show() 76 static ssize_t target_stat_role_show(struct config_item *item, char *page) in target_stat_role_show() argument 78 return snprintf(page, PAGE_SIZE, "Target\n"); in target_stat_role_show() 81 static ssize_t target_stat_ports_show(struct config_item *item, char *page) in target_stat_ports_show() argument 83 return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count); in target_stat_ports_show() 114 static ssize_t target_stat_tgt_inst_show(struct config_item *item, char *page) in target_stat_tgt_inst_show() argument 118 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); in target_stat_tgt_inst_show() [all …]
|
/linux-4.4.14/fs/qnx6/ |
D | dir.c | 26 static struct page *qnx6_get_page(struct inode *dir, unsigned long n) in qnx6_get_page() 29 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_get_page() local 30 if (!IS_ERR(page)) in qnx6_get_page() 31 kmap(page); in qnx6_get_page() 32 return page; in qnx6_get_page() 46 struct page **p) in qnx6_longname() 54 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_longname() local 55 if (IS_ERR(page)) in qnx6_longname() 56 return ERR_CAST(page); in qnx6_longname() 57 kmap(*p = page); in qnx6_longname() [all …]
|
/linux-4.4.14/drivers/block/ |
D | brd.c | 55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page() 58 struct page *page; in brd_lookup_page() local 73 page = radix_tree_lookup(&brd->brd_pages, idx); in brd_lookup_page() 76 BUG_ON(page && page->index != idx); in brd_lookup_page() 78 return page; in brd_lookup_page() 86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) in brd_insert_page() 89 struct page *page; in brd_insert_page() local 92 page = brd_lookup_page(brd, sector); in brd_insert_page() 93 if (page) in brd_insert_page() 94 return page; in brd_insert_page() [all …]
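brd keys its radix tree by page index, derived from the sector number; with 512-byte sectors and 4 KiB pages that is idx = sector >> 3, and the byte offset within the page is (sector & 7) << 9. A worked example of the arithmetic (assuming those sizes):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long sector = 12345;

            /* 8 sectors per 4 KiB page: PAGE_SECTORS_SHIFT == 3 */
            unsigned long long idx = sector >> 3;          /* -> 1543 */
            unsigned offset = (unsigned)(sector & 7) << 9; /* -> 512  */

            printf("sector %llu -> page %llu, byte offset %u\n",
                   sector, idx, offset);
            return 0;
    }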
|
/linux-4.4.14/arch/mn10300/include/asm/ |
D | highmem.h | 46 extern unsigned long kmap_high(struct page *page); 47 extern void kunmap_high(struct page *page); 49 static inline unsigned long kmap(struct page *page) in kmap() argument 53 if (page < highmem_start_page) in kmap() 54 return page_address(page); in kmap() 55 return kmap_high(page); in kmap() 58 static inline void kunmap(struct page *page) in kunmap() argument 62 if (page < highmem_start_page) in kunmap() 64 kunmap_high(page); in kunmap() 73 static inline void *kmap_atomic(struct page *page) in kmap_atomic() argument [all …]
|
/linux-4.4.14/arch/m68k/include/asm/ |
D | motorola_pgalloc.h | 32 struct page *page; in pte_alloc_one() local 35 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); in pte_alloc_one() 36 if(!page) in pte_alloc_one() 38 if (!pgtable_page_ctor(page)) { in pte_alloc_one() 39 __free_page(page); in pte_alloc_one() 43 pte = kmap(page); in pte_alloc_one() 47 kunmap(page); in pte_alloc_one() 48 return page; in pte_alloc_one() 51 static inline void pte_free(struct mm_struct *mm, pgtable_t page) in pte_free() argument 53 pgtable_page_dtor(page); in pte_free() [all …]
|
D | mcf_pgalloc.h | 17 unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); in pte_alloc_one_kernel() local 19 if (!page) in pte_alloc_one_kernel() 22 memset((void *)page, 0, PAGE_SIZE); in pte_alloc_one_kernel() 23 return (pte_t *) (page); in pte_alloc_one_kernel() 36 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ argument 37 (unsigned long)(page_address(page))) 43 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, in __pte_free_tlb() argument 46 __free_page(page); in __pte_free_tlb() 51 static inline struct page *pte_alloc_one(struct mm_struct *mm, in pte_alloc_one() 54 struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); in pte_alloc_one() local [all …]
|
D | sun3_pgalloc.h | 25 static inline void pte_free(struct mm_struct *mm, pgtable_t page) in pte_free() argument 27 pgtable_page_dtor(page); in pte_free() 28 __free_page(page); in pte_free() 40 unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT); in pte_alloc_one_kernel() local 42 if (!page) in pte_alloc_one_kernel() 45 memset((void *)page, 0, PAGE_SIZE); in pte_alloc_one_kernel() 46 return (pte_t *) (page); in pte_alloc_one_kernel() 52 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); in pte_alloc_one() local 54 if (page == NULL) in pte_alloc_one() 57 clear_highpage(page); in pte_alloc_one() [all …]
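The pte_alloc_one() variants above (frv, m68k motorola/mcf, sun3) all implement the same contract: hand back a zeroed page that has passed pgtable_page_ctor(), or NULL, freeing the page if the constructor fails. A condensed kernel-context sketch of that shared shape (function name invented; pgtable_t is assumed to be struct page * as on these arches):

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Sketch of the pte_alloc_one() contract from the excerpts above. */
    static pgtable_t pte_alloc_one_sketch(struct mm_struct *mm,
                                          unsigned long address)
    {
            struct page *page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);

            if (!page)
                    return NULL;
            clear_highpage(page);            /* pte tables start zeroed */
            if (!pgtable_page_ctor(page)) {  /* ptl + accounting setup */
                    __free_page(page);
                    return NULL;
            }
            return page;
    }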
|
/linux-4.4.14/arch/mips/mm/ |
D | gup.c | 38 int write, struct page **pages, int *nr) in gup_pte_range() 43 struct page *page; in gup_pte_range() local 51 page = pte_page(pte); in gup_pte_range() 52 get_page(page); in gup_pte_range() 53 SetPageReferenced(page); in gup_pte_range() 54 pages[*nr] = page; in gup_pte_range() 63 static inline void get_head_page_multiple(struct page *page, int nr) in get_head_page_multiple() argument 65 VM_BUG_ON(page != compound_head(page)); in get_head_page_multiple() 66 VM_BUG_ON(page_count(page) == 0); in get_head_page_multiple() 67 atomic_add(nr, &page->_count); in get_head_page_multiple() [all …]
|
D | cache.c | 30 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, 82 void __flush_dcache_page(struct page *page) in __flush_dcache_page() argument 84 struct address_space *mapping = page_mapping(page); in __flush_dcache_page() 88 SetPageDcacheDirty(page); in __flush_dcache_page() 97 if (PageHighMem(page)) in __flush_dcache_page() 98 addr = (unsigned long)kmap_atomic(page); in __flush_dcache_page() 100 addr = (unsigned long)page_address(page); in __flush_dcache_page() 104 if (PageHighMem(page)) in __flush_dcache_page() 110 void __flush_anon_page(struct page *page, unsigned long vmaddr) in __flush_anon_page() argument 112 unsigned long addr = (unsigned long) page_address(page); in __flush_anon_page() [all …]
|
D | highmem.c | 13 void *kmap(struct page *page) in kmap() argument 18 if (!PageHighMem(page)) in kmap() 19 return page_address(page); in kmap() 20 addr = kmap_high(page); in kmap() 27 void kunmap(struct page *page) in kunmap() argument 30 if (!PageHighMem(page)) in kunmap() 32 kunmap_high(page); in kunmap() 45 void *kmap_atomic(struct page *page) in kmap_atomic() argument 52 if (!PageHighMem(page)) in kmap_atomic() 53 return page_address(page); in kmap_atomic() [all …]
|
D | init.c | 66 struct page *page; in setup_zero_pages() local 77 page = virt_to_page((void *)empty_zero_page); in setup_zero_pages() 78 split_page(page, order); in setup_zero_pages() 79 for (i = 0; i < (1 << order); i++, page++) in setup_zero_pages() 80 mark_page_reserved(page); in setup_zero_pages() 85 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) in __kmap_pgprot() argument 93 BUG_ON(Page_dcache_dirty(page)); in __kmap_pgprot() 100 pte = mk_pte(page, prot); in __kmap_pgprot() 129 void *kmap_coherent(struct page *page, unsigned long addr) in kmap_coherent() argument 131 return __kmap_pgprot(page, addr, PAGE_KERNEL); in kmap_coherent() [all …]
|
/linux-4.4.14/arch/sh/include/asm/ |
D | cacheflush.h | 46 extern void flush_dcache_page(struct page *page); 49 struct page *page); 58 extern void __flush_anon_page(struct page *page, unsigned long); 61 struct page *page, unsigned long vmaddr) in flush_anon_page() argument 63 if (boot_cpu_data.dcache.n_aliases && PageAnon(page)) in flush_anon_page() 64 __flush_anon_page(page, vmaddr); in flush_anon_page() 76 static inline void flush_kernel_dcache_page(struct page *page) in flush_kernel_dcache_page() argument 78 flush_dcache_page(page); in flush_kernel_dcache_page() 82 struct page *page, unsigned long vaddr, void *dst, const void *src, 86 struct page *page, unsigned long vaddr, void *dst, const void *src, [all …]
|
/linux-4.4.14/arch/s390/mm/ |
D | page-states.c | 54 static inline void set_page_unstable(struct page *page, int order) in set_page_unstable() argument 61 : "a" (page_to_phys(page + i)), in set_page_unstable() 65 void arch_free_page(struct page *page, int order) in arch_free_page() argument 69 set_page_unstable(page, order); in arch_free_page() 72 static inline void set_page_stable(struct page *page, int order) in set_page_stable() argument 79 : "a" (page_to_phys(page + i)), in set_page_stable() 83 void arch_alloc_page(struct page *page, int order) in arch_alloc_page() argument 87 set_page_stable(page, order); in arch_alloc_page() 94 struct page *page; in arch_set_page_states() local 105 page = list_entry(l, struct page, lru); in arch_set_page_states() [all …]
|
/linux-4.4.14/arch/arm/include/asm/ |
D | highmem.h | 23 extern void *kmap_high(struct page *page); 24 extern void kunmap_high(struct page *page); 53 extern void *kmap_high_get(struct page *page); 55 static inline void *kmap_high_get(struct page *page) in kmap_high_get() argument 66 extern void *kmap(struct page *page); 67 extern void kunmap(struct page *page); 68 extern void *kmap_atomic(struct page *page);
|
D | page.h | 110 struct page; 114 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); 115 void (*cpu_copy_user_highpage)(struct page *to, struct page *from, 130 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); 131 extern void __cpu_copy_user_highpage(struct page *to, struct page *from, 135 #define clear_user_highpage(page,vaddr) \ argument 136 __cpu_clear_user_highpage(page, vaddr) 142 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) argument 157 typedef struct page *pgtable_t;
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ipz_pt_fn.c | 84 u64 page = __pa(queue->queue_pages[i]); in ipz_queue_abs_to_offset() local 85 if (addr >= page && addr < page + queue->pagesize) { in ipz_queue_abs_to_offset() 86 *q_offset = addr - page + i * queue->pagesize; in ipz_queue_abs_to_offset() 130 struct ipz_small_queue_page *page; in alloc_small_queue_page() local 136 page = list_entry(pd->free[order].next, in alloc_small_queue_page() 139 page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL); in alloc_small_queue_page() 140 if (!page) in alloc_small_queue_page() 143 page->page = get_zeroed_page(GFP_KERNEL); in alloc_small_queue_page() 144 if (!page->page) { in alloc_small_queue_page() 145 kmem_cache_free(small_qp_cache, page); in alloc_small_queue_page() [all …]
|
/linux-4.4.14/arch/unicore32/mm/ |
D | flush.c | 35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument 53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 58 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page() 61 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 68 __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE); in __flush_dcache_page() 75 void flush_dcache_page(struct page *page) in flush_dcache_page() argument 83 if (page == ZERO_PAGE(0)) in flush_dcache_page() 86 mapping = page_mapping(page); in flush_dcache_page() 89 clear_bit(PG_dcache_clean, &page->flags); in flush_dcache_page() 91 __flush_dcache_page(mapping, page); in flush_dcache_page() [all …]
|
/linux-4.4.14/fs/fscache/ |
D | page.c | 23 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page) in __fscache_check_page_write() argument 28 val = radix_tree_lookup(&cookie->stores, page->index); in __fscache_check_page_write() 38 void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page) in __fscache_wait_on_page_write() argument 42 wait_event(*wq, !__fscache_check_page_write(cookie, page)); in __fscache_wait_on_page_write() 51 bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page) in release_page_wait_timeout() argument 55 return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), in release_page_wait_timeout() 64 struct page *page, in __fscache_maybe_release_page() argument 67 struct page *xpage; in __fscache_maybe_release_page() 70 _enter("%p,%p,%x", cookie, page, gfp); in __fscache_maybe_release_page() 74 val = radix_tree_lookup(&cookie->stores, page->index); in __fscache_maybe_release_page() [all …]
|
/linux-4.4.14/block/ |
D | blk-sysfs.c | 24 queue_var_show(unsigned long var, char *page) in queue_var_show() argument 26 return sprintf(page, "%lu\n", var); in queue_var_show() 30 queue_var_store(unsigned long *var, const char *page, size_t count) in queue_var_store() argument 35 err = kstrtoul(page, 10, &v); in queue_var_store() 44 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument 46 return queue_var_show(q->nr_requests, (page)); in queue_requests_show() 50 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument 58 ret = queue_var_store(&nr, page, count); in queue_requests_store() 76 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument 81 return queue_var_show(ra_kb, (page)); in queue_ra_show() [all …]
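Every attribute in this file reduces to the queue_var_show()/queue_var_store() pair shown at the top: show formats a single unsigned long with sprintf(), store parses one with kstrtoul() and returns the byte count on success. A hedged sketch of an attribute pair in the same style; the name queue_foo and the field q->foo are hypothetical:

    /* Hypothetical attribute following the blk-sysfs pattern. */
    static ssize_t queue_foo_show(struct request_queue *q, char *page)
    {
        return queue_var_show(q->foo, page);
    }

    static ssize_t
    queue_foo_store(struct request_queue *q, const char *page, size_t count)
    {
        unsigned long v;
        ssize_t ret = queue_var_store(&v, page, count);

        if (ret < 0)
            return ret;
        q->foo = v;
        return ret;    /* bytes consumed, as sysfs expects */
    }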
|
D | blk-mq-sysfs.c | 33 char *page) in blk_mq_sysfs_show() argument 50 res = entry->show(ctx, page); in blk_mq_sysfs_show() 56 const char *page, size_t length) in blk_mq_sysfs_store() argument 73 res = entry->store(ctx, page, length); in blk_mq_sysfs_store() 79 struct attribute *attr, char *page) in blk_mq_hw_sysfs_show() argument 96 res = entry->show(hctx, page); in blk_mq_hw_sysfs_show() 102 struct attribute *attr, const char *page, in blk_mq_hw_sysfs_store() argument 120 res = entry->store(hctx, page, length); in blk_mq_hw_sysfs_store() 125 static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) in blk_mq_sysfs_dispatched_show() argument 127 return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1], in blk_mq_sysfs_dispatched_show() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | doorbell.c | 49 struct mlx5_ib_user_db_page *page; in mlx5_ib_db_map_user() local 54 list_for_each_entry(page, &context->db_page_list, list) in mlx5_ib_db_map_user() 55 if (page->user_virt == (virt & PAGE_MASK)) in mlx5_ib_db_map_user() 58 page = kmalloc(sizeof(*page), GFP_KERNEL); in mlx5_ib_db_map_user() 59 if (!page) { in mlx5_ib_db_map_user() 64 page->user_virt = (virt & PAGE_MASK); in mlx5_ib_db_map_user() 65 page->refcnt = 0; in mlx5_ib_db_map_user() 66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx5_ib_db_map_user() 68 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user() 69 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user() [all …]
|
/linux-4.4.14/arch/x86/include/asm/ |
D | cacheflush.h | 57 int set_pages_array_uc(struct page **pages, int addrinarray); 58 int set_pages_array_wc(struct page **pages, int addrinarray); 59 int set_pages_array_wt(struct page **pages, int addrinarray); 60 int set_pages_array_wb(struct page **pages, int addrinarray); 82 int set_pages_uc(struct page *page, int numpages); 83 int set_pages_wb(struct page *page, int numpages); 84 int set_pages_x(struct page *page, int numpages); 85 int set_pages_nx(struct page *page, int numpages); 86 int set_pages_ro(struct page *page, int numpages); 87 int set_pages_rw(struct page *page, int numpages);
|
D | highmem.h | 60 extern void *kmap_high(struct page *page); 61 extern void kunmap_high(struct page *page); 63 void *kmap(struct page *page); 64 void kunmap(struct page *page); 66 void *kmap_atomic_prot(struct page *page, pgprot_t prot); 67 void *kmap_atomic(struct page *page);
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | doorbell.c | 47 struct mlx4_ib_user_db_page *page; in mlx4_ib_db_map_user() local 52 list_for_each_entry(page, &context->db_page_list, list) in mlx4_ib_db_map_user() 53 if (page->user_virt == (virt & PAGE_MASK)) in mlx4_ib_db_map_user() 56 page = kmalloc(sizeof *page, GFP_KERNEL); in mlx4_ib_db_map_user() 57 if (!page) { in mlx4_ib_db_map_user() 62 page->user_virt = (virt & PAGE_MASK); in mlx4_ib_db_map_user() 63 page->refcnt = 0; in mlx4_ib_db_map_user() 64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx4_ib_db_map_user() 66 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user() 67 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user() [all …]
|
/linux-4.4.14/fs/hfsplus/ |
D | bitmap.c | 22 struct page *page; in hfsplus_block_allocate() local 36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate() 37 if (IS_ERR(page)) { in hfsplus_block_allocate() 41 pptr = kmap(page); in hfsplus_block_allocate() 76 kunmap(page); in hfsplus_block_allocate() 80 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, in hfsplus_block_allocate() 82 if (IS_ERR(page)) { in hfsplus_block_allocate() 86 curr = pptr = kmap(page); in hfsplus_block_allocate() 128 set_page_dirty(page); in hfsplus_block_allocate() 129 kunmap(page); in hfsplus_block_allocate() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c | 61 } page[0]; member 109 struct page *page; in mthca_alloc_icm_pages() local 115 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in mthca_alloc_icm_pages() 116 if (!page) in mthca_alloc_icm_pages() 119 sg_set_page(mem, page, PAGE_SIZE << order, 0); in mthca_alloc_icm_pages() 283 struct page *page = NULL; in mthca_table_find() local 309 page = sg_page(&chunk->mem[i]); in mthca_table_find() 318 return page ? lowmem_page_address(page) + offset : NULL; in mthca_table_find() 439 static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) in mthca_uarc_virt() argument 443 page * MTHCA_ICM_PAGE_SIZE; in mthca_uarc_virt() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 72 static void ll_invalidatepage(struct page *vmpage, unsigned int offset, in ll_invalidatepage() 77 struct cl_page *page; in ll_invalidatepage() local 96 page = cl_vmpage_page(vmpage, obj); in ll_invalidatepage() 97 if (page != NULL) { in ll_invalidatepage() 98 lu_ref_add(&page->cp_reference, in ll_invalidatepage() 100 cl_page_delete(env, page); in ll_invalidatepage() 101 lu_ref_del(&page->cp_reference, in ll_invalidatepage() 103 cl_page_put(env, page); in ll_invalidatepage() 117 static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) in ll_releasepage() 122 struct cl_page *page; in ll_releasepage() local [all …]
|
/linux-4.4.14/drivers/staging/rtl8188eu/include/ |
D | rtw_debug.h | 120 int proc_get_drv_version(char *page, char **start, 124 int proc_get_write_reg(char *page, char **start, 130 int proc_get_read_reg(char *page, char **start, 137 int proc_get_fwstate(char *page, char **start, 140 int proc_get_sec_info(char *page, char **start, 143 int proc_get_mlmext_state(char *page, char **start, 147 int proc_get_qos_option(char *page, char **start, 150 int proc_get_ht_option(char *page, char **start, 153 int proc_get_rf_info(char *page, char **start, 156 int proc_get_ap_info(char *page, char **start, [all …]
|
/linux-4.4.14/arch/arm64/mm/ |
D | flush.c | 37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument 59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 65 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page() 71 struct page *page = pte_page(pte); in __sync_icache_dcache() local 74 if (!page_mapping(page)) in __sync_icache_dcache() 77 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { in __sync_icache_dcache() 78 __flush_dcache_area(page_address(page), in __sync_icache_dcache() 79 PAGE_SIZE << compound_order(page)); in __sync_icache_dcache() 91 void flush_dcache_page(struct page *page) in flush_dcache_page() argument 93 if (test_bit(PG_dcache_clean, &page->flags)) in flush_dcache_page() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/linux/ |
D | lustre_patchless_compat.h | 46 #define ll_delete_from_page_cache(page) delete_from_page_cache(page) argument 49 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument 51 if (page->mapping != mapping) in truncate_complete_page() 54 if (PagePrivate(page)) in truncate_complete_page() 55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page() 57 cancel_dirty_page(page); in truncate_complete_page() 58 ClearPageMappedToDisk(page); in truncate_complete_page() 59 ll_delete_from_page_cache(page); in truncate_complete_page()
|
/linux-4.4.14/arch/sh/mm/ |
D | cache.c | 58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 62 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && in copy_to_user_page() 63 test_bit(PG_dcache_clean, &page->flags)) { in copy_to_user_page() 64 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); in copy_to_user_page() 70 clear_bit(PG_dcache_clean, &page->flags); in copy_to_user_page() 74 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page() 77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument 81 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && in copy_from_user_page() 82 test_bit(PG_dcache_clean, &page->flags)) { in copy_from_user_page() 83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); in copy_from_user_page() [all …]
|
/linux-4.4.14/drivers/hwmon/pmbus/ |
D | ltc2978.c | 161 static int ltc_read_word_data(struct i2c_client *client, int page, int reg) in ltc_read_word_data() argument 169 return pmbus_read_word_data(client, page, reg); in ltc_read_word_data() 172 static int ltc_read_byte_data(struct i2c_client *client, int page, int reg) in ltc_read_byte_data() argument 180 return pmbus_read_byte_data(client, page, reg); in ltc_read_byte_data() 183 static int ltc_write_byte(struct i2c_client *client, int page, u8 byte) in ltc_write_byte() argument 191 return pmbus_write_byte(client, page, byte); in ltc_write_byte() 208 int page, int reg, u16 *pmax) in ltc_get_max() argument 212 ret = ltc_read_word_data(client, page, reg); in ltc_get_max() 222 int page, int reg, u16 *pmin) in ltc_get_min() argument 226 ret = ltc_read_word_data(client, page, reg); in ltc_get_min() [all …]
|
D | pmbus.c | 36 int page; in pmbus_find_sensor_groups() local 75 for (page = 0; page < info->pages; page++) { in pmbus_find_sensor_groups() 76 if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) { in pmbus_find_sensor_groups() 77 info->func[page] |= PMBUS_HAVE_VOUT; in pmbus_find_sensor_groups() 78 if (pmbus_check_byte_register(client, page, in pmbus_find_sensor_groups() 80 info->func[page] |= PMBUS_HAVE_STATUS_VOUT; in pmbus_find_sensor_groups() 82 if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) { in pmbus_find_sensor_groups() 83 info->func[page] |= PMBUS_HAVE_IOUT; in pmbus_find_sensor_groups() 86 info->func[page] |= PMBUS_HAVE_STATUS_IOUT; in pmbus_find_sensor_groups() 88 if (pmbus_check_word_register(client, page, PMBUS_READ_POUT)) in pmbus_find_sensor_groups() [all …]
|
/linux-4.4.14/fs/udf/ |
D | file.c | 41 static void __udf_adinicb_readpage(struct page *page) in __udf_adinicb_readpage() argument 43 struct inode *inode = page->mapping->host; in __udf_adinicb_readpage() 47 kaddr = kmap(page); in __udf_adinicb_readpage() 50 flush_dcache_page(page); in __udf_adinicb_readpage() 51 SetPageUptodate(page); in __udf_adinicb_readpage() 52 kunmap(page); in __udf_adinicb_readpage() 55 static int udf_adinicb_readpage(struct file *file, struct page *page) in udf_adinicb_readpage() argument 57 BUG_ON(!PageLocked(page)); in udf_adinicb_readpage() 58 __udf_adinicb_readpage(page); in udf_adinicb_readpage() 59 unlock_page(page); in udf_adinicb_readpage() [all …]
|
/linux-4.4.14/arch/arc/include/asm/ |
D | highmem.h | 37 extern void *kmap(struct page *page); 38 extern void *kmap_high(struct page *page); 39 extern void *kmap_atomic(struct page *page); 41 extern void kunmap_high(struct page *page); 50 static inline void kunmap(struct page *page) in kunmap() argument 53 if (!PageHighMem(page)) in kunmap() 55 kunmap_high(page); in kunmap()
|
/linux-4.4.14/arch/microblaze/mm/ |
D | consistent.c | 67 struct page *page, *end; in consistent_alloc() local 129 page = virt_to_page(vaddr); in consistent_alloc() 130 end = page + (1 << order); in consistent_alloc() 132 split_page(page, order); in consistent_alloc() 140 SetPageReserved(page); in consistent_alloc() 141 page++; in consistent_alloc() 145 while (page < end) { in consistent_alloc() 146 __free_page(page); in consistent_alloc() 147 page++; in consistent_alloc() 183 struct page *page; in consistent_free() local [all …]
|
/linux-4.4.14/Documentation/vm/ |
D | page_migration | 9 The main intent of page migration is to reduce the latency of memory access 22 which provides an interface similar to other numa functionality for page 25 proc(5) man page. 31 manual page migration support. Automatic page migration may be implemented 67 Calling isolate_lru_page increases the reference count on the page 68 so that it cannot vanish while the page migration occurs. 70 the page. 74 how to allocate the correct new page given the old page. 78 the new page for each page that is considered for 84 migrate_pages() does several passes over its list of pages. A page is moved [all …]
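The flow the document describes: isolate the page onto a private list, hand migrate_pages() a callback that allocates the replacement, and put back anything that could not be moved. A sketch against this tree's signatures (in 4.4, new_page_t still takes an int **result argument); it omits the NR_ISOLATED_* accounting the kernel's own callers perform, and target_nid is a caller-supplied destination node:

    /* Sketch: migrate one LRU page to a given NUMA node. */
    static struct page *new_page_on_node(struct page *page,
                                         unsigned long private, int **result)
    {
        return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
    }

    static int migrate_one(struct page *page, int target_nid)
    {
        LIST_HEAD(pagelist);
        int err;

        err = isolate_lru_page(page);    /* pins the page off the LRU */
        if (err)
            return err;
        list_add_tail(&page->lru, &pagelist);

        err = migrate_pages(&pagelist, new_page_on_node, NULL,
                            target_nid, MIGRATE_SYNC, MR_SYSCALL);
        if (err)
            putback_movable_pages(&pagelist);
        return err;
    }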
|
D | pagemap.txt | 5 userspace programs to examine the page tables and related information by 11 physical frame each virtual page is mapped to. It contains one 64-bit 12 value for each virtual page, containing the following data (from 15 * Bits 0-54 page frame number (PFN) if present 19 * Bit 56 page exclusively mapped (since 4.2) 21 * Bit 61 page is file-page or shared-anon (since 3.5) 22 * Bit 62 page swapped 23 * Bit 63 page present 30 If the page is not present but in swap, then the PFN contains an 31 encoding of the swap file number and the page's offset into the [all …]
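The bit layout above is all a userspace reader needs: each virtual page owns one 64-bit entry at file offset (vaddr / page_size) * 8, with the PFN in bits 0-54 and the present flag in bit 63. A minimal sketch (note that since v4.0 the PFN field reads back as zero without CAP_SYS_ADMIN):

    /* Userspace sketch: print the physical frame behind one address. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int print_pfn(const void *vaddr)
    {
        long psize = sysconf(_SC_PAGESIZE);
        uint64_t entry;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
            return -1;
        if (pread(fd, &entry, sizeof(entry),
                  ((uintptr_t)vaddr / psize) * 8) != sizeof(entry)) {
            close(fd);
            return -1;
        }
        close(fd);

        if (entry & (1ULL << 63))                     /* bit 63: present */
            printf("pfn 0x%llx\n",
                   (unsigned long long)(entry & ((1ULL << 55) - 1)));
        else
            printf("not present\n");
        return 0;
    }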
|
D | split_page_table_lock | 1 Split page table lock 4 Originally, the mm->page_table_lock spinlock protected all page tables of the 5 mm_struct. But this approach leads to poor page fault scalability of 7 scalability, split page table lock was introduced. 9 With split page table lock we have a separate per-table lock to serialize 29 Split page table lock for PTE tables is enabled at compile time if 33 Split page table lock for PMD tables is enabled if it's enabled for PTE 36 Hugetlb and split page table lock 39 Hugetlb can support several page sizes. We use the split lock only for PMD 44 takes the pmd split lock for a PMD_SIZE page, mm->page_table_lock [all …]
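Callers rarely name the lock directly; the helpers pick the right one (split per-table lock or mm->page_table_lock) behind the same interface. A minimal sketch of the usual PTE walk, assuming the caller already holds a valid, stable pmd_t * for the address:

    /* Sketch: test one PTE under whichever PTE-table lock applies. */
    static int pte_is_present(struct mm_struct *mm, pmd_t *pmd,
                              unsigned long addr)
    {
        spinlock_t *ptl;
        pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        int ret = pte_present(*pte);

        pte_unmap_unlock(pte, ptl);    /* releases lock and kmap together */
        return ret;
    }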
|
D | page_owner.txt | 1 page owner: Tracking who allocated each page 6 page owner tracks who allocated each page. 9 and order of pages is stored into certain storage for each page. 13 Although we already have tracepoints for tracing page allocation/free, 14 using them for analyzing who allocates each page is rather complex. We need 20 page owner can also be used for various purposes. For example, accurate 22 each page. It is already implemented and activated if page owner is 25 page owner is disabled by default. So, if you'd like to use it, you need 27 with page owner and page owner is disabled at runtime due to no enabling 30 memory overhead. And, page owner inserts just two unlikely branches into [all …]
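Operationally this is a debugfs interface: build with CONFIG_PAGE_OWNER, boot with page_owner=on, then read /sys/kernel/debug/page_owner, which hands back one allocation record (PFN, order, gfp mask, stack trace) per read() call. A userspace sketch under those assumptions:

    /* Userspace sketch: stream page owner records to stdout. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int dump_page_owner(void)
    {
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);

        if (fd < 0)
            return -1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, n, stdout);    /* one record per read() */
        close(fd);
        return 0;
    }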
|
/linux-4.4.14/arch/metag/mm/ |
D | highmem.c | 13 void *kmap(struct page *page) in kmap() argument 16 if (!PageHighMem(page)) in kmap() 17 return page_address(page); in kmap() 18 return kmap_high(page); in kmap() 22 void kunmap(struct page *page) in kunmap() argument 25 if (!PageHighMem(page)) in kunmap() 27 kunmap_high(page); in kunmap() 40 void *kmap_atomic(struct page *page) in kmap_atomic() argument 48 if (!PageHighMem(page)) in kmap_atomic() 49 return page_address(page); in kmap_atomic() [all …]
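kmap_atomic(), by contrast, never sleeps: as the metag implementation above shows, it short-circuits lowmem pages straight to page_address() and otherwise takes a per-CPU fixmap slot, so mappings must be released in LIFO order via kunmap_atomic(). A minimal sketch:

    /* Sketch: zero part of a page from atomic context. Note that
     * kunmap_atomic() takes the returned address, not the page. */
    static void zero_range_atomic(struct page *page, size_t off, size_t len)
    {
        char *vaddr = kmap_atomic(page);

        memset(vaddr + off, 0, len);
        kunmap_atomic(vaddr);
    }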
|
/linux-4.4.14/arch/powerpc/mm/ |
D | dma-noncoherent.c | 157 struct page *page; in __dma_alloc_coherent() local 198 page = alloc_pages(gfp, order); in __dma_alloc_coherent() 199 if (!page) in __dma_alloc_coherent() 207 unsigned long kaddr = (unsigned long)page_address(page); in __dma_alloc_coherent() 208 memset(page_address(page), 0, size); in __dma_alloc_coherent() 219 struct page *end = page + (1 << order); in __dma_alloc_coherent() 221 split_page(page, order); in __dma_alloc_coherent() 226 *handle = page_to_phys(page); in __dma_alloc_coherent() 229 SetPageReserved(page); in __dma_alloc_coherent() 230 map_page(vaddr, page_to_phys(page), in __dma_alloc_coherent() [all …]
|
/linux-4.4.14/arch/metag/include/asm/ |
D | highmem.h | 44 extern void *kmap_high(struct page *page); 45 extern void kunmap_high(struct page *page); 54 extern void *kmap(struct page *page); 55 extern void kunmap(struct page *page); 56 extern void *kmap_atomic(struct page *page);
|
/linux-4.4.14/fs/coda/ |
D | symlink.c | 23 static int coda_symlink_filler(struct file *file, struct page *page) in coda_symlink_filler() argument 25 struct inode *inode = page->mapping->host; in coda_symlink_filler() 29 char *p = kmap(page); in coda_symlink_filler() 36 SetPageUptodate(page); in coda_symlink_filler() 37 kunmap(page); in coda_symlink_filler() 38 unlock_page(page); in coda_symlink_filler() 42 SetPageError(page); in coda_symlink_filler() 43 kunmap(page); in coda_symlink_filler() 44 unlock_page(page); in coda_symlink_filler()
|
/linux-4.4.14/fs/btrfs/ |
D | extent_io.h | 70 int (*fill_delalloc)(struct inode *inode, struct page *locked_page, 73 int (*writepage_start_hook)(struct page *page, u64 start, u64 end); 74 int (*writepage_io_hook)(struct page *page, u64 start, u64 end); 76 int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset, 79 int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); 81 struct page *page, u64 start, u64 end, 83 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, 160 struct page *pages[INLINE_EXTENT_BUFFER_PAGES]; 191 struct page *page, 199 struct extent_io_tree *tree, struct page *page, [all …]
|
D | extent_io.c | 1458 struct page *page; in extent_range_clear_dirty_for_io() local 1461 page = find_get_page(inode->i_mapping, index); in extent_range_clear_dirty_for_io() 1462 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_clear_dirty_for_io() 1463 clear_page_dirty_for_io(page); in extent_range_clear_dirty_for_io() 1464 page_cache_release(page); in extent_range_clear_dirty_for_io() 1474 struct page *page; in extent_range_redirty_for_io() local 1477 page = find_get_page(inode->i_mapping, index); in extent_range_redirty_for_io() 1478 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_redirty_for_io() 1479 __set_page_dirty_nobuffers(page); in extent_range_redirty_for_io() 1480 account_page_redirty(page); in extent_range_redirty_for_io() [all …]
|
/linux-4.4.14/kernel/power/ |
D | snapshot.c | 41 static int swsusp_page_is_free(struct page *); 42 static void swsusp_set_page_forbidden(struct page *); 43 static void swsusp_unset_page_forbidden(struct page *); 121 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() 123 struct page *page; in alloc_image_page() local 125 page = alloc_page(gfp_mask); in alloc_image_page() 126 if (page) { in alloc_image_page() 127 swsusp_set_page_forbidden(page); in alloc_image_page() 128 swsusp_set_page_free(page); in alloc_image_page() 130 return page; in alloc_image_page() [all …]
|
/linux-4.4.14/fs/hfs/ |
D | bnode.c | 20 struct page *page; in hfs_bnode_read() local 23 page = node->page[0]; in hfs_bnode_read() 25 memcpy(buf, kmap(page) + off, len); in hfs_bnode_read() 26 kunmap(page); in hfs_bnode_read() 62 struct page *page; in hfs_bnode_write() local 65 page = node->page[0]; in hfs_bnode_write() 67 memcpy(kmap(page) + off, buf, len); in hfs_bnode_write() 68 kunmap(page); in hfs_bnode_write() 69 set_page_dirty(page); in hfs_bnode_write() 87 struct page *page; in hfs_bnode_clear() local [all …]
|
/linux-4.4.14/arch/m68k/mm/ |
D | memory.c | 31 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) argument 32 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) 40 unsigned long page = ptable & PAGE_MASK; in init_pointer_table() local 41 unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE); in init_pointer_table() 43 dp = PD_PTABLE(page); in init_pointer_table() 75 void *page; in get_pointer_table() local 78 if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) in get_pointer_table() 81 flush_tlb_kernel_page(page); in get_pointer_table() 82 nocache_page(page); in get_pointer_table() 84 new = PD_PTABLE(page); in get_pointer_table() [all …]
|
/linux-4.4.14/arch/mn10300/mm/ |
D | pgtable.c | 72 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) in pte_alloc_one() 74 struct page *pte; in pte_alloc_one() 106 struct page *pgd_list; 110 struct page *page = virt_to_page(pgd); in pgd_list_add() local 111 page->index = (unsigned long) pgd_list; in pgd_list_add() 113 set_page_private(pgd_list, (unsigned long) &page->index); in pgd_list_add() 114 pgd_list = page; in pgd_list_add() 115 set_page_private(page, (unsigned long) &pgd_list); in pgd_list_add() 120 struct page *next, **pprev, *page = virt_to_page(pgd); in pgd_list_del() local 121 next = (struct page *) page->index; in pgd_list_del() [all …]
|
/linux-4.4.14/include/xen/ |
D | grant_table.h | 76 struct page **pages; 102 unsigned long page); 137 struct page *page, int readonly) in gnttab_page_grant_foreign_access_ref_one() argument 139 gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), in gnttab_page_grant_foreign_access_ref_one() 195 int gnttab_alloc_pages(int nr_pages, struct page **pages); 196 void gnttab_free_pages(int nr_pages, struct page **pages); 200 struct page **pages, unsigned int count); 203 struct page **pages, unsigned int count); 226 static inline struct xen_page_foreign *xen_page_foreign(struct page *page) in xen_page_foreign() argument 228 if (!PageForeign(page)) in xen_page_foreign() [all …]
|
/linux-4.4.14/arch/cris/arch-v10/mm/ |
D | init.c | 70 IO_STATE(R_MMU_KSEG, seg_e, page ) | in paging_init() 71 IO_STATE(R_MMU_KSEG, seg_d, page ) | in paging_init() 72 IO_STATE(R_MMU_KSEG, seg_c, page ) | in paging_init() 74 IO_STATE(R_MMU_KSEG, seg_a, page ) | in paging_init() 77 IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */ in paging_init() 80 IO_STATE(R_MMU_KSEG, seg_4, page ) | /* user area */ in paging_init() 81 IO_STATE(R_MMU_KSEG, seg_3, page ) | /* user area */ in paging_init() 82 IO_STATE(R_MMU_KSEG, seg_2, page ) | /* user area */ in paging_init() 83 IO_STATE(R_MMU_KSEG, seg_1, page ) | /* user area */ in paging_init() 84 IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */ in paging_init() [all …]
|
/linux-4.4.14/kernel/ |
D | kexec_core.c | 142 static struct page *kimage_alloc_page(struct kimage *image, 282 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages() 284 struct page *pages; in kimage_alloc_pages() 300 static void kimage_free_pages(struct page *page) in kimage_free_pages() argument 304 order = page_private(page); in kimage_free_pages() 307 ClearPageReserved(page + i); in kimage_free_pages() 308 __free_pages(page, order); in kimage_free_pages() 316 struct page *page; in kimage_free_page_list() local 318 page = list_entry(pos, struct page, lru); in kimage_free_page_list() 319 list_del(&page->lru); in kimage_free_page_list() [all …]
|
/linux-4.4.14/fs/xfs/ |
D | xfs_aops.c | 41 struct page *page, in xfs_count_page_state() argument 49 bh = head = page_buffers(page); in xfs_count_page_state() 416 struct page *page, in xfs_start_page_writeback() argument 420 ASSERT(PageLocked(page)); in xfs_start_page_writeback() 421 ASSERT(!PageWriteback(page)); in xfs_start_page_writeback() 431 clear_page_dirty_for_io(page); in xfs_start_page_writeback() 432 set_page_writeback(page); in xfs_start_page_writeback() 434 set_page_writeback_keepwrite(page); in xfs_start_page_writeback() 436 unlock_page(page); in xfs_start_page_writeback() 440 end_page_writeback(page); in xfs_start_page_writeback() [all …]
|