/linux-4.4.14/drivers/block/drbd/ |
D | drbd_bitmap.c |
      210 return page_private(page) & BM_PAGE_IDX_MASK; in bm_page_to_idx()
      219 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_lock_io()
      226 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_unlock_io()
      236 clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); in bm_set_page_unchanged()
      237 clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); in bm_set_page_unchanged()
      242 set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); in bm_set_page_need_writeout()
      263 set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)); in drbd_bm_mark_for_writeout()
      268 volatile const unsigned long *addr = &page_private(page); in bm_test_page_unchanged()
      274 set_bit(BM_PAGE_IO_ERROR, &page_private(page)); in bm_set_page_io_err()
      279 clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); in bm_clear_page_io_err()
      [all …]
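All of the drbd_bitmap.c hits above treat page->private as a small per-page word of state: the low bits hold a page index (masked with BM_PAGE_IDX_MASK) and individual high bits are status flags (BM_PAGE_NEED_WRITEOUT, BM_PAGE_LAZY_WRITEOUT, BM_PAGE_IO_ERROR, ...) toggled with set_bit()/clear_bit() on &page_private(page). Below is a minimal userspace sketch of that packing; the struct page, the mask width and the flag values are illustrative stand-ins, not drbd's definitions.

/* Minimal userspace model of drbd_bitmap.c's use of page->private:
 * the low bits hold a page index, a few high bits hold status flags.
 * All names below are illustrative stand-ins, not the kernel's. */
#include <stdio.h>

struct page { unsigned long private; };          /* stand-in for struct page */
#define page_private(page) ((page)->private)     /* as defined in include/linux/mm.h */

#define BM_PAGE_IDX_MASK      ((1UL << 24) - 1)  /* assumed width, for illustration */
#define BM_PAGE_NEED_WRITEOUT (1UL << 29)
#define BM_PAGE_IO_ERROR      (1UL << 30)

static unsigned long bm_page_to_idx(struct page *page)
{
        return page_private(page) & BM_PAGE_IDX_MASK;
}

static void bm_set_page_need_writeout(struct page *page)
{
        page_private(page) |= BM_PAGE_NEED_WRITEOUT;   /* the kernel uses set_bit() */
}

int main(void)
{
        struct page pg = { .private = 42 };            /* index 42, no flags yet */

        bm_set_page_need_writeout(&pg);
        printf("idx=%lu need_writeout=%d\n",
               bm_page_to_idx(&pg),
               !!(page_private(&pg) & BM_PAGE_NEED_WRITEOUT));
        return 0;
}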
|
D | drbd_int.h | 1678 return (struct page *)page_private(page); in page_chain_next()
|
D | drbd_main.c | 2028 drbd_pp_pool = (struct page *)page_private(page); in drbd_destroy_mempools()
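The drbd_int.h and drbd_main.c hits use the field differently: page->private is reinterpreted as a pointer to the next struct page, so the pre-allocated pages of drbd_pp_pool form a singly linked chain walked by page_chain_next(). The mm/zsmalloc.c and net/core/skbuff.c entries further down this list rely on the same next-pointer idiom. A minimal sketch, using the same stand-in struct page as above:

/* Userspace model of a page chain threaded through page->private,
 * as in drbd's page_chain_next() and the drbd_pp_pool teardown.
 * struct page and the helpers are stand-in re-definitions. */
#include <stdio.h>
#include <stddef.h>

struct page { unsigned long private; };
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (unsigned long)(v))

static struct page *page_chain_next(struct page *page)
{
        return (struct page *)page_private(page);
}

int main(void)
{
        struct page pages[3] = { { 0 }, { 0 }, { 0 } };

        /* Link pages[0] -> pages[1] -> pages[2] -> NULL. */
        set_page_private(&pages[0], &pages[1]);
        set_page_private(&pages[1], &pages[2]);
        set_page_private(&pages[2], NULL);

        for (struct page *p = &pages[0]; p; p = page_chain_next(p))
                printf("page at %p\n", (void *)p);
        return 0;
}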
|
/linux-4.4.14/fs/afs/ |
D | file.c |
      315 struct afs_writeback *wb = (struct afs_writeback *) page_private(page); in afs_invalidatepage()
      337 if (!page_private(page)) in afs_invalidatepage()
      351 struct afs_writeback *wb = (struct afs_writeback *) page_private(page); in afs_releasepage()
|
D | write.c |
      169 wb = (struct afs_writeback *) page_private(page); in afs_write_begin()
      363 page_private(page) != (unsigned long) wb) { in afs_write_back_from_locked_page()
      444 wb = (struct afs_writeback *) page_private(page); in afs_writepage()
      509 wb = (struct afs_writeback *) page_private(page); in afs_writepages_region()
      600 if (page_private(page) == (unsigned long) wb) { in afs_pages_written_back()
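Both afs files follow what is probably the most common idiom in this list: page->private caches a pointer to a per-page bookkeeping object (here struct afs_writeback), with a NULL or PagePrivate() check guarding the cast back; fs/nfs/write.c, fs/jfs/jfs_metapage.c and KVM's page_header() below do the same with their own types. A hedged userspace sketch of that round trip; the afs_writeback layout and the page_to_wb() helper are invented for illustration:

/* Userspace model of stashing a bookkeeping pointer in page->private,
 * as afs does with struct afs_writeback.  Types and helper names here
 * are stand-ins, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long private; };
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (unsigned long)(v))

struct afs_writeback { int usage; };             /* illustrative layout only */

static struct afs_writeback *page_to_wb(struct page *page)
{
        /* afs_invalidatepage() checks page_private() before casting. */
        if (!page_private(page))
                return NULL;
        return (struct afs_writeback *)page_private(page);
}

int main(void)
{
        struct page pg = { 0 };
        struct afs_writeback *wb = calloc(1, sizeof(*wb));

        set_page_private(&pg, wb);               /* attach on write_begin */
        printf("wb usage: %d\n", page_to_wb(&pg)->usage);

        set_page_private(&pg, NULL);             /* detach, e.g. on releasepage */
        printf("detached: %d\n", page_to_wb(&pg) == NULL);
        free(wb);
        return 0;
}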
|
/linux-4.4.14/mm/ |
D | swap_state.c |
      144 entry.val = page_private(page); in __delete_from_swap_cache()
      146 radix_tree_delete(&address_space->page_tree, page_private(page)); in __delete_from_swap_cache()
      217 entry.val = page_private(page); in delete_from_swap_cache()
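Within mm/, the dominant meaning is different again: for a page in the swap cache, page->private holds the raw swp_entry_t value (swap type and offset packed into one unsigned long), which is why swap_state.c here, and frontswap.c, swapfile.c, shmem.c, vmscan.c and memory.c below, all rebuild the entry with { .val = page_private(page) } or compare it against swap.val. A minimal model of that packing; the type/offset split is invented for illustration, the real encoding lives in include/linux/swapops.h:

/* Userspace model of the swap-cache idiom: page->private holds the
 * packed swp_entry_t value.  SWP_TYPE_SHIFT is an assumed split. */
#include <stdio.h>

struct page { unsigned long private; };
#define page_private(page) ((page)->private)

typedef struct { unsigned long val; } swp_entry_t;

#define SWP_TYPE_SHIFT 24                        /* assumed split, not the kernel's */

static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t){ .val = (type << SWP_TYPE_SHIFT) | offset };
}

int main(void)
{
        swp_entry_t entry = swp_entry(1, 0x2a);     /* swap area 1, slot 0x2a */
        struct page pg = { .private = entry.val };  /* set when added to the swap cache */

        /* Later, e.g. __delete_from_swap_cache() rebuilds the entry: */
        swp_entry_t found = { .val = page_private(&pg) };
        printf("type=%lu offset=%#lx\n",
               found.val >> SWP_TYPE_SHIFT,
               found.val & ((1UL << SWP_TYPE_SHIFT) - 1));
        return 0;
}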
|
D | internal.h |
      241 return page_private(page); in page_order()
      255 #define page_order_unsafe(page) READ_ONCE(page_private(page))
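internal.h shows the buddy-allocator meaning: for a free page, page->private stores the order of the block, and page_order_unsafe() wraps the read in READ_ONCE() because compaction inspects pages it does not own. kernel/kexec_core.c and the perf BTS/PT buffer code further down stash an allocation order the same way. A small sketch of the order round trip, including the buffer-size arithmetic from perf_event_intel_bts.c:

/* Userspace model of storing an allocation order in page->private,
 * as mm/internal.h's page_order() does for free buddy pages. */
#include <stdio.h>

#define PAGE_SHIFT 12                            /* 4 KiB pages, typical */

struct page { unsigned long private; };
#define page_private(page) ((page)->private)

static unsigned int page_order(struct page *page)
{
        return page_private(page);
}

int main(void)
{
        struct page head = { .private = 3 };     /* an order-3 block: 8 pages */

        /* perf_event_intel_bts.c's buf_size() computes the size the same way: */
        unsigned long bytes = 1UL << (PAGE_SHIFT + page_order(&head));
        printf("order=%u -> %lu bytes\n", page_order(&head), bytes);
        return 0;
}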
|
D | frontswap.c |
      245 swp_entry_t entry = { .val = page_private(page), }; in __frontswap_store()
      300 swp_entry_t entry = { .val = page_private(page), }; in __frontswap_load()
|
D | swapfile.c |
      868 entry.val = page_private(page); in page_swapcount()
      903 VM_BUG_ON(page_private(page) != SWP_CONTINUED); in swp_swapcount()
      1568 likely(page_private(page) == entry.val)) in try_to_unuse()
      1655 entry.val = page_private(page); in map_swap_page()
      2734 swp_entry_t swap = { .val = page_private(page) }; in page_swap_info()
      2751 swp_entry_t swap = { .val = page_private(page) }; in __page_file_index()
      2826 if (!page_private(head)) { in add_swap_count_continuation()
      2881 if (page_private(head) != SWP_CONTINUED) { in swap_count_continued()
      2961 if (page_private(head)) { in free_swap_count_continuations()
|
D | zsmalloc.c |
      781 return (struct page *)page_private(page); in get_first_page()
      791 next = (struct page *)page_private(page); in get_next_page()
      841 return page_private(page); in obj_to_head()
      893 head_extra = (struct page *)page_private(first_page); in free_zspage()
|
D | page_io.c | 117 entry.val = page_private(page); in end_swap_bio_read()
|
D | util.c | 367 entry.val = page_private(page); in page_mapping()
|
D | migrate.c |
      384 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
      642 set_page_private(newpage, page_private(page)); in buffer_migrate_page()
|
D | huge_memory.c |
      1075 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
      1113 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
      1142 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
|
D | shmem.c |
      723 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) in shmem_unuse()
      990 swap_index = page_private(oldpage); in shmem_replace_page()
      1122 if (!PageSwapCache(page) || page_private(page) != swap.val || in shmem_getpage_gfp()
|
D | rmap.c | 1396 swp_entry_t entry = { .val = page_private(page) }; in try_to_unmap_one()
|
D | memcontrol.c |
      5349 swp_entry_t ent = { .val = page_private(page), }; in mem_cgroup_try_charge()
      5423 swp_entry_t entry = { .val = page_private(page) }; in mem_cgroup_commit_charge()
|
D | vmscan.c | 654 swp_entry_t swap = { .val = page_private(page) }; in __remove_mapping()
|
D | hugetlb.c | 1213 (struct hugepage_subpool *)page_private(page); in free_huge_page()
|
D | memory.c | 2561 if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) in do_swap_page()
|
/linux-4.4.14/include/linux/ |
D | balloon_compaction.h | 156 return (struct balloon_dev_info *)page_private(page); in balloon_page_device()
|
D | buffer_head.h | 140 ((struct buffer_head *)page_private(page)); \
|
D | mm.h |
      314 #define page_private(page) ((page)->private) macro
      961 return page_private(page); in page_index()
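mm.h line 314 is the definition every call site in this list expands to: page_private(page) is nothing more than ((page)->private). The buffer_head.h hit at line 140 comes from the page_buffers() macro, the canonical pointer-cast consumer, where the private field points at the first buffer_head attached to the page. A compressed userspace sketch of both layers, with assert() standing in for the BUG_ON(!PagePrivate(page)) guard of the real macro and a one-field stand-in buffer_head:

/* Userspace model of page_buffers(): page->private points at the first
 * buffer_head attached to the page.  Uses a GNU statement expression,
 * as the kernel macro does. */
#include <assert.h>
#include <stdio.h>

struct page { unsigned long private; };
#define page_private(page) ((page)->private)     /* include/linux/mm.h:314 */

struct buffer_head { unsigned long b_blocknr; }; /* stand-in, one field only */

#define page_buffers(page)                                       \
        ({                                                       \
                assert(page_private(page) != 0);                 \
                ((struct buffer_head *)page_private(page));      \
        })

int main(void)
{
        struct buffer_head bh = { .b_blocknr = 1234 };
        struct page pg = { .private = (unsigned long)&bh };

        printf("first buffer maps block %lu\n", page_buffers(&pg)->b_blocknr);
        return 0;
}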
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
D | perf_event_intel_bts.c |
      67 return 1 << (PAGE_SHIFT + page_private(page)); in buf_size()
      85 pg += 1 << page_private(page); in bts_buffer_setup_aux()
      109 __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1; in bts_buffer_setup_aux()
|
D | perf_event_intel_pt.c | 442 order = page_private(p); in topa_insert_pages()
|
/linux-4.4.14/arch/frv/mm/ |
D | pgalloc.c | 103 pprev = (struct page **) page_private(page); in pgd_list_del()
|
/linux-4.4.14/fs/f2fs/ |
D | crypto.c |
      284 ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page); in f2fs_restore_and_release_control_page()
      295 (struct f2fs_crypto_ctx *)page_private(data_page); in f2fs_restore_control_page()
|
D | trace.c | 95 pid = page_private(fio->page); in f2fs_trace_ios()
|
D | segment.h | 189 (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
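The segment.h hit shows yet another variant: page->private is compared against a sentinel constant (ATOMIC_WRITTEN_PAGE), so the field acts as a type tag for pages written by f2fs atomic writes rather than as a pointer or an index. A minimal sketch of that tag check; the sentinel value below is invented for illustration:

/* Userspace model of tagging a page by storing a sentinel in
 * page->private and comparing against it, as the f2fs segment.h
 * check above does.  The sentinel value is made up. */
#include <stdio.h>

struct page { unsigned long private; };
#define page_private(page) ((page)->private)

#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)  /* illustrative sentinel */

#define IS_ATOMIC_WRITTEN_PAGE(page) \
        (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)

int main(void)
{
        struct page pg = { .private = ATOMIC_WRITTEN_PAGE };

        printf("atomic written? %d\n", IS_ATOMIC_WRITTEN_PAGE(&pg));
        pg.private = 0;                          /* tag cleared after writeback */
        printf("after clearing:  %d\n", IS_ATOMIC_WRITTEN_PAGE(&pg));
        return 0;
}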
|
D | segment.c | 1453 ctx = (struct f2fs_crypto_ctx *)page_private( in is_merged_page()
|
/linux-4.4.14/drivers/firewire/ |
D | core-iso.c |
      132 address = page_private(buffer->pages[i]); in fw_iso_buffer_destroy()
      154 address = page_private(buffer->pages[i]); in fw_iso_buffer_lookup()
|
D | ohci.c |
      667 return page_private(ctx->pages[i]); in ar_buffer_bus()
      3279 page_bus = page_private(buffer->pages[page]); in queue_iso_transmit()
      3362 page_bus = page_private(buffer->pages[page]); in queue_iso_packet_per_buffer()
      3425 page_bus = page_private(buffer->pages[page]); in queue_iso_buffer_fill()
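The firewire core and the OHCI driver cache a per-page DMA bus address in page->private when an isochronous buffer is mapped, so fw_iso_buffer_lookup() and ar_buffer_bus() can fill DMA descriptors without re-deriving the mapping. A userspace sketch of that caching; fake_dma_map_page() and the fw_iso_buffer layout are invented stand-ins, not the driver's API:

/* Userspace model of caching a DMA bus address in page->private, in the
 * style of the firewire iso-buffer code.  fake_dma_map_page() is an
 * invented stand-in for dma_map_page(). */
#include <stdio.h>

typedef unsigned long dma_addr_t;

struct page { unsigned long private; };
#define page_private(page) ((page)->private)

static dma_addr_t fake_dma_map_page(struct page *page)
{
        /* Pretend the device sees this page at some fixed bus address. */
        return 0x80000000UL + ((unsigned long)page & 0xfff000UL);
}

struct fw_iso_buffer { struct page *pages[4]; }; /* simplified stand-in */

static dma_addr_t fw_iso_buffer_bus(struct fw_iso_buffer *buffer, int i)
{
        /* Read back the cached address, as ar_buffer_bus() does. */
        return page_private(buffer->pages[i]);
}

int main(void)
{
        static struct page backing[4];
        struct fw_iso_buffer buf;

        for (int i = 0; i < 4; i++) {
                buf.pages[i] = &backing[i];
                backing[i].private = fake_dma_map_page(&backing[i]);
        }
        printf("page 2 bus address: %#lx\n",
               (unsigned long)fw_iso_buffer_bus(&buf, 2));
        return 0;
}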
|
/linux-4.4.14/arch/mn10300/mm/ |
D | pgtable.c | 122 pprev = (struct page **) page_private(page); in pgd_list_del()
|
/linux-4.4.14/kernel/events/ |
D | ring_buffer.c |
      514 for (last = rb->aux_nr_pages + (1 << page_private(page)); in rb_alloc_aux()
      529 if (page_private(page) != max_order) in rb_alloc_aux()
|
/linux-4.4.14/fs/ext4/ |
D | crypto.c | 228 (struct ext4_crypto_ctx *)page_private(data_page); in ext4_restore_control_page()
|
D | page-io.c | 85 ctx = (struct ext4_crypto_ctx *)page_private(data_page); in ext4_finish_bio()
|
/linux-4.4.14/fs/jfs/ |
D | jfs_metapage.c |
      92 #define mp_anchor(page) ((struct meta_anchor *)page_private(page))
      161 return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL; in page_to_mp()
|
/linux-4.4.14/drivers/xen/ |
D | grant-table.c | 724 kfree((void *)page_private(pages[i])); in gnttab_free_pages()
|
/linux-4.4.14/kernel/ |
D | kexec_core.c | 304 order = page_private(page); in kimage_free_pages()
|
D | relay.c | 1189 rbuf = (struct rchan_buf *)page_private(buf->page); in relay_pipe_buf_release()
|
/linux-4.4.14/drivers/staging/lustre/lustre/osc/ |
D | osc_request.c |
      1192 page_private(pga[i]->pg), in osc_checksum_bulk()
      1315 pg->pg, page_private(pg->pg), pg->pg->index, pg->off, in osc_brw_prep_request()
      1316 pg_prev->pg, page_private(pg_prev->pg), in osc_brw_prep_request()
|
/linux-4.4.14/arch/x86/include/asm/ |
D | kvm_host.h | 1109 return (struct kvm_mmu_page *)page_private(page); in page_header()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | vvp_io.c | 618 page_private(vmf->page), vmf->virtual_address); in vvp_io_kernel_fault()
|
/linux-4.4.14/fs/nfs/ |
D | write.c | 113 req = (struct nfs_page *)page_private(page); in nfs_page_find_head_request_locked()
|
/linux-4.4.14/net/core/ |
D | skbuff.c |
      892 struct page *next = (struct page *)page_private(head); in skb_copy_ubufs()
      916 head = (struct page *)page_private(head); in skb_copy_ubufs()
|