Home
last modified time | relevance | path

Searched refs: new_page (Results 1 – 28 of 28) sorted by relevance

/linux-4.1.27/mm/
Dswap_state.c300 struct page *found_page, *new_page = NULL; in read_swap_cache_async() local
317 if (!new_page) { in read_swap_cache_async()
318 new_page = alloc_page_vma(gfp_mask, vma, addr); in read_swap_cache_async()
319 if (!new_page) in read_swap_cache_async()
360 __set_page_locked(new_page); in read_swap_cache_async()
361 SetPageSwapBacked(new_page); in read_swap_cache_async()
362 err = __add_to_swap_cache(new_page, entry); in read_swap_cache_async()
368 lru_cache_add_anon(new_page); in read_swap_cache_async()
369 swap_readpage(new_page); in read_swap_cache_async()
370 return new_page; in read_swap_cache_async()
[all …]
Dzswap.c448 struct page *found_page, *new_page = NULL; in zswap_get_swap_cache_page() local
466 if (!new_page) { in zswap_get_swap_cache_page()
467 new_page = alloc_page(GFP_KERNEL); in zswap_get_swap_cache_page()
468 if (!new_page) in zswap_get_swap_cache_page()
493 __set_page_locked(new_page); in zswap_get_swap_cache_page()
494 SetPageSwapBacked(new_page); in zswap_get_swap_cache_page()
495 err = __add_to_swap_cache(new_page, entry); in zswap_get_swap_cache_page()
498 lru_cache_add_anon(new_page); in zswap_get_swap_cache_page()
499 *retpage = new_page; in zswap_get_swap_cache_page()
503 ClearPageSwapBacked(new_page); in zswap_get_swap_cache_page()
[all …]
Dmigrate.c1725 struct page *new_page = NULL; in migrate_misplaced_transhuge_page() local
1739 new_page = alloc_pages_node(node, in migrate_misplaced_transhuge_page()
1742 if (!new_page) in migrate_misplaced_transhuge_page()
1747 put_page(new_page); in migrate_misplaced_transhuge_page()
1755 __set_page_locked(new_page); in migrate_misplaced_transhuge_page()
1756 SetPageSwapBacked(new_page); in migrate_misplaced_transhuge_page()
1759 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
1760 new_page->index = page->index; in migrate_misplaced_transhuge_page()
1761 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
1762 WARN_ON(PageLRU(new_page)); in migrate_misplaced_transhuge_page()
[all …]
Dhuge_memory.c1087 struct page *page = NULL, *new_page; in do_huge_pmd_wp_page() local
1120 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_wp_page()
1122 new_page = NULL; in do_huge_pmd_wp_page()
1124 if (unlikely(!new_page)) { in do_huge_pmd_wp_page()
1141 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) { in do_huge_pmd_wp_page()
1142 put_page(new_page); in do_huge_pmd_wp_page()
1156 clear_huge_page(new_page, haddr, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1158 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1159 __SetPageUptodate(new_page); in do_huge_pmd_wp_page()
1170 mem_cgroup_cancel_charge(new_page, memcg); in do_huge_pmd_wp_page()
[all …]
Dmemory.c2063 struct page *new_page = NULL; in wp_page_copy() local
2075 new_page = alloc_zeroed_user_highpage_movable(vma, address); in wp_page_copy()
2076 if (!new_page) in wp_page_copy()
2079 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in wp_page_copy()
2080 if (!new_page) in wp_page_copy()
2082 cow_user_page(new_page, old_page, address, vma); in wp_page_copy()
2084 __SetPageUptodate(new_page); in wp_page_copy()
2086 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) in wp_page_copy()
2105 entry = mk_pte(new_page, vma->vm_page_prot); in wp_page_copy()
2114 page_add_new_anon_rmap(new_page, vma, address); in wp_page_copy()
[all …]
Dksm.c1866 struct page *new_page; in ksm_might_need_to_copy() local
1881 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in ksm_might_need_to_copy()
1882 if (new_page) { in ksm_might_need_to_copy()
1883 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
1885 SetPageDirty(new_page); in ksm_might_need_to_copy()
1886 __SetPageUptodate(new_page); in ksm_might_need_to_copy()
1887 __set_page_locked(new_page); in ksm_might_need_to_copy()
1890 return new_page; in ksm_might_need_to_copy()
Dhugetlb.c2933 struct page *old_page, *new_page; in hugetlb_cow() local
2969 new_page = alloc_huge_page(vma, address, outside_reserve); in hugetlb_cow()
2971 if (IS_ERR(new_page)) { in hugetlb_cow()
2996 ret = (PTR_ERR(new_page) == -ENOMEM) ? in hugetlb_cow()
3010 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
3012 __SetPageUptodate(new_page); in hugetlb_cow()
3013 set_page_huge_active(new_page); in hugetlb_cow()
3026 ClearPagePrivate(new_page); in hugetlb_cow()
3032 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
3034 hugepage_add_new_anon_rmap(new_page, vma, address); in hugetlb_cow()
[all …]
Dmemory-failure.c1499 static struct page *new_page(struct page *p, unsigned long private, int **x) in new_page() function
1602 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, in soft_offline_huge_page()
1683 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, in __soft_offline_page()
Dmempolicy.c1097 static struct page *new_page(struct page *page, unsigned long start, int **x) in new_page() function
1132 static struct page *new_page(struct page *page, unsigned long start, int **x) in new_page() function
1217 nr_failed = migrate_pages(&pagelist, new_page, NULL, in do_mbind()
/linux-4.1.27/arch/s390/mm/
Dvmem.c228 void *new_page; in vmemmap_populate() local
230 new_page = vmemmap_alloc_block(PMD_SIZE, node); in vmemmap_populate()
231 if (!new_page) in vmemmap_populate()
233 pmd_val(*pm_dir) = __pa(new_page) | in vmemmap_populate()
249 void *new_page; in vmemmap_populate() local
251 new_page = vmemmap_alloc_block(PAGE_SIZE, node); in vmemmap_populate()
252 if (!new_page) in vmemmap_populate()
255 __pa(new_page) | pgprot_val(PAGE_KERNEL); in vmemmap_populate()
/linux-4.1.27/fs/f2fs/
Dnamei.c455 struct page *old_page, *new_page; in f2fs_rename() local
482 &new_page); in f2fs_rename()
497 f2fs_set_link(new_dir, new_entry, new_page, old_inode); in f2fs_rename()
561 f2fs_dentry_kunmap(new_dir, new_page); in f2fs_rename()
562 f2fs_put_page(new_page, 0); in f2fs_rename()
582 struct page *old_page, *new_page; in f2fs_cross_rename() local
594 new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); in f2fs_cross_rename()
672 f2fs_set_link(new_dir, new_entry, new_page, old_inode); in f2fs_cross_rename()
713 f2fs_dentry_kunmap(new_dir, new_page); in f2fs_cross_rename()
714 f2fs_put_page(new_page, 0); in f2fs_cross_rename()
/linux-4.1.27/fs/minix/
Dnamei.c210 struct page * new_page; in minix_rename() local
218 new_de = minix_find_entry(new_dentry, &new_page); in minix_rename()
221 minix_set_link(new_de, new_page, old_inode); in minix_rename()
/linux-4.1.27/fs/sysv/
Dnamei.c231 struct page * new_page; in sysv_rename() local
239 new_de = sysv_find_entry(new_dentry, &new_page); in sysv_rename()
242 sysv_set_link(new_de, new_page, old_inode); in sysv_rename()
/linux-4.1.27/fs/exofs/
Dnamei.c253 struct page *new_page; in exofs_rename() local
261 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); in exofs_rename()
264 err = exofs_set_link(new_dir, new_de, new_page, old_inode); in exofs_rename()
/linux-4.1.27/fs/ufs/
Dnamei.c294 struct page *new_page; in ufs_rename() local
302 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); in ufs_rename()
305 ufs_set_link(new_dir, new_de, new_page, old_inode); in ufs_rename()
/linux-4.1.27/fs/ubifs/
Dbudget.c379 znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) + in calc_idx_growth()
396 if (req->new_page) in calc_data_growth()
442 ubifs_assert(req->new_page <= 1); in ubifs_budget_space()
529 ubifs_assert(req->new_page <= 1); in ubifs_release_budget()
Dfile.c202 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 }; in release_new_page_budget()
228 struct ubifs_budget_req req = { .new_page = 1 }; in write_begin_slow()
369 req.new_page = 1; in allocate_budget()
1449 struct ubifs_budget_req req = { .new_page = 1 }; in ubifs_vm_page_mkwrite()
Dubifs.h886 unsigned int new_page:1; member
896 unsigned int new_page; member
Ddebug.c556 req->new_page, req->dirtied_page); in ubifs_dump_budget_req()
/linux-4.1.27/fs/ext2/
Dnamei.c342 struct page *new_page; in ext2_rename() local
350 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); in ext2_rename()
353 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); in ext2_rename()
/linux-4.1.27/fs/jbd/
Djournal.c313 struct page *new_page; in journal_write_metadata_buffer() local
342 new_page = virt_to_page(jh_in->b_frozen_data); in journal_write_metadata_buffer()
345 new_page = jh2bh(jh_in)->b_page; in journal_write_metadata_buffer()
349 mapped_data = kmap_atomic(new_page); in journal_write_metadata_buffer()
375 mapped_data = kmap_atomic(new_page); in journal_write_metadata_buffer()
379 new_page = virt_to_page(tmp); in journal_write_metadata_buffer()
389 mapped_data = kmap_atomic(new_page); in journal_write_metadata_buffer()
394 set_bh_page(new_bh, new_page, new_offset); in journal_write_metadata_buffer()
/linux-4.1.27/kernel/events/
Duprobes.c303 struct page *old_page, *new_page; in uprobe_write_opcode() local
322 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); in uprobe_write_opcode()
323 if (!new_page) in uprobe_write_opcode()
326 __SetPageUptodate(new_page); in uprobe_write_opcode()
327 copy_highpage(new_page, old_page); in uprobe_write_opcode()
328 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); in uprobe_write_opcode()
330 ret = __replace_page(vma, vaddr, old_page, new_page); in uprobe_write_opcode()
331 page_cache_release(new_page); in uprobe_write_opcode()
/linux-4.1.27/fs/
Ddax.c383 struct page *new_page = vmf->cow_page; in do_dax_fault() local
385 error = copy_user_bh(new_page, &bh, blkbits, vaddr); in do_dax_fault()
387 clear_user_highpage(new_page, vaddr); in do_dax_fault()
/linux-4.1.27/fs/nilfs2/
Dnamei.c387 struct page *new_page; in nilfs_rename() local
395 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); in nilfs_rename()
398 nilfs_set_link(new_dir, new_de, new_page, old_inode); in nilfs_rename()
/linux-4.1.27/fs/jbd2/
Djournal.c358 struct page *new_page; in jbd2_journal_write_metadata_buffer() local
396 new_page = virt_to_page(jh_in->b_frozen_data); in jbd2_journal_write_metadata_buffer()
399 new_page = jh2bh(jh_in)->b_page; in jbd2_journal_write_metadata_buffer()
403 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
443 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
447 new_page = virt_to_page(tmp); in jbd2_journal_write_metadata_buffer()
464 mapped_data = kmap_atomic(new_page); in jbd2_journal_write_metadata_buffer()
469 set_bh_page(new_bh, new_page, new_offset); in jbd2_journal_write_metadata_buffer()
/linux-4.1.27/drivers/tty/serial/
Dicom.c361 unsigned char *new_page = NULL; in load_code() local
434 new_page = pci_alloc_consistent(dev, 4096, &temp_pci); in load_code()
436 if (!new_page) { in load_code()
456 new_page[index] = fw->data[index]; in load_code()
513 if (new_page != NULL) in load_code()
514 pci_free_consistent(dev, 4096, new_page, temp_pci); in load_code()
/linux-4.1.27/drivers/staging/rts5208/
Dxd.c1200 u32 old_page, new_page; in xd_copy_page() local
1218 new_page = (new_blk << xd_card->block_shift) + start_page; in xd_copy_page()
1290 xd_assign_phy_addr(chip, new_page, XD_RW_ADDR); in xd_copy_page()
1314 new_page++; in xd_copy_page()
/linux-4.1.27/drivers/net/vmxnet3/
Dvmxnet3_drv.c1183 struct page *new_page = NULL; in vmxnet3_rq_rx_complete() local
1276 new_page = alloc_page(GFP_ATOMIC); in vmxnet3_rq_rx_complete()
1277 if (unlikely(new_page == NULL)) { in vmxnet3_rq_rx_complete()
1299 rbi->page = new_page; in vmxnet3_rq_rx_complete()