new_page 265 arch/s390/mm/vmem.c void *new_page;
new_page 267 arch/s390/mm/vmem.c new_page = vmemmap_alloc_block(PMD_SIZE, node);
new_page 268 arch/s390/mm/vmem.c if (!new_page)
new_page 270 arch/s390/mm/vmem.c pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
new_page 285 arch/s390/mm/vmem.c void *new_page;
new_page 287 arch/s390/mm/vmem.c new_page = vmemmap_alloc_block(PAGE_SIZE, node);
new_page 288 arch/s390/mm/vmem.c if (!new_page)
new_page 290 arch/s390/mm/vmem.c pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
new_page 674 drivers/net/ethernet/ti/cpsw.c struct page *new_page, *page = token;
new_page 706 drivers/net/ethernet/ti/cpsw.c new_page = page;
new_page 715 drivers/net/ethernet/ti/cpsw.c new_page = page_pool_dev_alloc_pages(pool);
new_page 716 drivers/net/ethernet/ti/cpsw.c if (unlikely(!new_page)) {
new_page 717 drivers/net/ethernet/ti/cpsw.c new_page = page;
new_page 775 drivers/net/ethernet/ti/cpsw.c xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
new_page 779 drivers/net/ethernet/ti/cpsw.c dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
new_page 780 drivers/net/ethernet/ti/cpsw.c ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
new_page 784 drivers/net/ethernet/ti/cpsw.c page_pool_recycle_direct(pool, new_page);
new_page 1298 drivers/net/vmxnet3/vmxnet3_drv.c struct page *new_page = NULL;
new_page 1450 drivers/net/vmxnet3/vmxnet3_drv.c new_page = alloc_page(GFP_ATOMIC);
new_page 1456 drivers/net/vmxnet3/vmxnet3_drv.c if (unlikely(!new_page)) {
new_page 1464 drivers/net/vmxnet3/vmxnet3_drv.c new_page,
new_page 1469 drivers/net/vmxnet3/vmxnet3_drv.c put_page(new_page);
new_page 1484 drivers/net/vmxnet3/vmxnet3_drv.c rbi->page = new_page;
new_page 629 drivers/net/wireless/intel/iwlwifi/fw/dbg.c struct page *new_page;
new_page 640 drivers/net/wireless/intel/iwlwifi/fw/dbg.c new_page = alloc_page(GFP_KERNEL);
new_page 641 drivers/net/wireless/intel/iwlwifi/fw/dbg.c if (!new_page) {
new_page 645 drivers/net/wireless/intel/iwlwifi/fw/dbg.c new_page = sg_page(iter);
new_page 646 drivers/net/wireless/intel/iwlwifi/fw/dbg.c if (new_page)
new_page 647 drivers/net/wireless/intel/iwlwifi/fw/dbg.c __free_page(new_page);
new_page 654 drivers/net/wireless/intel/iwlwifi/fw/dbg.c sg_set_page(iter, new_page, alloc_size, 0);
new_page 1102 drivers/staging/rts5208/xd.c u32 old_page, new_page;
new_page 1116 drivers/staging/rts5208/xd.c new_page = (new_blk << xd_card->block_shift) + start_page;
new_page 1185 drivers/staging/rts5208/xd.c xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
new_page 1208 drivers/staging/rts5208/xd.c new_page++;
new_page 344 drivers/tty/serial/icom.c unsigned char *new_page = NULL;
new_page 417 drivers/tty/serial/icom.c new_page = pci_alloc_consistent(dev, 4096, &temp_pci);
new_page 419 drivers/tty/serial/icom.c if (!new_page) {
new_page 439 drivers/tty/serial/icom.c new_page[index] = fw->data[index];
new_page 496 drivers/tty/serial/icom.c if (new_page != NULL)
new_page 497 drivers/tty/serial/icom.c pci_free_consistent(dev, 4096, new_page, temp_pci);
new_page 349 fs/ext2/namei.c struct page *new_page;
new_page 357 fs/ext2/namei.c new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
new_page 360 fs/ext2/namei.c ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
new_page 854 fs/f2fs/namei.c struct page *old_page, *new_page = NULL;
new_page 916 fs/f2fs/namei.c &new_page);
new_page 918 fs/f2fs/namei.c if (IS_ERR(new_page))
new_page 919 fs/f2fs/namei.c err = PTR_ERR(new_page);
new_page 931 fs/f2fs/namei.c f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_page 1028 fs/f2fs/namei.c if (new_page)
new_page 1029 fs/f2fs/namei.c f2fs_put_page(new_page, 0);
new_page 1048 fs/f2fs/namei.c struct page *old_page, *new_page;
new_page 1083 fs/f2fs/namei.c new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
new_page 1085 fs/f2fs/namei.c if (IS_ERR(new_page))
new_page 1086 fs/f2fs/namei.c err = PTR_ERR(new_page);
new_page 1160 fs/f2fs/namei.c f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_page 1199 fs/f2fs/namei.c f2fs_put_page(new_page, 0);
new_page 345 fs/jbd2/journal.c struct page *new_page;
new_page 374 fs/jbd2/journal.c new_page = virt_to_page(jh_in->b_frozen_data);
new_page 377 fs/jbd2/journal.c new_page = jh2bh(jh_in)->b_page;
new_page 381 fs/jbd2/journal.c mapped_data = kmap_atomic(new_page);
new_page 421 fs/jbd2/journal.c mapped_data = kmap_atomic(new_page);
new_page 425 fs/jbd2/journal.c new_page = virt_to_page(tmp);
new_page 442 fs/jbd2/journal.c mapped_data = kmap_atomic(new_page);
new_page 447 fs/jbd2/journal.c set_bh_page(new_bh, new_page, new_offset);
new_page 211 fs/minix/namei.c struct page * new_page;
new_page 219 fs/minix/namei.c new_de = minix_find_entry(new_dentry, &new_page);
new_page 222 fs/minix/namei.c minix_set_link(new_de, new_page, old_inode);
new_page 374 fs/nilfs2/namei.c struct page *new_page;
new_page 382 fs/nilfs2/namei.c new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
new_page 385 fs/nilfs2/namei.c nilfs_set_link(new_dir, new_de, new_page, old_inode);
new_page 216 fs/sysv/namei.c struct page * new_page;
new_page 224 fs/sysv/namei.c new_de = sysv_find_entry(new_dentry, &new_page);
new_page 227 fs/sysv/namei.c sysv_set_link(new_de, new_page, old_inode);
new_page 367 fs/ubifs/budget.c znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +
new_page 384 fs/ubifs/budget.c if (req->new_page)
new_page 430 fs/ubifs/budget.c ubifs_assert(c, req->new_page <= 1);
new_page 517 fs/ubifs/budget.c ubifs_assert(c, req->new_page <= 1);
new_page 554 fs/ubifs/debug.c req->new_page, req->dirtied_page);
new_page 198 fs/ubifs/file.c struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
new_page 224 fs/ubifs/file.c struct ubifs_budget_req req = { .new_page = 1 };
new_page 365 fs/ubifs/file.c req.new_page = 1;
new_page 1513 fs/ubifs/file.c struct ubifs_budget_req req = { .new_page = 1 };
new_page 886 fs/ubifs/ubifs.h unsigned int new_page:1;
new_page 896 fs/ubifs/ubifs.h unsigned int new_page;
new_page 270 fs/ufs/namei.c struct page *new_page;
new_page 278 fs/ufs/namei.c new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
new_page 281 fs/ufs/namei.c ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
new_page 39 include/linux/migrate.h struct page *new_page = NULL;
new_page 53 include/linux/migrate.h new_page = __alloc_pages_nodemask(gfp_mask, order,
new_page 56 include/linux/migrate.h if (new_page && PageTransHuge(new_page))
new_page 57 include/linux/migrate.h prep_transhuge_page(new_page);
new_page 59 include/linux/migrate.h return new_page;
new_page 155 kernel/events/uprobes.c struct page *old_page, struct page *new_page)
new_page 170 kernel/events/uprobes.c if (new_page) {
new_page 171 kernel/events/uprobes.c err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
new_page 183 kernel/events/uprobes.c if (new_page)
new_page 184 kernel/events/uprobes.c mem_cgroup_cancel_charge(new_page, memcg, false);
new_page 189 kernel/events/uprobes.c if (new_page) {
new_page 190 kernel/events/uprobes.c get_page(new_page);
new_page 191 kernel/events/uprobes.c page_add_new_anon_rmap(new_page, vma, addr, false);
new_page 192 kernel/events/uprobes.c mem_cgroup_commit_charge(new_page, memcg, false, false);
new_page 193 kernel/events/uprobes.c lru_cache_add_active_or_unevictable(new_page, vma);
new_page 205 kernel/events/uprobes.c if (new_page)
new_page 207 kernel/events/uprobes.c mk_pte(new_page, vma->vm_page_prot));
new_page 473 kernel/events/uprobes.c struct page *old_page, *new_page;
new_page 519 kernel/events/uprobes.c new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
new_page 520 kernel/events/uprobes.c if (!new_page)
new_page 523 kernel/events/uprobes.c __SetPageUptodate(new_page);
new_page 524 kernel/events/uprobes.c copy_highpage(new_page, old_page);
new_page 525 kernel/events/uprobes.c copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
new_page 539 kernel/events/uprobes.c pages_identical(new_page, orig_page)) {
new_page 541 kernel/events/uprobes.c put_page(new_page);
new_page 542 kernel/events/uprobes.c new_page = NULL;
new_page 551 kernel/events/uprobes.c ret = __replace_page(vma, vaddr, old_page, new_page);
new_page 552 kernel/events/uprobes.c if (new_page)
new_page 553 kernel/events/uprobes.c put_page(new_page);
new_page 1318 mm/huge_memory.c struct page *page = NULL, *new_page;
new_page 1368 mm/huge_memory.c new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
new_page 1370 mm/huge_memory.c new_page = NULL;
new_page 1372 mm/huge_memory.c if (likely(new_page)) {
new_page 1373 mm/huge_memory.c prep_transhuge_page(new_page);
new_page 1390 mm/huge_memory.c if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
new_page 1392 mm/huge_memory.c put_page(new_page);
new_page 1405 mm/huge_memory.c clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
new_page 1407 mm/huge_memory.c copy_user_huge_page(new_page, page, vmf->address,
new_page 1409 mm/huge_memory.c __SetPageUptodate(new_page);
new_page 1420 mm/huge_memory.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 1421 mm/huge_memory.c put_page(new_page);
new_page 1425 mm/huge_memory.c entry = mk_huge_pmd(new_page, vma->vm_page_prot);
new_page 1428 mm/huge_memory.c page_add_new_anon_rmap(new_page, vma, haddr, true);
new_page 1429 mm/huge_memory.c mem_cgroup_commit_charge(new_page, memcg, false, true);
new_page 1430 mm/huge_memory.c lru_cache_add_active_or_unevictable(new_page, vma);
new_page 3733 mm/hugetlb.c struct page *old_page, *new_page;
new_page 3771 mm/hugetlb.c new_page = alloc_huge_page(vma, haddr, outside_reserve);
new_page 3773 mm/hugetlb.c if (IS_ERR(new_page)) {
new_page 3798 mm/hugetlb.c ret = vmf_error(PTR_ERR(new_page));
new_page 3811 mm/hugetlb.c copy_user_huge_page(new_page, old_page, address, vma,
new_page 3813 mm/hugetlb.c __SetPageUptodate(new_page);
new_page 3826 mm/hugetlb.c ClearPagePrivate(new_page);
new_page 3832 mm/hugetlb.c make_huge_pte(vma, new_page, 1));
new_page 3834 mm/hugetlb.c hugepage_add_new_anon_rmap(new_page, vma, haddr);
new_page 3835 mm/hugetlb.c set_page_huge_active(new_page);
new_page 3837 mm/hugetlb.c new_page = old_page;
new_page 3842 mm/hugetlb.c restore_reserve_on_error(h, vma, haddr, new_page);
new_page 3843 mm/hugetlb.c put_page(new_page);
new_page 3920 mm/hugetlb.c bool new_page = false;
new_page 4000 mm/hugetlb.c new_page = true;
new_page 4077 mm/hugetlb.c if (new_page)
new_page 954 mm/khugepaged.c struct page *new_page;
new_page 974 mm/khugepaged.c new_page = khugepaged_alloc_page(hpage, gfp, node);
new_page 975 mm/khugepaged.c if (!new_page) {
new_page 980 mm/khugepaged.c if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
new_page 988 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 996 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 1007 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 1075 mm/khugepaged.c __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
new_page 1077 mm/khugepaged.c __SetPageUptodate(new_page);
new_page 1080 mm/khugepaged.c _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
new_page 1092 mm/khugepaged.c page_add_new_anon_rmap(new_page, vma, address, true);
new_page 1093 mm/khugepaged.c mem_cgroup_commit_charge(new_page, memcg, false, true);
new_page 1095 mm/khugepaged.c lru_cache_add_active_or_unevictable(new_page, vma);
new_page 1111 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 1498 mm/khugepaged.c struct page *new_page;
new_page 1512 mm/khugepaged.c new_page = khugepaged_alloc_page(hpage, gfp, node);
new_page 1513 mm/khugepaged.c if (!new_page) {
new_page 1518 mm/khugepaged.c if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
new_page 1531 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 1537 mm/khugepaged.c __SetPageLocked(new_page);
new_page 1539 mm/khugepaged.c __SetPageSwapBacked(new_page);
new_page 1540 mm/khugepaged.c new_page->index = start;
new_page 1541 mm/khugepaged.c new_page->mapping = mapping;
new_page 1572 mm/khugepaged.c xas_store(&xas, new_page);
new_page 1691 mm/khugepaged.c xas_store(&xas, new_page);
new_page 1700 mm/khugepaged.c __inc_node_page_state(new_page, NR_SHMEM_THPS);
new_page 1702 mm/khugepaged.c __inc_node_page_state(new_page, NR_FILE_THPS);
new_page 1707 mm/khugepaged.c struct zone *zone = page_zone(new_page);
new_page 1729 mm/khugepaged.c clear_highpage(new_page + (index % HPAGE_PMD_NR));
new_page 1732 mm/khugepaged.c copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
new_page 1744 mm/khugepaged.c clear_highpage(new_page + (index % HPAGE_PMD_NR));
new_page 1748 mm/khugepaged.c SetPageUptodate(new_page);
new_page 1749 mm/khugepaged.c page_ref_add(new_page, HPAGE_PMD_NR - 1);
new_page 1750 mm/khugepaged.c mem_cgroup_commit_charge(new_page, memcg, false, true);
new_page 1753 mm/khugepaged.c set_page_dirty(new_page);
new_page 1754 mm/khugepaged.c lru_cache_add_anon(new_page);
new_page 1756 mm/khugepaged.c lru_cache_add_file(new_page);
new_page 1805 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
new_page 1806 mm/khugepaged.c new_page->mapping = NULL;
new_page 1809 mm/khugepaged.c unlock_page(new_page);
new_page 2571 mm/ksm.c struct page *new_page;
new_page 2586 mm/ksm.c new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
new_page 2587 mm/ksm.c if (new_page) {
new_page 2588 mm/ksm.c copy_user_highpage(new_page, page, address, vma);
new_page 2590 mm/ksm.c SetPageDirty(new_page);
new_page 2591 mm/ksm.c __SetPageUptodate(new_page);
new_page 2592 mm/ksm.c __SetPageLocked(new_page);
new_page 2595 mm/ksm.c return new_page;
new_page 1714 mm/memory-failure.c ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
new_page 1804 mm/memory-failure.c ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
new_page 2326 mm/memory.c struct page *new_page = NULL;
new_page 2336 mm/memory.c new_page = alloc_zeroed_user_highpage_movable(vma,
new_page 2338 mm/memory.c if (!new_page)
new_page 2341 mm/memory.c new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
new_page 2343 mm/memory.c if (!new_page)
new_page 2345 mm/memory.c cow_user_page(new_page, old_page, vmf->address, vma);
new_page 2348 mm/memory.c if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
new_page 2351 mm/memory.c __SetPageUptodate(new_page);
new_page 2373 mm/memory.c entry = mk_pte(new_page, vma->vm_page_prot);
new_page 2382 mm/memory.c page_add_new_anon_rmap(new_page, vma, vmf->address, false);
new_page 2383 mm/memory.c mem_cgroup_commit_charge(new_page, memcg, false, false);
new_page 2384 mm/memory.c lru_cache_add_active_or_unevictable(new_page, vma);
new_page 2419 mm/memory.c new_page = old_page;
new_page 2422 mm/memory.c mem_cgroup_cancel_charge(new_page, memcg, false);
new_page 2425 mm/memory.c if (new_page)
new_page 2426 mm/memory.c put_page(new_page);
new_page 2449 mm/memory.c put_page(new_page);
new_page 1302 mm/mempolicy.c nr_failed = migrate_pages(&pagelist, new_page, NULL,
new_page 2033 mm/migrate.c struct page *new_page = NULL;
new_page 2037 mm/migrate.c new_page = alloc_pages_node(node,
new_page 2040 mm/migrate.c if (!new_page)
new_page 2042 mm/migrate.c prep_transhuge_page(new_page);
new_page 2046 mm/migrate.c put_page(new_page);
new_page 2051 mm/migrate.c __SetPageLocked(new_page);
new_page 2053 mm/migrate.c __SetPageSwapBacked(new_page);
new_page 2056 mm/migrate.c new_page->mapping = page->mapping;
new_page 2057 mm/migrate.c new_page->index = page->index;
new_page 2060 mm/migrate.c migrate_page_copy(new_page, page);
new_page 2061 mm/migrate.c WARN_ON(PageLRU(new_page));
new_page 2069 mm/migrate.c if (TestClearPageActive(new_page))
new_page 2071 mm/migrate.c if (TestClearPageUnevictable(new_page))
new_page 2074 mm/migrate.c unlock_page(new_page);
new_page 2075 mm/migrate.c put_page(new_page); /* Free it */
new_page 2086 mm/migrate.c entry = mk_huge_pmd(new_page, vma->vm_page_prot);
new_page 2097 mm/migrate.c page_add_anon_rmap(new_page, vma, start, true);
new_page 2113 mm/migrate.c mlock_migrate_page(new_page, page);
new_page 2115 mm/migrate.c set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
new_page 2120 mm/migrate.c get_page(new_page);
new_page 2121 mm/migrate.c putback_lru_page(new_page);
new_page 2123 mm/migrate.c unlock_page(new_page);
new_page 363 mm/swap_state.c struct page *found_page = NULL, *new_page = NULL;
new_page 397 mm/swap_state.c if (!new_page) {
new_page 398 mm/swap_state.c new_page = alloc_page_vma(gfp_mask, vma, addr);
new_page 399 mm/swap_state.c if (!new_page)
new_page 419 mm/swap_state.c __SetPageLocked(new_page);
new_page 420 mm/swap_state.c __SetPageSwapBacked(new_page);
new_page 421 mm/swap_state.c err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
new_page 424 mm/swap_state.c SetPageWorkingset(new_page);
new_page 425 mm/swap_state.c lru_cache_add_anon(new_page);
new_page 427 mm/swap_state.c return new_page;
new_page 429 mm/swap_state.c __ClearPageLocked(new_page);
new_page 434 mm/swap_state.c put_swap_page(new_page, entry);
new_page 437 mm/swap_state.c if (new_page)
new_page 438 mm/swap_state.c put_page(new_page);