pages 1027 arch/alpha/kernel/core_marvel.c mem->page_count, mem->pages);
pages 688 arch/alpha/kernel/core_titan.c mem->page_count, mem->pages);
pages 908 arch/alpha/kernel/pci_iommu.c struct page **pages)
pages 928 arch/alpha/kernel/pci_iommu.c ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
pages 411 arch/arm/kernel/process.c .pages = &signal_page,
pages 45 arch/arm/kernel/vdso.c .pages = &vdso_data_page,
pages 217 arch/arm/kernel/vdso.c vdso_text_mapping.pages = vdso_text_pagelist;
pages 1240 arch/arm/mm/dma-mapping.c struct page **pages;
pages 1247 arch/arm/mm/dma-mapping.c pages = kzalloc(array_size, GFP_KERNEL);
pages 1249 arch/arm/mm/dma-mapping.c pages = vzalloc(array_size);
pages 1250 arch/arm/mm/dma-mapping.c if (!pages)
pages 1266 arch/arm/mm/dma-mapping.c pages[i] = page + i;
pages 1268 arch/arm/mm/dma-mapping.c return pages;
pages 1293 arch/arm/mm/dma-mapping.c pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
pages 1296 arch/arm/mm/dma-mapping.c if (!pages[i]) {
pages 1301 arch/arm/mm/dma-mapping.c pages[i] = alloc_pages(gfp, 0);
pages 1302 arch/arm/mm/dma-mapping.c if (!pages[i])
pages 1307 arch/arm/mm/dma-mapping.c split_page(pages[i], order);
pages 1310 arch/arm/mm/dma-mapping.c pages[i + j] = pages[i] + j;
pages 1313 arch/arm/mm/dma-mapping.c __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
pages 1318 arch/arm/mm/dma-mapping.c return pages;
pages 1321 arch/arm/mm/dma-mapping.c if (pages[i])
pages 1322 arch/arm/mm/dma-mapping.c __free_pages(pages[i], 0);
pages 1323 arch/arm/mm/dma-mapping.c kvfree(pages);
pages 1327 arch/arm/mm/dma-mapping.c static int __iommu_free_buffer(struct device *dev, struct page **pages,
pages 1334 arch/arm/mm/dma-mapping.c dma_release_from_contiguous(dev, pages[0], count);
pages 1337 arch/arm/mm/dma-mapping.c if (pages[i])
pages 1338 arch/arm/mm/dma-mapping.c __free_pages(pages[i], 0);
pages 1341 arch/arm/mm/dma-mapping.c kvfree(pages);
pages 1349 arch/arm/mm/dma-mapping.c __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
pages 1365 arch/arm/mm/dma-mapping.c unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
pages 1366 arch/arm/mm/dma-mapping.c phys_addr_t phys = page_to_phys(pages[i]);
pages 1370 arch/arm/mm/dma-mapping.c if (page_to_pfn(pages[j]) != next_pfn)
pages 1466 arch/arm/mm/dma-mapping.c struct page **pages;
pages 1485 arch/arm/mm/dma-mapping.c pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
pages 1486 arch/arm/mm/dma-mapping.c if (!pages)
pages 1489 arch/arm/mm/dma-mapping.c *handle = __iommu_create_mapping(dev, pages, size, attrs);
pages 1494 arch/arm/mm/dma-mapping.c return pages;
pages 1496 arch/arm/mm/dma-mapping.c addr = dma_common_pages_remap(pages, size, prot,
pages 1506 arch/arm/mm/dma-mapping.c __iommu_free_buffer(dev, pages, size, attrs);
pages 1526 arch/arm/mm/dma-mapping.c struct page **pages = __iommu_get_pages(cpu_addr, attrs);
pages 1530 arch/arm/mm/dma-mapping.c if (!pages)
pages 1536 arch/arm/mm/dma-mapping.c err = vm_map_pages(vma, pages, nr_pages);
pages 1565 arch/arm/mm/dma-mapping.c struct page **pages;
pages 1573 arch/arm/mm/dma-mapping.c pages = __iommu_get_pages(cpu_addr, attrs);
pages 1574 arch/arm/mm/dma-mapping.c if (!pages) {
pages 1583 arch/arm/mm/dma-mapping.c __iommu_free_buffer(dev, pages, size, attrs);
pages 1603 arch/arm/mm/dma-mapping.c struct page **pages = __iommu_get_pages(cpu_addr, attrs);
pages 1605 arch/arm/mm/dma-mapping.c if (!pages)
pages 1608 arch/arm/mm/dma-mapping.c return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
pages 65 arch/arm/plat-omap/sram.c int pages;
pages 73 arch/arm/plat-omap/sram.c pages = PAGE_ALIGN(size) / PAGE_SIZE;
pages 75 arch/arm/plat-omap/sram.c set_memory_rw(base, pages);
pages 79 arch/arm/plat-omap/sram.c set_memory_ro(base, pages);
pages 80 arch/arm/plat-omap/sram.c set_memory_x(base, pages);
pages 101 arch/arm/plat-omap/sram.c int pages;
pages 125 arch/arm/plat-omap/sram.c pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
pages 127 arch/arm/plat-omap/sram.c set_memory_ro(base, pages);
pages 128 arch/arm/plat-omap/sram.c set_memory_x(base, pages);
pages 66 arch/arm/xen/enlighten.c int nr, struct page **pages)
pages 68 arch/arm/xen/enlighten.c return xen_xlate_unmap_gfn_range(vma, nr, pages);
pages 91 arch/arm/xen/p2m.c struct page **pages, unsigned int count)
pages 108 arch/arm/xen/p2m.c struct page **pages, unsigned int count)
pages 133 arch/arm64/kernel/vdso.c vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
pages 134 arch/arm64/kernel/vdso.c vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
pages 213 arch/arm64/kernel/vdso.c .pages = &aarch32_vdso_pages[C_VECTORS],
pages 226 arch/arm64/kernel/vdso.c .pages = &aarch32_vdso_pages[C_SIGPAGE],
pages 97 arch/arm64/mm/pageattr.c __change_memory_common((u64)page_address(area->pages[i]),
pages 50 arch/m68k/mm/sun3kmap.c unsigned long type, int pages)
pages 56 arch/m68k/mm/sun3kmap.c while(pages) {
pages 60 arch/m68k/mm/sun3kmap.c pages--;
pages 69 arch/m68k/mm/sun3kmap.c int pages;
pages 88 arch/m68k/mm/sun3kmap.c pages = size / PAGE_SIZE;
pages 92 arch/m68k/mm/sun3kmap.c while(pages) {
pages 96 arch/m68k/mm/sun3kmap.c if(seg_pages > pages)
pages 97 arch/m68k/mm/sun3kmap.c seg_pages = pages;
pages 101 arch/m68k/mm/sun3kmap.c pages -= seg_pages;
pages 47 arch/mips/ar7/memory.c unsigned long pages;
pages 49 arch/mips/ar7/memory.c pages = memsize() >> PAGE_SHIFT;
pages 50 arch/mips/ar7/memory.c add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM);
pages 131 arch/mips/fw/arc/memory.c i, p, p->base, p->pages, mtypes(p->type));
pages 144 arch/mips/fw/arc/memory.c size = p->pages << ARC_PAGE_SHIFT;
pages 700 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t pages:2;
pages 722 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t pages:2;
pages 731 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t pages:2;
pages 753 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t pages:2;
pages 129 arch/mips/include/asm/sgiarcs.h ULONG pages;
pages 99 arch/mips/jazz/jazzdma.c int first, last, pages, frame, i;
pages 120 arch/mips/jazz/jazzdma.c pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
pages 125 arch/mips/jazz/jazzdma.c if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */
pages 132 arch/mips/jazz/jazzdma.c && last - first < pages)
pages 135 arch/mips/jazz/jazzdma.c if (last - first == pages)
pages 159 arch/mips/jazz/jazzdma.c pages, laddr);
pages 219 arch/mips/jazz/jazzdma.c int first, pages;
pages 236 arch/mips/jazz/jazzdma.c pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
pages 239 arch/mips/jazz/jazzdma.c printk("vdma_remap: first=%x, pages=%x\n", first, pages);
pages 240 arch/mips/jazz/jazzdma.c if (first + pages > VDMA_PGTBL_ENTRIES) {
pages 247 arch/mips/jazz/jazzdma.c while (pages > 0 && first < VDMA_PGTBL_ENTRIES) {
pages 256 arch/mips/jazz/jazzdma.c pages--;
pages 266 arch/mips/jazz/jazzdma.c pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
pages 269 arch/mips/jazz/jazzdma.c for (i = first; i < first + pages; i++)
pages 272 arch/mips/jazz/jazzdma.c for (i = first; i < first + pages; i++)
pages 275 arch/mips/jazz/jazzdma.c for (i = first; i < first + pages; i++)
pages 38 arch/mips/kernel/vdso.c .pages = no_pages,
pages 53 arch/mips/kernel/vdso.c image->mapping.pages[i] = pfn_to_page(data_pfn + i);
pages 48 arch/nds32/kernel/vdso.c .pages = no_pages,
pages 87 arch/nds32/kernel/vdso.c vdso_spec[1].pages = &vdso_pagelist[0];
pages 91 arch/parisc/include/asm/page.h unsigned long pages; /* PAGE_SIZE pages */
pages 283 arch/parisc/include/asm/pdcpat.h unsigned int pages; /* Length in 4K pages */
pages 589 arch/parisc/include/uapi/asm/pdc.h unsigned int pages;
pages 146 arch/parisc/kernel/inventory.c pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
pages 349 arch/parisc/kernel/inventory.c || (mtbl_ptr->pages == 0)
pages 363 arch/parisc/kernel/inventory.c set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
pages 432 arch/parisc/kernel/inventory.c set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
pages 158 arch/parisc/mm/init.c pmem_ranges[i-1].pages) > MAX_GAP) {
pages 164 arch/parisc/mm/init.c pmem_ranges[i-1].pages));
pages 178 arch/parisc/mm/init.c size = (pmem_ranges[i].pages << PAGE_SHIFT);
pages 208 arch/parisc/mm/init.c rsize = pmem_ranges[i].pages << PAGE_SHIFT;
pages 214 arch/parisc/mm/init.c pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
pages 233 arch/parisc/mm/init.c end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
pages 239 arch/parisc/mm/init.c pmem_holes[npmem_holes++].pages = hole_pages;
pages 242 arch/parisc/mm/init.c end_pfn += pmem_ranges[i].pages;
pages 245 arch/parisc/mm/init.c pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
pages 262 arch/parisc/mm/init.c npages = pmem_ranges[i].pages;
pages 303 arch/parisc/mm/init.c (pmem_holes[i].pages << PAGE_SHIFT));
pages 661 arch/parisc/mm/init.c size = pmem_ranges[range].pages << PAGE_SHIFT;
pages 711 arch/parisc/mm/init.c unsigned long size = pmem_ranges[i].pages;
pages 59 arch/powerpc/include/asm/iommu.h unsigned long pages,
pages 218 arch/powerpc/include/asm/iommu.h unsigned long entry, unsigned long pages);
pages 205 arch/powerpc/include/asm/kvm_host.h struct page *pages[0];
pages 1036 arch/powerpc/kernel/iommu.c unsigned long entry, unsigned long pages)
pages 1039 arch/powerpc/kernel/iommu.c tbl->it_ops->tce_kill(tbl, entry, pages, false);
pages 52 arch/powerpc/kernel/optprobes.c .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
pages 504 arch/powerpc/kvm/book3s_64_mmu_hv.c struct page *page, *pages[1];
pages 592 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
pages 609 arch/powerpc/kvm/book3s_64_mmu_hv.c page = pages[0];
pages 734 arch/powerpc/kvm/book3s_64_mmu_hv.c put_page(pages[0]);
pages 1175 arch/powerpc/kvm/book3s_64_mmu_hv.c struct page *page, *pages[1];
pages 1185 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
pages 1188 arch/powerpc/kvm/book3s_64_mmu_hv.c page = pages[0];
pages 184 arch/powerpc/kvm/book3s_64_vio.c if (stt->pages[i])
pages 185 arch/powerpc/kvm/book3s_64_vio.c __free_page(stt->pages[i]);
pages 193 arch/powerpc/kvm/book3s_64_vio.c struct page *page = stt->pages[sttpage];
pages 199 arch/powerpc/kvm/book3s_64_vio.c page = stt->pages[sttpage];
pages 204 arch/powerpc/kvm/book3s_64_vio.c stt->pages[sttpage] = page;
pages 397 arch/powerpc/kvm/book3s_64_vio.c page = stt->pages[sttpage];
pages 178 arch/powerpc/kvm/book3s_64_vio_hv.c page = stt->pages[idx / TCES_PER_PAGE];
pages 215 arch/powerpc/kvm/book3s_64_vio_hv.c if (!stt->pages[i])
pages 245 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long entry, unsigned long pages)
pages 248 arch/powerpc/kvm/book3s_64_vio_hv.c tbl->it_ops->tce_kill(tbl, entry, pages, true);
pages 671 arch/powerpc/kvm/book3s_64_vio_hv.c page = stt->pages[idx / TCES_PER_PAGE];
pages 741 arch/powerpc/kvm/e500_mmu.c struct page **pages;
pages 779 arch/powerpc/kvm/e500_mmu.c pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
pages 780 arch/powerpc/kvm/e500_mmu.c if (!pages)
pages 783 arch/powerpc/kvm/e500_mmu.c ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
pages 793 arch/powerpc/kvm/e500_mmu.c virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
pages 837 arch/powerpc/kvm/e500_mmu.c vcpu_e500->shared_tlb_pages = pages;
pages 854 arch/powerpc/kvm/e500_mmu.c put_page(pages[i]);
pages 856 arch/powerpc/kvm/e500_mmu.c kfree(pages);
pages 1225 arch/powerpc/net/bpf_jit_comp64.c bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
pages 100 arch/powerpc/platforms/cell/ras.c struct page *pages;
pages 122 arch/powerpc/platforms/cell/ras.c area->pages = __alloc_pages_node(area->nid,
pages 126 arch/powerpc/platforms/cell/ras.c if (!area->pages) {
pages 137 arch/powerpc/platforms/cell/ras.c addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1);
pages 155 arch/powerpc/platforms/cell/ras.c __free_pages(area->pages, area->order);
pages 224 arch/powerpc/platforms/cell/ras.c memset(page_address(area->pages), 0,
pages 229 arch/powerpc/platforms/cell/ras.c __free_pages(area->pages, area->order);
pages 555 arch/powerpc/platforms/ps3/mm.c int iopage, pages;
pages 585 arch/powerpc/platforms/ps3/mm.c pages = len >> r->page_size;
pages 587 arch/powerpc/platforms/ps3/mm.c r->page_size, r->len, pages, iopte_flag);
pages 588 arch/powerpc/platforms/ps3/mm.c for (iopage = 0; iopage < pages; iopage++) {
pages 1541 arch/powerpc/sysdev/xive/common.c struct page *pages;
pages 1545 arch/powerpc/sysdev/xive/common.c pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
pages 1546 arch/powerpc/sysdev/xive/common.c if (!pages)
pages 1548 arch/powerpc/sysdev/xive/common.c qpage = (__be32 *)page_address(pages);
pages 239 arch/s390/hypfs/hypfs_diag.c static void *diag204_alloc_vbuf(int pages)
pages 242 arch/s390/hypfs/hypfs_diag.c diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
pages 246 arch/s390/hypfs/hypfs_diag.c diag204_buf_pages = pages;
pages 259 arch/s390/hypfs/hypfs_diag.c static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
pages 262 arch/s390/hypfs/hypfs_diag.c *pages = diag204_buf_pages;
pages 266 arch/s390/hypfs/hypfs_diag.c *pages = 1;
pages 269 arch/s390/hypfs/hypfs_diag.c *pages = diag204((unsigned long)DIAG204_SUBC_RSI |
pages 271 arch/s390/hypfs/hypfs_diag.c if (*pages <= 0)
pages 274 arch/s390/hypfs/hypfs_diag.c return diag204_alloc_vbuf(*pages);
pages 295 arch/s390/hypfs/hypfs_diag.c int pages, rc;
pages 297 arch/s390/hypfs/hypfs_diag.c buf = diag204_get_buffer(DIAG204_INFO_EXT, &pages);
pages 300 arch/s390/hypfs/hypfs_diag.c (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
pages 306 arch/s390/hypfs/hypfs_diag.c (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
pages 316 arch/s390/hypfs/hypfs_diag.c buf = diag204_get_buffer(DIAG204_INFO_SIMPLE, &pages);
pages 322 arch/s390/hypfs/hypfs_diag.c (unsigned long)DIAG204_INFO_SIMPLE, pages, buf) >= 0) {
pages 338 arch/s390/hypfs/hypfs_diag.c static int diag204_do_store(void *buf, int pages)
pages 343 arch/s390/hypfs/hypfs_diag.c (unsigned long) diag204_info_type, pages, buf);
pages 350 arch/s390/hypfs/hypfs_diag.c int pages, rc;
pages 352 arch/s390/hypfs/hypfs_diag.c buf = diag204_get_buffer(diag204_info_type, &pages);
pages 355 arch/s390/hypfs/hypfs_diag.c rc = diag204_do_store(buf, pages);
pages 98 arch/s390/include/asm/debug.h debug_info_t *debug_register(const char *name, int pages, int nr_areas,
pages 101 arch/s390/include/asm/debug.h debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
pages 826 arch/s390/include/asm/kvm_host.h struct page *pages[KVM_MAX_VCPUS];
pages 53 arch/s390/kernel/kprobes.c .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
pages 1691 arch/s390/kernel/perf_cpum_sf.c static void *aux_buffer_setup(struct perf_event *event, void **pages,
pages 1699 arch/s390/kernel/perf_cpum_sf.c if (!nr_pages || !pages)
pages 1754 arch/s390/kernel/perf_cpum_sf.c *tail = (unsigned long)pages[i];
pages 1755 arch/s390/kernel/perf_cpum_sf.c aux->sdb_index[i] = (unsigned long)pages[i];
pages 1756 arch/s390/kernel/perf_cpum_sf.c aux_sdb_init((unsigned long)pages[i]);
pages 305 arch/s390/kernel/sthyi.c int i, r, pages;
pages 315 arch/s390/kernel/sthyi.c pages = diag204((unsigned long)DIAG204_SUBC_RSI |
pages 317 arch/s390/kernel/sthyi.c if (pages <= 0)
pages 320 arch/s390/kernel/sthyi.c diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
pages 325 arch/s390/kernel/sthyi.c (unsigned long)DIAG204_INFO_EXT, pages, diag204_buf);
pages 54 arch/s390/kernel/suspend.c unsigned long page_key_additional_pages(unsigned long pages)
pages 56 arch/s390/kernel/suspend.c return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
pages 77 arch/s390/kernel/suspend.c int page_key_alloc(unsigned long pages)
pages 82 arch/s390/kernel/suspend.c size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
pages 798 arch/s390/kvm/gaccess.c unsigned long *pages, unsigned long nr_pages,
pages 813 arch/s390/kvm/gaccess.c rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
pages 817 arch/s390/kvm/gaccess.c *pages = kvm_s390_real_to_abs(vcpu, ga);
pages 818 arch/s390/kvm/gaccess.c if (kvm_is_error_gpa(vcpu->kvm, *pages))
pages 824 arch/s390/kvm/gaccess.c pages++;
pages 836 arch/s390/kvm/gaccess.c unsigned long *pages;
pages 848 arch/s390/kvm/gaccess.c pages = pages_array;
pages 850 arch/s390/kvm/gaccess.c pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
pages 851 arch/s390/kvm/gaccess.c if (!pages)
pages 856 arch/s390/kvm/gaccess.c rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
pages 858 arch/s390/kvm/gaccess.c gpa = *(pages + idx) + (ga & ~PAGE_MASK);
pages 871 arch/s390/kvm/gaccess.c vfree(pages);
pages 580 arch/s390/kvm/vsie.c page = READ_ONCE(kvm->arch.vsie.pages[i]);
pages 1246 arch/s390/kvm/vsie.c kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
pages 1251 arch/s390/kvm/vsie.c page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
pages 1354 arch/s390/kvm/vsie.c page = kvm->arch.vsie.pages[i];
pages 1355 arch/s390/kvm/vsie.c kvm->arch.vsie.pages[i] = NULL;
pages 43 arch/s390/mm/cmm.c unsigned long pages[CMM_NR_PAGES];
pages 97 arch/s390/mm/cmm.c pa->pages[pa->index++] = addr;
pages 115 arch/s390/mm/cmm.c addr = pa->pages[--pa->index];
pages 45 arch/sh/kernel/io_trapped.c struct page *pages[TRAPPED_PAGES_MAX];
pages 71 arch/sh/kernel/io_trapped.c pages[k] = virt_to_page(tiop);
pages 73 arch/sh/kernel/io_trapped.c tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
pages 233 arch/sparc/kernel/pci_fire.c unsigned long pages, order, i;
pages 236 arch/sparc/kernel/pci_fire.c pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
pages 237 arch/sparc/kernel/pci_fire.c if (pages == 0UL) {
pages 242 arch/sparc/kernel/pci_fire.c memset((char *)pages, 0, PAGE_SIZE << order);
pages 243 arch/sparc/kernel/pci_fire.c pbm->msi_queues = (void *) pages;
pages 265 arch/sparc/kernel/pci_fire.c unsigned long pages, order;
pages 268 arch/sparc/kernel/pci_fire.c pages = (unsigned long) pbm->msi_queues;
pages 270 arch/sparc/kernel/pci_fire.c free_pages(pages, order);
pages 1058 arch/sparc/kernel/pci_sun4v.c unsigned long q_size, alloc_size, pages, order;
pages 1064 arch/sparc/kernel/pci_sun4v.c pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
pages 1065 arch/sparc/kernel/pci_sun4v.c if (pages == 0UL) {
pages 1070 arch/sparc/kernel/pci_sun4v.c memset((char *)pages, 0, PAGE_SIZE << order);
pages 1071 arch/sparc/kernel/pci_sun4v.c pbm->msi_queues = (void *) pages;
pages 1074 arch/sparc/kernel/pci_sun4v.c unsigned long err, base = __pa(pages + (i * q_size));
pages 1106 arch/sparc/kernel/pci_sun4v.c free_pages(pages, order);
pages 1112 arch/sparc/kernel/pci_sun4v.c unsigned long q_size, alloc_size, pages, order;
pages 1125 arch/sparc/kernel/pci_sun4v.c pages = (unsigned long) pbm->msi_queues;
pages 1127 arch/sparc/kernel/pci_sun4v.c free_pages(pages, order);
pages 1588 arch/sparc/net/bpf_jit_comp_64.c bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
pages 269 arch/sparc/vdso/vma.c vdso_mapping->pages = cpp;
pages 291 arch/sparc/vdso/vma.c vvar_mapping.pages = dpp;
pages 315 arch/sparc/vdso/vma.c vdso_mapping->pages = NULL;
pages 324 arch/sparc/vdso/vma.c vvar_mapping.pages = NULL;
pages 274 arch/um/drivers/mconsole_kern.c void *pages[UNPLUGGED_PER_PAGE];
pages 325 arch/um/drivers/mconsole_kern.c addr = unplugged->pages[--unplug_index];
pages 361 arch/um/drivers/mconsole_kern.c unplugged->pages[unplug_index++] = addr;
pages 46 arch/x86/boot/compressed/kaslr_64.c struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
pages 50 arch/x86/boot/compressed/kaslr_64.c if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
pages 52 arch/x86/boot/compressed/kaslr_64.c debug_putaddr(pages->pgt_buf_offset);
pages 53 arch/x86/boot/compressed/kaslr_64.c debug_putaddr(pages->pgt_buf_size);
pages 57 arch/x86/boot/compressed/kaslr_64.c entry = pages->pgt_buf + pages->pgt_buf_offset;
pages 58 arch/x86/boot/compressed/kaslr_64.c pages->pgt_buf_offset += PAGE_SIZE;
pages 218 arch/x86/boot/video-vesa.c boot_params.screen_info.pages = vminfo.image_planes;
pages 80 arch/x86/events/intel/bts.c bts_buffer_setup_aux(struct perf_event *event, void **pages,
pages 93 arch/x86/events/intel/bts.c page = virt_to_page(pages[pg]);
pages 111 arch/x86/events/intel/bts.c buf->data_pages = pages;
pages 117 arch/x86/events/intel/bts.c page = virt_to_page(pages[pg]);
pages 1192 arch/x86/events/intel/pt.c pt_buffer_setup_aux(struct perf_event *event, void **pages,
pages 1210 arch/x86/events/intel/pt.c buf->data_pages = pages;
pages 61 arch/x86/hyperv/nested.c u64 start_gfn, u64 pages)
pages 75 arch/x86/hyperv/nested.c additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;
pages 81 arch/x86/hyperv/nested.c pages -= additional_pages + 1;
pages 84 arch/x86/hyperv/nested.c } while (pages > 0);
pages 197 arch/x86/include/asm/kexec.h extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
pages 201 arch/x86/include/asm/kexec.h extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
pages 442 arch/x86/include/asm/kvm_host.h u64 pages;
pages 94 arch/x86/include/asm/pgtable_32.h #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
pages 96 arch/x86/include/asm/pgtable_32.h #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
pages 550 arch/x86/include/asm/pgtable_types.h extern void update_page_count(int level, unsigned long pages);
pages 552 arch/x86/include/asm/pgtable_types.h static inline void update_page_count(int level, unsigned long pages) { }
pages 50 arch/x86/include/asm/set_memory.h int set_pages_array_uc(struct page **pages, int addrinarray);
pages 51 arch/x86/include/asm/set_memory.h int set_pages_array_wc(struct page **pages, int addrinarray);
pages 52 arch/x86/include/asm/set_memory.h int set_pages_array_wt(struct page **pages, int addrinarray);
pages 53 arch/x86/include/asm/set_memory.h int set_pages_array_wb(struct page **pages, int addrinarray);
pages 68 arch/x86/include/asm/xen/page.h struct page **pages, unsigned int count);
pages 71 arch/x86/include/asm/xen/page.h struct page **pages, unsigned int count);
pages 76 arch/x86/include/asm/xen/page.h struct page **pages, unsigned int count)
pages 84 arch/x86/include/asm/xen/page.h struct page **pages, unsigned int count)
pages 791 arch/x86/kernel/alternative.c struct page *pages[2] = {NULL};
pages 805 arch/x86/kernel/alternative.c pages[0] = vmalloc_to_page(addr);
pages 807 arch/x86/kernel/alternative.c pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
pages 809 arch/x86/kernel/alternative.c pages[0] = virt_to_page(addr);
pages 810 arch/x86/kernel/alternative.c WARN_ON(!PageReserved(pages[0]));
pages 812 arch/x86/kernel/alternative.c pages[1] = virt_to_page(addr + PAGE_SIZE);
pages 818 arch/x86/kernel/alternative.c BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
pages 838 arch/x86/kernel/alternative.c pte = mk_pte(pages[0], pgprot);
pages 842 arch/x86/kernel/alternative.c pte = mk_pte(pages[1], pgprot);
pages 327 arch/x86/kernel/amd_gart_64.c unsigned long pages)
pages 329 arch/x86/kernel/amd_gart_64.c unsigned long iommu_start = alloc_iommu(dev, pages, 0);
pages 338 arch/x86/kernel/amd_gart_64.c unsigned long pages, addr;
pages 351 arch/x86/kernel/amd_gart_64.c pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
pages 352 arch/x86/kernel/amd_gart_64.c while (pages--) {
pages 358 arch/x86/kernel/amd_gart_64.c BUG_ON(iommu_page - iommu_start != pages);
pages 365 arch/x86/kernel/amd_gart_64.c struct scatterlist *sout, unsigned long pages, int need)
pages 373 arch/x86/kernel/amd_gart_64.c return __dma_map_cont(dev, start, nelems, sout, pages);
pages 385 arch/x86/kernel/amd_gart_64.c unsigned long pages = 0;
pages 419 arch/x86/kernel/amd_gart_64.c sgmap, pages, need) < 0)
pages 425 arch/x86/kernel/amd_gart_64.c pages = 0;
pages 433 arch/x86/kernel/amd_gart_64.c pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
pages 436 arch/x86/kernel/amd_gart_64.c if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
pages 457 arch/x86/kernel/amd_gart_64.c panic("dma_map_sg: overflow on %lu pages\n", pages);
pages 459 arch/x86/kernel/amd_gart_64.c iommu_full(dev, pages << PAGE_SHIFT, dir);
pages 36 arch/x86/kernel/irq_64.c struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
pages 43 arch/x86/kernel/irq_64.c pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
pages 46 arch/x86/kernel/irq_64.c va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
pages 666 arch/x86/kernel/machine_kexec_64.c int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
pages 676 arch/x86/kernel/machine_kexec_64.c return set_memory_decrypted((unsigned long)vaddr, pages);
pages 679 arch/x86/kernel/machine_kexec_64.c void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
pages 688 arch/x86/kernel/machine_kexec_64.c set_memory_encrypted((unsigned long)vaddr, pages);
pages 332 arch/x86/kvm/mmu.c u64 start_gfn, u64 pages)
pages 337 arch/x86/kvm/mmu.c range.pages = pages;
pages 2509 arch/x86/kvm/mmu.c struct kvm_mmu_pages pages;
pages 2513 arch/x86/kvm/mmu.c while (mmu_unsync_walk(parent, &pages)) {
pages 2516 arch/x86/kvm/mmu.c for_each_sp(pages, sp, parents, i)
pages 2524 arch/x86/kvm/mmu.c for_each_sp(pages, sp, parents, i) {
pages 2797 arch/x86/kvm/mmu.c struct kvm_mmu_pages pages;
pages 2802 arch/x86/kvm/mmu.c while (mmu_unsync_walk(parent, &pages)) {
pages 2805 arch/x86/kvm/mmu.c for_each_sp(pages, sp, parents, i) {
pages 3235 arch/x86/kvm/mmu.c struct page *pages[PTE_PREFETCH_NUM];
pages 3246 arch/x86/kvm/mmu.c ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
pages 3252 arch/x86/kvm/mmu.c page_to_pfn(pages[i]), true, true);
pages 3253 arch/x86/kvm/mmu.c put_page(pages[i]);
pages 429 arch/x86/kvm/svm.c struct page **pages;
pages 1833 arch/x86/kvm/svm.c struct page **pages;
pages 1854 arch/x86/kvm/svm.c pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
pages 1857 arch/x86/kvm/svm.c pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
pages 1859 arch/x86/kvm/svm.c if (!pages)
pages 1863 arch/x86/kvm/svm.c npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
pages 1872 arch/x86/kvm/svm.c return pages;
pages 1876 arch/x86/kvm/svm.c release_pages(pages, npinned);
pages 1878 arch/x86/kvm/svm.c kvfree(pages);
pages 1882 arch/x86/kvm/svm.c static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
pages 1887 arch/x86/kvm/svm.c release_pages(pages, npages);
pages 1888 arch/x86/kvm/svm.c kvfree(pages);
pages 1892 arch/x86/kvm/svm.c static void sev_clflush_pages(struct page *pages[], unsigned long npages)
pages 1897 arch/x86/kvm/svm.c if (npages == 0 || pages == NULL)
pages 1901 arch/x86/kvm/svm.c page_virtual = kmap_atomic(pages[i]);
pages 1916 arch/x86/kvm/svm.c sev_clflush_pages(region->pages, region->npages);
pages 1918 arch/x86/kvm/svm.c sev_unpin_memory(kvm, region->pages, region->npages);
pages 6499 arch/x86/kvm/svm.c unsigned long i = idx + 1, pages = 1;
pages 6506 arch/x86/kvm/svm.c pages++;
pages 6513 arch/x86/kvm/svm.c return pages;
pages 6518 arch/x86/kvm/svm.c unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
pages 6554 arch/x86/kvm/svm.c for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
pages 6564 arch/x86/kvm/svm.c pages = get_num_contig_pages(i, inpages, npages);
pages 6566 arch/x86/kvm/svm.c len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
pages 6955 arch/x86/kvm/svm.c struct page **pages;
pages 6966 arch/x86/kvm/svm.c pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
pages 6967 arch/x86/kvm/svm.c if (!pages)
pages 6974 arch/x86/kvm/svm.c if (get_num_contig_pages(0, pages, n) != n) {
pages 6985 arch/x86/kvm/svm.c data->guest_address = __sme_page_pa(pages[0]) + offset;
pages 7015 arch/x86/kvm/svm.c sev_unpin_memory(kvm, pages, n);
pages 7090 arch/x86/kvm/svm.c region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
pages 7091 arch/x86/kvm/svm.c if (!region->pages) {
pages 7102 arch/x86/kvm/svm.c sev_clflush_pages(region->pages, region->npages);
pages 485 arch/x86/kvm/vmx/vmx.c range->pages);
pages 2505 arch/x86/kvm/vmx/vmx.c struct page *pages;
pages 2508 arch/x86/kvm/vmx/vmx.c pages = __alloc_pages_node(node, flags, vmcs_config.order);
pages 2509 arch/x86/kvm/vmx/vmx.c if (!pages)
pages 2511 arch/x86/kvm/vmx/vmx.c vmcs = page_address(pages);
pages 49 arch/x86/mm/cpu_entry_area.c cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
pages 51 arch/x86/mm/cpu_entry_area.c for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
pages 973 arch/x86/mm/init.c unsigned long pages;
pages 975 arch/x86/mm/init.c pages = generic_max_swapfile_size();
pages 987 arch/x86/mm/init.c pages = min_t(unsigned long long, l1tf_limit, pages);
pages 989 arch/x86/mm/init.c return pages;
pages 451 arch/x86/mm/init_64.c unsigned long pages = 0, paddr_next;
pages 479 arch/x86/mm/init_64.c pages++;
pages 486 arch/x86/mm/init_64.c pages++;
pages 491 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_4K, pages);
pages 505 arch/x86/mm/init_64.c unsigned long pages = 0, paddr_next;
pages 550 arch/x86/mm/init_64.c pages++;
pages 558 arch/x86/mm/init_64.c pages++;
pages 576 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_2M, pages);
pages 590 arch/x86/mm/init_64.c unsigned long pages = 0, paddr_next;
pages 637 arch/x86/mm/init_64.c pages++;
pages 645 arch/x86/mm/init_64.c pages++;
pages 665 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_1G, pages);
pages 961 arch/x86/mm/init_64.c unsigned long next, pages = 0;
pages 997 arch/x86/mm/init_64.c pages++;
pages 1025 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_4K, -pages);
pages 1032 arch/x86/mm/init_64.c unsigned long next, pages = 0;
pages 1054 arch/x86/mm/init_64.c pages++;
pages 1081 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_2M, -pages);
pages 1088 arch/x86/mm/init_64.c unsigned long next, pages = 0;
pages 1110 arch/x86/mm/init_64.c pages++;
pages 1136 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_1G, -pages);
pages 1143 arch/x86/mm/init_64.c unsigned long next, pages = 0;
pages 1168 arch/x86/mm/init_64.c update_page_count(PG_LEVEL_512G, -pages);
pages 114 arch/x86/mm/pageattr-test.c static struct page *pages[NPAGES];
pages 174 arch/x86/mm/pageattr-test.c pages[k] = pfn_to_page(pfn + k);
pages 191 arch/x86/mm/pageattr-test.c err = cpa_set_pages_array(pages, len[i], PAGE_CPA_TEST);
pages 47 arch/x86/mm/pageattr.c struct page **pages;
pages 74 arch/x86/mm/pageattr.c void update_page_count(int level, unsigned long pages)
pages 78 arch/x86/mm/pageattr.c direct_pages_count[level] += pages;
pages 261 arch/x86/mm/pageattr.c struct page *page = cpa->pages[idx];
pages 1671 arch/x86/mm/pageattr.c struct page **pages)
pages 1716 arch/x86/mm/pageattr.c cpa.pages = pages;
pages 1774 arch/x86/mm/pageattr.c static inline int cpa_set_pages_array(struct page **pages, int numpages,
pages 1778 arch/x86/mm/pageattr.c CPA_PAGES_ARRAY, pages);
pages 1781 arch/x86/mm/pageattr.c static inline int cpa_clear_pages_array(struct page **pages, int numpages,
pages 1785 arch/x86/mm/pageattr.c CPA_PAGES_ARRAY, pages);
pages 2006 arch/x86/mm/pageattr.c static int _set_pages_array(struct page **pages, int numpages,
pages 2017 arch/x86/mm/pageattr.c if (PageHighMem(pages[i]))
pages 2019 arch/x86/mm/pageattr.c start = page_to_pfn(pages[i]) << PAGE_SHIFT;
pages 2029 arch/x86/mm/pageattr.c ret = cpa_set_pages_array(pages, numpages,
pages 2036 arch/x86/mm/pageattr.c 0, CPA_PAGES_ARRAY, pages);
pages 2043 arch/x86/mm/pageattr.c if (PageHighMem(pages[i]))
pages 2045 arch/x86/mm/pageattr.c start = page_to_pfn(pages[i]) << PAGE_SHIFT;
pages 2052 arch/x86/mm/pageattr.c int set_pages_array_uc(struct page **pages, int numpages)
pages 2054 arch/x86/mm/pageattr.c return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
pages 2058 arch/x86/mm/pageattr.c int set_pages_array_wc(struct page **pages, int numpages)
pages 2060 arch/x86/mm/pageattr.c return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
pages 2064 arch/x86/mm/pageattr.c int set_pages_array_wt(struct page **pages, int numpages)
pages 2066 arch/x86/mm/pageattr.c return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
pages 2078 arch/x86/mm/pageattr.c int set_pages_array_wb(struct page **pages, int numpages)
pages 2086 arch/x86/mm/pageattr.c retval = cpa_clear_pages_array(pages, numpages,
pages 2092 arch/x86/mm/pageattr.c if (PageHighMem(pages[i]))
pages 2094 arch/x86/mm/pageattr.c start = page_to_pfn(pages[i]) << PAGE_SHIFT;
pages 32 arch/x86/um/asm/mm_context.h struct ldt_entry * pages[LDT_PAGES_MAX];
pages 82 arch/x86/um/ldt.c if (copy_to_user(ptr, ldt->u.pages[i], size)) {
pages 161 arch/x86/um/ldt.c ldt->u.pages[i] = (struct ldt_entry *)
pages 163 arch/x86/um/ldt.c if (!ldt->u.pages[i]) {
pages 171 arch/x86/um/ldt.c memcpy(ldt->u.pages[0], &entry0,
pages 173 arch/x86/um/ldt.c memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
pages 185 arch/x86/um/ldt.c ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
pages 347 arch/x86/um/ldt.c new_mm->arch.ldt.u.pages[i] =
pages 349 arch/x86/um/ldt.c memcpy(new_mm->arch.ldt.u.pages[i],
pages 350 arch/x86/um/ldt.c from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
pages 368 arch/x86/um/ldt.c free_page((long) mm->arch.ldt.u.pages[i]);
pages 43 arch/x86/xen/mmu.c int nr, struct page **pages)
pages 46 arch/x86/xen/mmu.c return xen_xlate_unmap_gfn_range(vma, nr, pages);
pages 48 arch/x86/xen/mmu.c if (!pages)
pages 2717 arch/x86/xen/mmu_pv.c unsigned int domid, bool no_translate, struct page **pages)
pages 700 arch/x86/xen/p2m.c struct page **pages, unsigned int count)
pages 729 arch/x86/xen/p2m.c pfn = page_to_pfn(pages[i]);
pages 746 arch/x86/xen/p2m.c struct page **pages, unsigned int count)
pages 754 arch/x86/xen/p2m.c unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
pages 755 arch/x86/xen/p2m.c unsigned long pfn = page_to_pfn(pages[i]);
pages 925 block/bio.c struct page **pages = (struct page **)bv;
pages 937 block/bio.c pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
pages 939 block/bio.c size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
pages 944 block/bio.c struct page *page = pages[i];
pages 1326 block/bio.c page = map_data->pages[i / nr_pages];
pages 1406 block/bio.c struct page **pages;
pages 1411 block/bio.c bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
pages 1424 block/bio.c struct page *page = pages[j];
pages 1448 block/bio.c put_page(pages[j++]);
pages 1449 block/bio.c kvfree(pages);
pages 1640 block/blk-iocost.c u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
pages 1671 block/blk-iocost.c cost += pages * coef_page;
pages 263 block/blk-lib.c sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);
pages 265 block/blk-lib.c return min(pages, (sector_t)BIO_MAX_PAGES);
pages 399 crypto/af_alg.c n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
pages 412 crypto/af_alg.c sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
pages 436 crypto/af_alg.c put_page(sgl->pages[i]);
pages 1356 crypto/tcrypt.c unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
pages 1359 crypto/tcrypt.c sg_init_table(cur->sg, pages);
pages 204 drivers/android/binder_alloc.c page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
pages 232 drivers/android/binder_alloc.c page = &alloc->pages[index];
pages 285 drivers/android/binder_alloc.c page = &alloc->pages[index];
pages 697 drivers/android/binder_alloc.c alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
pages 698 drivers/android/binder_alloc.c sizeof(alloc->pages[0]),
pages 700 drivers/android/binder_alloc.c if (alloc->pages == NULL) {
pages 724 drivers/android/binder_alloc.c kfree(alloc->pages);
pages 725 drivers/android/binder_alloc.c alloc->pages = NULL;
pages 771 drivers/android/binder_alloc.c if (alloc->pages) {
pages 778 drivers/android/binder_alloc.c if (!alloc->pages[i].page_ptr)
pages 782 drivers/android/binder_alloc.c &alloc->pages[i].lru);
pages 788 drivers/android/binder_alloc.c __free_page(alloc->pages[i].page_ptr);
pages 791 drivers/android/binder_alloc.c kfree(alloc->pages);
pages 853 drivers/android/binder_alloc.c page = &alloc->pages[i];
pages 930 drivers/android/binder_alloc.c index = page - alloc->pages;
pages 1084 drivers/android/binder_alloc.c lru_page = &alloc->pages[index];
pages 101 drivers/android/binder_alloc.h struct binder_lru_page *pages;
pages 104 drivers/android/binder_alloc_selftest.c if (!alloc->pages[page_index].page_ptr ||
pages 105 drivers/android/binder_alloc_selftest.c !list_empty(&alloc->pages[page_index].lru)) {
pages 107 drivers/android/binder_alloc_selftest.c alloc->pages[page_index].page_ptr ?
pages 147 drivers/android/binder_alloc_selftest.c if (list_empty(&alloc->pages[i].lru)) {
pages 150 drivers/android/binder_alloc_selftest.c alloc->pages[i].page_ptr ? "alloc" : "free", i);
pages 167 drivers/android/binder_alloc_selftest.c if (alloc->pages[i].page_ptr) {
pages 169 drivers/android/binder_alloc_selftest.c list_empty(&alloc->pages[i].lru) ?
pages 2233 drivers/ata/libata-scsi.c static const u8 pages[] = {
pages 2244 drivers/ata/libata-scsi.c num_pages = sizeof(pages);
pages 2248 drivers/ata/libata-scsi.c memcpy(rbuf + 4, pages, num_pages);
pages 184 drivers/ata/pata_octeon_cf.c reg_tim.s.pages = 0;
pages 55 drivers/auxdisplay/cfag12864bfb.c struct page *pages = virt_to_page(cfag12864b_buffer);
pages 57 drivers/auxdisplay/cfag12864bfb.c return vm_map_pages_zero(vma, &pages, 1);
pages 226 drivers/auxdisplay/ht16k33.c struct page *pages = virt_to_page(priv->fbdev.buffer);
pages 228 drivers/auxdisplay/ht16k33.c return vm_map_pages_zero(vma, &pages, 1);
pages 322 drivers/base/firmware_loader/fallback.c page_data = kmap(fw_priv->pages[page_nr]);
pages 329 drivers/base/firmware_loader/fallback.c kunmap(fw_priv->pages[page_nr]);
pages 69 drivers/base/firmware_loader/firmware.h struct page **pages;
pages 275 drivers/base/firmware_loader/main.c if (!fw_priv->pages)
pages 279 drivers/base/firmware_loader/main.c __free_page(fw_priv->pages[i]);
pages 280 drivers/base/firmware_loader/main.c kvfree(fw_priv->pages);
pages 281 drivers/base/firmware_loader/main.c fw_priv->pages = NULL;
pages 298 drivers/base/firmware_loader/main.c memcpy(new_pages, fw_priv->pages,
pages 302 drivers/base/firmware_loader/main.c kvfree(fw_priv->pages);
pages 303 drivers/base/firmware_loader/main.c fw_priv->pages = new_pages;
pages 308 drivers/base/firmware_loader/main.c fw_priv->pages[fw_priv->nr_pages] =
pages 311 drivers/base/firmware_loader/main.c if (!fw_priv->pages[fw_priv->nr_pages])
pages 322 drivers/base/firmware_loader/main.c if (!fw_priv->pages)
pages 326 drivers/base/firmware_loader/main.c fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
pages 332 drivers/base/firmware_loader/main.c kvfree(fw_priv->pages);
pages 333 drivers/base/firmware_loader/main.c fw_priv->pages = NULL;
pages 406 drivers/base/firmware_loader/main.c page = fw_priv->pages[fw_priv->nr_pages - 1];
pages 549 drivers/base/firmware_loader/main.c fw->pages = fw_priv->pages;
pages 135 drivers/block/brd.c struct page *pages[FREE_BATCH];
pages 142 drivers/block/brd.c (void **)pages, pos, FREE_BATCH);
pages 147 drivers/block/brd.c BUG_ON(pages[i]->index < pos);
pages 148 drivers/block/brd.c pos = pages[i]->index;
pages 150 drivers/block/brd.c BUG_ON(!ret || ret != pages[i]);
pages 151 drivers/block/brd.c __free_page(pages[i]);
pages 353 drivers/block/drbd/drbd_bitmap.c static void bm_free_pages(struct page **pages, unsigned long number)
pages 356 drivers/block/drbd/drbd_bitmap.c if (!pages)
pages 360 drivers/block/drbd/drbd_bitmap.c if (!pages[i]) {
pages 365 drivers/block/drbd/drbd_bitmap.c __free_page(pages[i]);
pages 366 drivers/block/drbd/drbd_bitmap.c pages[i] = NULL;
pages 398 drivers/block/drbd/drbd_int.h struct page *pages;
pages 1717 drivers/block/drbd/drbd_int.h struct page *page = peer_req->pages;
pages 1634 drivers/block/drbd/drbd_main.c struct page *page = peer_req->pages;
pages 390 drivers/block/drbd/drbd_receiver.c peer_req->pages = page;
pages 410 drivers/block/drbd/drbd_receiver.c drbd_free_pages(device, peer_req->pages, is_net);
pages 1613 drivers/block/drbd/drbd_receiver.c if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
pages 1643 drivers/block/drbd/drbd_receiver.c struct page *page = peer_req->pages;
pages 1950 drivers/block/drbd/drbd_receiver.c page = peer_req->pages;
pages 2638 drivers/block/drbd/drbd_receiver.c D_ASSERT(peer_device, peer_req->pages == NULL);
pages 2646 drivers/block/drbd/drbd_receiver.c D_ASSERT(peer_device, peer_req->pages == NULL);
pages 2650 drivers/block/drbd/drbd_receiver.c } else if (peer_req->pages == NULL) {
pages 289 drivers/block/drbd/drbd_worker.c struct page *page = peer_req->pages;
pages 1087 drivers/block/drbd/drbd_worker.c struct page *page = peer_req->pages;
pages 537 drivers/block/pktcdvd.c pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
pages 538 drivers/block/pktcdvd.c if (!pkt->pages[i])
pages 564 drivers/block/pktcdvd.c if (pkt->pages[i])
pages 565 drivers/block/pktcdvd.c __free_page(pkt->pages[i]);
pages 586 drivers/block/pktcdvd.c __free_page(pkt->pages[i]);
pages 1039 drivers/block/pktcdvd.c f, pkt->pages[p], offset);
pages 1040 drivers/block/pktcdvd.c if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
pages 1277 drivers/block/pktcdvd.c struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
pages 1963 drivers/block/rbd.c struct page **pages;
pages 1979 drivers/block/rbd.c pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
pages 1980 drivers/block/rbd.c if (IS_ERR(pages))
pages 1981 drivers/block/rbd.c return PTR_ERR(pages);
pages 1987 drivers/block/rbd.c NULL, 0, pages, &reply_len);
pages 1991 drivers/block/rbd.c p = page_address(pages[0]);
pages 2016 drivers/block/rbd.c ceph_copy_from_page_vector(pages, rbd_dev->object_map,
pages 2020 drivers/block/rbd.c ceph_release_page_vector(pages, num_pages);
pages 2110 drivers/block/rbd.c p = page_address(osd_data->pages[0]);
pages 2157 drivers/block/rbd.c struct page **pages;
pages 2165 drivers/block/rbd.c pages = ceph_alloc_page_vector(1, GFP_NOIO);
pages 2166 drivers/block/rbd.c if (IS_ERR(pages))
pages 2167 drivers/block/rbd.c return PTR_ERR(pages);
pages 2169 drivers/block/rbd.c p = start = page_address(pages[0]);
pages 2180 drivers/block/rbd.c osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
pages 2317 drivers/block/rbd.c struct page **pages;
pages 2327 drivers/block/rbd.c pages = ceph_alloc_page_vector(1, GFP_NOIO);
pages 2328 drivers/block/rbd.c if (IS_ERR(pages))
pages 2329 drivers/block/rbd.c return PTR_ERR(pages);
pages 2332 drivers/block/rbd.c osd_req_op_raw_data_in_pages(osd_req, which, pages,
pages 4936 drivers/block/rbd.c struct page **pages;
pages 4948 drivers/block/rbd.c pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
pages 4949 drivers/block/rbd.c if (IS_ERR(pages)) {
pages 4950 drivers/block/rbd.c ret = PTR_ERR(pages);
pages 4955 drivers/block/rbd.c osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
pages 4965 drivers/block/rbd.c ceph_copy_from_page_vector(pages, buf, 0, ret);
pages 308 drivers/block/xen-blkback/blkback.c struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
pages 314 drivers/block/xen-blkback/blkback.c unmap_data.pages = pages;
pages 327 drivers/block/xen-blkback/blkback.c pages[segs_to_unmap] = persistent_gnt->page;
pages 335 drivers/block/xen-blkback/blkback.c put_free_pages(ring, pages, segs_to_unmap);
pages 349 drivers/block/xen-blkback/blkback.c struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
pages 355 drivers/block/xen-blkback/blkback.c unmap_data.pages = pages;
pages 370 drivers/block/xen-blkback/blkback.c pages[segs_to_unmap] = persistent_gnt->page;
pages 375 drivers/block/xen-blkback/blkback.c put_free_pages(ring, pages, segs_to_unmap);
pages 383 drivers/block/xen-blkback/blkback.c put_free_pages(ring, pages, segs_to_unmap);
pages 696 drivers/block/xen-blkback/blkback.c struct grant_page **pages,
pages 704 drivers/block/xen-blkback/blkback.c if (pages[i]->persistent_gnt != NULL) {
pages 705 drivers/block/xen-blkback/blkback.c put_persistent_gnt(ring, pages[i]->persistent_gnt);
pages 708 drivers/block/xen-blkback/blkback.c if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
pages 710 drivers/block/xen-blkback/blkback.c unmap_pages[invcount] = pages[i]->page;
pages 711 drivers/block/xen-blkback/blkback.c gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
pages 712 drivers/block/xen-blkback/blkback.c GNTMAP_host_map, pages[i]->handle);
pages 713 drivers/block/xen-blkback/blkback.c pages[i]->handle = BLKBACK_INVALID_HANDLE;
pages 730 drivers/block/xen-blkback/blkback.c put_free_pages(ring, data->pages, data->count);
pages 756 drivers/block/xen-blkback/blkback.c struct grant_page **pages = req->segments;
pages 759 drivers/block/xen-blkback/blkback.c invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
pages 766 drivers/block/xen-blkback/blkback.c work->pages = req->unmap_pages;
pages 781 drivers/block/xen-blkback/blkback.c struct grant_page *pages[],
pages 792 drivers/block/xen-blkback/blkback.c invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
pages 799 drivers/block/xen-blkback/blkback.c pages += batch;
pages 805 drivers/block/xen-blkback/blkback.c struct grant_page *pages[],
pages 833 drivers/block/xen-blkback/blkback.c pages[i]->gref);
pages 841 drivers/block/xen-blkback/blkback.c pages[i]->page = persistent_gnt->page;
pages 842 drivers/block/xen-blkback/blkback.c pages[i]->persistent_gnt = persistent_gnt;
pages 844 drivers/block/xen-blkback/blkback.c if (get_free_page(ring, &pages[i]->page))
pages 846 drivers/block/xen-blkback/blkback.c addr = vaddr(pages[i]->page);
pages 847 drivers/block/xen-blkback/blkback.c pages_to_gnt[segs_to_map] = pages[i]->page;
pages 848 drivers/block/xen-blkback/blkback.c pages[i]->persistent_gnt = NULL;
pages 853 drivers/block/xen-blkback/blkback.c flags, pages[i]->gref,
pages 872 drivers/block/xen-blkback/blkback.c if (!pages[seg_idx]->persistent_gnt) {
pages 877 drivers/block/xen-blkback/blkback.c put_free_pages(ring, &pages[seg_idx]->page, 1);
pages 878 drivers/block/xen-blkback/blkback.c pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
pages 882 drivers/block/xen-blkback/blkback.c pages[seg_idx]->handle = map[new_map_idx].handle;
pages 904 drivers/block/xen-blkback/blkback.c persistent_gnt->page = pages[seg_idx]->page;
pages 911 drivers/block/xen-blkback/blkback.c pages[seg_idx]->persistent_gnt = persistent_gnt;
pages 940 drivers/block/xen-blkback/blkback.c pages[i]->handle = BLKBACK_INVALID_HANDLE;
pages 960 drivers/block/xen-blkback/blkback.c struct grant_page **pages = pending_req->indirect_pages;
pages 970 drivers/block/xen-blkback/blkback.c pages[i]->gref = req->u.indirect.indirect_grefs[i];
pages 972 drivers/block/xen-blkback/blkback.c rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
pages 983 drivers/block/xen-blkback/blkback.c segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
pages 1004 drivers/block/xen-blkback/blkback.c xen_blkbk_unmap(ring, pages, indirect_grefs);
pages 1238 drivers/block/xen-blkback/blkback.c struct grant_page **pages = pending_req->segments;
pages 1301 drivers/block/xen-blkback/blkback.c pages[i]->gref = req->u.rw.seg[i].gref;
pages 1365 drivers/block/xen-blkback/blkback.c pages[i]->page,
pages 187 drivers/block/zram/zram_drv.c const unsigned long pages)
pages 195 drivers/block/zram/zram_drv.c if (pages > cur_max)
pages 197 drivers/block/zram/zram_drv.c &zram->stats.max_used_pages, cur_max, pages);
pages 224 drivers/char/agp/agp.h kvfree(mem->pages);
pages 318 drivers/char/agp/amd-k7-agp.c page_to_phys(mem->pages[i]),
pages 83 drivers/char/agp/amd64-agp.c page_to_phys(mem->pages[i]),
pages 307 drivers/char/agp/ati-agp.c page_to_phys(mem->pages[i]),
pages 259 drivers/char/agp/efficeon-agp.c unsigned long insert = efficeon_mask_memory(mem->pages[i]);
pages 93 drivers/char/agp/generic.c mem->pages = kvmalloc(size, GFP_KERNEL);
pages 118 drivers/char/agp/generic.c if (new->pages == NULL) {
pages 144 drivers/char/agp/generic.c if (new->pages == NULL) {
pages 189 drivers/char/agp/generic.c curr->pages[i],
pages 194 drivers/char/agp/generic.c curr->pages[i],
pages 271 drivers/char/agp/generic.c new->pages[i] = page;
pages 1099 drivers/char/agp/generic.c page_to_phys(mem->pages[i]),
pages 1167 drivers/char/agp/generic.c int pages;
pages 1169 drivers/char/agp/generic.c pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
pages 1175 drivers/char/agp/generic.c new->pages[i] = NULL;
pages 1178 drivers/char/agp/generic.c new->num_scratch_pages = pages;
pages 1208 drivers/char/agp/generic.c mem->pages[i] = page;
pages 1213 drivers/char/agp/generic.c set_pages_array_uc(mem->pages, num_pages);
pages 1246 drivers/char/agp/generic.c set_pages_array_wb(mem->pages, mem->page_count);
pages 1250 drivers/char/agp/generic.c page = mem->pages[i];
pages 1258 drivers/char/agp/generic.c mem->pages[i] = NULL;
pages 363 drivers/char/agp/hp-agp.c paddr = page_to_phys(mem->pages[i]);
pages 301 drivers/char/agp/i460-agp.c mem, pg_start, type, page_to_phys(mem->pages[0]));
pages 328 drivers/char/agp/i460-agp.c paddr = page_to_phys(mem->pages[i]);
pages 453 drivers/char/agp/i460-agp.c mem->pages[i] = lp->page;
pages 482 drivers/char/agp/i460-agp.c mem->pages[i] = NULL;
pages 98 drivers/char/agp/intel-gtt.c static int intel_gtt_map_memory(struct page **pages,
pages 111 drivers/char/agp/intel-gtt.c sg_set_page(sg, pages[i], PAGE_SIZE, 0);
pages 262 drivers/char/agp/intel-gtt.c new->pages[0] = page;
pages 265 drivers/char/agp/intel-gtt.c new->pages[1] = new->pages[0] + 1;
pages 266 drivers/char/agp/intel-gtt.c new->pages[2] = new->pages[1] + 1;
pages 267 drivers/char/agp/intel-gtt.c new->pages[3] = new->pages[2] + 1;
pages 272 drivers/char/agp/intel-gtt.c new->physical = page_to_phys(new->pages[0]);
pages 281 drivers/char/agp/intel-gtt.c i8xx_destroy_pages(curr->pages[0]);
pages 283 drivers/char/agp/intel-gtt.c agp_bridge->driver->agp_destroy_page(curr->pages[0],
pages 285 drivers/char/agp/intel-gtt.c agp_bridge->driver->agp_destroy_page(curr->pages[0],
pages 884 drivers/char/agp/intel-gtt.c struct page **pages,
pages 890 drivers/char/agp/intel-gtt.c dma_addr_t addr = page_to_phys(pages[i]);
pages 930 drivers/char/agp/intel-gtt.c ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
pages 938 drivers/char/agp/intel-gtt.c intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
pages 228 drivers/char/agp/nvidia-agp.c page_to_phys(mem->pages[i]), mask_type),
pages 154 drivers/char/agp/parisc-agp.c paddr = page_to_phys(mem->pages[i]);
pages 354 drivers/char/agp/sworks-agp.c page_to_phys(mem->pages[i]), mem->type),
pages 186 drivers/char/agp/uninorth-agp.c gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
pages 188 drivers/char/agp/uninorth-agp.c gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
pages 190 drivers/char/agp/uninorth-agp.c flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
pages 191 drivers/char/agp/uninorth-agp.c (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
pages 186 drivers/char/mspec.c int pages, vdata_size;
pages 197 drivers/char/mspec.c pages = vma_pages(vma);
pages 198 drivers/char/mspec.c vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
pages 413 drivers/char/virtio_console.c int pages)
pages 423 drivers/char/virtio_console.c buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL);
pages 427 drivers/char/virtio_console.c buf->sgpages = pages;
pages 428 drivers/char/virtio_console.c if (pages > 0) {
pages 59 drivers/crypto/omap-crypto.c int pages;
pages 63 drivers/crypto/omap-crypto.c pages = get_order(new_len);
pages 65 drivers/crypto/omap-crypto.c buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
pages 162 drivers/crypto/omap-crypto.c int pages;
pages 171 drivers/crypto/omap-crypto.c pages = get_order(len);
pages 177 drivers/crypto/omap-crypto.c free_pages((unsigned long)buf, pages);
pages 700 drivers/crypto/omap-sham.c int pages;
pages 706 drivers/crypto/omap-sham.c pages = get_order(ctx->total);
pages 708 drivers/crypto/omap-sham.c buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
pages 519 drivers/crypto/s5p-sss.c void *pages;
pages 527 drivers/crypto/s5p-sss.c pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
pages 528 drivers/crypto/s5p-sss.c if (!pages) {
pages 534 drivers/crypto/s5p-sss.c s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
pages 537 drivers/crypto/s5p-sss.c sg_set_buf(*dst, pages, len);
pages 1027 drivers/crypto/s5p-sss.c unsigned int pages, len;
pages 1031 drivers/crypto/s5p-sss.c pages = get_order(len);
pages 1033 drivers/crypto/s5p-sss.c buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
pages 345 drivers/crypto/stm32/stm32-cryp.c int pages, total_in, total_out;
pages 353 drivers/crypto/stm32/stm32-cryp.c pages = total_in ? get_order(total_in) : 1;
pages 354 drivers/crypto/stm32/stm32-cryp.c buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
pages 357 drivers/crypto/stm32/stm32-cryp.c pages = total_out ? get_order(total_out) : 1;
pages 358 drivers/crypto/stm32/stm32-cryp.c buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
pages 647 drivers/crypto/stm32/stm32-cryp.c int pages, len;
pages 656 drivers/crypto/stm32/stm32-cryp.c pages = len ? get_order(len) : 1;
pages 657 drivers/crypto/stm32/stm32-cryp.c free_pages((unsigned long)buf_in, pages);
pages 660 drivers/crypto/stm32/stm32-cryp.c pages = len ? get_order(len) : 1;
pages 661 drivers/crypto/stm32/stm32-cryp.c free_pages((unsigned long)buf_out, pages);
pages 20 drivers/dma-buf/udmabuf.c struct page **pages;
pages 28 drivers/dma-buf/udmabuf.c vmf->page = ubuf->pages[vmf->pgoff];
pages 59 drivers/dma-buf/udmabuf.c ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
pages 91 drivers/dma-buf/udmabuf.c put_page(ubuf->pages[pg]);
pages 92 drivers/dma-buf/udmabuf.c kfree(ubuf->pages);
pages 99 drivers/dma-buf/udmabuf.c struct page *page = ubuf->pages[page_num];
pages 148 drivers/dma-buf/udmabuf.c ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
pages 150 drivers/dma-buf/udmabuf.c if (!ubuf->pages) {
pages 179 drivers/dma-buf/udmabuf.c ubuf->pages[pgbuf++] = page;
pages 203 drivers/dma-buf/udmabuf.c put_page(ubuf->pages[--pgbuf]);
pages 206 drivers/dma-buf/udmabuf.c kfree(ubuf->pages);
pages 182 drivers/dma/sh/rcar-dmac.c struct list_head pages;
pages 515 drivers/dma/sh/rcar-dmac.c list_add_tail(&page->node, &chan->desc.pages);
pages 642 drivers/dma/sh/rcar-dmac.c list_add_tail(&page->node, &chan->desc.pages);
pages 1026 drivers/dma/sh/rcar-dmac.c INIT_LIST_HEAD(&rchan->desc.pages);
pages 1077 drivers/dma/sh/rcar-dmac.c list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
pages 395 drivers/dma/ste_dma40.c int pages;
pages 3392 drivers/dma/ste_dma40.c base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
pages 3396 drivers/dma/ste_dma40.c base->lcla_pool.pages);
pages 3400 drivers/dma/ste_dma40.c base->lcla_pool.pages);
pages 3404 drivers/dma/ste_dma40.c free_pages(page_list[j], base->lcla_pool.pages);
pages 3414 drivers/dma/ste_dma40.c free_pages(page_list[j], base->lcla_pool.pages);
pages 3425 drivers/dma/ste_dma40.c __func__, base->lcla_pool.pages);
pages 3677 drivers/dma/ste_dma40.c base->lcla_pool.pages);
pages 41 drivers/edac/edac_mc.h #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
pages 44 drivers/edac/edac_mc.h #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
pages 1074 drivers/firewire/core-cdev.c if (a->data == 0 || client->buffer.pages == NULL ||
pages 1667 drivers/firewire/core-cdev.c if (client->buffer.pages != NULL)
pages 1753 drivers/firewire/core-cdev.c if (client->buffer.pages)
pages 35 drivers/firewire/core-iso.c buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
pages 37 drivers/firewire/core-iso.c if (buffer->pages == NULL)
pages 41 drivers/firewire/core-iso.c buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
pages 42 drivers/firewire/core-iso.c if (buffer->pages[i] == NULL)
pages 63 drivers/firewire/core-iso.c address = dma_map_page(card->device, buffer->pages[i],
pages 68 drivers/firewire/core-iso.c set_page_private(buffer->pages[i], address);
pages 97 drivers/firewire/core-iso.c return vm_map_pages_zero(vma, buffer->pages,
pages 108 drivers/firewire/core-iso.c address = page_private(buffer->pages[i]);
pages 113 drivers/firewire/core-iso.c __free_page(buffer->pages[i]);
pages 115 drivers/firewire/core-iso.c kfree(buffer->pages);
pages 116 drivers/firewire/core-iso.c buffer->pages = NULL;
pages 130 drivers/firewire/core-iso.c address = page_private(buffer->pages[i]);
pages 1092 drivers/firewire/net.c kunmap(dev->broadcast_rcv_buffer.pages[u]);
pages 1156 drivers/firewire/net.c ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
pages 89 drivers/firewire/ohci.c struct page *pages[AR_BUFFERS];
pages 654 drivers/firewire/ohci.c return page_private(ctx->pages[i]);
pages 682 drivers/firewire/ohci.c if (ctx->pages[i]) {
pages 686 drivers/firewire/ohci.c __free_page(ctx->pages[i]);
pages 975 drivers/firewire/ohci.c struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
pages 983 drivers/firewire/ohci.c ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
pages 984 drivers/firewire/ohci.c if (!ctx->pages[i])
pages 986 drivers/firewire/ohci.c dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
pages 989 drivers/firewire/ohci.c __free_page(ctx->pages[i]);
pages 990 drivers/firewire/ohci.c ctx->pages[i] = NULL;
pages 993 drivers/firewire/ohci.c set_page_private(ctx->pages[i], dma_addr);
pages 997 drivers/firewire/ohci.c pages[i] = ctx->pages[i];
pages 999 drivers/firewire/ohci.c pages[AR_BUFFERS + i] = ctx->pages[i];
pages 1000 drivers/firewire/ohci.c ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
pages 3270 drivers/firewire/ohci.c page_bus = page_private(buffer->pages[page]);
pages 3353 drivers/firewire/ohci.c page_bus = page_private(buffer->pages[page]);
pages 3416 drivers/firewire/ohci.c page_bus = page_private(buffer->pages[page]);
pages 32 drivers/firmware/efi/capsule-loader.c __free_page(cap_info->pages[--cap_info->index]);
pages 60 drivers/firmware/efi/capsule-loader.c temp_page = krealloc(cap_info->pages,
pages 66 drivers/firmware/efi/capsule-loader.c cap_info->pages = temp_page;
pages 118 drivers/firmware/efi/capsule-loader.c cap_info->capsule = vmap(cap_info->pages, cap_info->index,
pages 191 drivers/firmware/efi/capsule-loader.c cap_info->pages[cap_info->index] = page;
pages 196 drivers/firmware/efi/capsule-loader.c page = cap_info->pages[cap_info->index - 1];
pages 279 drivers/firmware/efi/capsule-loader.c kfree(cap_info->pages);
pages 304 drivers/firmware/efi/capsule-loader.c cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
pages 305 drivers/firmware/efi/capsule-loader.c if (!cap_info->pages) {
pages 312 drivers/firmware/efi/capsule-loader.c kfree(cap_info->pages);
pages 215 drivers/firmware/efi/capsule.c int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
pages 251 drivers/firmware/efi/capsule.c PAGE_SIZE - (u64)*pages % PAGE_SIZE);
pages 254 drivers/firmware/efi/capsule.c sglist[j].data = *pages++;
pages 172 drivers/firmware/efi/libstub/gop.c si->pages = 1;
pages 269 drivers/firmware/efi/libstub/gop.c si->pages = 1;
pages 122 drivers/firmware/efi/libstub/random.c unsigned long pages;
pages 130 drivers/firmware/efi/libstub/random.c pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
pages 133 drivers/firmware/efi/libstub/random.c EFI_LOADER_DATA, pages, &target);
pages 19 drivers/fpga/dfl-afu-dma-region.c static void put_all_pages(struct page **pages, int npages)
pages 24 drivers/fpga/dfl-afu-dma-region.c if (pages[i])
pages 25 drivers/fpga/dfl-afu-dma-region.c put_page(pages[i]);
pages 54 drivers/fpga/dfl-afu-dma-region.c region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
pages 55 drivers/fpga/dfl-afu-dma-region.c if (!region->pages) {
pages 61 drivers/fpga/dfl-afu-dma-region.c region->pages);
pages 75 drivers/fpga/dfl-afu-dma-region.c put_all_pages(region->pages, pinned);
pages 77 drivers/fpga/dfl-afu-dma-region.c kfree(region->pages);
pages 97 drivers/fpga/dfl-afu-dma-region.c put_all_pages(region->pages, npages);
pages 98 drivers/fpga/dfl-afu-dma-region.c kfree(region->pages);
pages 117 drivers/fpga/dfl-afu-dma-region.c if (page_to_pfn(region->pages[i]) + 1 !=
pages 118 drivers/fpga/dfl-afu-dma-region.c page_to_pfn(region->pages[i + 1]))
pages 231 drivers/fpga/dfl-afu-dma-region.c if (region->pages)
pages 354 drivers/fpga/dfl-afu-dma-region.c region->pages[0], 0,
pages 57 drivers/fpga/dfl-afu.h struct page **pages;
pages 251 drivers/fpga/fpga-mgr.c struct page **pages;
pages 272 drivers/fpga/fpga-mgr.c pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
pages 273 drivers/fpga/fpga-mgr.c if (!pages)
pages 279 drivers/fpga/fpga-mgr.c pages[index] = vmalloc_to_page(p);
pages 281 drivers/fpga/fpga-mgr.c pages[index] = kmap_to_page((void *)p);
pages 282 drivers/fpga/fpga-mgr.c if (!pages[index]) {
pages 283 drivers/fpga/fpga-mgr.c kfree(pages);
pages 293 drivers/fpga/fpga-mgr.c rc = sg_alloc_table_from_pages(&sgt, pages, index, offset_in_page(buf),
pages 295 drivers/fpga/fpga-mgr.c kfree(pages);
pages 507 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
pages 1734 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
pages 1822 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c if (bo->tbo.ttm->pages[0]) {
pages 640 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
pages 54 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
pages 222 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c int pages)
pages 238 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c for (i = 0; i < pages; i++, p++) {
pages 240 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c adev->gart.pages[p] = NULL;
pages 274 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c int pages, dma_addr_t *dma_addr, uint64_t flags,
pages 287 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c for (i = 0; i < pages; i++) {
pages 311 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c int pages, struct page **pagelist, dma_addr_t *dma_addr,
pages 327 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c for (i = 0; i < pages; i++, p++)
pages 328 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
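The dfl-afu-dma-region hits at lines 117-118 above are the standard physical-contiguity test on a pinned page array: consecutive array slots must hold consecutive page frame numbers. A sketch of the same check, with the helper name invented here:

#include <linux/mm.h>

/* Hypothetical helper: true when npages pinned pages are physically
 * contiguous, i.e. each pfn follows the previous one by exactly 1. */
static bool pages_are_contiguous(struct page **pages, int npages)
{
        int i;

        for (i = 0; i < npages - 1; i++)
                if (page_to_pfn(pages[i]) + 1 != page_to_pfn(pages[i + 1]))
                        return false;
        return true;
}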
pages 334 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
pages 377 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c adev->gart.pages = vzalloc(array_size(sizeof(void *),
pages 379 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c if (adev->gart.pages == NULL)
pages 396 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c vfree(adev->gart.pages);
pages 397 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c adev->gart.pages = NULL;
pages 50 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h struct page **pages;
pages 65 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h int pages);
pages 67 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h int pages, dma_addr_t *dma_addr, uint64_t flags,
pages 70 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h int pages, struct page **pagelist,
pages 332 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
pages 1213 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_err_handler_data *data, int pages)
pages 1216 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c unsigned int new_space = old_space + pages;
pages 1236 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c unsigned long *bps, int pages)
pages 1240 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c int i = pages;
pages 1243 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!con || !con->eh_data || !bps || pages <= 0)
pages 1251 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (data->space_left <= pages)
pages 1252 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
pages 1260 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c data->space_left -= pages;
pages 495 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h unsigned long *bps, int pages);
pages 121 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __field(u32, pages)
pages 130 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->pages = bo->tbo.num_pages;
pages 138 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->bo, __entry->pages, __entry->type,
pages 787 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
pages 858 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c pages[i] = hmm_device_entry_to_page(range, pfns[i]);
pages 859 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (unlikely(!pages[i])) {
pages 925 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
pages 930 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ttm->pages[i] = pages ? pages[i] : NULL;
pages 950 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
pages 963 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
pages 996 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
pages 1015 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ttm->pages, gtt->ttm.dma_address, flags);
pages 1026 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c &ttm->pages[page_idx],
pages 1030 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ttm->pages, gtt->ttm.dma_address, flags);
pages 1083 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ttm->pages, gtt->ttm.dma_address, flags);
pages 1269 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
pages 2273 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c page = adev->gart.pages[p];
pages 2279 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c kunmap(adev->gart.pages[p]);
pages 107 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
pages 111 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h struct page **pages)
pages 121 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
pages 220 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c unsigned pages = mem->num_pages;
pages 229 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
pages 324 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c unsigned long pages = rounddown_pow_of_two(pages_left);
pages 326 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
pages 335 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c pages_left -= pages;
pages 339 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c unsigned long pages = min(pages_left, pages_per_node);
pages 342 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c if (pages == pages_per_node)
pages 346 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c pages, alignment, 0,
pages 354 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c pages_left -= pages;
pages 391 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c unsigned pages = mem->num_pages;
pages 397 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c while (pages) {
pages 398 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c pages -= nodes->size;
pages 64 drivers/gpu/drm/ati_pcigart.c unsigned long pages;
pages 77 drivers/gpu/drm/ati_pcigart.c pages = (entry->pages <= max_pages)
pages 78 drivers/gpu/drm/ati_pcigart.c ? entry->pages : max_pages;
pages 80 drivers/gpu/drm/ati_pcigart.c for (i = 0; i < pages; i++) {
pages 105 drivers/gpu/drm/ati_pcigart.c unsigned long pages;
pages 146 drivers/gpu/drm/ati_pcigart.c pages = (entry->pages <= max_real_pages)
pages 147 drivers/gpu/drm/ati_pcigart.c ? entry->pages : max_real_pages;
pages 156 drivers/gpu/drm/ati_pcigart.c for (i = 0; i < pages; i++) {
pages 206 drivers/gpu/drm/drm_agpsupport.c unsigned long pages;
pages 215 drivers/gpu/drm/drm_agpsupport.c pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
pages 217 drivers/gpu/drm/drm_agpsupport.c memory = agp_allocate_memory(dev->agp->bridge, pages, type);
pages 226 drivers/gpu/drm/drm_agpsupport.c entry->pages = pages;
pages 376 drivers/gpu/drm/drm_agpsupport.c drm_free_agp(entry->memory, entry->pages);
pages 457 drivers/gpu/drm/drm_agpsupport.c drm_free_agp(entry->memory, entry->pages);
pages 301 drivers/gpu/drm/drm_bufs.c (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
pages 759 drivers/gpu/drm/drm_bufs.c (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
pages 60 drivers/gpu/drm/drm_cache.c static void drm_cache_flush_clflush(struct page *pages[],
pages 67 drivers/gpu/drm/drm_cache.c drm_clflush_page(*pages++);
pages 81 drivers/gpu/drm/drm_cache.c drm_clflush_pages(struct page *pages[], unsigned long num_pages)
pages 86 drivers/gpu/drm/drm_cache.c drm_cache_flush_clflush(pages, num_pages);
pages 96 drivers/gpu/drm/drm_cache.c struct page *page = pages[i];
pages 556 drivers/gpu/drm/drm_gem.c struct page *p, **pages;
pages 571 drivers/gpu/drm/drm_gem.c pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pages 572 drivers/gpu/drm/drm_gem.c if (pages == NULL)
pages 581 drivers/gpu/drm/drm_gem.c pages[i] = p;
pages 592 drivers/gpu/drm/drm_gem.c return pages;
pages 598 drivers/gpu/drm/drm_gem.c if (!pagevec_add(&pvec, pages[i]))
pages 604 drivers/gpu/drm/drm_gem.c kvfree(pages);
pages 616 drivers/gpu/drm/drm_gem.c void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
pages 636 drivers/gpu/drm/drm_gem.c if (!pages[i])
pages 640 drivers/gpu/drm/drm_gem.c set_page_dirty(pages[i]);
pages 643 drivers/gpu/drm/drm_gem.c mark_page_accessed(pages[i]);
pages 646 drivers/gpu/drm/drm_gem.c if (!pagevec_add(&pvec, pages[i]))
pages 652 drivers/gpu/drm/drm_gem.c kvfree(pages);
pages 117 drivers/gpu/drm/drm_gem_shmem_helper.c kvfree(shmem->pages);
pages 125 drivers/gpu/drm/drm_gem_shmem_helper.c if (shmem->pages)
pages 141 drivers/gpu/drm/drm_gem_shmem_helper.c struct page **pages;
pages 146 drivers/gpu/drm/drm_gem_shmem_helper.c pages = drm_gem_get_pages(obj);
pages 147 drivers/gpu/drm/drm_gem_shmem_helper.c if (IS_ERR(pages)) {
pages 148 drivers/gpu/drm/drm_gem_shmem_helper.c DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
pages 150 drivers/gpu/drm/drm_gem_shmem_helper.c return PTR_ERR(pages);
pages 153 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->pages = pages;
pages 192 drivers/gpu/drm/drm_gem_shmem_helper.c drm_gem_put_pages(obj, shmem->pages,
pages 195 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->pages = NULL;
pages 260 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
pages 479 drivers/gpu/drm/drm_gem_shmem_helper.c if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
pages 482 drivers/gpu/drm/drm_gem_shmem_helper.c page = shmem->pages[vmf->pgoff];
pages 591 drivers/gpu/drm/drm_gem_shmem_helper.c return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
pages 669 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pages 670 drivers/gpu/drm/drm_gem_shmem_helper.c if (!shmem->pages) {
pages 675 drivers/gpu/drm/drm_gem_shmem_helper.c ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
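The drm_gem.c and drm_gem_shmem_helper.c hits above trace one lifecycle: drm_gem_get_pages() pins the object's shmem-backed pages into an array, the driver consumes the array (scatterlist, vmap, fault handler), and drm_gem_put_pages() unpins it, optionally marking the pages dirty and accessed. A compressed sketch of that round trip; the function name is invented and error handling is trimmed to the essentials:

#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

/* Hypothetical: pin a shmem-backed GEM object and export its pages
 * as a scatterlist. A real driver keeps 'pages' around and calls
 * drm_gem_put_pages(obj, pages, dirty, accessed) on teardown. */
static struct sg_table *pin_to_sgt(struct drm_gem_object *obj)
{
        unsigned int npages = obj->size >> PAGE_SHIFT;
        struct page **pages;
        struct sg_table *sgt;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages))
                return ERR_CAST(pages);

        sgt = drm_prime_pages_to_sg(pages, npages);
        if (IS_ERR(sgt))
                /* never touched the contents: not dirty, not accessed */
                drm_gem_put_pages(obj, pages, false, false);
        return sgt;
}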
pages 687 drivers/gpu/drm/drm_gem_shmem_helper.c kvfree(shmem->pages);
pages 147 drivers/gpu/drm/drm_legacy.h int pages;
pages 77 drivers/gpu/drm/drm_memory.c && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
pages 93 drivers/gpu/drm/drm_memory.c phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
pages 103 drivers/gpu/drm/drm_memory.c void drm_free_agp(struct agp_memory *handle, int pages)
pages 793 drivers/gpu/drm/drm_prime.c struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
pages 804 drivers/gpu/drm/drm_prime.c ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
pages 947 drivers/gpu/drm/drm_prime.c int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
pages 965 drivers/gpu/drm/drm_prime.c if (pages)
pages 966 drivers/gpu/drm/drm_prime.c pages[index] = page;
pages 60 drivers/gpu/drm/drm_scatter.c for (i = 0; i < entry->pages; i++) {
pages 92 drivers/gpu/drm/drm_scatter.c unsigned long pages, i, j;
pages 109 drivers/gpu/drm/drm_scatter.c pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
pages 110 drivers/gpu/drm/drm_scatter.c DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
pages 112 drivers/gpu/drm/drm_scatter.c entry->pages = pages;
pages 113 drivers/gpu/drm/drm_scatter.c entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
pages 119 drivers/gpu/drm/drm_scatter.c entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
pages 126 drivers/gpu/drm/drm_scatter.c entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
pages 137 drivers/gpu/drm/drm_scatter.c memset(entry->virtual, 0, pages << PAGE_SHIFT);
pages 144 drivers/gpu/drm/drm_scatter.c for (i = (unsigned long)entry->virtual, j = 0; j < pages;
pages 163 drivers/gpu/drm/drm_scatter.c for (i = 0; i < pages; i++) {
pages 162 drivers/gpu/drm/drm_vm.c agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
pages 173 drivers/gpu/drm/drm_vm.c page = agpmem->memory->pages[offset];
pages 180 drivers/gpu/drm/drm_vm.c agpmem->memory->pages[offset],
pages 142 drivers/gpu/drm/drm_vma_manager.c unsigned long pages)
pages 167 drivers/gpu/drm/drm_vma_manager.c if (offset < start + pages)
pages 202 drivers/gpu/drm/drm_vma_manager.c struct drm_vma_offset_node *node, unsigned long pages)
pages 210 drivers/gpu/drm/drm_vma_manager.c &node->vm_node, pages);
pages 198 drivers/gpu/drm/etnaviv/etnaviv_dump.c struct page **pages;
pages 205 drivers/gpu/drm/etnaviv/etnaviv_dump.c pages = etnaviv_gem_get_pages(obj);
pages 207 drivers/gpu/drm/etnaviv/etnaviv_dump.c if (!IS_ERR(pages)) {
pages 213 drivers/gpu/drm/etnaviv/etnaviv_dump.c *bomap++ = cpu_to_le64(page_to_phys(*pages++));
pages 68 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_obj->pages = p;
pages 81 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (etnaviv_obj->pages) {
pages 82 drivers/gpu/drm/etnaviv/etnaviv_gem.c drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
pages 85 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_obj->pages = NULL;
pages 95 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (!etnaviv_obj->pages) {
pages 106 drivers/gpu/drm/etnaviv/etnaviv_gem.c sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
pages 118 drivers/gpu/drm/etnaviv/etnaviv_gem.c return etnaviv_obj->pages;
pages 178 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **pages, *page;
pages 191 drivers/gpu/drm/etnaviv/etnaviv_gem.c pages = etnaviv_gem_get_pages(etnaviv_obj);
pages 194 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (IS_ERR(pages)) {
pages 195 drivers/gpu/drm/etnaviv/etnaviv_gem.c err = PTR_ERR(pages);
pages 202 drivers/gpu/drm/etnaviv/etnaviv_gem.c page = pages[pgoff];
pages 256 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **pages;
pages 283 drivers/gpu/drm/etnaviv/etnaviv_gem.c pages = etnaviv_gem_get_pages(etnaviv_obj);
pages 284 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (IS_ERR(pages)) {
pages 285 drivers/gpu/drm/etnaviv/etnaviv_gem.c ret = PTR_ERR(pages);
pages 353 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **pages;
pages 357 drivers/gpu/drm/etnaviv/etnaviv_gem.c pages = etnaviv_gem_get_pages(obj);
pages 358 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (IS_ERR(pages))
pages 361 drivers/gpu/drm/etnaviv/etnaviv_gem.c return vmap(pages, obj->base.size >> PAGE_SHIFT,
pages 676 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct page **pages = pvec + pinned;
pages 679 drivers/gpu/drm/etnaviv/etnaviv_gem.c !userptr->ro ? FOLL_WRITE : 0, pages);
pages 690 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_obj->pages = pvec;
pages 702 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (etnaviv_obj->pages) {
pages 705 drivers/gpu/drm/etnaviv/etnaviv_gem.c release_pages(etnaviv_obj->pages, npages);
pages 706 drivers/gpu/drm/etnaviv/etnaviv_gem.c kvfree(etnaviv_obj->pages);
pages 46 drivers/gpu/drm/etnaviv/etnaviv_gem.h struct page **pages;
pages 19 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
pages 22 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
pages 80 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c if (etnaviv_obj->pages)
pages 81 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c kvfree(etnaviv_obj->pages);
pages 123 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pages 124 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c if (!etnaviv_obj->pages) {
pages 129 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
pages 95 drivers/gpu/drm/exynos/exynos_drm_fbdev.c exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
pages 381 drivers/gpu/drm/exynos/exynos_drm_g2d.c struct page **pages;
pages 401 drivers/gpu/drm/exynos/exynos_drm_g2d.c pages = frame_vector_pages(g2d_userptr->vec);
pages 402 drivers/gpu/drm/exynos/exynos_drm_g2d.c if (!IS_ERR(pages)) {
pages 406 drivers/gpu/drm/exynos/exynos_drm_g2d.c set_page_dirty_lock(pages[i]);
pages 58 drivers/gpu/drm/exynos/exynos_drm_gem.c exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
pages 60 drivers/gpu/drm/exynos/exynos_drm_gem.c if (!exynos_gem->pages) {
pages 81 drivers/gpu/drm/exynos/exynos_drm_gem.c if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
pages 101 drivers/gpu/drm/exynos/exynos_drm_gem.c kvfree(exynos_gem->pages);
pages 122 drivers/gpu/drm/exynos/exynos_drm_gem.c kvfree(exynos_gem->pages);
pages 399 drivers/gpu/drm/exynos/exynos_drm_gem.c pfn = page_to_pfn(exynos_gem->pages[page_offset]);
pages 469 drivers/gpu/drm/exynos/exynos_drm_gem.c return drm_prime_pages_to_sg(exynos_gem->pages, npages);
pages 490 drivers/gpu/drm/exynos/exynos_drm_gem.c exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pages 491 drivers/gpu/drm/exynos/exynos_drm_gem.c if (!exynos_gem->pages) {
pages 496 drivers/gpu/drm/exynos/exynos_drm_gem.c ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
pages 519 drivers/gpu/drm/exynos/exynos_drm_gem.c kvfree(exynos_gem->pages);
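etnaviv_gem.c:361 and exynos_drm_fbdev.c:95 above (and msm, rockchip, tegra further down) all turn a page array into one contiguous kernel mapping with vmap(). The pattern in isolation, assuming a normal cached mapping; etnaviv actually passes pgprot_writecombine(PAGE_KERNEL) for write-combined buffers:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map npages individual pages into one contiguous kernel virtual
 * range; vunmap(vaddr) releases it again. Returns NULL when vmalloc
 * space or page tables are exhausted. */
static void *map_buffer(struct page **pages, unsigned int npages)
{
        return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
}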
pages 51 drivers/gpu/drm/exynos/exynos_drm_gem.h struct page **pages;
pages 93 drivers/gpu/drm/gma500/framebuffer.c int pages = info->fix.line_length >> 12;
pages 94 drivers/gpu/drm/gma500/framebuffer.c psb_gtt_roll(dev, gtt, var->yoffset * pages);
pages 168 drivers/gpu/drm/gma500/gem.c pfn = page_to_pfn(r->pages[page_offset]);
pages 401 drivers/gpu/drm/gma500/gma_display.c tmp_src = kmap(gt->pages[i]);
pages 403 drivers/gpu/drm/gma500/gma_display.c kunmap(gt->pages[i]);
pages 80 drivers/gpu/drm/gma500/gtt.c struct page **pages;
pages 83 drivers/gpu/drm/gma500/gtt.c if (r->pages == NULL) {
pages 91 drivers/gpu/drm/gma500/gtt.c pages = r->pages;
pages 95 drivers/gpu/drm/gma500/gtt.c set_pages_array_wc(pages, r->npage);
pages 100 drivers/gpu/drm/gma500/gtt.c pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
pages 105 drivers/gpu/drm/gma500/gtt.c pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
pages 140 drivers/gpu/drm/gma500/gtt.c set_pages_array_wb(r->pages, r->npage);
pages 174 drivers/gpu/drm/gma500/gtt.c pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
pages 179 drivers/gpu/drm/gma500/gtt.c pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
pages 196 drivers/gpu/drm/gma500/gtt.c struct page **pages;
pages 198 drivers/gpu/drm/gma500/gtt.c WARN_ON(gt->pages);
pages 200 drivers/gpu/drm/gma500/gtt.c pages = drm_gem_get_pages(&gt->gem);
pages 201 drivers/gpu/drm/gma500/gtt.c if (IS_ERR(pages))
pages 202 drivers/gpu/drm/gma500/gtt.c return PTR_ERR(pages);
pages 205 drivers/gpu/drm/gma500/gtt.c gt->pages = pages;
pages 221 drivers/gpu/drm/gma500/gtt.c drm_gem_put_pages(&gt->gem, gt->pages, true, false);
pages 222 drivers/gpu/drm/gma500/gtt.c gt->pages = NULL;
pages 254 drivers/gpu/drm/gma500/gtt.c gt->pages, (gpu_base + gt->offset),
pages 567 drivers/gpu/drm/gma500/gtt.c if (range->pages) {
pages 38 drivers/gpu/drm/gma500/gtt.h struct page **pages; /* Backing pages if present */
pages 696 drivers/gpu/drm/gma500/mmu.c int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
pages 737 drivers/gpu/drm/gma500/mmu.c pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
pages 77 drivers/gpu/drm/gma500/mmu.h extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
pages 10513 drivers/gpu/drm/i915/display/intel_display.c base = sg_dma_address(obj->mm.pages->sgl);
pages 1323 drivers/gpu/drm/i915/display/intel_overlay.c overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
pages 22 drivers/gpu/drm/i915/gem/i915_gem_clflush.c drm_clflush_sg(obj->mm.pages);
pages 117 drivers/gpu/drm/i915/gem/i915_gem_clflush.c } else if (obj->mm.pages) {
pages 16 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct sg_table *pages;
pages 24 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c vma->pages = sleeve->pages;
pages 32 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c GEM_BUG_ON(!vma->pages);
pages 33 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c vma->pages = NULL;
pages 57 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct sg_table *pages,
pages 78 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c sleeve->pages = pages;
pages 170 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c drm_clflush_sg(w->sleeve->pages);
pages 266 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c struct sg_table *pages,
pages 274 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
pages 17 drivers/gpu/drm/i915/gem/i915_gem_client_blt.h struct sg_table *pages,
pages 39 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
pages 43 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c src = obj->mm.pages->sgl;
pages 45 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c for (i = 0; i < obj->mm.pages->nents; i++) {
pages 229 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c struct sg_table *pages;
pages 232 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c pages = dma_buf_map_attachment(obj->base.import_attach,
pages 234 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c if (IS_ERR(pages))
pages 235 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c return PTR_ERR(pages);
pages 237 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c sg_page_sizes = i915_sg_page_sizes(pages->sgl);
pages 239 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
pages 245 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c struct sg_table *pages)
pages 247 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c dma_buf_unmap_attachment(obj->base.import_attach, pages,
pages 140 drivers/gpu/drm/i915/gem/i915_gem_internal.c struct sg_table *pages)
pages 142 drivers/gpu/drm/i915/gem/i915_gem_internal.c i915_gem_gtt_finish_pages(obj, pages);
pages 143 drivers/gpu/drm/i915/gem/i915_gem_internal.c internal_free_pages(pages);
pages 35 drivers/gpu/drm/i915/gem/i915_gem_object.h struct sg_table *pages,
pages 236 drivers/gpu/drm/i915/gem/i915_gem_object.h struct sg_table *pages,
pages 256 drivers/gpu/drm/i915/gem/i915_gem_object.h return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
pages 53 drivers/gpu/drm/i915/gem/i915_gem_object_types.h struct sg_table *pages);
pages 163 drivers/gpu/drm/i915/gem/i915_gem_object_types.h struct sg_table *pages;
pages 12 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct sg_table *pages,
pages 25 drivers/gpu/drm/i915/gem/i915_gem_pages.c drm_clflush_sg(pages);
pages 29 drivers/gpu/drm/i915/gem/i915_gem_pages.c obj->mm.get_page.sg_pos = pages->sgl;
pages 32 drivers/gpu/drm/i915/gem/i915_gem_pages.c obj->mm.pages = pages;
pages 156 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct sg_table *pages;
pages 158 drivers/gpu/drm/i915/gem/i915_gem_pages.c pages = fetch_and_zero(&obj->mm.pages);
pages 159 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (IS_ERR_OR_NULL(pages))
pages 160 drivers/gpu/drm/i915/gem/i915_gem_pages.c return pages;
pages 179 drivers/gpu/drm/i915/gem/i915_gem_pages.c return pages;
pages 185 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct sg_table *pages;
pages 205 drivers/gpu/drm/i915/gem/i915_gem_pages.c pages = __i915_gem_object_unset_pages(obj);
pages 213 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (!pages && !i915_gem_object_needs_async_cancel(obj))
pages 214 drivers/gpu/drm/i915/gem/i915_gem_pages.c pages = ERR_PTR(-EINVAL);
pages 216 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (!IS_ERR(pages))
pages 217 drivers/gpu/drm/i915/gem/i915_gem_pages.c obj->ops->put_pages(obj, pages);
pages 231 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct sg_table *sgt = obj->mm.pages;
pages 235 drivers/gpu/drm/i915/gem/i915_gem_pages.c struct page **pages = stack_pages;
pages 246 drivers/gpu/drm/i915/gem/i915_gem_pages.c pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
pages 247 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (!pages)
pages 252 drivers/gpu/drm/i915/gem/i915_gem_pages.c pages[i++] = page;
pages 268 drivers/gpu/drm/i915/gem/i915_gem_pages.c addr = vmap(pages, n_pages, 0, pgprot);
pages 270 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (pages != stack_pages)
pages 271 drivers/gpu/drm/i915/gem/i915_gem_pages.c kvfree(pages);
pages 95 drivers/gpu/drm/i915/gem/i915_gem_phys.c struct sg_table *pages)
pages 97 drivers/gpu/drm/i915/gem/i915_gem_phys.c dma_addr_t dma = sg_dma_address(pages->sgl);
pages 98 drivers/gpu/drm/i915/gem/i915_gem_phys.c void *vaddr = sg_page(pages->sgl);
pages 100 drivers/gpu/drm/i915/gem/i915_gem_phys.c __i915_gem_object_release_shmem(obj, pages, false);
pages 130 drivers/gpu/drm/i915/gem/i915_gem_phys.c sg_free_table(pages);
pages 131 drivers/gpu/drm/i915/gem/i915_gem_phys.c kfree(pages);
pages 152 drivers/gpu/drm/i915/gem/i915_gem_phys.c struct sg_table *pages;
pages 185 drivers/gpu/drm/i915/gem/i915_gem_phys.c pages = __i915_gem_object_unset_pages(obj);
pages 196 drivers/gpu/drm/i915/gem/i915_gem_phys.c if (!IS_ERR_OR_NULL(pages))
pages 197 drivers/gpu/drm/i915/gem/i915_gem_phys.c i915_gem_shmem_ops.put_pages(obj, pages);
pages 203 drivers/gpu/drm/i915/gem/i915_gem_phys.c if (!IS_ERR_OR_NULL(pages)) {
pages 204 drivers/gpu/drm/i915/gem/i915_gem_phys.c unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
pages 206 drivers/gpu/drm/i915/gem/i915_gem_phys.c __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
pages 227 drivers/gpu/drm/i915/gem/i915_gem_shmem.c obj->mm.pages = ERR_PTR(-EFAULT);
pages 277 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct sg_table *pages,
pages 288 drivers/gpu/drm/i915/gem/i915_gem_shmem.c drm_clflush_sg(pages);
pages 294 drivers/gpu/drm/i915/gem/i915_gem_shmem.c shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
pages 300 drivers/gpu/drm/i915/gem/i915_gem_shmem.c __i915_gem_object_release_shmem(obj, pages, true);
pages 302 drivers/gpu/drm/i915/gem/i915_gem_shmem.c i915_gem_gtt_finish_pages(obj, pages);
pages 305 drivers/gpu/drm/i915/gem/i915_gem_shmem.c i915_gem_object_save_bit_17_swizzle(obj, pages);
pages 310 drivers/gpu/drm/i915/gem/i915_gem_shmem.c for_each_sgt_page(page, sgt_iter, pages) {
pages 324 drivers/gpu/drm/i915/gem/i915_gem_shmem.c sg_free_table(pages);
pages 325 drivers/gpu/drm/i915/gem/i915_gem_shmem.c kfree(pages);
pages 509 drivers/gpu/drm/i915/gem/i915_gem_stolen.c struct sg_table *pages =
pages 513 drivers/gpu/drm/i915/gem/i915_gem_stolen.c if (IS_ERR(pages))
pages 514 drivers/gpu/drm/i915/gem/i915_gem_stolen.c return PTR_ERR(pages);
pages 516 drivers/gpu/drm/i915/gem/i915_gem_stolen.c __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
pages 522 drivers/gpu/drm/i915/gem/i915_gem_stolen.c struct sg_table *pages)
pages 525 drivers/gpu/drm/i915/gem/i915_gem_stolen.c sg_free_table(pages);
pages 526 drivers/gpu/drm/i915/gem/i915_gem_stolen.c kfree(pages);
pages 687 drivers/gpu/drm/i915/gem/i915_gem_stolen.c vma->pages = obj->mm.pages;
pages 514 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct sg_table *pages = ERR_PTR(ret);
pages 517 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = __i915_gem_userptr_alloc_pages(obj, pvec,
pages 519 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (!IS_ERR(pages)) {
pages 521 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = NULL;
pages 525 drivers/gpu/drm/i915/gem/i915_gem_userptr.c obj->userptr.work = ERR_CAST(pages);
pages 526 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (IS_ERR(pages))
pages 585 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct sg_table *pages;
pages 639 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = ERR_PTR(pinned);
pages 642 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = __i915_gem_userptr_get_pages_schedule(obj);
pages 643 drivers/gpu/drm/i915/gem/i915_gem_userptr.c active = pages == ERR_PTR(-EAGAIN);
pages 645 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
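On the i915 side the backing store is not a page array at all: obj->mm.pages is a struct sg_table, so release paths such as shmem_put_pages() above walk it page by page (i915 uses its own for_each_sgt_page iterator). The same walk with the generic scatterlist page iterator, which mtk_drm_gem.c and omap_gem.c later in this listing also use; the function name here is invented:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Hypothetical: dirty every page backing an sg_table, the kind of
 * per-page pass a put_pages() implementation makes before freeing. */
static void sgt_mark_dirty(struct sg_table *sgt)
{
        struct sg_page_iter piter;

        for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0)
                set_page_dirty(sg_page_iter_page(&piter));
}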
pages 646 drivers/gpu/drm/i915/gem/i915_gem_userptr.c active = !IS_ERR(pages);
pages 651 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (IS_ERR(pages))
pages 655 drivers/gpu/drm/i915/gem/i915_gem_userptr.c return PTR_ERR_OR_ZERO(pages);
pages 660 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct sg_table *pages)
pages 668 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (!pages)
pages 671 drivers/gpu/drm/i915/gem/i915_gem_userptr.c __i915_gem_object_release_shmem(obj, pages, true);
pages 672 drivers/gpu/drm/i915/gem/i915_gem_userptr.c i915_gem_gtt_finish_pages(obj, pages);
pages 682 drivers/gpu/drm/i915/gem/i915_gem_userptr.c for_each_sgt_page(page, sgt_iter, pages) {
pages 711 drivers/gpu/drm/i915/gem/i915_gem_userptr.c sg_free_table(pages);
pages 712 drivers/gpu/drm/i915/gem/i915_gem_userptr.c kfree(pages);
pages 12 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c struct sg_table *pages)
pages 17 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
pages 20 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c sg_free_table(pages);
pages 21 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c kfree(pages);
pages 30 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c struct sg_table *pages;
pages 33 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c pages = kmalloc(sizeof(*pages), GFP);
pages 34 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c if (!pages)
pages 37 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c if (sg_alloc_table(pages, npages, GFP)) {
pages 38 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c kfree(pages);
pages 42 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c sg = pages->sgl;
pages 56 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
pages 60 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c src = pages->sgl;
pages 64 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c if (i915_gem_gtt_prepare_pages(obj, pages))
pages 67 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
pages 72 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c huge_free_pages(obj, pages);
pages 79 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c struct sg_table *pages)
pages 81 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c i915_gem_gtt_finish_pages(obj, pages);
pages 82 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c huge_free_pages(obj, pages);
pages 132 drivers/gpu/drm/i915/gem/selftests/huge_pages.c struct sg_table *pages)
pages 134 drivers/gpu/drm/i915/gem/selftests/huge_pages.c i915_gem_gtt_finish_pages(obj, pages);
pages 135 drivers/gpu/drm/i915/gem/selftests/huge_pages.c huge_pages_free_pages(pages);
pages 273 drivers/gpu/drm/i915/gem/selftests/huge_pages.c struct sg_table *pages)
pages 275 drivers/gpu/drm/i915/gem/selftests/huge_pages.c sg_free_table(pages);
pages 276 drivers/gpu/drm/i915/gem/selftests/huge_pages.c kfree(pages);
pages 280 drivers/gpu/drm/i915/gem/selftests/huge_pages.c struct sg_table *pages)
pages 282 drivers/gpu/drm/i915/gem/selftests/huge_pages.c fake_free_huge_pages(obj, pages);
pages 1087 drivers/gpu/drm/i915/gem/selftests/huge_pages.c static unsigned int pages[ARRAY_SIZE(page_sizes)];
pages 1104 drivers/gpu/drm/i915/gem/selftests/huge_pages.c pages[n++] = BIT(i);
pages 1111 drivers/gpu/drm/i915/gem/selftests/huge_pages.c size |= pages[i];
pages 1123 drivers/gpu/drm/i915/gem/selftests/huge_pages.c page_sizes |= pages[i];
pages 68 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
pages 27 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
pages 60 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c put_page(mock->pages[i]);
pages 69 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
pages 83 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c return kmap(mock->pages[page_num]);
pages 90 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c return kunmap(mock->pages[page_num]);
pages 123 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c mock->pages[i] = alloc_page(GFP_KERNEL);
pages 124 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c if (!mock->pages[i])
pages 141 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c put_page(mock->pages[i]);
pages 14 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h struct page *pages[];
pages 522 drivers/gpu/drm/i915/gt/intel_ringbuffer.c return sg_page(obj->mm.pages->sgl);
pages 24 drivers/gpu/drm/i915/gt/selftest_timeline.c return sg_page(obj->mm.pages->sgl);
pages 370 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
pages 418 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c .pages = obj->mm.pages,
pages 426 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c drm_clflush_sg(dummy.pages);
pages 589 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c struct sg_table *pages = uc_fw->obj->mm.pages;
pages 595 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
pages 80 drivers/gpu/drm/i915/gvt/dmabuf.c struct sg_table *pages)
pages 82 drivers/gpu/drm/i915/gvt/dmabuf.c sg_free_table(pages);
pages 83 drivers/gpu/drm/i915/gvt/dmabuf.c kfree(pages);
pages 139 drivers/gpu/drm/i915/i915_gem.c void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
pages 761 drivers/gpu/drm/i915/i915_gem_fence_reg.c struct sg_table *pages)
pages 771 drivers/gpu/drm/i915/i915_gem_fence_reg.c for_each_sgt_page(page, sgt_iter, pages) {
pages 792 drivers/gpu/drm/i915/i915_gem_fence_reg.c struct sg_table *pages)
pages 810 drivers/gpu/drm/i915/i915_gem_fence_reg.c for_each_sgt_page(page, sgt_iter, pages) {
pages 64 drivers/gpu/drm/i915/i915_gem_fence_reg.h struct sg_table *pages);
pages 66 drivers/gpu/drm/i915/i915_gem_fence_reg.h struct sg_table *pages);
pages 176 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(vma->pages);
pages 178 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = vma->obj->mm.pages;
pages 187 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(!vma->pages);
pages 189 drivers/gpu/drm/i915/i915_gem_gtt.c if (vma->pages != vma->obj->mm.pages) {
pages 190 drivers/gpu/drm/i915/i915_gem_gtt.c sg_free_table(vma->pages);
pages 191 drivers/gpu/drm/i915/i915_gem_gtt.c kfree(vma->pages);
pages 193 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = NULL;
pages 342 drivers/gpu/drm/i915/i915_gem_gtt.c page = stash->pvec.pages[--stash->pvec.nr];
pages 355 drivers/gpu/drm/i915/i915_gem_gtt.c memcpy(stash->pvec.pages + stash->pvec.nr,
pages 356 drivers/gpu/drm/i915/i915_gem_gtt.c pvec->pages + pvec->nr - nr,
pages 357 drivers/gpu/drm/i915/i915_gem_gtt.c sizeof(pvec->pages[0]) * nr);
pages 401 drivers/gpu/drm/i915/i915_gem_gtt.c stack.pages[stack.nr++] = page;
pages 404 drivers/gpu/drm/i915/i915_gem_gtt.c if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
pages 405 drivers/gpu/drm/i915/i915_gem_gtt.c page = stack.pages[--stack.nr];
pages 418 drivers/gpu/drm/i915/i915_gem_gtt.c WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
pages 460 drivers/gpu/drm/i915/i915_gem_gtt.c set_pages_array_wb(pvec->pages, pvec->nr);
pages 1161 drivers/gpu/drm/i915/i915_gem_gtt.c struct scatterlist *sg = vma->pages->sgl;
pages 1805 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = ERR_PTR(-ENODEV);
pages 1811 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(!vma->pages);
pages 1813 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = NULL;
pages 2144 drivers/gpu/drm/i915/i915_gem_gtt.c struct sg_table *pages)
pages 2148 drivers/gpu/drm/i915/i915_gem_gtt.c pages->sgl, pages->nents,
pages 2160 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(obj->mm.pages == pages);
pages 2207 drivers/gpu/drm/i915/i915_gem_gtt.c for_each_sgt_dma(addr, sgt_iter, vma->pages)
pages 2248 drivers/gpu/drm/i915/i915_gem_gtt.c for_each_sgt_dma(addr, iter, vma->pages)
pages 2419 drivers/gpu/drm/i915/i915_gem_gtt.c intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
pages 2528 drivers/gpu/drm/i915/i915_gem_gtt.c struct sg_table *pages)
pages 2542 drivers/gpu/drm/i915/i915_gem_gtt.c dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
pages 2549 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(vma->pages);
pages 2796 drivers/gpu/drm/i915/i915_gem_gtt.c set_pages_array_wb(pvec->pages, pvec->nr);
pages 3578 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = vma->obj->mm.pages;
pages 3582 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages =
pages 3587 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages =
pages 3592 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
pages 3597 drivers/gpu/drm/i915/i915_gem_gtt.c if (IS_ERR(vma->pages)) {
pages 3598 drivers/gpu/drm/i915/i915_gem_gtt.c ret = PTR_ERR(vma->pages);
pages 3599 drivers/gpu/drm/i915/i915_gem_gtt.c vma->pages = NULL;
pages 588 drivers/gpu/drm/i915/i915_gem_gtt.h struct sg_table *pages);
pages 590 drivers/gpu/drm/i915/i915_gem_gtt.h struct sg_table *pages);
pages 217 drivers/gpu/drm/i915/i915_gpu_error.c p = pv->pages[--pv->nr];
pages 285 drivers/gpu/drm/i915/i915_gpu_error.c return dst->pages[dst->page_count++] = page;
pages 394 drivers/gpu/drm/i915/i915_gpu_error.c dst->pages[dst->page_count++] = ptr;
pages 587 drivers/gpu/drm/i915/i915_gpu_error.c err_puts(m, ascii85_encode(obj->pages[page][i], out));
pages 900 drivers/gpu/drm/i915/i915_gpu_error.c free_page((unsigned long)obj->pages[page]);
pages 971 drivers/gpu/drm/i915/i915_gpu_error.c if (!vma || !vma->pages)
pages 992 drivers/gpu/drm/i915/i915_gpu_error.c for_each_sgt_dma(dma, iter, vma->pages) {
pages 1006 drivers/gpu/drm/i915/i915_gpu_error.c pool_free(&compress->pool, dst->pages[dst->page_count]);
pages 1352 drivers/gpu/drm/i915/i915_gpu_error.c .pages = obj->mm.pages,
pages 133 drivers/gpu/drm/i915/i915_gpu_error.h u32 *pages[0];
pages 336 drivers/gpu/drm/i915/i915_vma.c GEM_BUG_ON(!vma->pages);
pages 596 drivers/gpu/drm/i915/i915_vma.c GEM_BUG_ON(vma->pages);
pages 749 drivers/gpu/drm/i915/i915_vma.c GEM_BUG_ON(vma->pages);
pages 59 drivers/gpu/drm/i915/i915_vma.h struct sg_table *pages;
pages 405 drivers/gpu/drm/i915/i915_vma.h GEM_BUG_ON(!vma->pages);
pages 406 drivers/gpu/drm/i915/i915_vma.h return sg_page(vma->pages->sgl);
pages 51 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c struct sg_table *pages)
pages 53 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c sg_free_table(pages);
pages 54 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c kfree(pages);
pages 61 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c struct sg_table *pages;
pages 66 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c pages = kmalloc(sizeof(*pages), GFP);
pages 67 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (!pages)
pages 71 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (sg_alloc_table(pages, rem, GFP)) {
pages 72 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c kfree(pages);
pages 78 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (sg = pages->sgl; sg; sg = sg_next(sg)) {
pages 93 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
pages 100 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c struct sg_table *pages)
pages 102 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c fake_free_pages(obj, pages);
pages 292 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c mock_vma.pages = obj->mm.pages;
pages 1237 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c vma->pages = obj->mm.pages;
pages 560 drivers/gpu/drm/i915/selftests/i915_vma.c if (vma->pages->nents > rotated_size(a, b)) {
pages 562 drivers/gpu/drm/i915/selftests/i915_vma.c rotated_size(a, b), vma->pages->nents);
pages 574 drivers/gpu/drm/i915/selftests/i915_vma.c if (vma->pages == obj->mm.pages) {
pages 580 drivers/gpu/drm/i915/selftests/i915_vma.c sg = vma->pages->sgl;
pages 626 drivers/gpu/drm/i915/selftests/i915_vma.c for_each_sgt_dma(dma, sgt, vma->pages) {
pages 674 drivers/gpu/drm/i915/selftests/i915_vma.c if (vma->pages == vma->obj->mm.pages) {
pages 686 drivers/gpu/drm/i915/selftests/i915_vma.c if (vma->pages != vma->obj->mm.pages) {
pages 101 drivers/gpu/drm/lima/lima_gem.c pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
pages 33 drivers/gpu/drm/lima/lima_gem_prime.c return drm_prime_pages_to_sg(bo->pages, npages);
pages 13 drivers/gpu/drm/lima/lima_object.c kfree(bo->pages);
pages 27 drivers/gpu/drm/lima/lima_object.c if (bo->pages)
pages 28 drivers/gpu/drm/lima/lima_object.c drm_gem_put_pages(&bo->gem, bo->pages, true, true);
pages 81 drivers/gpu/drm/lima/lima_object.c bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
pages 82 drivers/gpu/drm/lima/lima_object.c if (!bo->pages) {
pages 88 drivers/gpu/drm/lima/lima_object.c sgt, bo->pages, bo->pages_dma_addr, npages);
pages 95 drivers/gpu/drm/lima/lima_object.c bo->pages = drm_gem_get_pages(&bo->gem);
pages 96 drivers/gpu/drm/lima/lima_object.c if (IS_ERR(bo->pages)) {
pages 97 drivers/gpu/drm/lima/lima_object.c ret = ERR_CAST(bo->pages);
pages 98 drivers/gpu/drm/lima/lima_object.c bo->pages = NULL;
pages 103 drivers/gpu/drm/lima/lima_object.c dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
pages 14 drivers/gpu/drm/lima/lima_object.h struct page **pages;
pages 261 drivers/gpu/drm/mediatek/mtk_drm_gem.c mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
pages 262 drivers/gpu/drm/mediatek/mtk_drm_gem.c if (!mtk_gem->pages)
pages 266 drivers/gpu/drm/mediatek/mtk_drm_gem.c mtk_gem->pages[i++] = sg_page_iter_page(&iter);
pages 270 drivers/gpu/drm/mediatek/mtk_drm_gem.c mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pages 283 drivers/gpu/drm/mediatek/mtk_drm_gem.c if (!mtk_gem->pages)
pages 288 drivers/gpu/drm/mediatek/mtk_drm_gem.c kfree((void *)mtk_gem->pages);
pages 32 drivers/gpu/drm/mediatek/mtk_drm_gem.h struct page **pages;
pages 891 drivers/gpu/drm/msm/adreno/a6xx_gmu.c __free_pages(bo->pages[i], 0);
pages 894 drivers/gpu/drm/msm/adreno/a6xx_gmu.c kfree(bo->pages);
pages 912 drivers/gpu/drm/msm/adreno/a6xx_gmu.c bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
pages 913 drivers/gpu/drm/msm/adreno/a6xx_gmu.c if (!bo->pages) {
pages 919 drivers/gpu/drm/msm/adreno/a6xx_gmu.c bo->pages[i] = alloc_page(GFP_KERNEL);
pages 920 drivers/gpu/drm/msm/adreno/a6xx_gmu.c if (!bo->pages[i])
pages 929 drivers/gpu/drm/msm/adreno/a6xx_gmu.c page_to_phys(bo->pages[i]), PAGE_SIZE,
pages 944 drivers/gpu/drm/msm/adreno/a6xx_gmu.c bo->virt = vmap(bo->pages, count, VM_IOREMAP,
pages 956 drivers/gpu/drm/msm/adreno/a6xx_gmu.c if (bo->pages[i])
pages 957 drivers/gpu/drm/msm/adreno/a6xx_gmu.c __free_pages(bo->pages[i], 0);
pages 960 drivers/gpu/drm/msm/adreno/a6xx_gmu.c kfree(bo->pages);
pages 16 drivers/gpu/drm/msm/adreno/a6xx_gmu.h struct page **pages;
pages 111 drivers/gpu/drm/msm/msm_gem.c if (!msm_obj->pages) {
pages 127 drivers/gpu/drm/msm/msm_gem.c msm_obj->pages = p;
pages 145 drivers/gpu/drm/msm/msm_gem.c return msm_obj->pages;
pages 157 drivers/gpu/drm/msm/msm_gem.c kvfree(msm_obj->pages);
pages 164 drivers/gpu/drm/msm/msm_gem.c if (msm_obj->pages) {
pages 178 drivers/gpu/drm/msm/msm_gem.c drm_gem_put_pages(obj, msm_obj->pages, true, false);
pages 182 drivers/gpu/drm/msm/msm_gem.c msm_obj->pages = NULL;
pages 255 drivers/gpu/drm/msm/msm_gem.c struct page **pages;
pages 277 drivers/gpu/drm/msm/msm_gem.c pages = get_pages(obj);
pages 278 drivers/gpu/drm/msm/msm_gem.c if (IS_ERR(pages)) {
pages 279 drivers/gpu/drm/msm/msm_gem.c ret = vmf_error(PTR_ERR(pages));
pages 286 drivers/gpu/drm/msm/msm_gem.c pfn = page_to_pfn(pages[pgoff]);
pages 423 drivers/gpu/drm/msm/msm_gem.c struct page **pages;
pages 438 drivers/gpu/drm/msm/msm_gem.c pages = get_pages(obj);
pages 439 drivers/gpu/drm/msm/msm_gem.c if (IS_ERR(pages))
pages 440 drivers/gpu/drm/msm/msm_gem.c return PTR_ERR(pages);
pages 575 drivers/gpu/drm/msm/msm_gem.c struct page **pages = get_pages(obj);
pages 576 drivers/gpu/drm/msm/msm_gem.c if (IS_ERR(pages)) {
pages 577 drivers/gpu/drm/msm/msm_gem.c ret = PTR_ERR(pages);
pages 580 drivers/gpu/drm/msm/msm_gem.c msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
pages 916 drivers/gpu/drm/msm/msm_gem.c if (msm_obj->pages)
pages 917 drivers/gpu/drm/msm/msm_gem.c kvfree(msm_obj->pages);
pages 1053 drivers/gpu/drm/msm/msm_gem.c struct page **pages;
pages 1069 drivers/gpu/drm/msm/msm_gem.c pages = get_pages(obj);
pages 1070 drivers/gpu/drm/msm/msm_gem.c if (IS_ERR(pages)) {
pages 1071 drivers/gpu/drm/msm/msm_gem.c ret = PTR_ERR(pages);
pages 1135 drivers/gpu/drm/msm/msm_gem.c msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
pages 1136 drivers/gpu/drm/msm/msm_gem.c if (!msm_obj->pages) {
pages 1142 drivers/gpu/drm/msm/msm_gem.c ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
pages 70 drivers/gpu/drm/msm/msm_gem.h struct page **pages;
pages 19 drivers/gpu/drm/msm/msm_gem_prime.c if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
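a6xx_gmu.c (entries 912-960 above) shows the long-hand version used when no helper fits: allocate order-0 pages one at a time, DMA-map each, and unwind everything already built when any step fails. A condensed sketch with the GMU-specific IOMMU calls dropped and the function name invented:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical: fill pages[]/addrs[] with count DMA-mapped pages.
 * On failure, every page already allocated is unmapped and freed. */
static int alloc_mapped_pages(struct device *dev, struct page **pages,
                              dma_addr_t *addrs, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto err;

                addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
                                        DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, addrs[i])) {
                        __free_page(pages[i]);
                        goto err;
                }
        }
        return 0;

err:
        while (--i >= 0) {
                dma_unmap_page(dev, addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
                __free_page(pages[i]);
        }
        return -ENOMEM;
}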
pages 22 drivers/gpu/drm/msm/msm_gem_prime.c return drm_prime_pages_to_sg(msm_obj->pages, npages);
pages 1598 drivers/gpu/drm/nouveau/nouveau_bo.c drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
pages 1627 drivers/gpu/drm/nouveau/nouveau_bo.c addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
pages 279 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long *pages)
pages 285 drivers/gpu/drm/nouveau/nouveau_dmem.c memset(pages, 0xff, npages * sizeof(*pages));
pages 307 drivers/gpu/drm/nouveau/nouveau_dmem.c pages[c] = chunk->pfn_first + i;
pages 35 drivers/gpu/drm/nouveau/nouveau_prime.c return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
pages 87 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c struct page *pages[];
pages 209 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
pages 340 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c __free_page(node->pages[i]);
pages 435 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
pages 439 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->dma_addrs = (void *)(node->pages + npages);
pages 453 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->pages[i] = p;
pages 502 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c for (i = 0; i < npages && node->pages[i] != NULL; i++) {
pages 507 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c __free_page(node->pages[i]);
pages 34 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c u64 pages;
pages 58 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c if (mem->pages == 1 && mem->mem)
pages 66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c return nvkm_mem(memory)->pages << PAGE_SHIFT;
pages 87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c while (mem->pages--) {
pages 89 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c mem->dma[mem->pages], PAGE_SIZE,
pages 91 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c __free_page(mem->mem[mem->pages]);
pages 137 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
pages 183 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c mem->pages = size >> PAGE_SHIFT;
pages 204 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c for (mem->pages = 0; size; size--, mem->pages++) {
pages 209 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
pages 212 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c if (dma_mapping_error(dev, mem->dma[mem->pages])) {
pages 217 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c mem->mem[mem->pages] = p;
pages 354 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c struct page **pages, u32 npages, u32 roll)
pages 388 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c data[i] = (pages && pages[n]) ?
pages 389 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c page_to_phys(pages[n]) : engine->dmm->dummy_pa;
pages 468 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static int fill(struct tcm_area *area, struct page **pages,
pages 500 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c dmm_txn_append(txn, &p_area, pages, npages, roll);
pages 516 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c int tiler_pin(struct tiler_block *block, struct page **pages,
pages 521 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c ret = fill(&block->area, pages, npages, roll, wait);
pages 90 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h int tiler_pin(struct tiler_block *block, struct page **pages,
pages 87 drivers/gpu/drm/omapdrm/omap_gem.c struct page **pages;
pages 225 drivers/gpu/drm/omapdrm/omap_gem.c struct page **pages;
pages 236 drivers/gpu/drm/omapdrm/omap_gem.c if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
pages 239 drivers/gpu/drm/omapdrm/omap_gem.c pages = drm_gem_get_pages(obj);
pages 240 drivers/gpu/drm/omapdrm/omap_gem.c if (IS_ERR(pages)) {
pages 241 drivers/gpu/drm/omapdrm/omap_gem.c dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
pages 242 drivers/gpu/drm/omapdrm/omap_gem.c return PTR_ERR(pages);
pages 256 drivers/gpu/drm/omapdrm/omap_gem.c addrs[i] = dma_map_page(dev->dev, pages[i],
pages 281 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->pages = pages;
pages 288 drivers/gpu/drm/omapdrm/omap_gem.c drm_gem_put_pages(obj, pages, true, false);
pages 311 drivers/gpu/drm/omapdrm/omap_gem.c drm_gem_put_pages(obj, omap_obj->pages, true, false);
pages 312 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->pages = NULL;
pages 356 drivers/gpu/drm/omapdrm/omap_gem.c if (omap_obj->pages) {
pages 358 drivers/gpu/drm/omapdrm/omap_gem.c pfn = page_to_pfn(omap_obj->pages[pgoff]);
pages 379 drivers/gpu/drm/omapdrm/omap_gem.c struct page *pages[64]; /* XXX is this too much to have on stack? */
pages 445 drivers/gpu/drm/omapdrm/omap_gem.c memcpy(pages, &omap_obj->pages[base_pgoff],
pages 447 drivers/gpu/drm/omapdrm/omap_gem.c memset(pages + slots, 0,
pages 450 drivers/gpu/drm/omapdrm/omap_gem.c err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
pages 665 drivers/gpu/drm/omapdrm/omap_gem.c ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
pages 724 drivers/gpu/drm/omapdrm/omap_gem.c struct page **pages = omap_obj->pages;
pages 734 drivers/gpu/drm/omapdrm/omap_gem.c addr = dma_map_page(dev->dev, pages[i], 0,
pages 803 drivers/gpu/drm/omapdrm/omap_gem.c ret = tiler_pin(block, omap_obj->pages, npages,
pages 913 drivers/gpu/drm/omapdrm/omap_gem.c int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
pages 927 drivers/gpu/drm/omapdrm/omap_gem.c if (!omap_obj->pages) {
pages 932 drivers/gpu/drm/omapdrm/omap_gem.c *pages = omap_obj->pages;
pages 970 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
pages 1000 drivers/gpu/drm/omapdrm/omap_gem.c WARN_ON(!omap_obj->pages); /* this can't happen */
pages 1002 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->pages, npages,
pages 1098 drivers/gpu/drm/omapdrm/omap_gem.c if (omap_obj->pages) {
pages 1100 drivers/gpu/drm/omapdrm/omap_gem.c kfree(omap_obj->pages);
pages 1256 drivers/gpu/drm/omapdrm/omap_gem.c struct page **pages;
pages 1261 drivers/gpu/drm/omapdrm/omap_gem.c pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
pages 1262 drivers/gpu/drm/omapdrm/omap_gem.c if (!pages) {
pages 1268 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->pages = pages;
pages 1271 drivers/gpu/drm/omapdrm/omap_gem.c pages[i++] = sg_page_iter_page(&iter);
pages 79 drivers/gpu/drm/omapdrm/omap_gem.h int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
pages 69 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c struct page **pages;
pages 77 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c return omap_gem_get_pages(obj, &pages, true);
pages 92 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c struct page **pages;
pages 93 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c omap_gem_get_pages(obj, &pages, false);
pages 95 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c return kmap(pages[page_num]);
pages 102 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c struct page **pages;
pages 103 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c omap_gem_get_pages(obj, &pages, false);
pages 104 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c kunmap(pages[page_num]);
pages 455 drivers/gpu/drm/panfrost/panfrost_mmu.c struct page **pages;
pages 477 drivers/gpu/drm/panfrost/panfrost_mmu.c if (!bo->base.pages) {
pages 486 drivers/gpu/drm/panfrost/panfrost_mmu.c pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
pages 488 drivers/gpu/drm/panfrost/panfrost_mmu.c if (!pages) {
pages 495 drivers/gpu/drm/panfrost/panfrost_mmu.c bo->base.pages = pages;
pages 498 drivers/gpu/drm/panfrost/panfrost_mmu.c pages = bo->base.pages;
pages 504 drivers/gpu/drm/panfrost/panfrost_mmu.c pages[i] = shmem_read_mapping_page(mapping, i);
pages 505 drivers/gpu/drm/panfrost/panfrost_mmu.c if (IS_ERR(pages[i])) {
pages 507 drivers/gpu/drm/panfrost/panfrost_mmu.c ret = PTR_ERR(pages[i]);
pages 515 drivers/gpu/drm/panfrost/panfrost_mmu.c ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
pages 653 drivers/gpu/drm/radeon/radeon.h struct page **pages;
pages 667 drivers/gpu/drm/radeon/radeon.h int pages);
pages 669 drivers/gpu/drm/radeon/radeon.h int pages, struct page **pagelist,
pages 245 drivers/gpu/drm/radeon/radeon_gart.c int pages)
pages 257 drivers/gpu/drm/radeon/radeon_gart.c for (i = 0; i < pages; i++, p++) {
drivers/gpu/drm/radeon/radeon_gart.c for (i = 0; i < pages; i++, p++) { pages 258 drivers/gpu/drm/radeon/radeon_gart.c if (rdev->gart.pages[p]) { pages 259 drivers/gpu/drm/radeon/radeon_gart.c rdev->gart.pages[p] = NULL; pages 290 drivers/gpu/drm/radeon/radeon_gart.c int pages, struct page **pagelist, dma_addr_t *dma_addr, pages 305 drivers/gpu/drm/radeon/radeon_gart.c for (i = 0; i < pages; i++, p++) { pages 306 drivers/gpu/drm/radeon/radeon_gart.c rdev->gart.pages[p] = pagelist[i]; pages 336 drivers/gpu/drm/radeon/radeon_gart.c if (rdev->gart.pages) { pages 353 drivers/gpu/drm/radeon/radeon_gart.c rdev->gart.pages = vzalloc(array_size(sizeof(void *), pages 355 drivers/gpu/drm/radeon/radeon_gart.c if (rdev->gart.pages == NULL) { pages 385 drivers/gpu/drm/radeon/radeon_gart.c vfree(rdev->gart.pages); pages 387 drivers/gpu/drm/radeon/radeon_gart.c rdev->gart.pages = NULL; pages 39 drivers/gpu/drm/radeon/radeon_prime.c return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); pages 20 drivers/gpu/drm/radeon/radeon_trace.h __field(u32, pages) pages 25 drivers/gpu/drm/radeon/radeon_trace.h __entry->pages = bo->tbo.num_pages; pages 27 drivers/gpu/drm/radeon/radeon_trace.h TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) pages 512 drivers/gpu/drm/radeon/radeon_ttm.c struct page **pages = ttm->pages + pinned; pages 515 drivers/gpu/drm/radeon/radeon_ttm.c pages, NULL); pages 523 drivers/gpu/drm/radeon/radeon_ttm.c r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, pages 534 drivers/gpu/drm/radeon/radeon_ttm.c drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, pages 543 drivers/gpu/drm/radeon/radeon_ttm.c release_pages(ttm->pages, pinned); pages 597 drivers/gpu/drm/radeon/radeon_ttm.c ttm->pages, gtt->ttm.dma_address, flags); pages 684 drivers/gpu/drm/radeon/radeon_ttm.c drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, pages 1030 drivers/gpu/drm/radeon/radeon_ttm.c page = rdev->gart.pages[p]; pages 1036 drivers/gpu/drm/radeon/radeon_ttm.c kunmap(rdev->gart.pages[p]); pages 81 drivers/gpu/drm/rockchip/rockchip_drm_gem.c rk_obj->pages = drm_gem_get_pages(&rk_obj->base); pages 82 drivers/gpu/drm/rockchip/rockchip_drm_gem.c if (IS_ERR(rk_obj->pages)) pages 83 drivers/gpu/drm/rockchip/rockchip_drm_gem.c return PTR_ERR(rk_obj->pages); pages 87 drivers/gpu/drm/rockchip/rockchip_drm_gem.c rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); pages 109 drivers/gpu/drm/rockchip/rockchip_drm_gem.c drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false); pages 117 drivers/gpu/drm/rockchip/rockchip_drm_gem.c drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true); pages 134 drivers/gpu/drm/rockchip/rockchip_drm_gem.c rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, pages 206 drivers/gpu/drm/rockchip/rockchip_drm_gem.c if (rk_obj->pages) pages 222 drivers/gpu/drm/rockchip/rockchip_drm_gem.c return vm_map_pages(vma, rk_obj->pages, count); pages 247 drivers/gpu/drm/rockchip/rockchip_drm_gem.c if (rk_obj->pages) pages 443 drivers/gpu/drm/rockchip/rockchip_drm_gem.c if (rk_obj->pages) pages 444 drivers/gpu/drm/rockchip/rockchip_drm_gem.c return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); pages 546 drivers/gpu/drm/rockchip/rockchip_drm_gem.c if (rk_obj->pages) pages 547 drivers/gpu/drm/rockchip/rockchip_drm_gem.c return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, pages 560 drivers/gpu/drm/rockchip/rockchip_drm_gem.c if (rk_obj->pages) { pages 24 drivers/gpu/drm/rockchip/rockchip_drm_gem.h struct page **pages; pages 262 
drivers/gpu/drm/tegra/fb.c if (bo->pages) { pages 263 drivers/gpu/drm/tegra/fb.c bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, pages 352 drivers/gpu/drm/tegra/fb.c if (bo && bo->pages) { pages 52 drivers/gpu/drm/tegra/gem.c return vmap(obj->pages, obj->num_pages, VM_MAP, pages 77 drivers/gpu/drm/tegra/gem.c return vmap(obj->pages + page, 1, VM_MAP, pages 205 drivers/gpu/drm/tegra/gem.c if (bo->pages) { pages 208 drivers/gpu/drm/tegra/gem.c drm_gem_put_pages(&bo->gem, bo->pages, true, true); pages 220 drivers/gpu/drm/tegra/gem.c bo->pages = drm_gem_get_pages(&bo->gem); pages 221 drivers/gpu/drm/tegra/gem.c if (IS_ERR(bo->pages)) pages 222 drivers/gpu/drm/tegra/gem.c return PTR_ERR(bo->pages); pages 226 drivers/gpu/drm/tegra/gem.c bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); pages 245 drivers/gpu/drm/tegra/gem.c drm_gem_put_pages(&bo->gem, bo->pages, false, false); pages 433 drivers/gpu/drm/tegra/gem.c if (!bo->pages) pages 437 drivers/gpu/drm/tegra/gem.c page = bo->pages[offset]; pages 452 drivers/gpu/drm/tegra/gem.c if (!bo->pages) { pages 510 drivers/gpu/drm/tegra/gem.c if (bo->pages) { pages 518 drivers/gpu/drm/tegra/gem.c sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); pages 545 drivers/gpu/drm/tegra/gem.c if (bo->pages) pages 564 drivers/gpu/drm/tegra/gem.c if (bo->pages) pages 578 drivers/gpu/drm/tegra/gem.c if (bo->pages) pages 39 drivers/gpu/drm/tegra/gem.h struct page **pages; pages 66 drivers/gpu/drm/ttm/ttm_agp_backend.c struct page *page = ttm->pages[i]; pages 71 drivers/gpu/drm/ttm/ttm_agp_backend.c mem->pages[mem->page_count++] = page; pages 316 drivers/gpu/drm/ttm/ttm_bo_util.c struct page *d = ttm->pages[page]; pages 338 drivers/gpu/drm/ttm/ttm_bo_util.c struct page *s = ttm->pages[page]; pages 604 drivers/gpu/drm/ttm/ttm_bo_util.c map->page = ttm->pages[start_page]; pages 613 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = vmap(ttm->pages + start_page, num_pages, pages 263 drivers/gpu/drm/ttm/ttm_bo_vm.c page = ttm->pages[page_offset]; pages 405 drivers/gpu/drm/ttm/ttm_bo_vm.c unsigned long pages) pages 412 drivers/gpu/drm/ttm/ttm_bo_vm.c node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); pages 247 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_pages_put(struct page *pages[], unsigned npages, pages 253 drivers/gpu/drm/ttm/ttm_page_alloc.c if (ttm_set_pages_array_wb(pages, npages)) pages 259 drivers/gpu/drm/ttm/ttm_page_alloc.c if (ttm_set_pages_wb(pages[i], pages_nr)) pages 262 drivers/gpu/drm/ttm/ttm_page_alloc.c __free_pages(pages[i], order); pages 442 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_set_pages_caching(struct page **pages, pages 449 drivers/gpu/drm/ttm/ttm_page_alloc.c r = ttm_set_pages_array_uc(pages, cpages); pages 454 drivers/gpu/drm/ttm/ttm_page_alloc.c r = ttm_set_pages_array_wc(pages, cpages); pages 469 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_handle_caching_state_failure(struct list_head *pages, pages 487 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, pages 519 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_handle_caching_state_failure(pages, pages 527 drivers/gpu/drm/ttm/ttm_page_alloc.c list_add(&p->lru, pages); pages 544 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_handle_caching_state_failure(pages, pages 557 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_handle_caching_state_failure(pages, pages 630 drivers/gpu/drm/ttm/ttm_page_alloc.c struct list_head *pages, pages 647 drivers/gpu/drm/ttm/ttm_page_alloc.c list_splice_init(&pool->list, 
pages); pages 668 drivers/gpu/drm/ttm/ttm_page_alloc.c list_cut_position(pages, &pool->list, p); pages 678 drivers/gpu/drm/ttm/ttm_page_alloc.c list_for_each_entry(page, pages, lru) { pages 700 drivers/gpu/drm/ttm/ttm_page_alloc.c r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, pages 708 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_put_pages(struct page **pages, unsigned npages, int flags, pages 723 drivers/gpu/drm/ttm/ttm_page_alloc.c struct page *p = pages[i]; pages 727 drivers/gpu/drm/ttm/ttm_page_alloc.c if (!pages[i]) { pages 736 drivers/gpu/drm/ttm/ttm_page_alloc.c if (++p != pages[i + j]) pages 744 drivers/gpu/drm/ttm/ttm_page_alloc.c if (page_count(pages[i]) != 1) pages 746 drivers/gpu/drm/ttm/ttm_page_alloc.c __free_pages(pages[i], order); pages 750 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[i++] = NULL; pages 764 drivers/gpu/drm/ttm/ttm_page_alloc.c struct page *p = pages[i]; pages 771 drivers/gpu/drm/ttm/ttm_page_alloc.c if (++p != pages[i + j]) pages 777 drivers/gpu/drm/ttm/ttm_page_alloc.c list_add_tail(&pages[i]->lru, &huge->list); pages 780 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[i++] = NULL; pages 799 drivers/gpu/drm/ttm/ttm_page_alloc.c if (pages[i]) { pages 800 drivers/gpu/drm/ttm/ttm_page_alloc.c if (page_count(pages[i]) != 1) pages 802 drivers/gpu/drm/ttm/ttm_page_alloc.c list_add_tail(&pages[i]->lru, &pool->list); pages 803 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[i] = NULL; pages 826 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_get_pages(struct page **pages, unsigned npages, int flags, pages 873 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[i++] = p++; pages 889 drivers/gpu/drm/ttm/ttm_page_alloc.c if (i > first && pages[i - 1] == p - 1) pages 890 drivers/gpu/drm/ttm/ttm_page_alloc.c swap(p, pages[i - 1]); pages 892 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[i++] = p; pages 911 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[count++] = &p[j]; pages 925 drivers/gpu/drm/ttm/ttm_page_alloc.c if (count > first && pages[count - 1] == tmp - 1) pages 926 drivers/gpu/drm/ttm/ttm_page_alloc.c swap(tmp, pages[count - 1]); pages 927 drivers/gpu/drm/ttm/ttm_page_alloc.c pages[count++] = tmp; pages 935 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_put_pages(pages, count, flags, cstate); pages 1038 drivers/gpu/drm/ttm/ttm_page_alloc.c if (!ttm->pages[i]) pages 1041 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); pages 1045 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, pages 1062 drivers/gpu/drm/ttm/ttm_page_alloc.c ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, pages 1070 drivers/gpu/drm/ttm/ttm_page_alloc.c ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], pages 1108 drivers/gpu/drm/ttm/ttm_page_alloc.c struct page *p = tt->ttm.pages[i]; pages 1112 drivers/gpu/drm/ttm/ttm_page_alloc.c if (++p != tt->ttm.pages[j]) pages 1118 drivers/gpu/drm/ttm/ttm_page_alloc.c tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], pages 1145 drivers/gpu/drm/ttm/ttm_page_alloc.c struct page *p = tt->ttm.pages[i]; pages 1148 drivers/gpu/drm/ttm/ttm_page_alloc.c if (!tt->dma_address[i] || !tt->ttm.pages[i]) { pages 1154 drivers/gpu/drm/ttm/ttm_page_alloc.c if (++p != tt->ttm.pages[j]) pages 267 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c struct page **pages, unsigned cpages) pages 272 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c r = ttm_set_pages_array_uc(pages, cpages); pages 278 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c r = ttm_set_pages_array_wc(pages, 
cpages); pages 371 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c struct page *pages[], unsigned npages) pages 384 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm_set_pages_array_wb(pages, npages)) pages 847 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm->pages[index] = d_page->p; pages 929 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], pages 938 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm->pages[j] = ttm->pages[j - 1] + 1; pages 966 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], pages 1041 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm->pages[count] = d_page->p; pages 1073 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm->pages[i] = NULL; pages 87 drivers/gpu/drm/ttm/ttm_tt.c ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*), pages 89 drivers/gpu/drm/ttm/ttm_tt.c if (!ttm->pages) pages 96 drivers/gpu/drm/ttm/ttm_tt.c ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages, pages 97 drivers/gpu/drm/ttm/ttm_tt.c sizeof(*ttm->ttm.pages) + pages 100 drivers/gpu/drm/ttm/ttm_tt.c if (!ttm->ttm.pages) pages 102 drivers/gpu/drm/ttm/ttm_tt.c ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); pages 164 drivers/gpu/drm/ttm/ttm_tt.c drm_clflush_pages(ttm->pages, ttm->num_pages); pages 167 drivers/gpu/drm/ttm/ttm_tt.c cur_page = ttm->pages[i]; pages 183 drivers/gpu/drm/ttm/ttm_tt.c cur_page = ttm->pages[j]; pages 254 drivers/gpu/drm/ttm/ttm_tt.c kvfree(ttm->pages); pages 255 drivers/gpu/drm/ttm/ttm_tt.c ttm->pages = NULL; pages 302 drivers/gpu/drm/ttm/ttm_tt.c if (ttm->pages) pages 303 drivers/gpu/drm/ttm/ttm_tt.c kvfree(ttm->pages); pages 306 drivers/gpu/drm/ttm/ttm_tt.c ttm->pages = NULL; pages 371 drivers/gpu/drm/ttm/ttm_tt.c to_page = ttm->pages[i]; pages 420 drivers/gpu/drm/ttm/ttm_tt.c from_page = ttm->pages[i]; pages 457 drivers/gpu/drm/ttm/ttm_tt.c ttm->pages[i]->mapping = ttm->bdev->dev_mapping; pages 479 drivers/gpu/drm/ttm/ttm_tt.c struct page **page = ttm->pages; pages 82 drivers/gpu/drm/udl/udl_dmabuf.c if (!obj->pages) { pages 91 drivers/gpu/drm/udl/udl_dmabuf.c obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); pages 203 drivers/gpu/drm/udl/udl_dmabuf.c obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); pages 204 drivers/gpu/drm/udl/udl_dmabuf.c if (obj->pages == NULL) { pages 209 drivers/gpu/drm/udl/udl_dmabuf.c drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); pages 81 drivers/gpu/drm/udl/udl_drv.h struct page **pages; pages 112 drivers/gpu/drm/udl/udl_gem.c if (!obj->pages) pages 115 drivers/gpu/drm/udl/udl_gem.c page = obj->pages[page_offset]; pages 121 drivers/gpu/drm/udl/udl_gem.c struct page **pages; pages 123 drivers/gpu/drm/udl/udl_gem.c if (obj->pages) pages 126 drivers/gpu/drm/udl/udl_gem.c pages = drm_gem_get_pages(&obj->base); pages 127 drivers/gpu/drm/udl/udl_gem.c if (IS_ERR(pages)) pages 128 drivers/gpu/drm/udl/udl_gem.c return PTR_ERR(pages); pages 130 drivers/gpu/drm/udl/udl_gem.c obj->pages = pages; pages 138 drivers/gpu/drm/udl/udl_gem.c kvfree(obj->pages); pages 139 drivers/gpu/drm/udl/udl_gem.c obj->pages = NULL; pages 143 drivers/gpu/drm/udl/udl_gem.c drm_gem_put_pages(&obj->base, obj->pages, false, false); pages 144 drivers/gpu/drm/udl/udl_gem.c obj->pages = NULL; pages 163 drivers/gpu/drm/udl/udl_gem.c obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL); pages 193 drivers/gpu/drm/udl/udl_gem.c if (obj->pages) pages 61 drivers/gpu/drm/vgem/vgem_drv.c kvfree(vgem_obj->pages); pages 88 
drivers/gpu/drm/vgem/vgem_drv.c if (obj->pages) { pages 89 drivers/gpu/drm/vgem/vgem_drv.c get_page(obj->pages[page_offset]); pages 90 drivers/gpu/drm/vgem/vgem_drv.c vmf->page = obj->pages[page_offset]; pages 294 drivers/gpu/drm/vgem/vgem_drv.c struct page **pages; pages 296 drivers/gpu/drm/vgem/vgem_drv.c pages = drm_gem_get_pages(&bo->base); pages 297 drivers/gpu/drm/vgem/vgem_drv.c if (IS_ERR(pages)) { pages 300 drivers/gpu/drm/vgem/vgem_drv.c return pages; pages 303 drivers/gpu/drm/vgem/vgem_drv.c bo->pages = pages; pages 307 drivers/gpu/drm/vgem/vgem_drv.c return bo->pages; pages 314 drivers/gpu/drm/vgem/vgem_drv.c drm_gem_put_pages(&bo->base, bo->pages, true, true); pages 315 drivers/gpu/drm/vgem/vgem_drv.c bo->pages = NULL; pages 324 drivers/gpu/drm/vgem/vgem_drv.c struct page **pages; pages 326 drivers/gpu/drm/vgem/vgem_drv.c pages = vgem_pin_pages(bo); pages 327 drivers/gpu/drm/vgem/vgem_drv.c if (IS_ERR(pages)) pages 328 drivers/gpu/drm/vgem/vgem_drv.c return PTR_ERR(pages); pages 333 drivers/gpu/drm/vgem/vgem_drv.c drm_clflush_pages(pages, n_pages); pages 349 drivers/gpu/drm/vgem/vgem_drv.c return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT); pages 373 drivers/gpu/drm/vgem/vgem_drv.c obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); pages 374 drivers/gpu/drm/vgem/vgem_drv.c if (!obj->pages) { pages 380 drivers/gpu/drm/vgem/vgem_drv.c drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL, pages 389 drivers/gpu/drm/vgem/vgem_drv.c struct page **pages; pages 391 drivers/gpu/drm/vgem/vgem_drv.c pages = vgem_pin_pages(bo); pages 392 drivers/gpu/drm/vgem/vgem_drv.c if (IS_ERR(pages)) pages 395 drivers/gpu/drm/vgem/vgem_drv.c return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); pages 46 drivers/gpu/drm/vgem/vgem_drv.h struct page **pages; pages 134 drivers/gpu/drm/via/via_dmablit.c vsg->pages[VIA_PFN(cur_mem) - pages 191 drivers/gpu/drm/via/via_dmablit.c put_user_pages_dirty_lock(vsg->pages, vsg->num_pages, pages 195 drivers/gpu/drm/via/via_dmablit.c vfree(vsg->pages); pages 239 drivers/gpu/drm/via/via_dmablit.c vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages)); pages 240 drivers/gpu/drm/via/via_dmablit.c if (NULL == vsg->pages) pages 245 drivers/gpu/drm/via/via_dmablit.c vsg->pages); pages 41 drivers/gpu/drm/via/via_dmablit.h struct page **pages; pages 74 drivers/gpu/drm/virtio/virtgpu_drv.h struct sg_table *pages; pages 76 drivers/gpu/drm/virtio/virtgpu_object.c if (bo->pages) pages 207 drivers/gpu/drm/virtio/virtgpu_object.c struct page **pages = bo->tbo.ttm->pages; pages 216 drivers/gpu/drm/virtio/virtgpu_object.c if (bo->pages) pages 221 drivers/gpu/drm/virtio/virtgpu_object.c bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL); pages 222 drivers/gpu/drm/virtio/virtgpu_object.c if (!bo->pages) pages 229 drivers/gpu/drm/virtio/virtgpu_object.c ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, pages 236 drivers/gpu/drm/virtio/virtgpu_object.c kfree(bo->pages); pages 237 drivers/gpu/drm/virtio/virtgpu_object.c bo->pages = NULL; pages 243 drivers/gpu/drm/virtio/virtgpu_object.c sg_free_table(bo->pages); pages 244 drivers/gpu/drm/virtio/virtgpu_object.c kfree(bo->pages); pages 245 drivers/gpu/drm/virtio/virtgpu_object.c bo->pages = NULL; pages 37 drivers/gpu/drm/virtio/virtgpu_prime.c if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) pages 41 drivers/gpu/drm/virtio/virtgpu_prime.c return drm_prime_pages_to_sg(bo->tbo.ttm->pages, pages 258 drivers/gpu/drm/virtio/virtgpu_ttm.c if 
(bo->pages) pages 496 drivers/gpu/drm/virtio/virtgpu_vq.c bo->pages->sgl, bo->pages->nents, pages 903 drivers/gpu/drm/virtio/virtgpu_vq.c bo->pages->sgl, bo->pages->nents, pages 973 drivers/gpu/drm/virtio/virtgpu_vq.c if (!obj->pages) { pages 983 drivers/gpu/drm/virtio/virtgpu_vq.c obj->pages->sgl, obj->pages->nents, pages 987 drivers/gpu/drm/virtio/virtgpu_vq.c nents = obj->pages->nents; pages 998 drivers/gpu/drm/virtio/virtgpu_vq.c for_each_sg(obj->pages->sgl, sg, nents, si) { pages 1026 drivers/gpu/drm/virtio/virtgpu_vq.c obj->pages->sgl, obj->mapped, pages 90 drivers/gpu/drm/vkms/vkms_drv.h struct page **pages; pages 35 drivers/gpu/drm/vkms/vkms_gem.c WARN_ON(gem->pages); pages 59 drivers/gpu/drm/vkms/vkms_gem.c if (obj->pages) { pages 60 drivers/gpu/drm/vkms/vkms_gem.c get_page(obj->pages[page_offset]); pages 61 drivers/gpu/drm/vkms/vkms_gem.c vmf->page = obj->pages[page_offset]; pages 153 drivers/gpu/drm/vkms/vkms_gem.c if (!vkms_obj->pages) { pages 154 drivers/gpu/drm/vkms/vkms_gem.c struct page **pages = drm_gem_get_pages(gem_obj); pages 156 drivers/gpu/drm/vkms/vkms_gem.c if (IS_ERR(pages)) pages 157 drivers/gpu/drm/vkms/vkms_gem.c return pages; pages 159 drivers/gpu/drm/vkms/vkms_gem.c if (cmpxchg(&vkms_obj->pages, NULL, pages)) pages 160 drivers/gpu/drm/vkms/vkms_gem.c drm_gem_put_pages(gem_obj, pages, false, true); pages 163 drivers/gpu/drm/vkms/vkms_gem.c return vkms_obj->pages; pages 173 drivers/gpu/drm/vkms/vkms_gem.c WARN_ON(vkms_obj->pages); pages 183 drivers/gpu/drm/vkms/vkms_gem.c drm_gem_put_pages(obj, vkms_obj->pages, false, true); pages 184 drivers/gpu/drm/vkms/vkms_gem.c vkms_obj->pages = NULL; pages 199 drivers/gpu/drm/vkms/vkms_gem.c struct page **pages = _get_pages(vkms_obj); pages 201 drivers/gpu/drm/vkms/vkms_gem.c if (IS_ERR(pages)) { pages 202 drivers/gpu/drm/vkms/vkms_gem.c ret = PTR_ERR(pages); pages 206 drivers/gpu/drm/vkms/vkms_gem.c vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL); pages 216 drivers/gpu/drm/vkms/vkms_gem.c drm_gem_put_pages(obj, vkms_obj->pages, false, true); pages 217 drivers/gpu/drm/vkms/vkms_gem.c vkms_obj->pages = NULL; pages 482 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c d.dst_pages = dst->ttm->pages; pages 483 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c d.src_pages = src->ttm->pages; pages 297 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h struct page **pages; pages 319 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h struct page **pages; pages 286 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c return viter->pages[viter->i]; pages 300 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c return page_to_phys(viter->pages[viter->i]); pages 330 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c viter->pages = vsgt->pages; pages 427 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c vsgt->pages = vmw_tt->dma_ttm.ttm.pages; pages 445 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, pages 160 drivers/gpu/drm/xen/xen_drm_front.c u32 bpp, u64 size, struct page **pages) pages 182 drivers/gpu/drm/xen/xen_drm_front.c buf_cfg.pages = pages; pages 148 drivers/gpu/drm/xen/xen_drm_front.h u32 bpp, u64 size, struct page **pages); pages 29 drivers/gpu/drm/xen/xen_drm_front_gem.c struct page **pages; pages 48 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages = kvmalloc_array(xen_obj->num_pages, pages 50 drivers/gpu/drm/xen/xen_drm_front_gem.c return !xen_obj->pages ? 
-ENOMEM : 0; pages 55 drivers/gpu/drm/xen/xen_drm_front_gem.c kvfree(xen_obj->pages); pages 56 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages = NULL; pages 103 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages); pages 119 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages = drm_gem_get_pages(&xen_obj->base); pages 120 drivers/gpu/drm/xen/xen_drm_front_gem.c if (IS_ERR_OR_NULL(xen_obj->pages)) { pages 121 drivers/gpu/drm/xen/xen_drm_front_gem.c ret = PTR_ERR(xen_obj->pages); pages 122 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages = NULL; pages 153 drivers/gpu/drm/xen/xen_drm_front_gem.c if (xen_obj->pages) { pages 156 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages); pages 160 drivers/gpu/drm/xen/xen_drm_front_gem.c xen_obj->pages, true, false); pages 172 drivers/gpu/drm/xen/xen_drm_front_gem.c return xen_obj->pages; pages 179 drivers/gpu/drm/xen/xen_drm_front_gem.c if (!xen_obj->pages) pages 182 drivers/gpu/drm/xen/xen_drm_front_gem.c return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages); pages 206 drivers/gpu/drm/xen/xen_drm_front_gem.c ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages, pages 213 drivers/gpu/drm/xen/xen_drm_front_gem.c 0, 0, 0, size, xen_obj->pages); pages 253 drivers/gpu/drm/xen/xen_drm_front_gem.c ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); pages 279 drivers/gpu/drm/xen/xen_drm_front_gem.c if (!xen_obj->pages) pages 283 drivers/gpu/drm/xen/xen_drm_front_gem.c return vmap(xen_obj->pages, xen_obj->num_pages, pages 183 drivers/hv/hyperv_vmbus.h struct page *pages, u32 pagecnt); pages 193 drivers/hv/ring_buffer.c struct page *pages, u32 page_cnt) pages 209 drivers/hv/ring_buffer.c pages_wraparound[0] = pages; pages 211 drivers/hv/ring_buffer.c pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1]; pages 526 drivers/hwmon/pmbus/adm1275.c info->pages = 1; pages 377 drivers/hwmon/pmbus/ibm-cffps.c .pages = 1, pages 388 drivers/hwmon/pmbus/ibm-cffps.c .pages = 2, pages 179 drivers/hwmon/pmbus/inspur-ipsps.c .pages = 1, pages 105 drivers/hwmon/pmbus/ir35221.c info->pages = 2; pages 22 drivers/hwmon/pmbus/ir38064.c .pages = 1, pages 33 drivers/hwmon/pmbus/irps5401.c .pages = 5, pages 107 drivers/hwmon/pmbus/isl68137.c .pages = 2, pages 434 drivers/hwmon/pmbus/lm25066.c info->pages = 1; pages 642 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC2974_NUM_PAGES; pages 645 drivers/hwmon/pmbus/ltc2978.c for (i = 0; i < info->pages; i++) { pages 654 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC2974_NUM_PAGES; pages 658 drivers/hwmon/pmbus/ltc2978.c for (i = 0; i < info->pages; i++) { pages 670 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC2978_NUM_PAGES; pages 686 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC3880_NUM_PAGES; pages 701 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC3880_NUM_PAGES; pages 716 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC3883_NUM_PAGES; pages 727 drivers/hwmon/pmbus/ltc2978.c info->pages = LTC3880_NUM_PAGES; pages 744 drivers/hwmon/pmbus/ltc2978.c info->num_regulators = info->pages; pages 146 drivers/hwmon/pmbus/ltc3815.c .pages = 1, pages 65 drivers/hwmon/pmbus/max16064.c .pages = 4, pages 16 drivers/hwmon/pmbus/max20751.c .pages = 1, pages 248 drivers/hwmon/pmbus/max31785.c .pages = MAX31785_NR_PAGES, pages 318 drivers/hwmon/pmbus/max31785.c info->pages = virtual + 1; pages 249 drivers/hwmon/pmbus/max34440.c .pages = 14, pages 291 drivers/hwmon/pmbus/max34440.c .pages = 12, pages 334 drivers/hwmon/pmbus/max34440.c .pages = 7, pages 371 drivers/hwmon/pmbus/max34440.c .pages = 21, 
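Aside: the hwmon/pmbus hits above and below all converge on one field, pmbus_driver_info.pages, which tells the PMBus core how many pages (regulator rails) a chip exposes; pmbus_core.c then loops page = 0 .. info->pages - 1 when registering sensors and rejects out-of-range values (the info->pages <= 0 || info->pages > PMBUS_PAGES check at pmbus_core.c line 2157 above). A minimal sketch of that pattern follows, assuming a hypothetical two-rail chip; the example_* names, func flags, and probe wiring are illustrative, not taken from any driver listed here.

#include <linux/i2c.h>
#include <linux/module.h>
#include "pmbus.h"	/* in-tree drivers/hwmon/pmbus/pmbus.h */

/* Hypothetical two-rail device: pmbus_core iterates pages 0 and 1. */
static struct pmbus_driver_info example_pmbus_info = {
	.pages = 2,
	.format[PSC_VOLTAGE_OUT] = linear,
	.func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
	.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
};

static int example_pmbus_probe(struct i2c_client *client,
			       const struct i2c_device_id *id)
{
	/* pmbus_do_probe() walks info->pages to register attributes. */
	return pmbus_do_probe(client, id, &example_pmbus_info);
}

Chips that cannot hard-code the count discover it at probe time instead, as the ucd9200.c hits below show: the driver counts the configured rails, stores the result in info->pages, and only then hands the info structure to the core.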
pages 395 drivers/hwmon/pmbus/max34440.c .pages = 18, pages 426 drivers/hwmon/pmbus/max34440.c .pages = 23, pages 140 drivers/hwmon/pmbus/max8688.c .pages = 1, pages 19 drivers/hwmon/pmbus/pmbus.c int pages; pages 68 drivers/hwmon/pmbus/pmbus.c for (page = 0; page < info->pages; page++) { pages 94 drivers/hwmon/pmbus/pmbus.c if (!info->pages) { pages 109 drivers/hwmon/pmbus/pmbus.c info->pages = page; pages 111 drivers/hwmon/pmbus/pmbus.c info->pages = 1; pages 183 drivers/hwmon/pmbus/pmbus.c info->pages = device_info->pages; pages 191 drivers/hwmon/pmbus/pmbus.c .pages = 1, pages 195 drivers/hwmon/pmbus/pmbus.c .pages = 0, pages 199 drivers/hwmon/pmbus/pmbus.c .pages = 1, pages 383 drivers/hwmon/pmbus/pmbus.h int pages; /* Total number of pages */ pages 474 drivers/hwmon/pmbus/pmbus_core.c for (i = 0; i < data->info->pages; i++) pages 569 drivers/hwmon/pmbus/pmbus_core.c for (i = 0; i < info->pages; i++) { pages 1279 drivers/hwmon/pmbus/pmbus_core.c for (p = 1; p < info->pages; p++) { pages 1298 drivers/hwmon/pmbus/pmbus_core.c int page, pages; pages 1301 drivers/hwmon/pmbus/pmbus_core.c pages = paged ? info->pages : 1; pages 1302 drivers/hwmon/pmbus/pmbus_core.c for (page = 0; page < pages; page++) { pages 1849 drivers/hwmon/pmbus/pmbus_core.c for (page = 0; page < info->pages; page++) { pages 2144 drivers/hwmon/pmbus/pmbus_core.c if (data->info->pages) pages 2157 drivers/hwmon/pmbus/pmbus_core.c if (info->pages <= 0 || info->pages > PMBUS_PAGES) { pages 2158 drivers/hwmon/pmbus/pmbus_core.c dev_err(dev, "Bad number of PMBus pages: %d\n", info->pages); pages 2162 drivers/hwmon/pmbus/pmbus_core.c for (page = 0; page < info->pages; page++) { pages 2309 drivers/hwmon/pmbus/pmbus_core.c data->info->pages * 10, sizeof(*entries), pages 2314 drivers/hwmon/pmbus/pmbus_core.c for (i = 0; i < data->info->pages; ++i) { pages 49 drivers/hwmon/pmbus/pxe1610.c .pages = PXE1610_NUM_PAGES, pages 16 drivers/hwmon/pmbus/tps40422.c .pages = 2, pages 54 drivers/hwmon/pmbus/tps53679.c .pages = TPS53679_PAGE_NUM, pages 530 drivers/hwmon/pmbus/ucd9000.c info->pages = ret; pages 531 drivers/hwmon/pmbus/ucd9000.c if (!info->pages) { pages 549 drivers/hwmon/pmbus/ucd9000.c if (page >= info->pages) pages 134 drivers/hwmon/pmbus/ucd9200.c info->pages = 0; pages 138 drivers/hwmon/pmbus/ucd9200.c info->pages++; pages 140 drivers/hwmon/pmbus/ucd9200.c if (!info->pages) { pages 144 drivers/hwmon/pmbus/ucd9200.c dev_info(&client->dev, "%d rails configured\n", info->pages); pages 154 drivers/hwmon/pmbus/ucd9200.c for (i = 0; i < info->pages; i++) { pages 175 drivers/hwmon/pmbus/ucd9200.c if (info->pages > 1) pages 185 drivers/hwmon/pmbus/ucd9200.c for (i = 1; i < info->pages; i++) pages 363 drivers/hwmon/pmbus/zl6100.c info->pages = 1; pages 136 drivers/hwtracing/coresight/coresight-catu.c ptr = page_address(table_pages->pages[pg_idx]); pages 261 drivers/hwtracing/coresight/coresight-catu.c ssize_t size, void **pages) pages 272 drivers/hwtracing/coresight/coresight-catu.c size >> PAGE_SHIFT, pages); pages 330 drivers/hwtracing/coresight/coresight-catu.c struct etr_buf *etr_buf, int node, void **pages) pages 344 drivers/hwtracing/coresight/coresight-catu.c etr_buf->size, pages); pages 374 drivers/hwtracing/coresight/coresight-etb10.c struct perf_event *event, void **pages, pages 388 drivers/hwtracing/coresight/coresight-etb10.c buf->data_pages = pages; pages 207 drivers/hwtracing/coresight/coresight-etm-perf.c static void *etm_setup_aux(struct perf_event *event, void **pages, pages 280 
drivers/hwtracing/coresight/coresight-etm-perf.c sink_ops(sink)->alloc_buffer(sink, event, pages, pages 389 drivers/hwtracing/coresight/coresight-tmc-etf.c struct perf_event *event, void **pages, pages 404 drivers/hwtracing/coresight/coresight-tmc-etf.c buf->data_pages = pages; pages 46 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages; pages 171 drivers/hwtracing/coresight/coresight-tmc-etr.c if (tmc_pages->pages && tmc_pages->pages[i]) pages 172 drivers/hwtracing/coresight/coresight-tmc-etr.c __free_page(tmc_pages->pages[i]); pages 175 drivers/hwtracing/coresight/coresight-tmc-etr.c kfree(tmc_pages->pages); pages 177 drivers/hwtracing/coresight/coresight-tmc-etr.c tmc_pages->pages = NULL; pages 192 drivers/hwtracing/coresight/coresight-tmc-etr.c enum dma_data_direction dir, void **pages) pages 204 drivers/hwtracing/coresight/coresight-tmc-etr.c tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages), pages 206 drivers/hwtracing/coresight/coresight-tmc-etr.c if (!tmc_pages->pages) { pages 213 drivers/hwtracing/coresight/coresight-tmc-etr.c if (pages && pages[i]) { pages 214 drivers/hwtracing/coresight/coresight-tmc-etr.c page = virt_to_page(pages[i]); pages 225 drivers/hwtracing/coresight/coresight-tmc-etr.c tmc_pages->pages[i] = page; pages 274 drivers/hwtracing/coresight/coresight-tmc-etr.c sg_table->table_vaddr = vmap(table_pages->pages, pages 285 drivers/hwtracing/coresight/coresight-tmc-etr.c static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages) pages 292 drivers/hwtracing/coresight/coresight-tmc-etr.c DMA_FROM_DEVICE, pages); pages 294 drivers/hwtracing/coresight/coresight-tmc-etr.c sg_table->data_vaddr = vmap(sg_table->data_pages.pages, pages 319 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages) pages 332 drivers/hwtracing/coresight/coresight-tmc-etr.c rc = tmc_alloc_data_pages(sg_table, pages); pages 401 drivers/hwtracing/coresight/coresight-tmc-etr.c *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset; pages 557 drivers/hwtracing/coresight/coresight-tmc-etr.c unsigned long size, void **pages) pages 570 drivers/hwtracing/coresight/coresight-tmc-etr.c sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages); pages 592 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages) pages 598 drivers/hwtracing/coresight/coresight-tmc-etr.c if (pages) pages 672 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages) pages 678 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf->size, pages); pages 798 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages) pages 808 drivers/hwtracing/coresight/coresight-tmc-etr.c node, pages); pages 827 drivers/hwtracing/coresight/coresight-tmc-etr.c int node, void **pages) pages 859 drivers/hwtracing/coresight/coresight-tmc-etr.c if (!pages && pages 862 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf, node, pages); pages 865 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf, node, pages); pages 868 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf, node, pages); pages 1204 drivers/hwtracing/coresight/coresight-tmc-etr.c int nr_pages, void **pages, bool snapshot) pages 1243 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages, bool snapshot) pages 1279 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); pages 1307 drivers/hwtracing/coresight/coresight-tmc-etr.c void **pages, bool snapshot) pages 1313 drivers/hwtracing/coresight/coresight-tmc-etr.c return 
alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); pages 1318 drivers/hwtracing/coresight/coresight-tmc-etr.c int nr_pages, void **pages, bool snapshot) pages 1322 drivers/hwtracing/coresight/coresight-tmc-etr.c pages, snapshot); pages 1325 drivers/hwtracing/coresight/coresight-tmc-etr.c pages, snapshot); pages 1330 drivers/hwtracing/coresight/coresight-tmc-etr.c int nr_pages, void **pages, bool snapshot) pages 1342 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot); pages 1362 drivers/hwtracing/coresight/coresight-tmc-etr.c struct perf_event *event, void **pages, pages 1369 drivers/hwtracing/coresight/coresight-tmc-etr.c nr_pages, pages, snapshot); pages 1378 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_perf->pages = pages; pages 1433 drivers/hwtracing/coresight/coresight-tmc-etr.c dst_pages = (char **)etr_perf->pages; pages 214 drivers/hwtracing/coresight/coresight-tmc.h int node, void **pages); pages 230 drivers/hwtracing/coresight/coresight-tmc.h struct page **pages; pages 313 drivers/hwtracing/coresight/coresight-tmc.h void **pages); pages 1455 drivers/iio/adc/at91-sama5d2_adc.c unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE * pages 1470 drivers/iio/adc/at91-sama5d2_adc.c pages * PAGE_SIZE, pages 1497 drivers/iio/adc/at91-sama5d2_adc.c dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE, pages 1510 drivers/iio/adc/at91-sama5d2_adc.c unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE * pages 1521 drivers/iio/adc/at91-sama5d2_adc.c dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE, pages 216 drivers/infiniband/core/umem_odp.c size_t pages; pages 229 drivers/infiniband/core/umem_odp.c pages = (umem_odp->interval_tree.last - pages 232 drivers/infiniband/core/umem_odp.c if (!pages) pages 243 drivers/infiniband/core/umem_odp.c pages, sizeof(*umem_odp->page_list), GFP_KERNEL); pages 248 drivers/infiniband/core/umem_odp.c pages, sizeof(*umem_odp->dma_list), GFP_KERNEL); pages 2181 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->frmr.page_list = mr->pages; pages 3331 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (mr->pages) { pages 3334 drivers/infiniband/hw/bnxt_re/ib_verbs.c kfree(mr->pages); pages 3336 drivers/infiniband/hw/bnxt_re/ib_verbs.c mr->pages = NULL; pages 3352 drivers/infiniband/hw/bnxt_re/ib_verbs.c mr->pages[mr->npages++] = addr; pages 3396 drivers/infiniband/hw/bnxt_re/ib_verbs.c mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); pages 3397 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (!mr->pages) { pages 3413 drivers/infiniband/hw/bnxt_re/ib_verbs.c kfree(mr->pages); pages 115 drivers/infiniband/hw/bnxt_re/ib_verbs.h u64 *pages; pages 399 drivers/infiniband/hw/bnxt_re/main.c int pages, int type, u32 ring_mask, pages 415 drivers/infiniband/hw/bnxt_re/main.c if (pages > 1) { pages 977 drivers/infiniband/hw/bnxt_re/main.c int pages; pages 1010 drivers/infiniband/hw/bnxt_re/main.c pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count; pages 1011 drivers/infiniband/hw/bnxt_re/main.c rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, pages 1340 drivers/infiniband/hw/bnxt_re/main.c int pages, vid; pages 1389 drivers/infiniband/hw/bnxt_re/main.c pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count; pages 1391 drivers/infiniband/hw/bnxt_re/main.c rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, pages 86 drivers/infiniband/hw/bnxt_re/qplib_res.c struct scatterlist *sghead, u32 pages, pages 94 
drivers/infiniband/hw/bnxt_re/qplib_res.c pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL); pages 98 drivers/infiniband/hw/bnxt_re/qplib_res.c pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL); pages 108 drivers/infiniband/hw/bnxt_re/qplib_res.c for (i = 0; i < pages; i++) { pages 166 drivers/infiniband/hw/bnxt_re/qplib_res.c u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0; pages 187 drivers/infiniband/hw/bnxt_re/qplib_res.c pages = (slots * size) / pg_size + aux_pages; pages 189 drivers/infiniband/hw/bnxt_re/qplib_res.c pages++; pages 190 drivers/infiniband/hw/bnxt_re/qplib_res.c if (!pages) pages 195 drivers/infiniband/hw/bnxt_re/qplib_res.c pages = sg_info->npages; pages 200 drivers/infiniband/hw/bnxt_re/qplib_res.c if (sghead && (pages == MAX_PBL_LVL_0_PGS)) pages 202 drivers/infiniband/hw/bnxt_re/qplib_res.c pages, maps, pg_size); pages 211 drivers/infiniband/hw/bnxt_re/qplib_res.c if (pages > MAX_PBL_LVL_0_PGS) { pages 212 drivers/infiniband/hw/bnxt_re/qplib_res.c if (pages > MAX_PBL_LVL_1_PGS) { pages 229 drivers/infiniband/hw/bnxt_re/qplib_res.c pages, maps, pg_size); pages 258 drivers/infiniband/hw/bnxt_re/qplib_res.c pages, maps, pg_size); pages 662 drivers/infiniband/hw/bnxt_re/qplib_sp.c int pg_ptrs, pages, i, rc; pages 671 drivers/infiniband/hw/bnxt_re/qplib_sp.c pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT; pages 672 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (!pages) pages 673 drivers/infiniband/hw/bnxt_re/qplib_sp.c pages++; pages 675 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (pages > MAX_PBL_LVL_1_PGS) { pages 678 drivers/infiniband/hw/bnxt_re/qplib_sp.c pages, MAX_PBL_LVL_1_PGS); pages 685 drivers/infiniband/hw/bnxt_re/qplib_sp.c mr->hwq.max_elements = pages; pages 745 drivers/infiniband/hw/bnxt_re/qplib_sp.c int pg_ptrs, pages, rc; pages 749 drivers/infiniband/hw/bnxt_re/qplib_sp.c pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT; pages 750 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (!pages) pages 751 drivers/infiniband/hw/bnxt_re/qplib_sp.c pages++; pages 753 drivers/infiniband/hw/bnxt_re/qplib_sp.c if (pages > MAX_PBL_LVL_1_PGS) pages 756 drivers/infiniband/hw/bnxt_re/qplib_sp.c frpl->hwq.max_elements = pages; pages 97 drivers/infiniband/hw/cxgb3/iwch_mem.c int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) pages 99 drivers/infiniband/hw/cxgb3/iwch_mem.c return cxio_write_pbl(&mhp->rhp->rdev, pages, pages 340 drivers/infiniband/hw/cxgb3/iwch_provider.c kfree(mhp->pages); pages 436 drivers/infiniband/hw/cxgb3/iwch_provider.c __be64 *pages; pages 469 drivers/infiniband/hw/cxgb3/iwch_provider.c pages = (__be64 *) __get_free_page(GFP_KERNEL); pages 470 drivers/infiniband/hw/cxgb3/iwch_provider.c if (!pages) { pages 478 drivers/infiniband/hw/cxgb3/iwch_provider.c pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter)); pages 479 drivers/infiniband/hw/cxgb3/iwch_provider.c if (i == PAGE_SIZE / sizeof(*pages)) { pages 480 drivers/infiniband/hw/cxgb3/iwch_provider.c err = iwch_write_pbl(mhp, pages, i, n); pages 489 drivers/infiniband/hw/cxgb3/iwch_provider.c err = iwch_write_pbl(mhp, pages, i, n); pages 492 drivers/infiniband/hw/cxgb3/iwch_provider.c free_page((unsigned long) pages); pages 605 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); pages 606 drivers/infiniband/hw/cxgb3/iwch_provider.c if (!mhp->pages) pages 636 drivers/infiniband/hw/cxgb3/iwch_provider.c kfree(mhp->pages); pages 650 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->pages[mhp->npages++] = 
addr; pages 80 drivers/infiniband/hw/cxgb3/iwch_provider.h u64 *pages; pages 343 drivers/infiniband/hw/cxgb3/iwch_provider.h int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset); pages 184 drivers/infiniband/hw/cxgb3/iwch_qp.c *p = cpu_to_be64((u64)mhp->pages[i]); pages 511 drivers/infiniband/hw/cxgb4/mem.c __be64 *pages; pages 557 drivers/infiniband/hw/cxgb4/mem.c pages = (__be64 *) __get_free_page(GFP_KERNEL); pages 558 drivers/infiniband/hw/cxgb4/mem.c if (!pages) { pages 566 drivers/infiniband/hw/cxgb4/mem.c pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter)); pages 567 drivers/infiniband/hw/cxgb4/mem.c if (i == PAGE_SIZE / sizeof(*pages)) { pages 568 drivers/infiniband/hw/cxgb4/mem.c err = write_pbl(&mhp->rhp->rdev, pages, pages 579 drivers/infiniband/hw/cxgb4/mem.c err = write_pbl(&mhp->rhp->rdev, pages, pages 584 drivers/infiniband/hw/cxgb4/mem.c free_page((unsigned long) pages); pages 2019 drivers/infiniband/hw/hfi1/hfi.h size_t npages, bool writable, struct page **pages); pages 370 drivers/infiniband/hw/hfi1/tid_rdma.c qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES * pages 371 drivers/infiniband/hw/hfi1/tid_rdma.c sizeof(*qpriv->pages), pages 373 drivers/infiniband/hw/hfi1/tid_rdma.c if (!qpriv->pages) pages 433 drivers/infiniband/hw/hfi1/tid_rdma.c kfree(qpriv->pages); pages 434 drivers/infiniband/hw/hfi1/tid_rdma.c qpriv->pages = NULL; pages 876 drivers/infiniband/hw/hfi1/tid_rdma.c struct page **pages, pages 891 drivers/infiniband/hw/hfi1/tid_rdma.c vaddr = page_address(pages[0]); pages 894 drivers/infiniband/hw/hfi1/tid_rdma.c this_vaddr = i < npages ? page_address(pages[i]) : NULL; pages 971 drivers/infiniband/hw/hfi1/tid_rdma.c u32 *idx, u32 pages, u32 sets) pages 973 drivers/infiniband/hw/hfi1/tid_rdma.c while (pages) { pages 974 drivers/infiniband/hw/hfi1/tid_rdma.c u32 maxpages = pages; pages 983 drivers/infiniband/hw/hfi1/tid_rdma.c pages -= maxpages; pages 1014 drivers/infiniband/hw/hfi1/tid_rdma.c struct page **pages, pages 1026 drivers/infiniband/hw/hfi1/tid_rdma.c v0 = page_address(pages[i]); pages 1029 drivers/infiniband/hw/hfi1/tid_rdma.c page_address(pages[i + 1]) : NULL; pages 1081 drivers/infiniband/hw/hfi1/tid_rdma.c struct page **pages, pages 1091 drivers/infiniband/hw/hfi1/tid_rdma.c pages[i++] = virt_to_page(sge->vaddr); pages 1134 drivers/infiniband/hw/hfi1/tid_rdma.c static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages) pages 1144 drivers/infiniband/hw/hfi1/tid_rdma.c pages[pset->idx], pages 1169 drivers/infiniband/hw/hfi1/tid_rdma.c struct page **pages, pages 1179 drivers/infiniband/hw/hfi1/tid_rdma.c return dma_map_flow(flow, pages); pages 1183 drivers/infiniband/hw/hfi1/tid_rdma.c npages = kern_find_pages(flow, pages, ss, last); pages 1187 drivers/infiniband/hw/hfi1/tid_rdma.c tid_rdma_find_phys_blocks_4k(flow, pages, npages, pages 1191 drivers/infiniband/hw/hfi1/tid_rdma.c tid_rdma_find_phys_blocks_8k(flow, pages, npages, pages 1194 drivers/infiniband/hw/hfi1/tid_rdma.c return dma_map_flow(flow, pages); pages 1482 drivers/infiniband/hw/hfi1/tid_rdma.c if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) { pages 198 drivers/infiniband/hw/hfi1/user_exp_rcv.c struct page **pages; pages 204 drivers/infiniband/hw/hfi1/user_exp_rcv.c pages = &node->pages[idx]; pages 206 drivers/infiniband/hw/hfi1/user_exp_rcv.c pages = &tidbuf->pages[idx]; pages 208 drivers/infiniband/hw/hfi1/user_exp_rcv.c hfi1_release_user_pages(fd->mm, pages, npages, mapped); pages 220 drivers/infiniband/hw/hfi1/user_exp_rcv.c 
drivers/infiniband/hw/hfi1/user_exp_rcv.c struct page **pages = NULL; pages 241 drivers/infiniband/hw/hfi1/user_exp_rcv.c pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); pages 242 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!pages) pages 251 drivers/infiniband/hw/hfi1/user_exp_rcv.c kfree(pages); pages 255 drivers/infiniband/hw/hfi1/user_exp_rcv.c pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages); pages 257 drivers/infiniband/hw/hfi1/user_exp_rcv.c kfree(pages); pages 260 drivers/infiniband/hw/hfi1/user_exp_rcv.c tidbuf->pages = pages; pages 501 drivers/infiniband/hw/hfi1/user_exp_rcv.c kfree(tidbuf->pages); pages 592 drivers/infiniband/hw/hfi1/user_exp_rcv.c struct page **pages = tidbuf->pages; pages 603 drivers/infiniband/hw/hfi1/user_exp_rcv.c pfn = page_to_pfn(pages[0]); pages 605 drivers/infiniband/hw/hfi1/user_exp_rcv.c this_pfn = i < npages ? page_to_pfn(pages[i]) : 0; pages 758 drivers/infiniband/hw/hfi1/user_exp_rcv.c struct page **pages = tbuf->pages + pageidx; pages 770 drivers/infiniband/hw/hfi1/user_exp_rcv.c __va(page_to_phys(pages[0])), pages 781 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->phys = page_to_phys(pages[0]); pages 787 drivers/infiniband/hw/hfi1/user_exp_rcv.c memcpy(node->pages, pages, sizeof(struct page *) * npages); pages 62 drivers/infiniband/hw/hfi1/user_exp_rcv.h struct page **pages; pages 75 drivers/infiniband/hw/hfi1/user_exp_rcv.h struct page *pages[0]; pages 104 drivers/infiniband/hw/hfi1/user_pages.c bool writable, struct page **pages) pages 109 drivers/infiniband/hw/hfi1/user_pages.c ret = get_user_pages_fast(vaddr, npages, gup_flags, pages); pages 85 drivers/infiniband/hw/hfi1/user_sdma.c static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, pages 756 drivers/infiniband/hw/hfi1/user_sdma.c ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx], pages 973 drivers/infiniband/hw/hfi1/user_sdma.c struct page **pages; pages 976 drivers/infiniband/hw/hfi1/user_sdma.c pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); pages 977 drivers/infiniband/hw/hfi1/user_sdma.c if (!pages) pages 979 drivers/infiniband/hw/hfi1/user_sdma.c memcpy(pages, node->pages, node->npages * sizeof(*pages)); pages 992 drivers/infiniband/hw/hfi1/user_sdma.c pages + node->npages); pages 994 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pages); pages 998 drivers/infiniband/hw/hfi1/user_sdma.c unpin_vector_pages(pq->mm, pages, node->npages, pinned); pages 1001 drivers/infiniband/hw/hfi1/user_sdma.c kfree(node->pages); pages 1003 drivers/infiniband/hw/hfi1/user_sdma.c node->pages = pages; pages 1011 drivers/infiniband/hw/hfi1/user_sdma.c unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages); pages 1035 drivers/infiniband/hw/hfi1/user_sdma.c iovec->pages = node->pages; pages 1063 drivers/infiniband/hw/hfi1/user_sdma.c iovec->pages = node->pages; pages 1079 drivers/infiniband/hw/hfi1/user_sdma.c static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, pages 1082 drivers/infiniband/hw/hfi1/user_sdma.c hfi1_release_user_pages(mm, pages + start, npages, false); pages 1083 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pages); pages 148 drivers/infiniband/hw/hfi1/user_sdma.h struct page **pages; pages 158 drivers/infiniband/hw/hfi1/user_sdma.h struct page **pages; pages 162 drivers/infiniband/hw/hfi1/verbs.h struct page **pages; /* for TID page scan */ pages 1835 drivers/infiniband/hw/hns/hns_roce_hw_v1.c u64 *pages; pages 1885 drivers/infiniband/hw/hns/hns_roce_hw_v1.c pages = (u64 *) __get_free_page(GFP_KERNEL); pages 1886
drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (!pages) pages 1891 drivers/infiniband/hw/hns/hns_roce_hw_v1.c pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12; pages 1903 drivers/infiniband/hw/hns/hns_roce_hw_v1.c mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i])); pages 1907 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_32)); pages 1912 drivers/infiniband/hw/hns/hns_roce_hw_v1.c MPT_BYTE_36_PA1_L_S, (u32)(pages[i])); pages 1916 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_24)); pages 1921 drivers/infiniband/hw/hns/hns_roce_hw_v1.c MPT_BYTE_40_PA2_L_S, (u32)(pages[i])); pages 1925 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_16)); pages 1930 drivers/infiniband/hw/hns/hns_roce_hw_v1.c MPT_BYTE_44_PA3_L_S, (u32)(pages[i])); pages 1934 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_8)); pages 1937 drivers/infiniband/hw/hns/hns_roce_hw_v1.c mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i])); pages 1941 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_32)); pages 1946 drivers/infiniband/hw/hns/hns_roce_hw_v1.c MPT_BYTE_56_PA5_L_S, (u32)(pages[i])); pages 1950 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_24)); pages 1955 drivers/infiniband/hw/hns/hns_roce_hw_v1.c MPT_BYTE_60_PA6_L_S, (u32)(pages[i])); pages 1959 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (u32)(pages[i] >> PAGES_SHIFT_16)); pages 1966 drivers/infiniband/hw/hns/hns_roce_hw_v1.c free_page((unsigned long) pages); pages 2227 drivers/infiniband/hw/hns/hns_roce_hw_v2.c u64 *pages; pages 2236 drivers/infiniband/hw/hns/hns_roce_hw_v2.c pages = (u64 *)__get_free_page(GFP_KERNEL); pages 2237 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (!pages) pages 2243 drivers/infiniband/hw/hns/hns_roce_hw_v2.c pages[i] = page_addr >> 6; pages 2251 drivers/infiniband/hw/hns/hns_roce_hw_v2.c mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); pages 2253 drivers/infiniband/hw/hns/hns_roce_hw_v2.c V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0])); pages 2255 drivers/infiniband/hw/hns/hns_roce_hw_v2.c mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); pages 2257 drivers/infiniband/hw/hns/hns_roce_hw_v2.c V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); pages 2263 drivers/infiniband/hw/hns/hns_roce_hw_v2.c free_page((unsigned long)pages); pages 1029 drivers/infiniband/hw/hns/hns_roce_mr.c u64 *pages; pages 1054 drivers/infiniband/hw/hns/hns_roce_mr.c pages = (u64 *) __get_free_pages(GFP_KERNEL, order); pages 1055 drivers/infiniband/hw/hns/hns_roce_mr.c if (!pages) pages 1070 drivers/infiniband/hw/hns/hns_roce_mr.c pages[i++] = page_addr; pages 1074 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages); pages 1083 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages); pages 1086 drivers/infiniband/hw/hns/hns_roce_mr.c free_pages((unsigned long) pages, order); pages 155 drivers/infiniband/hw/i40iw/i40iw_pble.c chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT; pages 266 drivers/infiniband/hw/i40iw/i40iw_pble.c status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages); pages 278 drivers/infiniband/hw/i40iw/i40iw_pble.c (info->pages << PBLE_512_SHIFT)); pages 285 drivers/infiniband/hw/i40iw/i40iw_pble.c for (i = 0; i < info->pages; i++) { pages 335 drivers/infiniband/hw/i40iw/i40iw_pble.c u32 pages; pages 350 drivers/infiniband/hw/i40iw/i40iw_pble.c pages = (idx->rel_pd_idx) ? 
(I40IW_HMC_PD_CNT_IN_SD - pages 352 drivers/infiniband/hw/i40iw/i40iw_pble.c pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT); pages 355 drivers/infiniband/hw/i40iw/i40iw_pble.c info.pages = pages; pages 359 drivers/infiniband/hw/i40iw/i40iw_pble.c (pages == I40IW_HMC_PD_CNT_IN_SD) && pages 366 drivers/infiniband/hw/i40iw/i40iw_pble.c pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr); pages 87 drivers/infiniband/hw/i40iw/i40iw_pble.h u32 pages; pages 135 drivers/infiniband/hw/mlx4/mlx4_ib.h __be64 *pages; pages 97 drivers/infiniband/hw/mlx4/mr.c u64 cur_start_addr, u64 *pages, pages 127 drivers/infiniband/hw/mlx4/mr.c pages[*npages] = cur_start_addr + (mtt_size * k); pages 135 drivers/infiniband/hw/mlx4/mr.c *npages, pages); pages 185 drivers/infiniband/hw/mlx4/mr.c u64 *pages; pages 196 drivers/infiniband/hw/mlx4/mr.c pages = (u64 *) __get_free_page(GFP_KERNEL); pages 197 drivers/infiniband/hw/mlx4/mr.c if (!pages) pages 217 drivers/infiniband/hw/mlx4/mr.c pages, &start_index, pages 234 drivers/infiniband/hw/mlx4/mr.c cur_start_addr, pages, pages 241 drivers/infiniband/hw/mlx4/mr.c err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages); pages 244 drivers/infiniband/hw/mlx4/mr.c free_page((unsigned long) pages); pages 565 drivers/infiniband/hw/mlx4/mr.c mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); pages 566 drivers/infiniband/hw/mlx4/mr.c if (!mr->pages) pages 569 drivers/infiniband/hw/mlx4/mr.c mr->page_map = dma_map_single(device->dev.parent, mr->pages, pages 580 drivers/infiniband/hw/mlx4/mr.c free_page((unsigned long)mr->pages); pages 587 drivers/infiniband/hw/mlx4/mr.c if (mr->pages) { pages 592 drivers/infiniband/hw/mlx4/mr.c free_page((unsigned long)mr->pages); pages 593 drivers/infiniband/hw/mlx4/mr.c mr->pages = NULL; pages 801 drivers/infiniband/hw/mlx4/mr.c mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); pages 662 drivers/infiniband/hw/mthca/mthca_cmd.c __be64 *pages; pages 673 drivers/infiniband/hw/mthca/mthca_cmd.c pages = mailbox->buf; pages 694 drivers/infiniband/hw/mthca/mthca_cmd.c pages[nent * 2] = cpu_to_be64(virt); pages 698 drivers/infiniband/hw/mthca/mthca_cmd.c pages[nent * 2 + 1] = pages 449 drivers/infiniband/hw/mthca/mthca_memfree.c struct page *pages[1]; pages 476 drivers/infiniband/hw/mthca/mthca_memfree.c FOLL_WRITE | FOLL_LONGTERM, pages); pages 480 drivers/infiniband/hw/mthca/mthca_memfree.c sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE, pages 485 drivers/infiniband/hw/mthca/mthca_memfree.c put_user_page(pages[0]); pages 863 drivers/infiniband/hw/mthca/mthca_provider.c u64 *pages; pages 899 drivers/infiniband/hw/mthca/mthca_provider.c pages = (u64 *) __get_free_page(GFP_KERNEL); pages 900 drivers/infiniband/hw/mthca/mthca_provider.c if (!pages) { pages 907 drivers/infiniband/hw/mthca/mthca_provider.c write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); pages 910 drivers/infiniband/hw/mthca/mthca_provider.c pages[i++] = sg_page_iter_dma_address(&sg_iter); pages 917 drivers/infiniband/hw/mthca/mthca_provider.c err = mthca_write_mtt(dev, mr->mtt, n, pages, i); pages 926 drivers/infiniband/hw/mthca/mthca_provider.c err = mthca_write_mtt(dev, mr->mtt, n, pages, i); pages 928 drivers/infiniband/hw/mthca/mthca_provider.c free_page((unsigned long) pages); pages 197 drivers/infiniband/hw/ocrdma/ocrdma.h u64 *pages; pages 924 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c kfree(mr->pages); pages 2064 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c fbo = 
mr->ibmr.iova - mr->pages[0]; pages 2075 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c u64 buf_addr = mr->pages[i]; pages 2929 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); pages 2930 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (!mr->pages) { pages 2958 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c kfree(mr->pages); pages 2971 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c mr->pages[mr->npages++] = addr; pages 490 drivers/infiniband/hw/qedr/qedr.h u64 *pages; pages 367 drivers/infiniband/hw/qib/qib_init.c struct page **pages; pages 370 drivers/infiniband/hw/qib/qib_init.c pages = vzalloc(array_size(sizeof(struct page *), pages 372 drivers/infiniband/hw/qib/qib_init.c if (!pages) pages 380 drivers/infiniband/hw/qib/qib_init.c dd->pageshadow = pages; pages 385 drivers/infiniband/hw/qib/qib_init.c vfree(pages); pages 663 drivers/infiniband/hw/qib/qib_user_sdma.c struct page *pages[8]; pages 673 drivers/infiniband/hw/qib/qib_user_sdma.c ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages); pages 688 drivers/infiniband/hw/qib/qib_user_sdma.c pages[i], 1, fofs, flen, NULL); pages 709 drivers/infiniband/hw/qib/qib_user_sdma.c put_user_page(pages[i++]); pages 83 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h void **pages; pages 145 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h u64 *pages; pages 322 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE); pages 171 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ring_state = cq->pdir.pages[0]; pages 934 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->async_ring_state = dev->async_pdir.pages[0]; pages 943 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->cq_ring_state = dev->cq_pdir.pages[0]; pages 84 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c pdir->pages = kcalloc(npages, sizeof(*pdir->pages), pages 86 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (!pdir->pages) pages 92 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev, pages 96 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (!pdir->pages[i]) pages 124 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (pdir->pages) { pages 127 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { pages 131 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c pdir->pages[i], page_dma); pages 134 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c kfree(pdir->pages); pages 224 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c mr->pages = kzalloc(size, GFP_KERNEL); pages 225 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c if (!mr->pages) { pages 264 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c kfree(mr->pages); pages 295 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c kfree(mr->pages); pages 308 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c mr->pages[mr->npages++] = addr; pages 335 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.ring = qp->pdir.pages[0]; pages 606 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages, pages 522 drivers/infiniband/ulp/iser/iscsi_iser.h u64 *pages; pages 159 drivers/infiniband/ulp/iser/iser_memory.c iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); pages 221 drivers/infiniband/ulp/iser/iser_memory.c page_vec->pages[page_vec->npages++] = addr; pages 250 drivers/infiniband/ulp/iser/iser_memory.c fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages, pages 251 drivers/infiniband/ulp/iser/iser_memory.c page_vec->npages, page_vec->pages[0]); pages 181 
drivers/infiniband/ulp/iser/iser_verbs.c page_vec->pages = (u64 *)(page_vec + 1); pages 1491 drivers/infiniband/ulp/srp/ib_srp.c fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, pages 1626 drivers/infiniband/ulp/srp/ib_srp.c state->pages[state->npages++] = dma_addr & dev->mr_page_mask; pages 1650 drivers/infiniband/ulp/srp/ib_srp.c state->pages = req->map_page; pages 1750 drivers/infiniband/ulp/srp/ib_srp.c state.pages = idb_pages; pages 1751 drivers/infiniband/ulp/srp/ib_srp.c state.pages[0] = (req->indirect_dma_addr & pages 342 drivers/infiniband/ulp/srp/ib_srp.h u64 *pages; pages 898 drivers/iommu/amd_iommu.c u64 pages; pages 901 drivers/iommu/amd_iommu.c pages = iommu_num_pages(address, size, PAGE_SIZE); pages 904 drivers/iommu/amd_iommu.c if (pages > 1) { pages 929 drivers/iommu/amd_iommu.c u64 pages; pages 932 drivers/iommu/amd_iommu.c pages = iommu_num_pages(address, size, PAGE_SIZE); pages 935 drivers/iommu/amd_iommu.c if (pages > 1) { pages 1764 drivers/iommu/amd_iommu.c unsigned int pages, u64 dma_mask) pages 1768 drivers/iommu/amd_iommu.c pages = __roundup_pow_of_two(pages); pages 1771 drivers/iommu/amd_iommu.c pfn = alloc_iova_fast(&dma_dom->iovad, pages, pages 1775 drivers/iommu/amd_iommu.c pfn = alloc_iova_fast(&dma_dom->iovad, pages, pages 1783 drivers/iommu/amd_iommu.c unsigned int pages) pages 1785 drivers/iommu/amd_iommu.c pages = __roundup_pow_of_two(pages); pages 1788 drivers/iommu/amd_iommu.c free_iova_fast(&dma_dom->iovad, address, pages); pages 2420 drivers/iommu/amd_iommu.c unsigned int pages; pages 2424 drivers/iommu/amd_iommu.c pages = iommu_num_pages(paddr, size, PAGE_SIZE); pages 2427 drivers/iommu/amd_iommu.c address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask); pages 2434 drivers/iommu/amd_iommu.c for (i = 0; i < pages; ++i) { pages 2462 drivers/iommu/amd_iommu.c dma_ops_free_iova(dma_dom, address, pages); pages 2477 drivers/iommu/amd_iommu.c unsigned int pages; pages 2479 drivers/iommu/amd_iommu.c pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); pages 2483 drivers/iommu/amd_iommu.c for (i = 0; i < pages; ++i) { pages 2495 drivers/iommu/amd_iommu.c dma_ops_free_iova(dma_dom, dma_addr, pages); pages 2497 drivers/iommu/amd_iommu.c pages = __roundup_pow_of_two(pages); pages 2498 drivers/iommu/amd_iommu.c queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0); pages 2604 drivers/iommu/amd_iommu.c int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); pages 2606 drivers/iommu/amd_iommu.c for (j = 0; j < pages; ++j) { pages 2642 drivers/iommu/amd_iommu.c int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); pages 2644 drivers/iommu/amd_iommu.c for (j = 0; j < pages; ++j) { pages 485 drivers/iommu/dma-iommu.c static void __iommu_dma_free_pages(struct page **pages, int count) pages 488 drivers/iommu/dma-iommu.c __free_page(pages[count]); pages 489 drivers/iommu/dma-iommu.c kvfree(pages); pages 495 drivers/iommu/dma-iommu.c struct page **pages; pages 502 drivers/iommu/dma-iommu.c pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL); pages 503 drivers/iommu/dma-iommu.c if (!pages) pages 540 drivers/iommu/dma-iommu.c __iommu_dma_free_pages(pages, i); pages 545 drivers/iommu/dma-iommu.c pages[i++] = page++; pages 547 drivers/iommu/dma-iommu.c return pages; pages 574 drivers/iommu/dma-iommu.c struct page **pages; pages 592 drivers/iommu/dma-iommu.c pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT, pages 594 drivers/iommu/dma-iommu.c if (!pages) pages 602 drivers/iommu/dma-iommu.c if 
(sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) pages 617 drivers/iommu/dma-iommu.c vaddr = dma_common_pages_remap(pages, size, prot, pages 633 drivers/iommu/dma-iommu.c __iommu_dma_free_pages(pages, count); pages 646 drivers/iommu/dma-iommu.c static int __iommu_dma_mmap(struct page **pages, size_t size, pages 649 drivers/iommu/dma-iommu.c return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT); pages 926 drivers/iommu/dma-iommu.c struct page *page = NULL, **pages = NULL; pages 938 drivers/iommu/dma-iommu.c pages = dma_common_find_pages(cpu_addr); pages 939 drivers/iommu/dma-iommu.c if (!pages) pages 947 drivers/iommu/dma-iommu.c if (pages) pages 948 drivers/iommu/dma-iommu.c __iommu_dma_free_pages(pages, count); pages 1045 drivers/iommu/dma-iommu.c struct page **pages = dma_common_find_pages(cpu_addr); pages 1047 drivers/iommu/dma-iommu.c if (pages) pages 1048 drivers/iommu/dma-iommu.c return __iommu_dma_mmap(pages, size, vma); pages 1067 drivers/iommu/dma-iommu.c struct page **pages = dma_common_find_pages(cpu_addr); pages 1069 drivers/iommu/dma-iommu.c if (pages) { pages 1070 drivers/iommu/dma-iommu.c return sg_alloc_table_from_pages(sgt, pages, pages 1488 drivers/iommu/intel-iommu.c unsigned long pfn, unsigned int pages, pages 1491 drivers/iommu/intel-iommu.c unsigned int mask = ilog2(__roundup_pow_of_two(pages)); pages 1495 drivers/iommu/intel-iommu.c BUG_ON(pages == 0); pages 1523 drivers/iommu/intel-iommu.c unsigned long pfn, unsigned int pages) pages 1527 drivers/iommu/intel-iommu.c iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1); pages 2199 drivers/iommu/intel-iommu.c unsigned long pages) pages 2213 drivers/iommu/intel-iommu.c pages >>= VTD_STRIDE_SHIFT; pages 2214 drivers/iommu/intel-iommu.c if (!pages) pages 127 drivers/iommu/intel-pasid.c struct page *pages; pages 155 drivers/iommu/intel-pasid.c pages = alloc_pages_node(info->iommu->node, pages 157 drivers/iommu/intel-pasid.c if (!pages) { pages 162 drivers/iommu/intel-pasid.c pasid_table->table = page_address(pages); pages 43 drivers/iommu/intel-svm.c struct page *pages; pages 46 drivers/iommu/intel-svm.c pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER); pages 47 drivers/iommu/intel-svm.c if (!pages) { pages 52 drivers/iommu/intel-svm.c iommu->prq = page_address(pages); pages 103 drivers/iommu/intel-svm.c unsigned long address, unsigned long pages, int ih) pages 107 drivers/iommu/intel-svm.c if (pages == -1) { pages 114 drivers/iommu/intel-svm.c int mask = ilog2(__roundup_pow_of_two(pages)); pages 133 drivers/iommu/intel-svm.c if (pages == -1) { pages 136 drivers/iommu/intel-svm.c } else if (pages > 1) { pages 140 drivers/iommu/intel-svm.c unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); pages 155 drivers/iommu/intel-svm.c unsigned long pages, int ih) pages 161 drivers/iommu/intel-svm.c intel_flush_svm_range_dev(svm, sdev, address, pages, ih); pages 534 drivers/iommu/intel_irq_remapping.c struct page *pages; pages 543 drivers/iommu/intel_irq_remapping.c pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, pages 545 drivers/iommu/intel_irq_remapping.c if (!pages) { pages 576 drivers/iommu/intel_irq_remapping.c ir_table->base = page_address(pages); pages 620 drivers/iommu/intel_irq_remapping.c __free_pages(pages, INTR_REMAP_PAGE_ORDER); pages 176 drivers/iommu/io-pgtable-arm-v7s.c static dma_addr_t __arm_v7s_dma_addr(void *pages) pages 178 drivers/iommu/io-pgtable-arm-v7s.c return (dma_addr_t)virt_to_phys(pages); pages 225 
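Aside: the intel-iommu and intel-svm entries above both derive an IOTLB invalidation mask as ilog2(__roundup_pow_of_two(pages)); the flush then covers 2^mask pages, so a 5-page range is flushed as 8 pages. A hedged one-function sketch (demo_flush_mask is a made-up name; the real code also aligns the start address to the same power of two):

#include <linux/log2.h>

static unsigned int demo_flush_mask(unsigned long pages)
{
	/* pages = 5 -> rounded up to 8 -> mask = 3 (flush 1 << 3 pages) */
	return ilog2(__roundup_pow_of_two(pages));
}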
drivers/iommu/io-pgtable-arm.c static dma_addr_t __arm_lpae_dma_addr(void *pages) pages 227 drivers/iommu/io-pgtable-arm.c return (dma_addr_t)virt_to_phys(pages); pages 237 drivers/iommu/io-pgtable-arm.c void *pages; pages 245 drivers/iommu/io-pgtable-arm.c pages = page_address(p); pages 247 drivers/iommu/io-pgtable-arm.c dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); pages 255 drivers/iommu/io-pgtable-arm.c if (dma != virt_to_phys(pages)) pages 259 drivers/iommu/io-pgtable-arm.c return pages; pages 269 drivers/iommu/io-pgtable-arm.c static void __arm_lpae_free_pages(void *pages, size_t size, pages 273 drivers/iommu/io-pgtable-arm.c dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), pages 275 drivers/iommu/io-pgtable-arm.c free_pages((unsigned long)pages, get_order(size)); pages 496 drivers/iommu/iova.c fq->entries[idx].pages); pages 550 drivers/iommu/iova.c unsigned long pfn, unsigned long pages, pages 574 drivers/iommu/iova.c fq->entries[idx].pages = pages; pages 31 drivers/lightnvm/pblk-rb.c list_for_each_entry_safe(p, t, &rb->pages, list) { pages 32 drivers/lightnvm/pblk-rb.c free_pages((unsigned long)page_address(p->pages), p->order); pages 102 drivers/lightnvm/pblk-rb.c INIT_LIST_HEAD(&rb->pages); pages 128 drivers/lightnvm/pblk-rb.c page_set->pages = alloc_pages(GFP_KERNEL, order); pages 129 drivers/lightnvm/pblk-rb.c if (!page_set->pages) { pages 136 drivers/lightnvm/pblk-rb.c kaddr = page_address(page_set->pages); pages 152 drivers/lightnvm/pblk-rb.c list_add_tail(&page_set->list, &rb->pages); pages 158 drivers/lightnvm/pblk.h struct page *pages; pages 198 drivers/lightnvm/pblk.h struct list_head pages; /* List of data pages */ pages 2079 drivers/md/dm-crypt.c unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; pages 2084 drivers/md/dm-crypt.c pages /= dm_crypt_clients_n; pages 2085 drivers/md/dm-crypt.c if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT) pages 2086 drivers/md/dm-crypt.c pages = DM_CRYPT_MIN_PAGES_PER_CLIENT; pages 2087 drivers/md/dm-crypt.c dm_crypt_pages_per_client = pages; pages 58 drivers/md/dm-kcopyd.c struct page_list *pages; pages 251 drivers/md/dm-kcopyd.c pl->next = kc->pages; pages 252 drivers/md/dm-kcopyd.c kc->pages = pl; pages 261 drivers/md/dm-kcopyd.c unsigned int nr, struct page_list **pages) pages 265 drivers/md/dm-kcopyd.c *pages = NULL; pages 271 drivers/md/dm-kcopyd.c pl = kc->pages; pages 274 drivers/md/dm-kcopyd.c kc->pages = pl->next; pages 277 drivers/md/dm-kcopyd.c pl->next = *pages; pages 278 drivers/md/dm-kcopyd.c *pages = pl; pages 284 drivers/md/dm-kcopyd.c if (*pages) pages 285 drivers/md/dm-kcopyd.c kcopyd_put_pages(kc, *pages); pages 331 drivers/md/dm-kcopyd.c drop_pages(kc->pages); pages 332 drivers/md/dm-kcopyd.c kc->pages = NULL; pages 364 drivers/md/dm-kcopyd.c struct page_list *pages; pages 495 drivers/md/dm-kcopyd.c if (job->pages && job->pages != &zero_page_list) pages 496 drivers/md/dm-kcopyd.c kcopyd_put_pages(kc, job->pages); pages 557 drivers/md/dm-kcopyd.c .mem.ptr.pl = job->pages, pages 589 drivers/md/dm-kcopyd.c r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); pages 682 drivers/md/dm-kcopyd.c else if (job->pages == &zero_page_list) pages 822 drivers/md/dm-kcopyd.c job->pages = NULL; pages 827 drivers/md/dm-kcopyd.c job->pages = &zero_page_list; pages 940 drivers/md/dm-kcopyd.c kc->pages = NULL; pages 226 drivers/md/dm-writecache.c struct page **pages; pages 261 drivers/md/dm-writecache.c pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL); pages 262 
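Aside: the io-pgtable-arm entries above show how memory for the IOMMU's own page tables is obtained — allocate zeroed pages, DMA-map them for the table walker, and bail out if the DMA address differs from the physical address, since the walker itself sits in front of any translation. A sketch under those assumptions (demo_alloc_pgtable is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>

static void *demo_alloc_pgtable(struct device *dev, size_t size, gfp_t gfp)
{
	struct page *p = alloc_pages(gfp | __GFP_ZERO, get_order(size));
	void *pages;
	dma_addr_t dma;

	if (!p)
		return NULL;
	pages = page_address(p);
	dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto out_free;
	if (dma != virt_to_phys(pages))	/* walker must see dma == phys */
		goto out_unmap;
	return pages;

out_unmap:
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, get_order(size));
	return NULL;
}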
drivers/md/dm-writecache.c if (!pages) { pages 280 drivers/md/dm-writecache.c pages[i++] = pfn_t_to_page(pfn); pages 284 drivers/md/dm-writecache.c wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); pages 289 drivers/md/dm-writecache.c kvfree(pages); pages 300 drivers/md/dm-writecache.c kvfree(pages); pages 57 drivers/md/md-bitmap.c if (page >= bitmap->pages) { pages 837 drivers/md/md-bitmap.c int pages; pages 842 drivers/md/md-bitmap.c pages = store->file_pages; pages 845 drivers/md/md-bitmap.c while (pages--) pages 846 drivers/md/md-bitmap.c if (map[pages] != sb_page) /* 0 is sb_page, release it below */ pages 847 drivers/md/md-bitmap.c free_buffers(map[pages]); pages 1737 drivers/md/md-bitmap.c unsigned long k, pages; pages 1758 drivers/md/md-bitmap.c pages = bitmap->counts.pages; pages 1763 drivers/md/md-bitmap.c for (k = 0; k < pages; k++) pages 1887 drivers/md/md-bitmap.c bitmap->counts.pages, bmname(bitmap)); pages 2039 drivers/md/md-bitmap.c counts->pages - counts->missing_pages, pages 2040 drivers/md/md-bitmap.c counts->pages, pages 2041 drivers/md/md-bitmap.c (counts->pages - counts->missing_pages) pages 2073 drivers/md/md-bitmap.c long pages; pages 2123 drivers/md/md-bitmap.c pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); pages 2125 drivers/md/md-bitmap.c new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL); pages 2148 drivers/md/md-bitmap.c bitmap->counts.pages = pages; pages 2149 drivers/md/md-bitmap.c bitmap->counts.missing_pages = pages; pages 2161 drivers/md/md-bitmap.c for (page = 0; page < pages; page++) { pages 2174 drivers/md/md-bitmap.c bitmap->counts.pages = old_counts.pages; pages 2175 drivers/md/md-bitmap.c bitmap->counts.missing_pages = old_counts.pages; pages 2219 drivers/md/md-bitmap.c for (k = 0; k < old_counts.pages; k++) pages 185 drivers/md/md-bitmap.h unsigned long pages; /* total number of pages pages 1124 drivers/md/md-cluster.c unsigned long my_pages = bitmap->counts.pages; pages 1159 drivers/md/md-cluster.c counts->pages = my_pages; pages 1162 drivers/md/md-cluster.c if (my_pages != counts->pages) pages 34 drivers/md/raid1-10.c struct page *pages[RESYNC_PAGES]; pages 48 drivers/md/raid1-10.c rp->pages[i] = alloc_page(gfp_flags); pages 49 drivers/md/raid1-10.c if (!rp->pages[i]) pages 57 drivers/md/raid1-10.c put_page(rp->pages[i]); pages 66 drivers/md/raid1-10.c put_page(rp->pages[i]); pages 74 drivers/md/raid1-10.c get_page(rp->pages[i]); pages 82 drivers/md/raid1-10.c return rp->pages[idx]; pages 1988 drivers/md/raid1.c struct page **pages = get_resync_pages(bio)->pages; pages 2022 drivers/md/raid1.c pages[idx], pages 2076 drivers/md/raid1.c pages[idx], pages 2091 drivers/md/raid1.c pages[idx], pages 2154 drivers/md/raid1.c struct page **ppages = get_resync_pages(pbio)->pages; pages 2155 drivers/md/raid1.c struct page **spages = get_resync_pages(sbio)->pages; pages 2030 drivers/md/raid10.c fpages = get_resync_pages(fbio)->pages; pages 2046 drivers/md/raid10.c tpages = get_resync_pages(tbio)->pages; pages 2158 drivers/md/raid10.c struct page **pages = get_resync_pages(bio)->pages; pages 2174 drivers/md/raid10.c pages[idx], pages 2182 drivers/md/raid10.c pages[idx], pages 4444 drivers/md/raid10.c struct page **pages; pages 4634 drivers/md/raid10.c pages = get_resync_pages(r10_bio->devs[0].bio)->pages; pages 4636 drivers/md/raid10.c struct page *page = pages[s / (PAGE_SIZE >> 9)]; pages 4773 drivers/md/raid10.c struct page **pages; pages 4782 drivers/md/raid10.c pages = get_resync_pages(r10_bio->devs[0].bio)->pages; pages 4811 drivers/md/raid10.c 
pages[idx], pages 2890 drivers/md/raid5-cache.c int pages = 0; pages 2906 drivers/md/raid5-cache.c pages++; pages 2908 drivers/md/raid5-cache.c WARN_ON(pages == 0); pages 2919 drivers/md/raid5-cache.c reserve = (1 + pages) << (PAGE_SHIFT - 9); pages 2930 drivers/md/raid5-cache.c ret = r5l_log_stripe(log, sh, pages, 0); pages 166 drivers/media/common/saa7146/saa7146_core.c int pages = (length+PAGE_SIZE-1)/PAGE_SIZE; pages 173 drivers/media/common/saa7146/saa7146_core.c if (!(pt->slist = vmalloc_to_sg(mem, pages))) pages 179 drivers/media/common/saa7146/saa7146_core.c pt->nents = pages; pages 427 drivers/media/common/videobuf2/videobuf2-dma-contig.c struct page **pages; pages 436 drivers/media/common/videobuf2/videobuf2-dma-contig.c pages = frame_vector_pages(buf->vec); pages 438 drivers/media/common/videobuf2/videobuf2-dma-contig.c BUG_ON(IS_ERR(pages)); pages 442 drivers/media/common/videobuf2/videobuf2-dma-contig.c set_page_dirty_lock(pages[i]); pages 37 drivers/media/common/videobuf2/videobuf2-dma-sg.c struct page **pages; pages 65 drivers/media/common/videobuf2/videobuf2-dma-sg.c struct page *pages; pages 74 drivers/media/common/videobuf2/videobuf2-dma-sg.c pages = NULL; pages 75 drivers/media/common/videobuf2/videobuf2-dma-sg.c while (!pages) { pages 76 drivers/media/common/videobuf2/videobuf2-dma-sg.c pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | pages 78 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (pages) pages 83 drivers/media/common/videobuf2/videobuf2-dma-sg.c __free_page(buf->pages[last_page]); pages 89 drivers/media/common/videobuf2/videobuf2-dma-sg.c split_page(pages, order); pages 91 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->pages[last_page++] = &pages[i]; pages 123 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *), pages 125 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (!buf->pages) pages 132 drivers/media/common/videobuf2/videobuf2-dma-sg.c ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, pages 166 drivers/media/common/videobuf2/videobuf2-dma-sg.c __free_page(buf->pages[num_pages]); pages 168 drivers/media/common/videobuf2/videobuf2-dma-sg.c kvfree(buf->pages); pages 189 drivers/media/common/videobuf2/videobuf2-dma-sg.c __free_page(buf->pages[i]); pages 190 drivers/media/common/videobuf2/videobuf2-dma-sg.c kvfree(buf->pages); pages 247 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->pages = frame_vector_pages(vec); pages 248 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (IS_ERR(buf->pages)) pages 252 drivers/media/common/videobuf2/videobuf2-dma-sg.c if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, pages 297 drivers/media/common/videobuf2/videobuf2-dma-sg.c set_page_dirty_lock(buf->pages[i]); pages 312 drivers/media/common/videobuf2/videobuf2-dma-sg.c buf->vaddr = vm_map_ram(buf->pages, pages 337 drivers/media/common/videobuf2/videobuf2-dma-sg.c err = vm_map_pages(vma, buf->pages, buf->num_pages); pages 132 drivers/media/common/videobuf2/videobuf2-vmalloc.c struct page **pages; pages 137 drivers/media/common/videobuf2/videobuf2-vmalloc.c pages = frame_vector_pages(buf->vec); pages 143 drivers/media/common/videobuf2/videobuf2-vmalloc.c set_page_dirty_lock(pages[i]); pages 82 drivers/media/firewire/firedtv-fw.c char *pages[N_PAGES]; pages 114 drivers/media/firewire/firedtv-fw.c p = ctx->pages[i / PACKETS_PER_PAGE] pages 159 drivers/media/firewire/firedtv-fw.c ctx->pages[i] = page_address(ctx->buffer.pages[i]); pages 846 
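Aside: the videobuf2-dma-sg allocation entries above try high-order allocations first and fall back to smaller orders, then split_page() each chunk so the page array holds individually refcounted pages. A sketch of that loop, assuming the caller sized the array and frees pages[0..filled) on error (demo_fill_pages is a made-up helper):

#include <linux/gfp.h>
#include <linux/log2.h>
#include <linux/mm.h>

static int demo_fill_pages(struct page **pages, int num_pages)
{
	int filled = 0;

	while (num_pages > 0) {
		/* cap the order so we never allocate past what is needed */
		int order = min_t(int, 3, ilog2(num_pages));
		struct page *p = NULL;
		int i;

		while (!p && order >= 0) {
			p = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					(order ? __GFP_NORETRY : 0), order);
			if (!p)
				order--;	/* retry with a smaller chunk */
		}
		if (!p)
			return -ENOMEM;
		split_page(p, order);	/* each page gets its own refcount */
		for (i = 0; i < (1 << order); i++)
			pages[filled++] = p + i;
		num_pages -= 1 << order;
	}
	return 0;
}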
drivers/media/pci/intel/ipu3/ipu3-cio2.c unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE); pages 847 drivers/media/pci/intel/ipu3/ipu3-cio2.c unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page); pages 877 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (!pages--) pages 119 drivers/media/platform/rockchip/rga/rga-buf.c unsigned int *pages; pages 124 drivers/media/platform/rockchip/rga/rga-buf.c pages = rga->src_mmu_pages; pages 126 drivers/media/platform/rockchip/rga/rga-buf.c pages = rga->dst_mmu_pages; pages 139 drivers/media/platform/rockchip/rga/rga-buf.c pages[mapped_size + p] = phys; pages 146 drivers/media/platform/rockchip/rga/rga-buf.c dma_sync_single_for_device(rga->dev, virt_to_phys(pages), pages 92 drivers/media/v4l2-core/videobuf-dma-sg.c static struct scatterlist *videobuf_pages_to_sg(struct page **pages, pages 98 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == pages[0]) pages 105 drivers/media/v4l2-core/videobuf-dma-sg.c if (PageHighMem(pages[0])) pages 108 drivers/media/v4l2-core/videobuf-dma-sg.c sg_set_page(&sglist[0], pages[0], pages 112 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == pages[i]) pages 114 drivers/media/v4l2-core/videobuf-dma-sg.c if (PageHighMem(pages[i])) pages 116 drivers/media/v4l2-core/videobuf-dma-sg.c sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0); pages 175 drivers/media/v4l2-core/videobuf-dma-sg.c dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *), pages 177 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == dma->pages) pages 187 drivers/media/v4l2-core/videobuf-dma-sg.c flags | FOLL_LONGTERM, dma->pages, NULL); pages 290 drivers/media/v4l2-core/videobuf-dma-sg.c if (dma->pages) { pages 291 drivers/media/v4l2-core/videobuf-dma-sg.c dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, pages 351 drivers/media/v4l2-core/videobuf-dma-sg.c if (dma->pages) { pages 354 drivers/media/v4l2-core/videobuf-dma-sg.c set_page_dirty_lock(dma->pages[i]); pages 355 drivers/media/v4l2-core/videobuf-dma-sg.c put_page(dma->pages[i]); pages 357 drivers/media/v4l2-core/videobuf-dma-sg.c kfree(dma->pages); pages 358 drivers/media/v4l2-core/videobuf-dma-sg.c dma->pages = NULL; pages 506 drivers/media/v4l2-core/videobuf-dma-sg.c int err, pages; pages 523 drivers/media/v4l2-core/videobuf-dma-sg.c pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT; pages 526 drivers/media/v4l2-core/videobuf-dma-sg.c pages); pages 559 drivers/media/v4l2-core/videobuf-dma-sg.c pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT; pages 561 drivers/media/v4l2-core/videobuf-dma-sg.c bus, pages); pages 159 drivers/media/v4l2-core/videobuf-vmalloc.c int pages; pages 176 drivers/media/v4l2-core/videobuf-vmalloc.c pages = PAGE_ALIGN(vb->size); pages 189 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr = vmalloc_user(pages); pages 191 drivers/media/v4l2-core/videobuf-vmalloc.c printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); pages 195 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr, pages); pages 215 drivers/media/v4l2-core/videobuf-vmalloc.c int retval, pages; pages 233 drivers/media/v4l2-core/videobuf-vmalloc.c pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); pages 234 drivers/media/v4l2-core/videobuf-vmalloc.c mem->vaddr = vmalloc_user(pages); pages 236 drivers/media/v4l2-core/videobuf-vmalloc.c printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); pages 239 drivers/media/v4l2-core/videobuf-vmalloc.c dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); pages 742 drivers/misc/fastrpc.c struct 
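Aside: the videobuf-dma-sg entries above convert a pinned page array into a scatterlist one PAGE_SIZE entry at a time, with the first entry carrying the sub-page offset of the user buffer. A hedged sketch of that conversion (demo_pages_to_sg is a made-up name):

#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

static struct scatterlist *demo_pages_to_sg(struct page **pages,
					    int nr_pages, int offset,
					    size_t size)
{
	struct scatterlist *sglist;
	size_t len;
	int i;

	sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
	if (!sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);
	/* first entry carries the sub-page offset of the user buffer */
	len = min_t(size_t, PAGE_SIZE - offset, size);
	sg_set_page(&sglist[0], pages[0], len, offset);
	size -= len;
	for (i = 1; i < nr_pages; i++) {
		len = min_t(size_t, PAGE_SIZE, size);
		sg_set_page(&sglist[i], pages[i], len, 0);
		size -= len;
	}
	return sglist;
}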
fastrpc_phy_page *pages; pages 765 drivers/misc/fastrpc.c pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) + pages 789 drivers/misc/fastrpc.c pages[i].addr = ctx->maps[i]->phys; pages 793 drivers/misc/fastrpc.c pages[i].addr += ctx->args[i].ptr - pages 799 drivers/misc/fastrpc.c pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; pages 814 drivers/misc/fastrpc.c pages[i].addr = ctx->buf->phys - pages 817 drivers/misc/fastrpc.c pages[i].addr = pages[i].addr & PAGE_MASK; pages 821 drivers/misc/fastrpc.c pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; pages 847 drivers/misc/fastrpc.c pages[i].addr = ctx->maps[i]->phys; pages 848 drivers/misc/fastrpc.c pages[i].size = ctx->maps[i]->size; pages 974 drivers/misc/fastrpc.c struct fastrpc_phy_page pages[1]; pages 1037 drivers/misc/fastrpc.c pages[0].addr = imem->phys; pages 1038 drivers/misc/fastrpc.c pages[0].size = imem->size; pages 1040 drivers/misc/fastrpc.c args[3].ptr = (u64)(uintptr_t) pages; pages 1041 drivers/misc/fastrpc.c args[3].length = 1 * sizeof(*pages); pages 288 drivers/misc/habanalabs/debugfs.c phys_pg_pack->pages[j]); pages 840 drivers/misc/habanalabs/habanalabs.h u64 *pages; pages 96 drivers/misc/habanalabs/memory.c phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); pages 97 drivers/misc/habanalabs/memory.c if (!phys_pg_pack->pages) { pages 104 drivers/misc/habanalabs/memory.c phys_pg_pack->pages[i] = paddr + i * page_size; pages 107 drivers/misc/habanalabs/memory.c phys_pg_pack->pages[i] = (u64) gen_pool_alloc( pages 110 drivers/misc/habanalabs/memory.c if (!phys_pg_pack->pages[i]) { pages 148 drivers/misc/habanalabs/memory.c gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], pages 151 drivers/misc/habanalabs/memory.c kvfree(phys_pg_pack->pages); pages 274 drivers/misc/habanalabs/memory.c gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], pages 283 drivers/misc/habanalabs/memory.c phys_pg_pack->pages[i], pages 291 drivers/misc/habanalabs/memory.c kvfree(phys_pg_pack->pages); pages 690 drivers/misc/habanalabs/memory.c phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), pages 692 drivers/misc/habanalabs/memory.c if (!phys_pg_pack->pages) { pages 713 drivers/misc/habanalabs/memory.c phys_pg_pack->pages[j++] = dma_addr; pages 754 drivers/misc/habanalabs/memory.c paddr = phys_pg_pack->pages[i]; pages 778 drivers/misc/habanalabs/memory.c phys_pg_pack->pages[i], page_size); pages 803 drivers/misc/habanalabs/memory.c *paddr = phys_pg_pack->pages[0]; pages 1331 drivers/misc/habanalabs/memory.c struct page **pages; pages 1341 drivers/misc/habanalabs/memory.c pages = frame_vector_pages(userptr->vec); pages 1342 drivers/misc/habanalabs/memory.c if (!IS_ERR(pages)) { pages 1346 drivers/misc/habanalabs/memory.c set_page_dirty_lock(pages[i]); pages 62 drivers/misc/mic/scif/scif_debugfs.c j, pin->pages[j], pages 63 drivers/misc/mic/scif/scif_debugfs.c page_address(pin->pages[j])); pages 653 drivers/misc/mic/scif/scif_dma.c struct page **pages = window->pinned_pages->pages; pages 655 drivers/misc/mic/scif/scif_dma.c va = page_address(pages[page_nr]) + page_off; pages 199 drivers/misc/mic/scif/scif_epd.h int map_flags, scif_pinned_pages_t *pages); pages 180 drivers/misc/mic/scif/scif_fence.c struct page **pages = window->pinned_pages->pages; pages 184 drivers/misc/mic/scif/scif_fence.c return page_address(pages[page_nr]) + page_off; pages 213 drivers/misc/mic/scif/scif_mmap.c struct scif_range **pages) pages 252 drivers/misc/mic/scif/scif_mmap.c *pages = kzalloc(sizeof(**pages), 
GFP_KERNEL); pages 253 drivers/misc/mic/scif/scif_mmap.c if (!*pages) { pages 259 drivers/misc/mic/scif/scif_mmap.c (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t)); pages 260 drivers/misc/mic/scif/scif_mmap.c if (!((*pages)->phys_addr)) { pages 267 drivers/misc/mic/scif/scif_mmap.c ((*pages)->va = scif_zalloc(nr_pages * sizeof(void *))); pages 268 drivers/misc/mic/scif/scif_mmap.c if (!(*pages)->va) { pages 274 drivers/misc/mic/scif/scif_mmap.c (*pages)->cookie = window; pages 275 drivers/misc/mic/scif/scif_mmap.c (*pages)->nr_pages = nr_pages; pages 276 drivers/misc/mic/scif/scif_mmap.c (*pages)->prot_flags = window->prot; pages 279 drivers/misc/mic/scif/scif_mmap.c (*pages)->phys_addr[i] = pages 282 drivers/misc/mic/scif/scif_mmap.c (*pages)->phys_addr[i] = scif_get_phys((*pages)->phys_addr[i], pages 285 drivers/misc/mic/scif/scif_mmap.c (*pages)->va[i] = pages 287 drivers/misc/mic/scif/scif_mmap.c (*pages)->phys_addr[i] - pages 295 drivers/misc/mic/scif/scif_mmap.c if (*pages) { pages 296 drivers/misc/mic/scif/scif_mmap.c scif_free((*pages)->phys_addr, pages 298 drivers/misc/mic/scif/scif_mmap.c scif_free((*pages)->va, pages 300 drivers/misc/mic/scif/scif_mmap.c kfree(*pages); pages 301 drivers/misc/mic/scif/scif_mmap.c *pages = NULL; pages 310 drivers/misc/mic/scif/scif_mmap.c int scif_put_pages(struct scif_range *pages) pages 316 drivers/misc/mic/scif/scif_mmap.c if (!pages || !pages->cookie) pages 319 drivers/misc/mic/scif/scif_mmap.c window = pages->cookie; pages 336 drivers/misc/mic/scif/scif_mmap.c scif_put_window(window, pages->nr_pages); pages 356 drivers/misc/mic/scif/scif_mmap.c scif_free(pages->phys_addr, pages->nr_pages * sizeof(dma_addr_t)); pages 357 drivers/misc/mic/scif/scif_mmap.c scif_free(pages->va, pages->nr_pages * sizeof(void *)); pages 358 drivers/misc/mic/scif/scif_mmap.c kfree(pages); pages 90 drivers/misc/mic/scif/scif_rma.c pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages)); pages 91 drivers/misc/mic/scif/scif_rma.c if (!pin->pages) pages 117 drivers/misc/mic/scif/scif_rma.c if (pin->pages[j] && !kernel) { pages 119 drivers/misc/mic/scif/scif_rma.c SetPageDirty(pin->pages[j]); pages 120 drivers/misc/mic/scif/scif_rma.c put_page(pin->pages[j]); pages 124 drivers/misc/mic/scif/scif_rma.c scif_free(pin->pages, pages 125 drivers/misc/mic/scif/scif_rma.c pin->nr_pages * sizeof(*pin->pages)); pages 540 drivers/misc/mic/scif/scif_rma.c sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0); pages 588 drivers/misc/mic/scif/scif_rma.c phys_prev = page_to_phys(pin->pages[i]); pages 593 drivers/misc/mic/scif/scif_rma.c phys_curr = page_to_phys(pin->pages[k]); pages 608 drivers/misc/mic/scif/scif_rma.c pin->pages[i])), pages 614 drivers/misc/mic/scif/scif_rma.c window->dma_addr[j] = page_to_phys(pin->pages[i]); pages 1308 drivers/misc/mic/scif/scif_rma.c int map_flags, scif_pinned_pages_t *pages) pages 1348 drivers/misc/mic/scif/scif_rma.c pinned_pages->pages[i] = pages 1351 drivers/misc/mic/scif/scif_rma.c pinned_pages->pages[i] = pages 1382 drivers/misc/mic/scif/scif_rma.c pinned_pages->pages); pages 1389 drivers/misc/mic/scif/scif_rma.c if (pinned_pages->pages[i]) pages 1391 drivers/misc/mic/scif/scif_rma.c pinned_pages->pages[i]); pages 1409 drivers/misc/mic/scif/scif_rma.c *pages = pinned_pages; pages 1418 drivers/misc/mic/scif/scif_rma.c *pages = NULL; pages 1425 drivers/misc/mic/scif/scif_rma.c int map_flags, scif_pinned_pages_t *pages) pages 1427 drivers/misc/mic/scif/scif_rma.c return __scif_pin_pages(addr, len, &prot, map_flags, pages); pages 192 
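Aside: scif here, and vmci, videobuf, habanalabs and st elsewhere in this index, release long-term-pinned user pages the same way — mark each page dirty if the device may have written to it, then drop the pin reference. A minimal sketch (demo_release_user_pages is a hypothetical helper; newer kernels pin with FOLL_PIN and release via unpin_user_pages_dirty_lock() instead):

#include <linux/mm.h>

static void demo_release_user_pages(struct page **pages, int nr, bool dirty)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);	/* DMA may have written */
		put_page(pages[i]);
	}
}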
drivers/misc/mic/scif/scif_rma.h struct page **pages; pages 277 drivers/misc/mic/vop/vop_main.c void *pages, pages 286 drivers/misc/mic/vop/vop_main.c vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); pages 86 drivers/misc/sram-exec.c int pages; pages 103 drivers/misc/sram-exec.c pages = PAGE_ALIGN(size) / PAGE_SIZE; pages 107 drivers/misc/sram-exec.c set_memory_nx((unsigned long)base, pages); pages 108 drivers/misc/sram-exec.c set_memory_rw((unsigned long)base, pages); pages 112 drivers/misc/sram-exec.c set_memory_ro((unsigned long)base, pages); pages 113 drivers/misc/sram-exec.c set_memory_x((unsigned long)base, pages); pages 241 drivers/misc/vmw_balloon.c struct list_head pages; pages 695 drivers/misc/vmw_balloon.c list_add(&page->lru, &ctl->pages); pages 873 drivers/misc/vmw_balloon.c list_for_each_entry(page, &ctl->pages, lru) pages 1001 drivers/misc/vmw_balloon.c struct list_head *pages, pages 1009 drivers/misc/vmw_balloon.c balloon_page_list_enqueue(&b->b_dev_info, pages); pages 1017 drivers/misc/vmw_balloon.c list_for_each_entry(page, pages, lru) { pages 1021 drivers/misc/vmw_balloon.c list_splice_init(pages, &b->huge_pages); pages 1044 drivers/misc/vmw_balloon.c struct list_head *pages, pages 1055 drivers/misc/vmw_balloon.c *n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages, pages 1065 drivers/misc/vmw_balloon.c list_move(&page->lru, pages); pages 1111 drivers/misc/vmw_balloon.c .pages = LIST_HEAD_INIT(ctl.pages), pages 1122 drivers/misc/vmw_balloon.c VM_BUG_ON(!list_empty(&ctl.pages)); pages 1148 drivers/misc/vmw_balloon.c vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages, pages 1200 drivers/misc/vmw_balloon.c .pages = LIST_HEAD_INIT(ctl.pages), pages 1215 drivers/misc/vmw_balloon.c VM_BUG_ON(!list_empty(&ctl.pages)); pages 1241 drivers/misc/vmw_balloon.c vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages, pages 1277 drivers/misc/vmw_balloon.c vmballoon_release_page_list(&ctl.pages, &ctl.n_pages, pages 480 drivers/misc/vmw_vmci/vmci_host.c page_store.pages = alloc_info.ppn_va; pages 626 drivers/misc/vmw_vmci/vmci_queue_pair.c static void qp_release_pages(struct page **pages, pages 633 drivers/misc/vmw_vmci/vmci_queue_pair.c set_page_dirty(pages[i]); pages 635 drivers/misc/vmw_vmci/vmci_queue_pair.c put_page(pages[i]); pages 636 drivers/misc/vmw_vmci/vmci_queue_pair.c pages[i] = NULL; pages 701 drivers/misc/vmw_vmci/vmci_queue_pair.c produce_uva = page_store->pages; pages 702 drivers/misc/vmw_vmci/vmci_queue_pair.c consume_uva = page_store->pages + pages 2239 drivers/misc/vmw_vmci/vmci_queue_pair.c page_store.pages = guest_mem; pages 106 drivers/misc/vmw_vmci/vmci_queue_pair.h u64 pages; pages 58 drivers/mtd/devices/block2mtd.c int pages = len >> PAGE_SHIFT; pages 62 drivers/mtd/devices/block2mtd.c while (pages) { pages 79 drivers/mtd/devices/block2mtd.c pages--; pages 1177 drivers/mtd/mtdswap.c unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages; pages 1210 drivers/mtd/mtdswap.c pages = d->mbd_dev->size; pages 1211 drivers/mtd/mtdswap.c for (i = 0; i < pages; i++) pages 1250 drivers/mtd/mtdswap.c seq_printf(s, "total pages: %u\n", pages); pages 1282 drivers/mtd/mtdswap.c unsigned int i, eblk_bytes, pages, blocks; pages 1290 drivers/mtd/mtdswap.c pages = d->mbd_dev->size; pages 1296 drivers/mtd/mtdswap.c d->page_data = vmalloc(array_size(pages, sizeof(int))); pages 1309 drivers/mtd/mtdswap.c for (i = 0; i < pages; i++) pages 1270 drivers/mtd/nand/raw/diskonchip.c this->bbt_td->pages[0] = doc->mh0_page + 1; pages 1276 
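Aside: the sram-exec entries just above flip page protections around a code copy so the region is never writable and executable at the same time. A sketch assuming the caller serializes patching (demo_patch_exec is a made-up name; set_memory_*() operate on whole pages):

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>

static void demo_patch_exec(void *base, const void *src, size_t size)
{
	int pages = PAGE_ALIGN(size) / PAGE_SIZE;

	set_memory_nx((unsigned long)base, pages);	/* drop exec first */
	set_memory_rw((unsigned long)base, pages);
	memcpy(base, src, size);			/* patch the code */
	set_memory_ro((unsigned long)base, pages);
	set_memory_x((unsigned long)base, pages);	/* W^X restored */
}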
drivers/mtd/nand/raw/diskonchip.c this->bbt_md->pages[0] = doc->mh1_page + 1; pages 1304 drivers/mtd/nand/raw/diskonchip.c this->bbt_td->pages[0] = 2; pages 167 drivers/mtd/nand/raw/lpc32xx_mlc.c .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 }, pages 173 drivers/mtd/nand/raw/lpc32xx_mlc.c .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 }, pages 59 drivers/mtd/nand/raw/meson_nand.c #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \ pages 66 drivers/mtd/nand/raw/meson_nand.c ((pages) & 0x3f) \ pages 272 drivers/mtd/nand/raw/meson_nand.c int len = mtd->writesize, pagesize, pages; pages 283 drivers/mtd/nand/raw/meson_nand.c pages = len / nand->ecc.size; pages 286 drivers/mtd/nand/raw/meson_nand.c NFC_CMD_SHORTMODE_DISABLE, pagesize, pages); pages 270 drivers/mtd/nand/raw/nand_bbt.c res = read_bbt(this, buf, td->pages[i], pages 278 drivers/mtd/nand/raw/nand_bbt.c res = read_bbt(this, buf, td->pages[0], pages 396 drivers/mtd/nand/raw/nand_bbt.c scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift, pages 400 drivers/mtd/nand/raw/nand_bbt.c td->pages[0], td->version[0]); pages 405 drivers/mtd/nand/raw/nand_bbt.c scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift, pages 409 drivers/mtd/nand/raw/nand_bbt.c md->pages[0], md->version[0]); pages 556 drivers/mtd/nand/raw/nand_bbt.c td->pages[i] = -1; pages 566 drivers/mtd/nand/raw/nand_bbt.c td->pages[i] = actblock << blocktopage; pages 578 drivers/mtd/nand/raw/nand_bbt.c if (td->pages[i] == -1) pages 582 drivers/mtd/nand/raw/nand_bbt.c td->pages[i], td->version[i]); pages 632 drivers/mtd/nand/raw/nand_bbt.c if (td->pages[chip] != -1) pages 633 drivers/mtd/nand/raw/nand_bbt.c return td->pages[chip] >> pages 665 drivers/mtd/nand/raw/nand_bbt.c if (!md || md->pages[chip] != page) pages 699 drivers/mtd/nand/raw/nand_bbt.c td->pages[chip] = -1; pages 881 drivers/mtd/nand/raw/nand_bbt.c td->pages[chip++] = page; pages 941 drivers/mtd/nand/raw/nand_bbt.c if (td->pages[i] == -1 && md->pages[i] == -1) { pages 944 drivers/mtd/nand/raw/nand_bbt.c } else if (td->pages[i] == -1) { pages 947 drivers/mtd/nand/raw/nand_bbt.c } else if (md->pages[i] == -1) { pages 962 drivers/mtd/nand/raw/nand_bbt.c if (td->pages[i] == -1) { pages 989 drivers/mtd/nand/raw/nand_bbt.c rd->pages[i] = -1; pages 1000 drivers/mtd/nand/raw/nand_bbt.c rd2->pages[i] = -1; pages 1116 drivers/mtd/nand/raw/nand_bbt.c if (td->pages[i] == -1) pages 1118 drivers/mtd/nand/raw/nand_bbt.c block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift); pages 305 drivers/mtd/nand/raw/nandsim.c union ns_mem *pages; pages 564 drivers/mtd/nand/raw/nandsim.c ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum)); pages 565 drivers/mtd/nand/raw/nandsim.c if (!ns->pages) { pages 570 drivers/mtd/nand/raw/nandsim.c ns->pages[i].byte = NULL; pages 602 drivers/mtd/nand/raw/nandsim.c if (ns->pages) { pages 604 drivers/mtd/nand/raw/nandsim.c if (ns->pages[i].byte) pages 606 drivers/mtd/nand/raw/nandsim.c ns->pages[i].byte); pages 609 drivers/mtd/nand/raw/nandsim.c vfree(ns->pages); pages 1366 drivers/mtd/nand/raw/nandsim.c return &(ns->pages[ns->regs.row]); pages 376 drivers/mtd/tests/torturetest.c int bytes, bits, pages, first; pages 383 drivers/mtd/tests/torturetest.c bytes = bits = pages = 0; pages 387 drivers/mtd/tests/torturetest.c pages++; pages 390 drivers/mtd/tests/torturetest.c pages, bytes, bits); pages 141 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (ring->rx_hdr_pa.pages) { pages 144 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(ring->rx_hdr_pa.pages); pages 
146 drivers/net/ethernet/amd/xgbe/xgbe-desc.c ring->rx_hdr_pa.pages = NULL; pages 152 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (ring->rx_buf_pa.pages) { pages 155 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(ring->rx_buf_pa.pages); pages 157 drivers/net/ethernet/amd/xgbe/xgbe-desc.c ring->rx_buf_pa.pages = NULL; pages 289 drivers/net/ethernet/amd/xgbe/xgbe-desc.c struct page *pages = NULL; pages 300 drivers/net/ethernet/amd/xgbe/xgbe-desc.c pages = alloc_pages_node(node, gfp, order); pages 301 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (pages) pages 308 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (!pages && (node != NUMA_NO_NODE)) { pages 313 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (!pages) pages 317 drivers/net/ethernet/amd/xgbe/xgbe-desc.c pages_dma = dma_map_page(pdata->dev, pages, 0, pages 320 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(pages); pages 324 drivers/net/ethernet/amd/xgbe/xgbe-desc.c pa->pages = pages; pages 336 drivers/net/ethernet/amd/xgbe/xgbe-desc.c get_page(pa->pages); pages 349 drivers/net/ethernet/amd/xgbe/xgbe-desc.c pa->pages = NULL; pages 362 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (!ring->rx_hdr_pa.pages) { pages 368 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (!ring->rx_buf_pa.pages) { pages 490 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (rdata->rx.hdr.pa.pages) pages 491 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(rdata->rx.hdr.pa.pages); pages 493 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (rdata->rx.hdr.pa_unmap.pages) { pages 497 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(rdata->rx.hdr.pa_unmap.pages); pages 500 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (rdata->rx.buf.pa.pages) pages 501 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(rdata->rx.buf.pa.pages); pages 503 drivers/net/ethernet/amd/xgbe/xgbe-desc.c if (rdata->rx.buf.pa_unmap.pages) { pages 507 drivers/net/ethernet/amd/xgbe/xgbe-desc.c put_page(rdata->rx.buf.pa_unmap.pages); pages 2567 drivers/net/ethernet/amd/xgbe/xgbe-drv.c packet = page_address(rdata->rx.hdr.pa.pages) + pages 2784 drivers/net/ethernet/amd/xgbe/xgbe-drv.c rdata->rx.buf.pa.pages, pages 2788 drivers/net/ethernet/amd/xgbe/xgbe-drv.c rdata->rx.buf.pa.pages = NULL; pages 386 drivers/net/ethernet/amd/xgbe/xgbe.h struct page *pages; pages 3045 drivers/net/ethernet/broadcom/bnx2.c int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT; pages 3047 drivers/net/ethernet/broadcom/bnx2.c bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); pages 3064 drivers/net/ethernet/broadcom/bnx2.c unsigned int i, frag_len, frag_size, pages; pages 3070 drivers/net/ethernet/broadcom/bnx2.c pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT; pages 3073 drivers/net/ethernet/broadcom/bnx2.c for (i = 0; i < pages; i++) { pages 3083 drivers/net/ethernet/broadcom/bnx2.c pages - i); pages 3101 drivers/net/ethernet/broadcom/bnx2.c if (i == pages - 1) pages 3114 drivers/net/ethernet/broadcom/bnx2.c pages - i); pages 3216 drivers/net/ethernet/broadcom/bnx2.c int pages; pages 3218 drivers/net/ethernet/broadcom/bnx2.c pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT; pages 3220 drivers/net/ethernet/broadcom/bnx2.c bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); pages 5407 drivers/net/ethernet/broadcom/bnx2.c int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; pages 5409 drivers/net/ethernet/broadcom/bnx2.c jumbo_size = size * pages; pages 587 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c u16 pages, pages 612 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { pages 
614 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c pages, cqe_idx); pages 622 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { pages 755 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c u16 pages, pages 801 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, pages 961 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c u16 frag_size, pages; pages 993 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c pages = (frag_size + tpa_info->full_page - 1) / pages 996 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c pages = SGE_PAGE_ALIGN(frag_size) >> pages 999 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c bnx2x_tpa_stop(bp, fp, tpa_info, pages, pages 1006 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); pages 3395 drivers/net/ethernet/broadcom/bnxt/bnxt.c int pages; pages 3397 drivers/net/ethernet/broadcom/bnxt/bnxt.c pages = ring_size / desc_per_pg; pages 3399 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (!pages) pages 3402 drivers/net/ethernet/broadcom/bnxt/bnxt.c pages++; pages 3404 drivers/net/ethernet/broadcom/bnxt/bnxt.c while (pages & (pages - 1)) pages 3405 drivers/net/ethernet/broadcom/bnxt/bnxt.c pages++; pages 3407 drivers/net/ethernet/broadcom/bnxt/bnxt.c return pages; pages 785 drivers/net/ethernet/broadcom/cnic.c int pages, int use_pg_tbl) pages 790 drivers/net/ethernet/broadcom/cnic.c size = pages * (sizeof(void *) + sizeof(dma_addr_t)); pages 795 drivers/net/ethernet/broadcom/cnic.c dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); pages 796 drivers/net/ethernet/broadcom/cnic.c dma->num_pages = pages; pages 798 drivers/net/ethernet/broadcom/cnic.c for (i = 0; i < pages; i++) { pages 809 drivers/net/ethernet/broadcom/cnic.c dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & pages 1021 drivers/net/ethernet/broadcom/cnic.c static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) pages 1028 drivers/net/ethernet/broadcom/cnic.c udev->l2_ring_size = pages * CNIC_PAGE_SIZE; pages 1049 drivers/net/ethernet/broadcom/cnic.c static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) pages 1057 drivers/net/ethernet/broadcom/cnic.c if (__cnic_alloc_uio_rings(udev, pages)) { pages 1075 drivers/net/ethernet/broadcom/cnic.c if (__cnic_alloc_uio_rings(udev, pages)) pages 1244 drivers/net/ethernet/broadcom/cnic.c int i, j, n, ret, pages; pages 1276 drivers/net/ethernet/broadcom/cnic.c pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / pages 1279 drivers/net/ethernet/broadcom/cnic.c ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); pages 1305 drivers/net/ethernet/broadcom/cnic.c pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE; pages 1306 drivers/net/ethernet/broadcom/cnic.c ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); pages 1465 drivers/net/ethernet/broadcom/cnic.c int hq_bds, pages; pages 1475 drivers/net/ethernet/broadcom/cnic.c pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; pages 1476 drivers/net/ethernet/broadcom/cnic.c hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); pages 1610 drivers/net/ethernet/broadcom/cnic.c int ret, pages; pages 1632 drivers/net/ethernet/broadcom/cnic.c pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; pages 1634 drivers/net/ethernet/broadcom/cnic.c ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); pages 1638 drivers/net/ethernet/broadcom/cnic.c pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; pages 
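Aside: the bnxt entries above size a descriptor ring in pages and then round the count up to a power of two (the quoted loop increments pages until pages & (pages - 1) clears) so hardware can index the ring with a mask. An equivalent sketch using the kernel's helper (demo_ring_pages is hypothetical):

#include <linux/log2.h>
#include <linux/types.h>

static u32 demo_ring_pages(u32 ring_size, u32 desc_per_pg)
{
	u32 pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;
	if (ring_size % desc_per_pg)
		pages++;			/* partial trailing page */
	return roundup_pow_of_two(pages);	/* mask-friendly count */
}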
1639 drivers/net/ethernet/broadcom/cnic.c ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); pages 1643 drivers/net/ethernet/broadcom/cnic.c pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; pages 1644 drivers/net/ethernet/broadcom/cnic.c ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); pages 160 drivers/net/ethernet/brocade/bna/bfi_enet.h u16 pages; /* # of pages */ pages 1297 drivers/net/ethernet/brocade/bna/bna_tx_rx.c (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \ pages 934 drivers/net/ethernet/cortina/gemini.c unsigned int pages = len >> fpp_order; pages 951 drivers/net/ethernet/cortina/gemini.c geth->freeq_pages = kcalloc(pages, sizeof(*geth->freeq_pages), pages 955 drivers/net/ethernet/cortina/gemini.c geth->num_freeq_pages = pages; pages 957 drivers/net/ethernet/cortina/gemini.c dev_info(geth->dev, "allocate %d pages for queue\n", pages); pages 958 drivers/net/ethernet/cortina/gemini.c for (pn = 0; pn < pages; pn++) pages 1007 drivers/net/ethernet/cortina/gemini.c unsigned int pages = len >> fpp_order; pages 1014 drivers/net/ethernet/cortina/gemini.c for (pn = 0; pn < pages; pn++) { pages 817 drivers/net/ethernet/emulex/benet/be_cmds.c static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, pages 824 drivers/net/ethernet/emulex/benet/be_cmds.c pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); pages 825 drivers/net/ethernet/emulex/benet/be_cmds.c pages[i].hi = cpu_to_le32(upper_32_bits(dma)); pages 1023 drivers/net/ethernet/emulex/benet/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1209 drivers/net/ethernet/emulex/benet/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1284 drivers/net/ethernet/emulex/benet/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1328 drivers/net/ethernet/emulex/benet/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1386 drivers/net/ethernet/emulex/benet/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1429 drivers/net/ethernet/emulex/benet/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 393 drivers/net/ethernet/emulex/benet/be_cmds.h struct phys_addr pages[8]; pages 496 drivers/net/ethernet/emulex/benet/be_cmds.h struct phys_addr pages[8]; pages 558 drivers/net/ethernet/emulex/benet/be_cmds.h struct phys_addr pages[8]; pages 567 drivers/net/ethernet/emulex/benet/be_cmds.h struct phys_addr pages[8]; pages 592 drivers/net/ethernet/emulex/benet/be_cmds.h struct phys_addr pages[8]; pages 609 drivers/net/ethernet/emulex/benet/be_cmds.h struct phys_addr pages[2]; pages 50 drivers/net/ethernet/google/gve/gve.h struct page **pages; /* list of num_entries pages */ pages 532 drivers/net/ethernet/google/gve/gve_main.c int pages) pages 538 drivers/net/ethernet/google/gve/gve_main.c if (pages + priv->num_registered_pages > priv->max_registered_pages) { pages 541 drivers/net/ethernet/google/gve/gve_main.c pages + priv->num_registered_pages, pages 548 drivers/net/ethernet/google/gve/gve_main.c qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); pages 550 drivers/net/ethernet/google/gve/gve_main.c if (!qpl->pages) pages 552 drivers/net/ethernet/google/gve/gve_main.c qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses), pages 558 drivers/net/ethernet/google/gve/gve_main.c for (i = 0; i < pages; i++) { pages 559 drivers/net/ethernet/google/gve/gve_main.c err = gve_alloc_page(&priv->pdev->dev, 
&qpl->pages[i], pages 567 drivers/net/ethernet/google/gve/gve_main.c priv->num_registered_pages += pages; pages 587 drivers/net/ethernet/google/gve/gve_main.c if (!qpl->pages) pages 593 drivers/net/ethernet/google/gve/gve_main.c gve_free_page(&priv->pdev->dev, qpl->pages[i], pages 598 drivers/net/ethernet/google/gve/gve_main.c kvfree(qpl->pages); pages 77 drivers/net/ethernet/google/gve/gve_rx.c struct page *page = rx->data.qpl->pages[i]; pages 31 drivers/net/ethernet/google/gve/gve_tx.c fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP, pages 3029 drivers/net/ethernet/intel/e1000e/netdev.c u32 pages = 0; pages 3126 drivers/net/ethernet/intel/e1000e/netdev.c pages = PAGE_USE_COUNT(adapter->netdev->mtu); pages 3127 drivers/net/ethernet/intel/e1000e/netdev.c if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) pages 3128 drivers/net/ethernet/intel/e1000e/netdev.c adapter->rx_ps_pages = pages; pages 33 drivers/net/ethernet/intel/i40e/i40e_xsk.c umem->pages[i].dma = dma; pages 40 drivers/net/ethernet/intel/i40e/i40e_xsk.c dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, pages 42 drivers/net/ethernet/intel/i40e/i40e_xsk.c umem->pages[i].dma = 0; pages 62 drivers/net/ethernet/intel/i40e/i40e_xsk.c dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, pages 65 drivers/net/ethernet/intel/i40e/i40e_xsk.c umem->pages[i].dma = 0; pages 36 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c umem->pages[i].dma = dma; pages 43 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, pages 45 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c umem->pages[i].dma = 0; pages 58 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, pages 61 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c umem->pages[i].dma = 0; pages 1520 drivers/net/ethernet/mellanox/mlx4/fw.c __be64 *pages; pages 1530 drivers/net/ethernet/mellanox/mlx4/fw.c pages = mailbox->buf; pages 1552 drivers/net/ethernet/mellanox/mlx4/fw.c pages[nent * 2] = cpu_to_be64(virt); pages 1556 drivers/net/ethernet/mellanox/mlx4/fw.c pages[nent * 2 + 1] = pages 21 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c umem->pages[i].dma = dma; pages 28 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE, pages 30 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c umem->pages[i].dma = 0; pages 43 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE, pages 45 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c umem->pages[i].dma = 0; pages 143 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages); pages 591 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) pages 594 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c int prev_pages = *pages; pages 603 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c while (*pages) { pages 605 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages); pages 608 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c if (*pages < prev_pages) { pages 610 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c prev_pages = *pages; pages 1086 drivers/net/ethernet/sfc/siena_sriov.c struct list_head pages; pages 1094 drivers/net/ethernet/sfc/siena_sriov.c INIT_LIST_HEAD(&pages); pages 1095 
drivers/net/ethernet/sfc/siena_sriov.c list_splice_tail_init(&nic_data->local_page_list, &pages); pages 1123 drivers/net/ethernet/sfc/siena_sriov.c if (list_empty(&pages)) { pages 1136 drivers/net/ethernet/sfc/siena_sriov.c &pages, struct efx_endpoint_page, link); pages 1149 drivers/net/ethernet/sfc/siena_sriov.c while (!list_empty(&pages)) { pages 1151 drivers/net/ethernet/sfc/siena_sriov.c &pages, struct efx_endpoint_page, link); pages 41 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (desc_data->rx.hdr.pa.pages) pages 42 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(desc_data->rx.hdr.pa.pages); pages 44 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (desc_data->rx.hdr.pa_unmap.pages) { pages 48 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(desc_data->rx.hdr.pa_unmap.pages); pages 51 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (desc_data->rx.buf.pa.pages) pages 52 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(desc_data->rx.buf.pa.pages); pages 54 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (desc_data->rx.buf.pa_unmap.pages) { pages 58 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(desc_data->rx.buf.pa_unmap.pages); pages 93 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (ring->rx_hdr_pa.pages) { pages 96 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(ring->rx_hdr_pa.pages); pages 98 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c ring->rx_hdr_pa.pages = NULL; pages 104 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (ring->rx_buf_pa.pages) { pages 107 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(ring->rx_buf_pa.pages); pages 109 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c ring->rx_buf_pa.pages = NULL; pages 334 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c struct page *pages = NULL; pages 340 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c pages = alloc_pages(gfp, order); pages 341 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (pages) pages 346 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (!pages) pages 350 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c pages_dma = dma_map_page(pdata->dev, pages, 0, pages 353 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c put_page(pages); pages 357 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c pa->pages = pages; pages 369 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c get_page(pa->pages); pages 382 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c pa->pages = NULL; pages 395 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (!ring->rx_hdr_pa.pages) { pages 402 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c if (!ring->rx_buf_pa.pages) { pages 1008 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c packet = page_address(desc_data->rx.hdr.pa.pages) + pages 1025 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c desc_data->rx.buf.pa.pages, pages 1028 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c desc_data->rx.buf.pa.pages = NULL; pages 1206 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c desc_data->rx.buf.pa.pages, pages 1210 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c desc_data->rx.buf.pa.pages = NULL; pages 222 drivers/net/ethernet/synopsys/dwc-xlgmac.h struct page *pages; pages 447 drivers/net/hyperv/netvsc_drv.c int pages = 0; pages 456 drivers/net/hyperv/netvsc_drv.c pages += PFN_UP(offset + size); pages 458 drivers/net/hyperv/netvsc_drv.c return pages; pages 145 drivers/net/virtio_net.c struct page *pages; pages 297 drivers/net/virtio_net.c end->private = (unsigned long)rq->pages; pages 298 drivers/net/virtio_net.c rq->pages = page; pages 303 
drivers/net/virtio_net.c struct page *p = rq->pages; pages 306 drivers/net/virtio_net.c rq->pages = (struct page *)p->private; pages 2669 drivers/net/virtio_net.c while (vi->rq[i].pages) pages 2851 drivers/net/virtio_net.c vi->rq[i].pages = NULL; pages 798 drivers/net/wireless/intel/iwlwifi/fw/dbg.c struct page *pages = pages 810 drivers/net/wireless/intel/iwlwifi/fw/dbg.c memcpy(paging->data, page_address(pages), pages 1987 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c int pages, remainder, i, ret; pages 2024 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c pages = priv->fw_size / RTL_FW_PAGE_SIZE; pages 2029 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c for (i = 0; i < pages; i++) { pages 138 drivers/pci/controller/pcie-rcar.c unsigned long pages; pages 938 drivers/pci/controller/pcie-rcar.c msi->pages = __get_free_pages(GFP_KERNEL, 0); pages 939 drivers/pci/controller/pcie-rcar.c if (!msi->pages) { pages 943 drivers/pci/controller/pcie-rcar.c base = virt_to_phys((void *)msi->pages); pages 968 drivers/pci/controller/pcie-rcar.c free_pages(msi->pages, 0); pages 55 drivers/pci/endpoint/pci-epc-mem.c int pages; pages 62 drivers/pci/endpoint/pci-epc-mem.c pages = size >> page_shift; pages 63 drivers/pci/endpoint/pci-epc-mem.c bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); pages 80 drivers/pci/endpoint/pci-epc-mem.c mem->pages = pages; pages 135 drivers/pci/endpoint/pci-epc-mem.c pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); pages 817 drivers/perf/arm_spe_pmu.c static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, pages 849 drivers/perf/arm_spe_pmu.c pglist[i] = virt_to_page(pages[i]); pages 164 drivers/platform/goldfish/goldfish_pipe.c struct page *pages[MAX_BUFFERS_PER_COMMAND]; pages 264 drivers/platform/goldfish/goldfish_pipe.c struct page *pages[MAX_BUFFERS_PER_COMMAND], pages 279 drivers/platform/goldfish/goldfish_pipe.c pages); pages 288 drivers/platform/goldfish/goldfish_pipe.c static void release_user_pages(struct page **pages, int pages_count, pages 295 drivers/platform/goldfish/goldfish_pipe.c set_page_dirty(pages[i]); pages 296 drivers/platform/goldfish/goldfish_pipe.c put_page(pages[i]); pages 301 drivers/platform/goldfish/goldfish_pipe.c static void populate_rw_params(struct page **pages, pages 315 drivers/platform/goldfish/goldfish_pipe.c unsigned long xaddr = page_to_phys(pages[0]); pages 325 drivers/platform/goldfish/goldfish_pipe.c xaddr = page_to_phys(pages[i]); pages 359 drivers/platform/goldfish/goldfish_pipe.c pipe->pages, &iter_last_page_size); pages 365 drivers/platform/goldfish/goldfish_pipe.c populate_rw_params(pipe->pages, pages_count, address, address_end, pages 375 drivers/platform/goldfish/goldfish_pipe.c release_user_pages(pipe->pages, pages_count, is_write, *consumed_size); pages 259 drivers/s390/block/xpram.c static int __init xpram_setup_sizes(unsigned long pages) pages 315 drivers/s390/block/xpram.c if (mem_needed > pages * 4) { pages 327 drivers/s390/block/xpram.c mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; pages 68 drivers/s390/char/sclp.c int pages, rc; pages 70 drivers/s390/char/sclp.c rc = kstrtoint(str, 0, &pages); pages 71 drivers/s390/char/sclp.c if (!rc && pages >= SCLP_CONSOLE_PAGES) pages 72 drivers/s390/char/sclp.c sclp_console_pages = pages; pages 716 drivers/s390/char/tty3270.c int pages; pages 732 drivers/s390/char/tty3270.c for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) { pages 733 drivers/s390/char/tty3270.c tp->freemem_pages[pages] = (void *) pages 735 
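Aside: the virtio_net entries above keep the receive queue's spare pages on an intrusive free list threaded through page->private rather than in a separate structure — give_pages() pushes a chain, get_a_page() pops one. A minimal push/pop sketch (demo_* names are hypothetical):

#include <linux/mm_types.h>

static void demo_give_page(struct page **list, struct page *page)
{
	page->private = (unsigned long)*list;	/* link to old head */
	*list = page;
}

static struct page *demo_get_page(struct page **list)
{
	struct page *p = *list;

	if (p)
		*list = (struct page *)p->private;	/* pop the head */
	return p;
}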
drivers/s390/char/tty3270.c if (!tp->freemem_pages[pages]) pages 738 drivers/s390/char/tty3270.c tp->freemem_pages[pages], PAGE_SIZE); pages 772 drivers/s390/char/tty3270.c while (pages--) pages 773 drivers/s390/char/tty3270.c free_pages((unsigned long) tp->freemem_pages[pages], 0); pages 788 drivers/s390/char/tty3270.c int pages; pages 794 drivers/s390/char/tty3270.c for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) pages 795 drivers/s390/char/tty3270.c free_pages((unsigned long) tp->freemem_pages[pages], 0); pages 48 drivers/s390/net/ctcm_dbug.c ctcm_dbf[x].pages, pages 57 drivers/s390/net/ctcm_dbug.h int pages; pages 59 drivers/s390/net/qeth_core.h int pages; pages 5436 drivers/s390/net/qeth_core_main.c qeth_dbf[x].pages, pages 196 drivers/sbus/char/oradax.c struct page *pages[DAX_CA_ELEMS][NUM_STREAM_TYPES]; pages 409 drivers/sbus/char/oradax.c struct page *p = ctx->pages[i][j]; pages 416 drivers/sbus/char/oradax.c ctx->pages[i][j] = NULL; pages 455 drivers/sbus/char/oradax.c &ctx->pages[i + idx][OUT]) != 0) { pages 465 drivers/sbus/char/oradax.c &ctx->pages[i + idx][PRI]) != 0) { pages 475 drivers/sbus/char/oradax.c &ctx->pages[i + idx][SEC]) != 0) { pages 485 drivers/sbus/char/oradax.c &ctx->pages[i + idx][TBL]) != 0) { pages 226 drivers/scsi/aacraid/aachba.c int pages, int nseg, int nseg_new); pages 4120 drivers/scsi/aacraid/aachba.c static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new) pages 4134 drivers/scsi/aacraid/aachba.c for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) { pages 4135 drivers/scsi/aacraid/aachba.c addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE; pages 4140 drivers/scsi/aacraid/aachba.c sge[pos].length = pages * PAGE_SIZE; pages 4151 drivers/scsi/aacraid/aachba.c rio2->sgeNominalSize = pages * PAGE_SIZE; pages 212 drivers/scsi/aic94xx/aic94xx_seq.c int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE; pages 216 drivers/scsi/aic94xx/aic94xx_seq.c for (page = 0; page < pages; page++) { pages 276 drivers/scsi/aic94xx/aic94xx_seq.c const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT; pages 304 drivers/scsi/aic94xx/aic94xx_seq.c for (page = 0; page < pages; page++) { pages 694 drivers/scsi/be2iscsi/be_cmds.c static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, pages 702 drivers/scsi/be2iscsi/be_cmds.c pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); pages 703 drivers/scsi/be2iscsi/be_cmds.c pages[i].hi = cpu_to_le32(upper_32_bits(dma)); pages 764 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 826 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 887 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1034 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1102 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1139 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); pages 1190 drivers/scsi/be2iscsi/be_cmds.c pages); pages 1193 drivers/scsi/be2iscsi/be_cmds.c be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem); pages 318 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[8]; pages 618 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[4]; pages 653 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[8]; 
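Aside: the benet and be2iscsi be_cmds entries around this point hand queue memory to firmware as arrays of {lo, hi} little-endian page addresses, one per page of the queue. A sketch of that address split, assuming a DMA-coherent queue described by its bus address and byte size (demo_phys_addr and demo_prepare_pages are made-up names):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/minmax.h>

struct demo_phys_addr {
	__le32 lo;
	__le32 hi;
};

static void demo_prepare_pages(struct demo_phys_addr *pages, u32 max_pages,
			       dma_addr_t dma, u32 size)
{
	u32 i, num = min_t(u32, PAGE_ALIGN(size) / PAGE_SIZE, max_pages);

	for (i = 0; i < num; i++, dma += PAGE_SIZE) {
		pages[i].lo = cpu_to_le32(lower_32_bits(dma));
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
	}
}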
pages 912 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[8]; pages 933 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[16]; pages 947 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[26]; pages 956 drivers/scsi/be2iscsi/be_cmds.h struct phys_addr pages[8]; pages 954 drivers/scsi/be2iscsi/be_main.h u32 pages; /* queue size in pages */ pages 4158 drivers/scsi/dc395x.c int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE; pages 4167 drivers/scsi/dc395x.c dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); pages 4168 drivers/scsi/dc395x.c while (pages--) { pages 3600 drivers/scsi/hpsa.c int pages; pages 3613 drivers/scsi/hpsa.c pages = buf[3]; pages 3614 drivers/scsi/hpsa.c if ((pages + HPSA_VPD_HEADER_SZ) <= 255) pages 3615 drivers/scsi/hpsa.c bufsize = pages + HPSA_VPD_HEADER_SZ; pages 3626 drivers/scsi/hpsa.c pages = buf[3]; pages 3627 drivers/scsi/hpsa.c for (i = 1; i <= pages; i++) pages 3014 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c int pages; pages 3018 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c pages = 1; pages 3020 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c vscsi->cmd_q.size = pages; pages 3027 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1; pages 110 drivers/scsi/sg.c struct page **pages; pages 1246 drivers/scsi/sg.c struct page *page = nth_page(rsv_schp->pages[k], pages 1789 drivers/scsi/sg.c md->pages = req_schp->pages; pages 1864 drivers/scsi/sg.c schp->pages = kzalloc(sg_bufflen, gfp_flags); pages 1865 drivers/scsi/sg.c if (!schp->pages) pages 1917 drivers/scsi/sg.c schp->pages[k] = alloc_pages(gfp_mask, order); pages 1918 drivers/scsi/sg.c if (!schp->pages[k]) pages 1945 drivers/scsi/sg.c __free_pages(schp->pages[i], order); pages 1958 drivers/scsi/sg.c if (schp->pages && schp->sglist_len > 0) { pages 1962 drivers/scsi/sg.c for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { pages 1966 drivers/scsi/sg.c k, schp->pages[k])); pages 1967 drivers/scsi/sg.c __free_pages(schp->pages[k], schp->page_order); pages 1970 drivers/scsi/sg.c kfree(schp->pages); pages 1989 drivers/scsi/sg.c for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { pages 1991 drivers/scsi/sg.c if (__copy_to_user(outp, page_address(schp->pages[k]), pages 1996 drivers/scsi/sg.c if (__copy_to_user(outp, page_address(schp->pages[k]), pages 2044 drivers/scsi/sg.c req_schp->pages = rsv_schp->pages; pages 2068 drivers/scsi/sg.c req_schp->pages = NULL; pages 630 drivers/scsi/st.c mdata->pages = STp->buffer->mapped_pages; pages 635 drivers/scsi/st.c mdata->pages = STp->buffer->reserved_pages; pages 4905 drivers/scsi/st.c struct page **pages; pages 4920 drivers/scsi/st.c pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL); pages 4921 drivers/scsi/st.c if (pages == NULL) pages 4927 drivers/scsi/st.c pages); pages 4937 drivers/scsi/st.c flush_dcache_page(pages[i]); pages 4941 drivers/scsi/st.c STbp->mapped_pages = pages; pages 4947 drivers/scsi/st.c put_page(pages[j]); pages 4950 drivers/scsi/st.c kfree(pages); pages 929 drivers/spi/spi-pl022.c unsigned int pages; pages 1032 drivers/spi/spi-pl022.c pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE); pages 1033 drivers/spi/spi-pl022.c dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); pages 1035 drivers/spi/spi-pl022.c ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC); pages 1039 drivers/spi/spi-pl022.c ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC); pages 33 drivers/staging/android/ion/ion_cma_heap.c struct page *pages; pages 42 
drivers/staging/android/ion/ion_cma_heap.c pages = cma_alloc(cma_heap->cma, nr_pages, align, false); pages 43 drivers/staging/android/ion/ion_cma_heap.c if (!pages) pages 46 drivers/staging/android/ion/ion_cma_heap.c if (PageHighMem(pages)) { pages 48 drivers/staging/android/ion/ion_cma_heap.c struct page *page = pages; pages 59 drivers/staging/android/ion/ion_cma_heap.c memset(page_address(pages), 0, size); pages 70 drivers/staging/android/ion/ion_cma_heap.c sg_set_page(table->sgl, pages, size, 0); pages 72 drivers/staging/android/ion/ion_cma_heap.c buffer->priv_virt = pages; pages 79 drivers/staging/android/ion/ion_cma_heap.c cma_release(cma_heap->cma, pages, nr_pages); pages 86 drivers/staging/android/ion/ion_cma_heap.c struct page *pages = buffer->priv_virt; pages 90 drivers/staging/android/ion/ion_cma_heap.c cma_release(cma_heap->cma, pages, nr_pages); pages 29 drivers/staging/android/ion/ion_heap.c struct page **pages = vmalloc(array_size(npages, pages 31 drivers/staging/android/ion/ion_heap.c struct page **tmp = pages; pages 33 drivers/staging/android/ion/ion_heap.c if (!pages) pages 49 drivers/staging/android/ion/ion_heap.c vaddr = vmap(pages, npages, VM_MAP, pgprot); pages 50 drivers/staging/android/ion/ion_heap.c vfree(pages); pages 100 drivers/staging/android/ion/ion_heap.c static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) pages 102 drivers/staging/android/ion/ion_heap.c void *addr = vm_map_ram(pages, num, -1, pgprot); pages 118 drivers/staging/android/ion/ion_heap.c struct page *pages[32]; pages 121 drivers/staging/android/ion/ion_heap.c pages[p++] = sg_page_iter_page(&piter); pages 122 drivers/staging/android/ion/ion_heap.c if (p == ARRAY_SIZE(pages)) { pages 123 drivers/staging/android/ion/ion_heap.c ret = ion_heap_clear_pages(pages, p, pgprot); pages 130 drivers/staging/android/ion/ion_heap.c ret = ion_heap_clear_pages(pages, p, pgprot); pages 107 drivers/staging/android/ion/ion_system_heap.c struct list_head pages; pages 116 drivers/staging/android/ion/ion_system_heap.c INIT_LIST_HEAD(&pages); pages 122 drivers/staging/android/ion/ion_system_heap.c list_add_tail(&page->lru, &pages); pages 135 drivers/staging/android/ion/ion_system_heap.c list_for_each_entry_safe(page, tmp_page, &pages, lru) { pages 147 drivers/staging/android/ion/ion_system_heap.c list_for_each_entry_safe(page, tmp_page, &pages, lru) pages 334 drivers/staging/android/ion/ion_system_heap.c unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; pages 337 drivers/staging/android/ion/ion_system_heap.c for (i = 0; i < pages; i++) pages 145 drivers/staging/comedi/comedi_buf.c struct page **pages = NULL; pages 173 drivers/staging/comedi/comedi_buf.c pages = vmalloc(sizeof(struct page *) * n_pages); pages 174 drivers/staging/comedi/comedi_buf.c if (!pages) pages 179 drivers/staging/comedi/comedi_buf.c pages[i] = virt_to_page(buf->virt_addr); pages 183 drivers/staging/comedi/comedi_buf.c async->prealloc_buf = vmap(pages, n_pages, VM_MAP, pages 186 drivers/staging/comedi/comedi_buf.c vfree(pages); pages 3169 drivers/staging/exfat/exfat_super.c struct list_head *pages, unsigned int nr_pages) pages 3171 drivers/staging/exfat/exfat_super.c return mpage_readpages(mapping, pages, nr_pages, exfat_get_block); pages 20 drivers/staging/media/ipu3/ipu3-dmamap.c static void imgu_dmamap_free_buffer(struct page **pages, pages 26 drivers/staging/media/ipu3/ipu3-dmamap.c __free_page(pages[count]); pages 27 drivers/staging/media/ipu3/ipu3-dmamap.c kvfree(pages); pages 36 
drivers/staging/media/ipu3/ipu3-dmamap.c struct page **pages; pages 42 drivers/staging/media/ipu3/ipu3-dmamap.c pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL); pages 44 drivers/staging/media/ipu3/ipu3-dmamap.c if (!pages) pages 72 drivers/staging/media/ipu3/ipu3-dmamap.c imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT); pages 77 drivers/staging/media/ipu3/ipu3-dmamap.c pages[i++] = page++; pages 80 drivers/staging/media/ipu3/ipu3-dmamap.c return pages; pages 99 drivers/staging/media/ipu3/ipu3-dmamap.c struct page **pages; pages 111 drivers/staging/media/ipu3/ipu3-dmamap.c pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL); pages 112 drivers/staging/media/ipu3/ipu3-dmamap.c if (!pages) pages 119 drivers/staging/media/ipu3/ipu3-dmamap.c page_to_phys(pages[i]), PAGE_SIZE); pages 131 drivers/staging/media/ipu3/ipu3-dmamap.c map->vma->pages = pages; pages 133 drivers/staging/media/ipu3/ipu3-dmamap.c if (map_vm_area(map->vma, PAGE_KERNEL, pages)) pages 149 drivers/staging/media/ipu3/ipu3-dmamap.c imgu_dmamap_free_buffer(pages, size); pages 190 drivers/staging/media/ipu3/ipu3-dmamap.c if (WARN_ON(!area) || WARN_ON(!area->pages)) pages 193 drivers/staging/media/ipu3/ipu3-dmamap.c imgu_dmamap_free_buffer(area->pages, map->size);
pages 43 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c struct page **pages; pages 340 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c put_page(pagelistinfo->pages[i]); pages 360 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c struct page **pages; pages 377 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c (sizeof(u32) + sizeof(pages[0]) + pages 383 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c (num_pages * sizeof(pages[0]) + pages 399 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c pages = (struct page **)(addrs + num_pages); pages 400 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c scatterlist = (struct scatterlist *)(pages + num_pages); pages 416 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c pagelistinfo->pages = pages; pages 437 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c pages[actual_pages] = pg; pages 447 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c pages); pages 457 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c put_page(pages[actual_pages]); pages 477 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c sg_set_page(scatterlist + i, pages[i], len, offset); pages 548 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c struct page **pages = pagelistinfo->pages; pages 578 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c memcpy((char *)kmap(pages[0]) + pages 582 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c kunmap(pages[0]); pages 586 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c memcpy((char *)kmap(pages[num_pages - 1]) + pages 591 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c kunmap(pages[num_pages - 1]); pages 607 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c set_page_dirty(pages[i]);
pages 2467 drivers/target/target_core_transport.c struct page **pages; pages 2483 drivers/target/target_core_transport.c pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); pages 2484 drivers/target/target_core_transport.c if (!pages) pages 2489 drivers/target/target_core_transport.c pages[i] = sg_page(sg); pages 2492 drivers/target/target_core_transport.c cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); pages 2493 drivers/target/target_core_transport.c kfree(pages); pages 456 drivers/tee/optee/call.c void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, pages 486 drivers/tee/optee/call.c optee_page = page_to_phys(*pages) + pages 503 drivers/tee/optee/call.c pages++; pages 504 drivers/tee/optee/call.c optee_page = page_to_phys(*pages); pages 515 drivers/tee/optee/call.c int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE); pages 517 drivers/tee/optee/call.c return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE; pages 573 drivers/tee/optee/call.c struct page **pages, size_t num_pages, pages 599 drivers/tee/optee/call.c optee_fill_pages_list(pages_list, pages, num_pages, pages 648 drivers/tee/optee/call.c struct page **pages, size_t num_pages, pages 157 drivers/tee/optee/optee_private.h struct page **pages, size_t num_pages, pages 162 drivers/tee/optee/optee_private.h struct page **pages, size_t num_pages, pages 173 drivers/tee/optee/optee_private.h void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, pages 243 drivers/tee/optee/rpc.c struct page **pages; pages 247 drivers/tee/optee/rpc.c pages = tee_shm_get_pages(shm, &page_num); pages 248 drivers/tee/optee/rpc.c if (!pages || !page_num) { pages 274 drivers/tee/optee/rpc.c optee_fill_pages_list(pages_list, pages, page_num, pages 32 drivers/tee/optee/shm_pool.c struct page **pages; pages 34 drivers/tee/optee/shm_pool.c pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); pages 35 drivers/tee/optee/shm_pool.c if (!pages) pages 39 drivers/tee/optee/shm_pool.c pages[i] = page; pages 44 drivers/tee/optee/shm_pool.c rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, pages 46 drivers/tee/optee/shm_pool.c kfree(pages); pages 42 drivers/tee/tee_shm.c put_page(shm->pages[n]); pages 44 drivers/tee/tee_shm.c kfree(shm->pages); pages 262 drivers/tee/tee_shm.c shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); pages 263 drivers/tee/tee_shm.c if (!shm->pages) { pages 268 drivers/tee/tee_shm.c rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages); pages 287 drivers/tee/tee_shm.c rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, pages 324 drivers/tee/tee_shm.c if (shm->pages) { pages 326 drivers/tee/tee_shm.c put_page(shm->pages[n]); pages 327 drivers/tee/tee_shm.c kfree(shm->pages);
pages 415 drivers/usb/chipidea/udc.c int pages = TD_PAGE_COUNT; pages 434 drivers/usb/chipidea/udc.c pages--; pages 444 drivers/usb/chipidea/udc.c (unsigned)(pages * CI_HDRC_PAGE_SIZE)); pages 766 drivers/usb/gadget/function/f_fs.c struct page **pages; pages 776 drivers/usb/gadget/function/f_fs.c pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); pages 777 drivers/usb/gadget/function/f_fs.c if (!pages) { pages 783 drivers/usb/gadget/function/f_fs.c pages[i] = vmalloc_to_page(ptr); pages 785 drivers/usb/gadget/function/f_fs.c if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) { pages 786 drivers/usb/gadget/function/f_fs.c kvfree(pages); pages 791 drivers/usb/gadget/function/f_fs.c kvfree(pages); pages 722 drivers/usb/storage/alauda.c unsigned int page, unsigned int pages, unsigned char *data) pages 727 drivers/usb/storage/alauda.c PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us) pages 730 drivers/usb/storage/alauda.c usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages); pages 738 drivers/usb/storage/alauda.c data,
(MEDIA_INFO(us).pagesize + 64) * pages, NULL); pages 748 drivers/usb/storage/alauda.c unsigned int page, unsigned int pages, unsigned char *data) pages 753 drivers/usb/storage/alauda.c rc = alauda_read_block_raw(us, pba, page, pages, data); pages 758 drivers/usb/storage/alauda.c for (i = 0; i < pages; i++) { pages 801 drivers/usb/storage/alauda.c unsigned int page, unsigned int pages, pages 872 drivers/usb/storage/alauda.c for (i = page; i < page+pages; i++) { pages 945 drivers/usb/storage/alauda.c unsigned int pages; pages 958 drivers/usb/storage/alauda.c pages = min(sectors, blocksize - page); pages 959 drivers/usb/storage/alauda.c len = pages << pageshift; pages 966 drivers/usb/storage/alauda.c pages, lba, page); pages 978 drivers/usb/storage/alauda.c pages, pba, lba, page); pages 980 drivers/usb/storage/alauda.c result = alauda_read_block(us, pba, page, pages, buffer); pages 991 drivers/usb/storage/alauda.c sectors -= pages; pages 1046 drivers/usb/storage/alauda.c unsigned int pages = min(sectors, blocksize - page); pages 1047 drivers/usb/storage/alauda.c len = pages << pageshift; pages 1061 drivers/usb/storage/alauda.c result = alauda_write_lba(us, lba, page, pages, buffer, pages 1068 drivers/usb/storage/alauda.c sectors -= pages; pages 1164 drivers/usb/storage/alauda.c unsigned int page, pages; pages 1173 drivers/usb/storage/alauda.c pages = short_pack(srb->cmnd[8], srb->cmnd[7]); pages 1175 drivers/usb/storage/alauda.c usb_stor_dbg(us, "READ_10: page %d pagect %d\n", page, pages); pages 1177 drivers/usb/storage/alauda.c return alauda_read_data(us, page, pages); pages 1181 drivers/usb/storage/alauda.c unsigned int page, pages; pages 1190 drivers/usb/storage/alauda.c pages = short_pack(srb->cmnd[8], srb->cmnd[7]); pages 1192 drivers/usb/storage/alauda.c usb_stor_dbg(us, "WRITE_10: page %d pagect %d\n", page, pages); pages 1194 drivers/usb/storage/alauda.c return alauda_write_data(us, page, pages); pages 739 drivers/usb/storage/sddr09.c unsigned int page, pages; pages 770 drivers/usb/storage/sddr09.c pages = min(sectors, info->blocksize - page); pages 771 drivers/usb/storage/sddr09.c len = pages << info->pageshift; pages 787 drivers/usb/storage/sddr09.c pages, lba, page); pages 800 drivers/usb/storage/sddr09.c pages, pba, lba, page); pages 806 drivers/usb/storage/sddr09.c pages, info->pageshift, buffer, 0); pages 817 drivers/usb/storage/sddr09.c sectors -= pages; pages 852 drivers/usb/storage/sddr09.c unsigned int page, unsigned int pages, pages 919 drivers/usb/storage/sddr09.c for (i = page; i < page+pages; i++) { pages 963 drivers/usb/storage/sddr09.c unsigned int lba, maxlba, page, pages; pages 1015 drivers/usb/storage/sddr09.c pages = min(sectors, info->blocksize - page); pages 1016 drivers/usb/storage/sddr09.c len = (pages << info->pageshift); pages 1030 drivers/usb/storage/sddr09.c result = sddr09_write_lba(us, lba, page, pages, pages 1037 drivers/usb/storage/sddr09.c sectors -= pages; pages 1544 drivers/usb/storage/sddr09.c unsigned int page, pages; pages 1661 drivers/usb/storage/sddr09.c pages = short_pack(srb->cmnd[8], srb->cmnd[7]); pages 1664 drivers/usb/storage/sddr09.c page, pages); pages 1666 drivers/usb/storage/sddr09.c result = sddr09_read_data(us, page, pages); pages 1676 drivers/usb/storage/sddr09.c pages = short_pack(srb->cmnd[8], srb->cmnd[7]); pages 1679 drivers/usb/storage/sddr09.c page, pages); pages 1681 drivers/usb/storage/sddr09.c result = sddr09_write_data(us, page, pages); pages 201 drivers/usb/storage/sddr55.c unsigned short pages; pages 227 
drivers/usb/storage/sddr55.c pages = min((unsigned int) sectors << info->smallpageshift, pages 229 drivers/usb/storage/sddr55.c len = pages << info->pageshift; pages 232 drivers/usb/storage/sddr55.c pages, pba, lba, page); pages 248 drivers/usb/storage/sddr55.c command[6] = LSB_of(pages << (1 - info->smallpageshift)); pages 295 drivers/usb/storage/sddr55.c sectors -= pages >> info->smallpageshift; pages 321 drivers/usb/storage/sddr55.c unsigned short pages; pages 354 drivers/usb/storage/sddr55.c pages = min((unsigned int) sectors << info->smallpageshift, pages 356 drivers/usb/storage/sddr55.c len = pages << info->pageshift; pages 363 drivers/usb/storage/sddr55.c pages, pba, lba, page); pages 424 drivers/usb/storage/sddr55.c command[4] |= LSB_of(pages >> info->smallpageshift); pages 503 drivers/usb/storage/sddr55.c sectors -= pages >> info->smallpageshift; pages 792 drivers/usb/storage/sddr55.c unsigned short pages; pages 928 drivers/usb/storage/sddr55.c pages = short_pack(srb->cmnd[8], srb->cmnd[7]); pages 953 drivers/usb/storage/sddr55.c pba, lba, page, pages); pages 955 drivers/usb/storage/sddr55.c return sddr55_write_data(us, lba, page, pages); pages 958 drivers/usb/storage/sddr55.c pba, lba, page, pages); pages 960 drivers/usb/storage/sddr55.c return sddr55_read_data(us, lba, page, pages); pages 338 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long entry, unsigned long pages); pages 433 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long entry, unsigned long pages) pages 438 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long lastentry = entry + pages, firstentry = entry; pages 479 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_kill(tbl, firstentry, pages); pages 501 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long entry, unsigned long tce, unsigned long pages, pages 508 drivers/vfio/vfio_iommu_spapr_tce.c for (i = 0; i < pages; ++i) { pages 542 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_kill(tbl, entry, pages); pages 549 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long entry, unsigned long tce, unsigned long pages, pages 556 drivers/vfio/vfio_iommu_spapr_tce.c for (i = 0; i < pages; ++i) { pages 601 drivers/vfio/vfio_iommu_spapr_tce.c iommu_tce_kill(tbl, entry, pages); pages 637 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; pages 640 drivers/vfio/vfio_iommu_spapr_tce.c account_locked_vm(container->mm, pages, false); pages 1274 drivers/vfio/vfio_iommu_type1.c struct page *pages; pages 1277 drivers/vfio/vfio_iommu_type1.c pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); pages 1278 drivers/vfio/vfio_iommu_type1.c if (!pages) pages 1281 drivers/vfio/vfio_iommu_type1.c ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, pages 1292 drivers/vfio/vfio_iommu_type1.c __free_pages(pages, order); pages 577 drivers/vhost/scsi.c struct page **pages; pages 596 drivers/vhost/scsi.c pages = cmd->tvc_upages; pages 600 drivers/vhost/scsi.c cmd->tvc_upages = pages; pages 627 drivers/vhost/scsi.c struct page **pages = cmd->tvc_upages; pages 633 drivers/vhost/scsi.c bytes = iov_iter_get_pages(iter, pages, LONG_MAX, pages 643 drivers/vhost/scsi.c sg_set_page(sg++, pages[npages++], n, offset); pages 366 drivers/video/fbdev/efifb.c if (!screen_info.pages) pages 367 drivers/video/fbdev/efifb.c screen_info.pages = 1; pages 502 drivers/video/fbdev/efifb.c screen_info.pages); pages 646 drivers/video/fbdev/pvr2fb.c struct page **pages; pages 651 drivers/video/fbdev/pvr2fb.c pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); 
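The st, tee_shm and pvr2fb entries in this listing (and fsl_hypervisor just below) all follow the same pin/use/release cycle for user memory. A minimal kernel-style sketch of that cycle, assuming the 5.4-era get_user_pages_fast() signature; demo_pin_user_buffer() is a made-up name, not an existing helper:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages)
    {
            struct page **pages;
            int i, pinned;

            pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (pinned < 0) {               /* nothing pinned, plain error */
                    kfree(pages);
                    return pinned;
            }

            /* ... access page_address(pages[i]) or build a scatterlist ... */

            for (i = 0; i < pinned; i++)
                    put_page(pages[i]);     /* drop each pinned reference */
            kfree(pages);
            return pinned == nr_pages ? 0 : -EFAULT;
    }

Every successfully pinned page must be released with put_page(), including on the partial-pin path where fewer than nr_pages were returned; the put_page() loops visible in the pvr2fb, st and tee_shm hits exist for exactly that reason.
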
pages 652 drivers/video/fbdev/pvr2fb.c if (!pages) pages 655 drivers/video/fbdev/pvr2fb.c ret = get_user_pages_fast((unsigned long)buf, nr_pages, FOLL_WRITE, pages); pages 665 drivers/video/fbdev/pvr2fb.c start = (unsigned long)page_address(pages[0]); pages 666 drivers/video/fbdev/pvr2fb.c end = (unsigned long)page_address(pages[nr_pages]); pages 691 drivers/video/fbdev/pvr2fb.c dma_write_page(shdma, (unsigned long)page_address(pages[i]), 0); pages 702 drivers/video/fbdev/pvr2fb.c put_page(pages[i]); pages 704 drivers/video/fbdev/pvr2fb.c kfree(pages); pages 163 drivers/video/fbdev/ssd1307fb.c unsigned int pages = DIV_ROUND_UP(par->height, 8); pages 166 drivers/video/fbdev/ssd1307fb.c array = ssd1307fb_alloc_array(par->width * pages, SSD1307FB_DATA); pages 199 drivers/video/fbdev/ssd1307fb.c for (i = 0; i < pages; i++) { pages 205 drivers/video/fbdev/ssd1307fb.c if (i + 1 == pages && par->height % 8) pages 216 drivers/video/fbdev/ssd1307fb.c ssd1307fb_write_array(par->client, array, par->width * pages); pages 322 drivers/video/fbdev/vesafb.c vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); pages 151 drivers/virt/fsl_hypervisor.c struct page **pages = NULL; pages 229 drivers/virt/fsl_hypervisor.c pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); pages 230 drivers/virt/fsl_hypervisor.c if (!pages) { pages 250 drivers/virt/fsl_hypervisor.c num_pages, param.source != -1 ? FOLL_WRITE : 0, pages); pages 264 drivers/virt/fsl_hypervisor.c sg_list[0].source = page_to_phys(pages[0]) + lb_offset; pages 268 drivers/virt/fsl_hypervisor.c sg_list[0].target = page_to_phys(pages[0]) + lb_offset; pages 278 drivers/virt/fsl_hypervisor.c sg_list[i].source = page_to_phys(pages[i]); pages 283 drivers/virt/fsl_hypervisor.c sg_list[i].target = page_to_phys(pages[i]); pages 295 drivers/virt/fsl_hypervisor.c if (pages) { pages 297 drivers/virt/fsl_hypervisor.c if (pages[i]) pages 298 drivers/virt/fsl_hypervisor.c put_page(pages[i]); pages 302 drivers/virt/fsl_hypervisor.c kfree(pages); pages 50 drivers/virt/vboxguest/vboxguest_core.c struct page **pages = NULL; pages 77 drivers/virt/vboxguest/vboxguest_core.c pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL); pages 78 drivers/virt/vboxguest/vboxguest_core.c if (!pages) pages 86 drivers/virt/vboxguest/vboxguest_core.c pages[i] = gdev->guest_mappings_dummy_page; pages 93 drivers/virt/vboxguest/vboxguest_core.c guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT), pages 123 drivers/virt/vboxguest/vboxguest_core.c kfree(pages); pages 271 drivers/virt/vboxguest/vboxguest_core.c struct page **pages; pages 274 drivers/virt/vboxguest/vboxguest_core.c pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES, pages 275 drivers/virt/vboxguest/vboxguest_core.c sizeof(*pages), pages 277 drivers/virt/vboxguest/vboxguest_core.c if (!pages) pages 282 drivers/virt/vboxguest/vboxguest_core.c req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; pages 285 drivers/virt/vboxguest/vboxguest_core.c pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN); pages 286 drivers/virt/vboxguest/vboxguest_core.c if (!pages[i]) { pages 291 drivers/virt/vboxguest/vboxguest_core.c req->phys_page[i] = page_to_phys(pages[i]); pages 301 drivers/virt/vboxguest/vboxguest_core.c gdev->mem_balloon.pages[chunk_idx] = pages; pages 307 drivers/virt/vboxguest/vboxguest_core.c __free_page(pages[i]); pages 308 drivers/virt/vboxguest/vboxguest_core.c kfree(pages); pages 322 drivers/virt/vboxguest/vboxguest_core.c struct page 
**pages = gdev->mem_balloon.pages[chunk_idx]; pages 327 drivers/virt/vboxguest/vboxguest_core.c req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; pages 330 drivers/virt/vboxguest/vboxguest_core.c req->phys_page[i] = page_to_phys(pages[i]); pages 339 drivers/virt/vboxguest/vboxguest_core.c __free_page(pages[i]); pages 340 drivers/virt/vboxguest/vboxguest_core.c kfree(pages); pages 341 drivers/virt/vboxguest/vboxguest_core.c gdev->mem_balloon.pages[chunk_idx] = NULL; pages 374 drivers/virt/vboxguest/vboxguest_core.c gdev->mem_balloon.pages = pages 377 drivers/virt/vboxguest/vboxguest_core.c if (!gdev->mem_balloon.pages) pages 36 drivers/virt/vboxguest/vboxguest_core.h struct page ***pages; pages 200 drivers/virt/vboxguest/vboxguest_utils.c *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); pages 362 drivers/virt/vboxguest/vboxguest_utils.c dst_pg_lst->pages[i] = page_to_phys(page); pages 366 drivers/virt/vboxguest/vboxguest_utils.c *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]); pages 349 drivers/virt/vboxguest/vmmdev.h u32 pages; pages 176 drivers/virtio/virtio_balloon.c LIST_HEAD(pages); pages 194 drivers/virtio/virtio_balloon.c balloon_page_push(&pages, page); pages 201 drivers/virtio/virtio_balloon.c while ((page = balloon_page_pop(&pages))) { pages 222 drivers/virtio/virtio_balloon.c struct list_head *pages) pages 226 drivers/virtio/virtio_balloon.c list_for_each_entry_safe(page, next, pages, lru) { pages 240 drivers/virtio/virtio_balloon.c LIST_HEAD(pages); pages 254 drivers/virtio/virtio_balloon.c list_add(&page->lru, &pages); pages 266 drivers/virtio/virtio_balloon.c release_pages_balloon(vb, &pages); pages 2162 drivers/virtio/virtio_ring.c void *pages, pages 2172 drivers/virtio/virtio_ring.c vring_init(&vring, num, pages, vring_align); pages 459 drivers/xen/balloon.c LIST_HEAD(pages); pages 473 drivers/xen/balloon.c list_add(&page->lru, &pages); pages 490 drivers/xen/balloon.c list_for_each_entry_safe(page, tmp, &pages, lru) { pages 597 drivers/xen/balloon.c int alloc_xenballooned_pages(int nr_pages, struct page **pages) pages 610 drivers/xen/balloon.c pages[pgno++] = page; pages 634 drivers/xen/balloon.c free_xenballooned_pages(pgno, pages); pages 644 drivers/xen/balloon.c void free_xenballooned_pages(int nr_pages, struct page **pages) pages 651 drivers/xen/balloon.c if (pages[i]) pages 652 drivers/xen/balloon.c balloon_append(pages[i]); pages 667 drivers/xen/balloon.c unsigned long pages) pages 676 drivers/xen/balloon.c extra_pfn_end = min(max_pfn, start_pfn + pages); pages 64 drivers/xen/gntdev-common.h struct page **pages; pages 60 drivers/xen/gntdev-dmabuf.c struct page **pages; pages 201 drivers/xen/gntdev-dmabuf.c dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages) pages 212 drivers/xen/gntdev-dmabuf.c ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, pages 288 drivers/xen/gntdev-dmabuf.c sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages, pages 382 drivers/xen/gntdev-dmabuf.c struct page **pages; pages 400 drivers/xen/gntdev-dmabuf.c gntdev_dmabuf->pages = args->pages; pages 504 drivers/xen/gntdev-dmabuf.c args.pages = map->pages; pages 522 drivers/xen/gntdev-dmabuf.c dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs, pages 545 drivers/xen/gntdev-dmabuf.c xen_page_to_gfn(pages[i]), 0); pages 567 drivers/xen/gntdev-dmabuf.c kfree(gntdev_dmabuf->pages); pages 587 drivers/xen/gntdev-dmabuf.c gntdev_dmabuf->pages = kcalloc(count, pages 588 drivers/xen/gntdev-dmabuf.c sizeof(gntdev_dmabuf->pages[0]), pages 590 
drivers/xen/gntdev-dmabuf.c if (!gntdev_dmabuf->pages) pages 668 drivers/xen/gntdev-dmabuf.c gntdev_dmabuf->pages[i++] = page; pages 671 drivers/xen/gntdev-dmabuf.c ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages, pages 69 drivers/xen/gntdev.c int offset, int pages); pages 106 drivers/xen/gntdev.c args.pages = map->pages; pages 114 drivers/xen/gntdev.c if (map->pages) pages 115 drivers/xen/gntdev.c gnttab_free_pages(map->count, map->pages); pages 120 drivers/xen/gntdev.c kfree(map->pages); pages 144 drivers/xen/gntdev.c add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); pages 150 drivers/xen/gntdev.c NULL == add->pages) pages 174 drivers/xen/gntdev.c args.pages = add->pages; pages 184 drivers/xen/gntdev.c if (gnttab_alloc_pages(count, add->pages)) pages 258 drivers/xen/gntdev.c if (map->pages && !use_ptemod) pages 309 drivers/xen/gntdev.c pfn_to_kaddr(page_to_pfn(map->pages[i])); pages 325 drivers/xen/gntdev.c pfn_to_kaddr(page_to_pfn(map->pages[i])); pages 326 drivers/xen/gntdev.c BUG_ON(PageHighMem(map->pages[i])); pages 339 drivers/xen/gntdev.c map->pages, map->count); pages 356 drivers/xen/gntdev.c bfn = pfn_to_bfn(page_to_pfn(map->pages[i])); pages 365 drivers/xen/gntdev.c int pages) pages 372 drivers/xen/gntdev.c if (pgno >= offset && pgno < offset + pages) { pages 374 drivers/xen/gntdev.c uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); pages 382 drivers/xen/gntdev.c unmap_data.pages = map->pages + offset; pages 383 drivers/xen/gntdev.c unmap_data.count = pages; pages 389 drivers/xen/gntdev.c for (i = 0; i < pages; i++) { pages 401 drivers/xen/gntdev.c int pages) pages 405 drivers/xen/gntdev.c pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); pages 410 drivers/xen/gntdev.c while (pages && !err) { pages 411 drivers/xen/gntdev.c while (pages && map->unmap_ops[offset].handle == -1) { pages 413 drivers/xen/gntdev.c pages--; pages 416 drivers/xen/gntdev.c while (range < pages) { pages 423 drivers/xen/gntdev.c pages -= range; pages 467 drivers/xen/gntdev.c return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT]; pages 830 drivers/xen/gntdev.c struct page *pages[GNTDEV_COPY_BATCH]; pages 848 drivers/xen/gntdev.c batch->pages[batch->nr_pages++] = page; pages 861 drivers/xen/gntdev.c put_page(batch->pages[i]); pages 1137 drivers/xen/gntdev.c err = vm_map_pages_zero(vma, map->pages, map->count); pages 776 drivers/xen/grant-table.c int gnttab_pages_set_private(int nr_pages, struct page **pages) pages 788 drivers/xen/grant-table.c set_page_private(pages[i], (unsigned long)foreign); pages 790 drivers/xen/grant-table.c SetPagePrivate(pages[i]); pages 802 drivers/xen/grant-table.c int gnttab_alloc_pages(int nr_pages, struct page **pages) pages 806 drivers/xen/grant-table.c ret = alloc_xenballooned_pages(nr_pages, pages); pages 810 drivers/xen/grant-table.c ret = gnttab_pages_set_private(nr_pages, pages); pages 812 drivers/xen/grant-table.c gnttab_free_pages(nr_pages, pages); pages 818 drivers/xen/grant-table.c void gnttab_pages_clear_private(int nr_pages, struct page **pages) pages 823 drivers/xen/grant-table.c if (PagePrivate(pages[i])) { pages 825 drivers/xen/grant-table.c kfree((void *)page_private(pages[i])); pages 827 drivers/xen/grant-table.c ClearPagePrivate(pages[i]); pages 838 drivers/xen/grant-table.c void gnttab_free_pages(int nr_pages, struct page **pages) pages 840 drivers/xen/grant-table.c gnttab_pages_clear_private(nr_pages, pages); pages 841 drivers/xen/grant-table.c free_xenballooned_pages(nr_pages, pages); 
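The grant-table hits above (gnttab_alloc_pages, gnttab_pages_set_private, gnttab_pages_clear_private, gnttab_free_pages) show the allocate-then-tag idiom: attach per-page private data after allocation and unwind completely on failure. A hedged sketch of that idiom with hypothetical demo_* names; the real code stores foreign-domain bookkeeping rather than an opaque cookie:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static void demo_pages_clear_private(int nr, struct page **pages)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    if (PagePrivate(pages[i])) {
                            kfree((void *)page_private(pages[i]));
                            set_page_private(pages[i], 0);
                            ClearPagePrivate(pages[i]);
                    }
            }
    }

    static int demo_pages_set_private(int nr, struct page **pages)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    /* per-page bookkeeping blob, freed by the clear path */
                    void *cookie = kzalloc(16, GFP_KERNEL);

                    if (!cookie) {
                            demo_pages_clear_private(i, pages); /* full rollback */
                            return -ENOMEM;
                    }
                    set_page_private(pages[i], (unsigned long)cookie);
                    SetPagePrivate(pages[i]);
            }
            return 0;
    }
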
pages 875 drivers/xen/grant-table.c args->pages[i] = page; pages 880 drivers/xen/grant-table.c xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages); pages 889 drivers/xen/grant-table.c ret = gnttab_pages_set_private(args->nr_pages, args->pages); pages 910 drivers/xen/grant-table.c gnttab_pages_clear_private(args->nr_pages, args->pages); pages 913 drivers/xen/grant-table.c args->frames[i] = page_to_xen_pfn(args->pages[i]); pages 923 drivers/xen/grant-table.c xenmem_reservation_va_mapping_update(args->nr_pages, args->pages, pages 1010 drivers/xen/grant-table.c void gnttab_foreach_grant(struct page **pages, pages 1021 drivers/xen/grant-table.c xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]); pages 1034 drivers/xen/grant-table.c struct page **pages, unsigned int count) pages 1048 drivers/xen/grant-table.c SetPageForeign(pages[i]); pages 1049 drivers/xen/grant-table.c foreign = xen_page_foreign(pages[i]); pages 1073 drivers/xen/grant-table.c return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); pages 1079 drivers/xen/grant-table.c struct page **pages, unsigned int count) pages 1089 drivers/xen/grant-table.c ClearPageForeign(pages[i]); pages 1091 drivers/xen/grant-table.c return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count); pages 1116 drivers/xen/grant-table.c if (page_count(item->pages[pc]) > 1) { pages 1125 drivers/xen/grant-table.c item->pages, item->count); pages 30 drivers/xen/mem-reservation.c struct page **pages, pages 36 drivers/xen/mem-reservation.c struct page *page = pages[i]; pages 64 drivers/xen/mem-reservation.c struct page **pages) pages 69 drivers/xen/mem-reservation.c struct page *page = pages[i]; pages 34 drivers/xen/privcmd-buf.c struct page *pages[]; pages 60 drivers/xen/privcmd-buf.c __free_page(vma_priv->pages[i]); pages 143 drivers/xen/privcmd-buf.c vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL); pages 148 drivers/xen/privcmd-buf.c vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); pages 149 drivers/xen/privcmd-buf.c if (!vma_priv->pages[i]) pages 168 drivers/xen/privcmd-buf.c ret = vm_map_pages_zero(vma, vma_priv->pages, pages 93 drivers/xen/privcmd.c static void free_page_list(struct list_head *pages) pages 97 drivers/xen/privcmd.c list_for_each_entry_safe(p, n, pages, lru) pages 100 drivers/xen/privcmd.c INIT_LIST_HEAD(pages); pages 342 drivers/xen/privcmd.c struct page **pages = vma->vm_private_data; pages 347 drivers/xen/privcmd.c cur_pages = &pages[st->index]; pages 424 drivers/xen/privcmd.c struct page **pages; pages 426 drivers/xen/privcmd.c pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); pages 427 drivers/xen/privcmd.c if (pages == NULL) pages 430 drivers/xen/privcmd.c rc = alloc_xenballooned_pages(numpgs, pages); pages 434 drivers/xen/privcmd.c kfree(pages); pages 438 drivers/xen/privcmd.c vma->vm_private_data = pages; pages 585 drivers/xen/privcmd.c struct page *pages[], unsigned int nr_pages) pages 601 drivers/xen/privcmd.c requested, FOLL_WRITE, pages); pages 606 drivers/xen/privcmd.c pages += pinned; pages 612 drivers/xen/privcmd.c static void unlock_pages(struct page *pages[], unsigned int nr_pages) pages 616 drivers/xen/privcmd.c if (!pages) pages 620 drivers/xen/privcmd.c if (pages[i]) pages 621 drivers/xen/privcmd.c put_page(pages[i]); pages 631 drivers/xen/privcmd.c struct page **pages = NULL; pages 676 drivers/xen/privcmd.c pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); pages 677 drivers/xen/privcmd.c if (!pages) { pages 688 drivers/xen/privcmd.c rc = 
lock_pages(kbufs, kdata.num, pages, nr_pages); pages 702 drivers/xen/privcmd.c unlock_pages(pages, nr_pages); pages 704 drivers/xen/privcmd.c kfree(pages); pages 761 drivers/xen/privcmd.c struct page **pages; pages 768 drivers/xen/privcmd.c pages = vma->vm_private_data; pages 771 drivers/xen/privcmd.c page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]); pages 895 drivers/xen/privcmd.c struct page **pages = vma->vm_private_data; pages 900 drivers/xen/privcmd.c if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) pages 903 drivers/xen/privcmd.c rc = xen_unmap_domain_gfn_range(vma, numgfns, pages); pages 905 drivers/xen/privcmd.c free_xenballooned_pages(numpgs, pages); pages 909 drivers/xen/privcmd.c kfree(pages); pages 214 drivers/xen/xen-front-pgdir-shbuf.c if (!buf->pages || !buf->backend_map_handles || !buf->grefs) pages 225 drivers/xen/xen-front-pgdir-shbuf.c addr = xen_page_to_vaddr(buf->pages[i]); pages 230 drivers/xen/xen-front-pgdir-shbuf.c ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages, pages 293 drivers/xen/xen-front-pgdir-shbuf.c addr = xen_page_to_vaddr(buf->pages[cur_page]); pages 304 drivers/xen/xen-front-pgdir-shbuf.c ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages); pages 416 drivers/xen/xen-front-pgdir-shbuf.c xen_page_to_gfn(buf->pages[i]), pages 528 drivers/xen/xen-front-pgdir-shbuf.c buf->pages = cfg->pages; pages 134 drivers/xen/xen-scsiback.c struct page *pages[VSCSI_MAX_GRANTS]; pages 255 drivers/xen/xen-scsiback.c return vaddr_page(req->pages[seg]); pages 272 drivers/xen/xen-scsiback.c struct page *pages[VSCSI_GRANT_BATCH]; pages 291 drivers/xen/xen-scsiback.c pages[invcount] = req->pages[i]; pages 292 drivers/xen/xen-scsiback.c put_page(pages[invcount]); pages 296 drivers/xen/xen-scsiback.c err = gnttab_unmap_refs(unmap, NULL, pages, invcount); pages 302 drivers/xen/xen-scsiback.c err = gnttab_unmap_refs(unmap, NULL, pages, invcount); pages 306 drivers/xen/xen-scsiback.c put_free_pages(req->pages, req->n_grants); pages 501 drivers/xen/xen-scsiback.c pending_req->pages, pending_req->grant_handles, pages 536 drivers/xen/xen-scsiback.c pg = pending_req->pages + nr_sgl; pages 560 drivers/xen/xen-scsiback.c pg = pending_req->pages + nr_sgl; pages 63 drivers/xen/xenbus/xenbus_client.c struct page *pages[XENBUS_MAX_RING_PAGES]; pages 573 drivers/xen/xenbus/xenbus_client.c err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); pages 577 drivers/xen/xenbus/xenbus_client.c gnttab_foreach_grant(node->hvm.pages, nr_grefs, pages 588 drivers/xen/xenbus/xenbus_client.c addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP, pages 612 drivers/xen/xenbus/xenbus_client.c free_xenballooned_pages(nr_pages, node->hvm.pages); pages 851 drivers/xen/xenbus/xenbus_client.c gnttab_foreach_grant(node->hvm.pages, node->nr_handles, pages 859 drivers/xen/xenbus/xenbus_client.c free_xenballooned_pages(nr_pages, node->hvm.pages); pages 48 drivers/xen/xlate_mmu.c static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, pages 57 drivers/xen/xlate_mmu.c page = pages[i / XEN_PFN_PER_PAGE]; pages 71 drivers/xen/xlate_mmu.c struct page **pages; pages 99 drivers/xen/xlate_mmu.c struct page *page = info->pages[info->index++]; pages 148 drivers/xen/xlate_mmu.c struct page **pages) pages 163 drivers/xen/xlate_mmu.c data.pages = pages; pages 184 drivers/xen/xlate_mmu.c int nr, struct page **pages) pages 186 drivers/xen/xlate_mmu.c xen_for_each_gfn(pages, nr, unmap_gfn, NULL); pages 217 drivers/xen/xlate_mmu.c struct page **pages; pages 226 
drivers/xen/xlate_mmu.c pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); pages 227 drivers/xen/xlate_mmu.c if (!pages) pages 232 drivers/xen/xlate_mmu.c kfree(pages); pages 235 drivers/xen/xlate_mmu.c rc = alloc_xenballooned_pages(nr_pages, pages); pages 239 drivers/xen/xlate_mmu.c kfree(pages); pages 246 drivers/xen/xlate_mmu.c xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data); pages 248 drivers/xen/xlate_mmu.c vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL); pages 252 drivers/xen/xlate_mmu.c free_xenballooned_pages(nr_pages, pages); pages 253 drivers/xen/xlate_mmu.c kfree(pages); pages 257 drivers/xen/xlate_mmu.c kfree(pages); pages 268 drivers/xen/xlate_mmu.c struct page **pages; pages 276 drivers/xen/xlate_mmu.c struct page *page = r->pages[r->i]; pages 290 drivers/xen/xlate_mmu.c .pages = vma->vm_private_data, pages 280 fs/9p/cache.c struct list_head *pages, pages 291 fs/9p/cache.c mapping, pages, nr_pages, pages 301 fs/9p/cache.c BUG_ON(!list_empty(pages)); pages 36 fs/9p/cache.h struct list_head *pages, pages 61 fs/9p/cache.h struct list_head *pages, pages 64 fs/9p/cache.h return __v9fs_readpages_from_fscache(inode, mapping, pages, pages 117 fs/9p/cache.h struct list_head *pages, pages 99 fs/9p/vfs_addr.c struct list_head *pages, unsigned nr_pages) pages 107 fs/9p/vfs_addr.c ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); pages 111 fs/9p/vfs_addr.c ret = read_cache_pages(mapping, pages, v9fs_fid_readpage, pages 170 fs/afs/dir.c if (!afs_dir_check_page(dvnode, req->pages[i], req->actual_len)) pages 182 fs/afs/dir.c dbuf = kmap(req->pages[i]); pages 188 fs/afs/dir.c kunmap(req->pages[i]); pages 251 fs/afs/dir.c req->pages = req->array; pages 253 fs/afs/dir.c req->pages = kcalloc(nr_pages, sizeof(struct page *), pages 255 fs/afs/dir.c if (!req->pages) pages 268 fs/afs/dir.c req->pages + i); pages 277 fs/afs/dir.c req->pages[i] = __page_cache_alloc(gfp); pages 278 fs/afs/dir.c if (!req->pages[i]) pages 280 fs/afs/dir.c ret = add_to_page_cache_lru(req->pages[i], pages 286 fs/afs/dir.c set_page_private(req->pages[i], 1); pages 287 fs/afs/dir.c SetPagePrivate(req->pages[i]); pages 288 fs/afs/dir.c unlock_page(req->pages[i]); pages 477 fs/afs/dir.c page = req->pages[blkoff / PAGE_SIZE]; pages 26 fs/afs/file.c struct list_head *pages, unsigned nr_pages); pages 194 fs/afs/file.c if (req->pages) { pages 196 fs/afs/file.c if (req->pages[i]) pages 197 fs/afs/file.c put_page(req->pages[i]); pages 198 fs/afs/file.c if (req->pages != req->array) pages 199 fs/afs/file.c kfree(req->pages); pages 327 fs/afs/file.c req->pages = req->array; pages 328 fs/afs/file.c req->pages[0] = page; pages 419 fs/afs/file.c struct page *page = req->pages[req->index]; pages 421 fs/afs/file.c req->pages[req->index] = NULL; pages 441 fs/afs/file.c struct list_head *pages) pages 454 fs/afs/file.c first = lru_to_page(pages); pages 457 fs/afs/file.c for (p = first->lru.prev; p != pages; p = p->prev) { pages 474 fs/afs/file.c req->pages = req->array; pages 486 fs/afs/file.c page = lru_to_page(pages); pages 498 fs/afs/file.c req->pages[req->nr_pages++] = page; pages 524 fs/afs/file.c page = req->pages[i]; pages 542 fs/afs/file.c struct list_head *pages, unsigned nr_pages) pages 563 fs/afs/file.c pages, pages 575 fs/afs/file.c BUG_ON(!list_empty(pages)); pages 591 fs/afs/file.c while (!list_empty(pages)) { pages 592 fs/afs/file.c ret = afs_readpages_one(file, mapping, pages); pages 370 fs/afs/fsclient.c call->bvec[0].bv_page = req->pages[req->index]; pages 439 fs/afs/fsclient.c 
zero_user_segment(req->pages[req->index], pages 237 fs/afs/internal.h struct page **pages; pages 286 fs/afs/rxrpc.c struct page *pages[AFS_BVEC_MAX]; pages 290 fs/afs/rxrpc.c n = find_get_pages_contig(call->mapping, first, nr, pages); pages 300 fs/afs/rxrpc.c bv[i].bv_page = pages[i]; pages 55 fs/afs/write.c req->pages = req->array; pages 56 fs/afs/write.c req->pages[0] = page; pages 250 fs/afs/write.c pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); pages 254 fs/afs/write.c struct page *page = pv.pages[loop]; pages 293 fs/afs/write.c pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); pages 297 fs/afs/write.c struct page *page = pv.pages[loop]; pages 333 fs/afs/write.c first, count, pv.pages); pages 337 fs/afs/write.c priv = page_private(pv.pages[loop]); pages 339 fs/afs/write.c pv.pages[loop]->index, priv); pages 340 fs/afs/write.c set_page_private(pv.pages[loop], 0); pages 341 fs/afs/write.c end_page_writeback(pv.pages[loop]); pages 457 fs/afs/write.c struct page *pages[8], *page; pages 494 fs/afs/write.c if (n > ARRAY_SIZE(pages)) pages 495 fs/afs/write.c n = ARRAY_SIZE(pages); pages 496 fs/afs/write.c n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); pages 500 fs/afs/write.c if (pages[0]->index != start) { pages 502 fs/afs/write.c put_page(pages[--n]); pages 508 fs/afs/write.c page = pages[loop]; pages 544 fs/afs/write.c put_page(pages[loop]); pages 482 fs/afs/yfsclient.c call->bvec[0].bv_page = req->pages[req->index]; pages 555 fs/afs/yfsclient.c zero_user_segment(req->pages[req->index], pages 619 fs/block_dev.c struct list_head *pages, unsigned nr_pages) pages 621 fs/block_dev.c return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block); pages 216 fs/btrfs/compression.c struct page *pages[16]; pages 227 fs/btrfs/compression.c nr_pages, ARRAY_SIZE(pages)), pages); pages 235 fs/btrfs/compression.c SetPageError(pages[i]); pages 236 fs/btrfs/compression.c end_page_writeback(pages[i]); pages 237 fs/btrfs/compression.c put_page(pages[i]); pages 1033 fs/btrfs/compression.c u64 start, struct page **pages, pages 1046 fs/btrfs/compression.c start, pages, pages 81 fs/btrfs/compression.h u64 start, struct page **pages, pages 145 fs/btrfs/compression.h struct page **pages, pages 1373 fs/btrfs/ctree.h const type *p = page_address(eb->pages[0]); \ pages 1380 fs/btrfs/ctree.h type *p = page_address(eb->pages[0]); \ pages 2947 fs/btrfs/ctree.h int btrfs_dirty_pages(struct inode *inode, struct page **pages, pages 523 fs/btrfs/disk-io.c if (page != eb->pages[0]) pages 4111 fs/btrfs/disk-io.c struct inode *btree_inode = buf->pages[0]->mapping->host; pages 4140 fs/btrfs/disk-io.c root = BTRFS_I(buf->pages[0]->mapping->host)->root; pages 1875 fs/btrfs/extent_io.c struct page *pages[16]; pages 1891 fs/btrfs/extent_io.c nr_pages, ARRAY_SIZE(pages)), pages); pages 1904 fs/btrfs/extent_io.c SetPagePrivate2(pages[i]); pages 1906 fs/btrfs/extent_io.c if (locked_page && pages[i] == locked_page) { pages 1907 fs/btrfs/extent_io.c put_page(pages[i]); pages 1912 fs/btrfs/extent_io.c clear_page_dirty_for_io(pages[i]); pages 1914 fs/btrfs/extent_io.c set_page_writeback(pages[i]); pages 1916 fs/btrfs/extent_io.c SetPageError(pages[i]); pages 1918 fs/btrfs/extent_io.c end_page_writeback(pages[i]); pages 1920 fs/btrfs/extent_io.c unlock_page(pages[i]); pages 1922 fs/btrfs/extent_io.c lock_page(pages[i]); pages 1923 fs/btrfs/extent_io.c if (!PageDirty(pages[i]) || pages 1924 fs/btrfs/extent_io.c pages[i]->mapping != mapping) { pages 1925 fs/btrfs/extent_io.c 
unlock_page(pages[i]); pages 1926 fs/btrfs/extent_io.c put_page(pages[i]); pages 1931 fs/btrfs/extent_io.c put_page(pages[i]); pages 2271 fs/btrfs/extent_io.c struct page *p = eb->pages[i]; pages 3265 fs/btrfs/extent_io.c struct page *pages[], int nr_pages, pages 3272 fs/btrfs/extent_io.c struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); pages 3278 fs/btrfs/extent_io.c __do_readpage(tree, pages[index], btrfs_get_extent, em_cached, pages 3280 fs/btrfs/extent_io.c put_page(pages[index]); pages 3709 fs/btrfs/extent_io.c struct page *p = eb->pages[i]; pages 3731 fs/btrfs/extent_io.c unlock_page(eb->pages[i]); pages 3886 fs/btrfs/extent_io.c struct page *p = eb->pages[i]; pages 3911 fs/btrfs/extent_io.c struct page *p = eb->pages[i]; pages 3969 fs/btrfs/extent_io.c struct page *page = pvec.pages[i]; pages 4167 fs/btrfs/extent_io.c struct page *page = pvec.pages[i]; pages 4332 fs/btrfs/extent_io.c int extent_readpages(struct address_space *mapping, struct list_head *pages, pages 4343 fs/btrfs/extent_io.c while (!list_empty(pages)) { pages 4346 fs/btrfs/extent_io.c for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) { pages 4347 fs/btrfs/extent_io.c struct page *page = lru_to_page(pages); pages 4874 fs/btrfs/extent_io.c struct page *page = eb->pages[i]; pages 4980 fs/btrfs/extent_io.c new->pages[i] = p; pages 4981 fs/btrfs/extent_io.c copy_page(page_address(p), page_address(src->pages[i])); pages 5003 fs/btrfs/extent_io.c eb->pages[i] = alloc_page(GFP_NOFS); pages 5004 fs/btrfs/extent_io.c if (!eb->pages[i]) pages 5014 fs/btrfs/extent_io.c __free_page(eb->pages[i - 1]); pages 5067 fs/btrfs/extent_io.c struct page *p = eb->pages[i]; pages 5218 fs/btrfs/extent_io.c eb->pages[i] = p; pages 5261 fs/btrfs/extent_io.c unlock_page(eb->pages[i]); pages 5267 fs/btrfs/extent_io.c if (eb->pages[i]) pages 5268 fs/btrfs/extent_io.c unlock_page(eb->pages[i]); pages 5373 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5408 fs/btrfs/extent_io.c set_page_dirty(eb->pages[i]); pages 5412 fs/btrfs/extent_io.c ASSERT(PageDirty(eb->pages[i])); pages 5427 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5442 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5466 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5481 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5497 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5538 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5549 fs/btrfs/extent_io.c page = eb->pages[locked_pages]; pages 5576 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5608 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5661 fs/btrfs/extent_io.c p = eb->pages[i]; pages 5686 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5708 fs/btrfs/extent_io.c WARN_ON(!PageUptodate(eb->pages[0])); pages 5709 fs/btrfs/extent_io.c kaddr = page_address(eb->pages[0]); pages 5718 fs/btrfs/extent_io.c WARN_ON(!PageUptodate(eb->pages[0])); pages 5719 fs/btrfs/extent_io.c kaddr = page_address(eb->pages[0]); pages 5741 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5771 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5794 fs/btrfs/extent_io.c copy_page(page_address(dst->pages[i]), pages 5795 fs/btrfs/extent_io.c page_address(src->pages[i])); pages 5815 fs/btrfs/extent_io.c page = dst->pages[i]; pages 5878 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5903 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5914 fs/btrfs/extent_io.c page = eb->pages[++i]; pages 5945 fs/btrfs/extent_io.c page = eb->pages[i]; pages 5956 fs/btrfs/extent_io.c page = eb->pages[++i]; pages 6031 fs/btrfs/extent_io.c 
copy_pages(dst->pages[dst_i], dst->pages[src_i], pages 6078 fs/btrfs/extent_io.c copy_pages(dst->pages[dst_i], dst->pages[src_i], pages 188 fs/btrfs/extent_io.h struct page *pages[INLINE_EXTENT_BUFFER_PAGES]; pages 415 fs/btrfs/extent_io.h int extent_readpages(struct address_space *mapping, struct list_head *pages, pages 450 fs/btrfs/file.c static void btrfs_drop_pages(struct page **pages, size_t num_pages) pages 460 fs/btrfs/file.c ClearPageChecked(pages[i]); pages 461 fs/btrfs/file.c unlock_page(pages[i]); pages 462 fs/btrfs/file.c put_page(pages[i]); pages 515 fs/btrfs/file.c int btrfs_dirty_pages(struct inode *inode, struct page **pages, pages 567 fs/btrfs/file.c struct page *p = pages[i]; pages 1421 fs/btrfs/file.c static noinline int prepare_pages(struct inode *inode, struct page **pages, pages 1433 fs/btrfs/file.c pages[i] = find_or_create_page(inode->i_mapping, index + i, pages 1435 fs/btrfs/file.c if (!pages[i]) { pages 1442 fs/btrfs/file.c err = prepare_uptodate_page(inode, pages[i], pos, pages 1445 fs/btrfs/file.c err = prepare_uptodate_page(inode, pages[i], pages 1448 fs/btrfs/file.c put_page(pages[i]); pages 1456 fs/btrfs/file.c wait_on_page_writeback(pages[i]); pages 1462 fs/btrfs/file.c unlock_page(pages[faili]); pages 1463 fs/btrfs/file.c put_page(pages[faili]); pages 1481 fs/btrfs/file.c lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, pages 1511 fs/btrfs/file.c unlock_page(pages[i]); pages 1512 fs/btrfs/file.c put_page(pages[i]); pages 1542 fs/btrfs/file.c set_page_extent_mapped(pages[i]); pages 1543 fs/btrfs/file.c WARN_ON(!PageLocked(pages[i])); pages 1593 fs/btrfs/file.c struct page **pages = NULL; pages 1608 fs/btrfs/file.c pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL); pages 1609 fs/btrfs/file.c if (!pages) pages 1691 fs/btrfs/file.c ret = prepare_pages(inode, pages, num_pages, pages 1701 fs/btrfs/file.c BTRFS_I(inode), pages, pages 1713 fs/btrfs/file.c copied = btrfs_copy_from_user(pos, write_bytes, pages, i); pages 1760 fs/btrfs/file.c ret = btrfs_dirty_pages(inode, pages, dirty_pages, pages 1778 fs/btrfs/file.c btrfs_drop_pages(pages, num_pages); pages 1797 fs/btrfs/file.c btrfs_drop_pages(pages, num_pages); pages 1809 fs/btrfs/file.c kfree(pages); pages 318 fs/btrfs/free-space-cache.c io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); pages 319 fs/btrfs/free-space-cache.c if (!io_ctl->pages) pages 333 fs/btrfs/free-space-cache.c kfree(io_ctl->pages); pages 334 fs/btrfs/free-space-cache.c io_ctl->pages = NULL; pages 348 fs/btrfs/free-space-cache.c io_ctl->page = io_ctl->pages[io_ctl->index++]; pages 363 fs/btrfs/free-space-cache.c if (io_ctl->pages[i]) { pages 364 fs/btrfs/free-space-cache.c ClearPageChecked(io_ctl->pages[i]); pages 365 fs/btrfs/free-space-cache.c unlock_page(io_ctl->pages[i]); pages 366 fs/btrfs/free-space-cache.c put_page(io_ctl->pages[i]); pages 384 fs/btrfs/free-space-cache.c io_ctl->pages[i] = page; pages 404 fs/btrfs/free-space-cache.c clear_page_dirty_for_io(io_ctl->pages[i]); pages 405 fs/btrfs/free-space-cache.c set_page_extent_mapped(io_ctl->pages[i]); pages 480 fs/btrfs/free-space-cache.c tmp = page_address(io_ctl->pages[0]); pages 499 fs/btrfs/free-space-cache.c tmp = page_address(io_ctl->pages[0]); pages 1254 fs/btrfs/free-space-cache.c WARN_ON(io_ctl->pages); pages 1320 fs/btrfs/free-space-cache.c ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0, pages 42 fs/btrfs/free-space-cache.h struct page **pages; pages 358 fs/btrfs/inode.c struct page 
**pages; pages 384 fs/btrfs/inode.c struct page **pages, pages 395 fs/btrfs/inode.c async_extent->pages = pages; pages 479 fs/btrfs/inode.c struct page **pages = NULL; pages 546 fs/btrfs/inode.c WARN_ON(pages); pages 547 fs/btrfs/inode.c pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); pages 548 fs/btrfs/inode.c if (!pages) { pages 580 fs/btrfs/inode.c pages, pages 587 fs/btrfs/inode.c struct page *page = pages[nr_pages - 1]; pages 615 fs/btrfs/inode.c compress_type, pages); pages 644 fs/btrfs/inode.c WARN_ON(pages[i]->mapping); pages 645 fs/btrfs/inode.c put_page(pages[i]); pages 647 fs/btrfs/inode.c kfree(pages); pages 676 fs/btrfs/inode.c total_compressed, pages, nr_pages, pages 681 fs/btrfs/inode.c pages = NULL; pages 688 fs/btrfs/inode.c if (pages) { pages 694 fs/btrfs/inode.c WARN_ON(pages[i]->mapping); pages 695 fs/btrfs/inode.c put_page(pages[i]); pages 697 fs/btrfs/inode.c kfree(pages); pages 698 fs/btrfs/inode.c pages = NULL; pages 735 fs/btrfs/inode.c if (!async_extent->pages) pages 739 fs/btrfs/inode.c WARN_ON(async_extent->pages[i]->mapping); pages 740 fs/btrfs/inode.c put_page(async_extent->pages[i]); pages 742 fs/btrfs/inode.c kfree(async_extent->pages); pages 744 fs/btrfs/inode.c async_extent->pages = NULL; pages 775 fs/btrfs/inode.c if (!async_extent->pages) { pages 881 fs/btrfs/inode.c ins.offset, async_extent->pages, pages 884 fs/btrfs/inode.c struct page *p = async_extent->pages[0]; pages 8895 fs/btrfs/inode.c struct list_head *pages, unsigned nr_pages) pages 8897 fs/btrfs/inode.c return extent_readpages(mapping, pages, nr_pages); pages 11049 fs/btrfs/inode.c sis->pages = bsi.nr_pages - 1; pages 1235 fs/btrfs/ioctl.c struct page **pages, pages 1320 fs/btrfs/ioctl.c pages[i] = page; pages 1334 fs/btrfs/ioctl.c wait_on_page_writeback(pages[i]); pages 1336 fs/btrfs/ioctl.c page_start = page_offset(pages[0]); pages 1337 fs/btrfs/ioctl.c page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE; pages 1362 fs/btrfs/ioctl.c clear_page_dirty_for_io(pages[i]); pages 1363 fs/btrfs/ioctl.c ClearPageChecked(pages[i]); pages 1364 fs/btrfs/ioctl.c set_page_extent_mapped(pages[i]); pages 1365 fs/btrfs/ioctl.c set_page_dirty(pages[i]); pages 1366 fs/btrfs/ioctl.c unlock_page(pages[i]); pages 1367 fs/btrfs/ioctl.c put_page(pages[i]); pages 1374 fs/btrfs/ioctl.c unlock_page(pages[i]); pages 1375 fs/btrfs/ioctl.c put_page(pages[i]); pages 1408 fs/btrfs/ioctl.c struct page **pages = NULL; pages 1440 fs/btrfs/ioctl.c pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL); pages 1441 fs/btrfs/ioctl.c if (!pages) { pages 1529 fs/btrfs/ioctl.c ret = cluster_pages_for_defrag(inode, pages, i, cluster); pages 1592 fs/btrfs/ioctl.c kfree(pages); pages 137 fs/btrfs/lzo.c struct page **pages, pages 181 fs/btrfs/lzo.c pages[0] = out_page; pages 249 fs/btrfs/lzo.c pages[nr_pages++] = out_page; pages 285 fs/btrfs/lzo.c cpage_out = kmap(pages[0]); pages 288 fs/btrfs/lzo.c kunmap(pages[0]); pages 510 fs/btrfs/raid56.c static void run_xor(void **pages, int src_cnt, ssize_t len) pages 514 fs/btrfs/raid56.c void *dest = pages[src_cnt]; pages 518 fs/btrfs/raid56.c xor_blocks(xor_src_cnt, len, dest, pages + src_off); pages 23 fs/btrfs/tests/extent-io-tests.c struct page *pages[16]; pages 34 fs/btrfs/tests/extent-io-tests.c ARRAY_SIZE(pages)), pages); pages 37 fs/btrfs/tests/extent-io-tests.c !PageLocked(pages[i])) pages 39 fs/btrfs/tests/extent-io-tests.c if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) pages 40 fs/btrfs/tests/extent-io-tests.c unlock_page(pages[i]); pages 41 
fs/btrfs/tests/extent-io-tests.c put_page(pages[i]); pages 43 fs/btrfs/tests/extent-io-tests.c put_page(pages[i]); pages 231 fs/btrfs/tree-log.c return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, pages 237 fs/btrfs/tree-log.c filemap_fdatawait_range(buf->pages[0]->mapping, pages 7094 fs/btrfs/volumes.c SetPageUptodate(sb->pages[0]); pages 94 fs/btrfs/zlib.c struct page **pages, pages 133 fs/btrfs/zlib.c pages[0] = out_page; pages 175 fs/btrfs/zlib.c pages[nr_pages] = out_page; pages 373 fs/btrfs/zstd.c struct page **pages, pages 418 fs/btrfs/zstd.c pages[nr_pages++] = out_page; pages 465 fs/btrfs/zstd.c pages[nr_pages++] = out_page; pages 525 fs/btrfs/zstd.c pages[nr_pages++] = out_page; pages 1590 fs/buffer.c struct page *page = pvec.pages[i]; pages 683 fs/cachefiles/rdwr.c struct list_head *pages, pages 730 fs/cachefiles/rdwr.c list_for_each_entry_safe(page, _n, pages, lru) { pages 766 fs/cachefiles/rdwr.c if (list_empty(pages)) pages 778 fs/cachefiles/rdwr.c ret, *nr_pages, list_empty(pages) ? " empty" : ""); pages 835 fs/cachefiles/rdwr.c struct list_head *pages, pages 856 fs/cachefiles/rdwr.c list_for_each_entry(page, pages, lru) { pages 279 fs/ceph/addr.c struct page *page = osd_data->pages[i]; pages 300 fs/ceph/addr.c kfree(osd_data->pages); pages 319 fs/ceph/addr.c struct page **pages; pages 377 fs/ceph/addr.c pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL); pages 378 fs/ceph/addr.c if (!pages) { pages 403 fs/ceph/addr.c pages[i] = page; pages 405 fs/ceph/addr.c osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); pages 424 fs/ceph/addr.c ceph_fscache_readpage_cancel(inode, pages[i]); pages 425 fs/ceph/addr.c unlock_page(pages[i]); pages 427 fs/ceph/addr.c ceph_put_page_vector(pages, nr_pages, false); pages 737 fs/ceph/addr.c page = osd_data->pages[j]; pages 762 fs/ceph/addr.c release_pages(osd_data->pages, num_pages); pages 769 fs/ceph/addr.c mempool_free(osd_data->pages, pages 772 fs/ceph/addr.c kfree(osd_data->pages); pages 861 fs/ceph/addr.c struct page **pages = NULL, **data_pages; pages 877 fs/ceph/addr.c page = pvec.pages[i]; pages 958 fs/ceph/addr.c BUG_ON(pages); pages 960 fs/ceph/addr.c pages = kmalloc_array(max_pages, pages 961 fs/ceph/addr.c sizeof(*pages), pages 963 fs/ceph/addr.c if (!pages) { pages 965 fs/ceph/addr.c pages = mempool_alloc(pool, GFP_NOFS); pages 966 fs/ceph/addr.c BUG_ON(!pages); pages 996 fs/ceph/addr.c pages[locked_pages++] = page; pages 997 fs/ceph/addr.c pvec.pages[i] = NULL; pages 1009 fs/ceph/addr.c if (!pvec.pages[j]) pages 1012 fs/ceph/addr.c pvec.pages[n] = pvec.pages[j]; pages 1026 fs/ceph/addr.c offset = page_offset(pages[0]); pages 1047 fs/ceph/addr.c BUG_ON(len < page_offset(pages[locked_pages - 1]) + pages 1055 fs/ceph/addr.c data_pages = pages; pages 1058 fs/ceph/addr.c u64 cur_offset = page_offset(pages[i]); pages 1073 fs/ceph/addr.c data_pages = pages + i; pages 1077 fs/ceph/addr.c set_page_writeback(pages[i]); pages 1088 fs/ceph/addr.c len = get_writepages_data_length(inode, pages[i - 1], pages 1107 fs/ceph/addr.c data_pages = pages; pages 1108 fs/ceph/addr.c pages = kmalloc_array(locked_pages, sizeof(*pages), pages 1110 fs/ceph/addr.c if (!pages) { pages 1112 fs/ceph/addr.c pages = mempool_alloc(pool, GFP_NOFS); pages 1113 fs/ceph/addr.c BUG_ON(!pages); pages 1115 fs/ceph/addr.c memcpy(pages, data_pages + i, pages 1116 fs/ceph/addr.c locked_pages * sizeof(*pages)); pages 1118 fs/ceph/addr.c locked_pages * sizeof(*pages)); pages 1121 fs/ceph/addr.c index = pages[i - 1]->index + 1; pages 
1123 fs/ceph/addr.c pages = NULL; pages 1132 fs/ceph/addr.c if (pages) pages 1146 fs/ceph/addr.c pvec.nr ? pvec.pages[0] : NULL); pages 1167 fs/ceph/addr.c page = pvec.pages[i]; pages 1822 fs/ceph/addr.c struct page **pages; pages 1922 fs/ceph/addr.c pages = ceph_alloc_page_vector(1, GFP_KERNEL); pages 1923 fs/ceph/addr.c if (IS_ERR(pages)) { pages 1924 fs/ceph/addr.c err = PTR_ERR(pages); pages 1928 fs/ceph/addr.c osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE, pages 254 fs/ceph/cache.c struct list_head *pages, pages 263 fs/ceph/cache.c ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, pages 30 fs/ceph/cache.h struct list_head *pages, pages 69 fs/ceph/cache.h struct list_head *pages) pages 72 fs/ceph/cache.h return fscache_readpages_cancel(ci->fscache, pages); pages 122 fs/ceph/cache.h struct page *pages) pages 134 fs/ceph/cache.h struct list_head *pages, pages 165 fs/ceph/cache.h struct list_head *pages) pages 91 fs/ceph/file.c struct page *pages[ITER_GET_BVECS_PAGES]; pages 96 fs/ceph/file.c bytes = iov_iter_get_pages(iter, pages, maxsize - size, pages 106 fs/ceph/file.c .bv_page = pages[idx], pages 613 fs/ceph/file.c struct page **pages; pages 632 fs/ceph/file.c ret = iov_iter_get_pages_alloc(to, &pages, len, pages 648 fs/ceph/file.c pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); pages 649 fs/ceph/file.c if (IS_ERR(pages)) { pages 651 fs/ceph/file.c ret = PTR_ERR(pages); pages 656 fs/ceph/file.c osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off, pages 674 fs/ceph/file.c ceph_zero_page_vector_range(zoff, zlen, pages); pages 685 fs/ceph/file.c ceph_put_page_vector(pages, num_pages, false); pages 693 fs/ceph/file.c copied = copy_page_to_iter(pages[idx++], pages 702 fs/ceph/file.c ceph_release_page_vector(pages, num_pages); pages 1138 fs/ceph/file.c struct page **pages; pages 1189 fs/ceph/file.c pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); pages 1190 fs/ceph/file.c if (IS_ERR(pages)) { pages 1191 fs/ceph/file.c ret = PTR_ERR(pages); pages 1198 fs/ceph/file.c ret = copy_page_from_iter(pages[n], 0, plen, from); pages 1207 fs/ceph/file.c ceph_release_page_vector(pages, num_pages); pages 1213 fs/ceph/file.c osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, pages 1335 fs/cifs/cifsglob.h struct page **pages; pages 1361 fs/cifs/cifsglob.h struct page **pages; pages 561 fs/cifs/cifsproto.h struct cifs_writedata *cifs_writedata_direct_alloc(struct page **pages, pages 1700 fs/cifs/cifssmb.c .rq_pages = rdata->pages, pages 2059 fs/cifs/cifssmb.c kvfree(wdata->pages); pages 2104 fs/cifs/cifssmb.c wdata2->pages[j] = wdata->pages[i + j]; pages 2105 fs/cifs/cifssmb.c lock_page(wdata2->pages[j]); pages 2106 fs/cifs/cifssmb.c clear_page_dirty_for_io(wdata2->pages[j]); pages 2111 fs/cifs/cifssmb.c wdata2->offset = page_offset(wdata2->pages[0]); pages 2130 fs/cifs/cifssmb.c unlock_page(wdata2->pages[j]); pages 2132 fs/cifs/cifssmb.c SetPageError(wdata2->pages[j]); pages 2133 fs/cifs/cifssmb.c end_page_writeback(wdata2->pages[j]); pages 2134 fs/cifs/cifssmb.c put_page(wdata2->pages[j]); pages 2152 fs/cifs/cifssmb.c SetPageError(wdata->pages[i]); pages 2153 fs/cifs/cifssmb.c end_page_writeback(wdata->pages[i]); pages 2154 fs/cifs/cifssmb.c put_page(wdata->pages[i]); pages 2180 fs/cifs/cifssmb.c struct page *page = wdata->pages[i]; pages 2196 fs/cifs/cifssmb.c struct page **pages = pages 2198 fs/cifs/cifssmb.c if (pages) pages 2199 fs/cifs/cifssmb.c return cifs_writedata_direct_alloc(pages, complete); pages 2205 fs/cifs/cifssmb.c 
cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) pages 2211 fs/cifs/cifssmb.c wdata->pages = pages; pages 2319 fs/cifs/cifssmb.c rqst.rq_pages = wdata->pages; pages 2165 fs/cifs/file.c PAGECACHE_TAG_DIRTY, tofind, wdata->pages); pages 2179 fs/cifs/file.c page = wdata->pages[i]; pages 2230 fs/cifs/file.c wdata->pages[i] = page; pages 2237 fs/cifs/file.c *index = wdata->pages[0]->index + 1; pages 2241 fs/cifs/file.c put_page(wdata->pages[i]); pages 2242 fs/cifs/file.c wdata->pages[i] = NULL; pages 2258 fs/cifs/file.c wdata->offset = page_offset(wdata->pages[0]); pages 2261 fs/cifs/file.c page_offset(wdata->pages[nr_pages - 1]), pages 2377 fs/cifs/file.c unlock_page(wdata->pages[i]); pages 2385 fs/cifs/file.c wdata->pages[i]); pages 2387 fs/cifs/file.c SetPageError(wdata->pages[i]); pages 2388 fs/cifs/file.c end_page_writeback(wdata->pages[i]); pages 2389 fs/cifs/file.c put_page(wdata->pages[i]); pages 2638 fs/cifs/file.c cifs_write_allocate_pages(struct page **pages, unsigned long num_pages) pages 2644 fs/cifs/file.c pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); pages 2645 fs/cifs/file.c if (!pages[i]) { pages 2658 fs/cifs/file.c put_page(pages[i]); pages 2687 fs/cifs/file.c put_page(wdata->pages[i]); pages 2723 fs/cifs/file.c copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from); pages 2913 fs/cifs/file.c rc = cifs_write_allocate_pages(wdata->pages, nr_pages); pages 2915 fs/cifs/file.c kvfree(wdata->pages); pages 2926 fs/cifs/file.c put_page(wdata->pages[i]); pages 2927 fs/cifs/file.c kvfree(wdata->pages); pages 2938 fs/cifs/file.c put_page(wdata->pages[nr_pages - 1]); pages 3268 fs/cifs/file.c cifs_readdata_direct_alloc(struct page **pages, work_func_t complete) pages 3274 fs/cifs/file.c rdata->pages = pages; pages 3287 fs/cifs/file.c struct page **pages = pages 3291 fs/cifs/file.c if (pages) { pages 3292 fs/cifs/file.c ret = cifs_readdata_direct_alloc(pages, complete); pages 3294 fs/cifs/file.c kfree(pages); pages 3314 fs/cifs/file.c kvfree(rdata->pages); pages 3331 fs/cifs/file.c rdata->pages[i] = page; pages 3338 fs/cifs/file.c put_page(rdata->pages[i]); pages 3339 fs/cifs/file.c rdata->pages[i] = NULL; pages 3354 fs/cifs/file.c put_page(rdata->pages[i]); pages 3375 fs/cifs/file.c struct page *page = rdata->pages[i]; pages 3420 fs/cifs/file.c struct page *page = rdata->pages[i]; pages 3432 fs/cifs/file.c rdata->pages[i] = NULL; pages 3637 fs/cifs/file.c kvfree(rdata->pages); pages 4100 fs/cifs/file.c struct page *page = rdata->pages[i]; pages 4119 fs/cifs/file.c rdata->pages[i] = NULL; pages 4144 fs/cifs/file.c struct page *page = rdata->pages[i]; pages 4177 fs/cifs/file.c rdata->pages[i] = NULL; pages 4185 fs/cifs/file.c rdata->pages[i] = NULL; pages 4399 fs/cifs/file.c rdata->pages[rdata->nr_pages++] = page; pages 4414 fs/cifs/file.c page = rdata->pages[i]; pages 277 fs/cifs/fscache.c struct list_head *pages, pages 285 fs/cifs/fscache.c pages, nr_pages, pages 319 fs/cifs/fscache.c void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) pages 323 fs/cifs/fscache.c fscache_readpages_cancel(CIFS_I(inode)->fscache, pages); pages 94 fs/cifs/fscache.h struct list_head *pages, pages 98 fs/cifs/fscache.h return __cifs_readpages_from_fscache(inode, mapping, pages, pages 111 fs/cifs/fscache.h struct list_head *pages) pages 114 fs/cifs/fscache.h return __cifs_fscache_readpages_cancel(inode, pages); pages 148 fs/cifs/fscache.h struct list_head *pages, pages 158 fs/cifs/fscache.h struct list_head *pages) pages 850 fs/cifs/misc.c struct 
page **pages = NULL; pages 871 fs/cifs/misc.c pages = kmalloc_array(max_pages, sizeof(struct page *), pages 874 fs/cifs/misc.c if (!pages) { pages 875 fs/cifs/misc.c pages = vmalloc(array_size(max_pages, sizeof(struct page *))); pages 876 fs/cifs/misc.c if (!pages) { pages 885 fs/cifs/misc.c rc = iov_iter_get_pages(iter, pages, count, max_pages, &start); pages 910 fs/cifs/misc.c bv[npages + i].bv_page = pages[i]; pages 920 fs/cifs/misc.c kvfree(pages); pages 3788 fs/cifs/smb2ops.c struct page **pages; pages 3797 fs/cifs/smb2ops.c pages = kmalloc_array(npages, sizeof(struct page *), pages 3799 fs/cifs/smb2ops.c if (!pages) pages 3802 fs/cifs/smb2ops.c new_rq[i].rq_pages = pages; pages 3813 fs/cifs/smb2ops.c pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); pages 3814 fs/cifs/smb2ops.c if (!pages[j]) pages 3859 fs/cifs/smb2ops.c unsigned int buf_data_size, struct page **pages, pages 3873 fs/cifs/smb2ops.c rqst.rq_pages = pages; pages 3892 fs/cifs/smb2ops.c read_data_into_pages(struct TCP_Server_Info *server, struct page **pages, pages 3899 fs/cifs/smb2ops.c struct page *page = pages[i]; pages 3921 fs/cifs/smb2ops.c init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size, pages 3932 fs/cifs/smb2ops.c bvec[i].bv_page = pages[i]; pages 3950 fs/cifs/smb2ops.c char *buf, unsigned int buf_len, struct page **pages, pages 4049 fs/cifs/smb2ops.c rdata->result = init_read_bvec(pages, npages, page_data_size, pages 4136 fs/cifs/smb2ops.c struct page **pages; pages 4156 fs/cifs/smb2ops.c pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); pages 4157 fs/cifs/smb2ops.c if (!pages) { pages 4163 fs/cifs/smb2ops.c pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); pages 4164 fs/cifs/smb2ops.c if (!pages[i]) { pages 4171 fs/cifs/smb2ops.c rc = read_data_into_pages(server, pages, npages, len); pages 4197 fs/cifs/smb2ops.c dw->ppages = pages; pages 4206 fs/cifs/smb2ops.c pages, npages, len); pages 4218 fs/cifs/smb2ops.c pages, npages, len); pages 4223 fs/cifs/smb2ops.c put_page(pages[i]); pages 4224 fs/cifs/smb2ops.c kfree(pages); pages 3577 fs/cifs/smb2pdu.c server->smbd_conn, rdata->pages, pages 3637 fs/cifs/smb2pdu.c .rq_pages = rdata->pages, pages 3985 fs/cifs/smb2pdu.c server->smbd_conn, wdata->pages, pages 4020 fs/cifs/smb2pdu.c rqst.rq_pages = wdata->pages; pages 2447 fs/cifs/smbdirect.c struct smbd_connection *info, struct page *pages[], int num_pages, pages 2474 fs/cifs/smbdirect.c sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset); pages 2480 fs/cifs/smbdirect.c &smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset); pages 2483 fs/cifs/smbdirect.c sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0); pages 2486 fs/cifs/smbdirect.c sg_set_page(&smbdirect_mr->sgl[i], pages[i], pages 307 fs/cifs/smbdirect.h struct smbd_connection *info, struct page *pages[], int num_pages, pages 186 fs/cramfs/inode.c struct page *pages[BLKS_PER_BUF]; pages 224 fs/cramfs/inode.c pages[i] = page; pages 228 fs/cramfs/inode.c struct page *page = pages[i]; pages 235 fs/cramfs/inode.c pages[i] = NULL; pages 247 fs/cramfs/inode.c struct page *page = pages[i]; pages 297 fs/cramfs/inode.c static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages) pages 318 fs/cramfs/inode.c pgoff+i, pgoff + *pages - 1, pages 324 fs/cramfs/inode.c } while (++i < *pages); pages 326 fs/cramfs/inode.c *pages = i; pages 359 fs/cramfs/inode.c unsigned int pages, max_pages, offset; pages 382 fs/cramfs/inode.c pages = min(vma_pages(vma), max_pages - pgoff); pages 384 
fs/cramfs/inode.c offset = cramfs_get_block_range(inode, pgoff, &pages); pages 394 fs/cramfs/inode.c if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) { pages 397 fs/cramfs/inode.c pages--; pages 400 fs/cramfs/inode.c if (!pages) { pages 405 fs/cramfs/inode.c if (pages == vma_pages(vma)) { pages 413 fs/cramfs/inode.c pages * PAGE_SIZE, vma->vm_page_prot); pages 422 fs/cramfs/inode.c for (i = 0; i < pages && !ret; i++) { pages 436 fs/cramfs/inode.c address, pages, vma_pages(vma), vma->vm_start, pages 461 fs/cramfs/inode.c unsigned int pages, block_pages, max_pages, offset; pages 463 fs/cramfs/inode.c pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; pages 465 fs/cramfs/inode.c if (pgoff >= max_pages || pages > max_pages - pgoff) pages 467 fs/cramfs/inode.c block_pages = pages; pages 469 fs/cramfs/inode.c if (!offset || block_pages != pages) pages 149 fs/direct-io.c struct page *pages[DIO_PAGES]; /* page buffer */ pages 171 fs/direct-io.c ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, pages 184 fs/direct-io.c dio->pages[0] = page; pages 220 fs/direct-io.c return dio->pages[sdio->head]; pages 494 fs/direct-io.c put_page(dio->pages[sdio->head++]); pages 1208 fs/direct-io.c memset(dio, 0, offsetof(struct dio, pages)); pages 285 fs/erofs/data.c struct list_head *pages, pages 291 fs/erofs/data.c struct page *page = list_last_entry(pages, struct page, lru); pages 296 fs/erofs/data.c page = list_entry(pages->prev, struct page, lru); pages 318 fs/erofs/data.c DBG_BUGON(!list_empty(pages)); pages 171 fs/erofs/zdata.c struct page **pages = clt->compressedpages; pages 172 fs/erofs/zdata.c pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); pages 178 fs/erofs/zdata.c for (; pages < pcl->compressed_pages + clusterpages; ++pages) { pages 183 fs/erofs/zdata.c if (READ_ONCE(*pages)) pages 194 fs/erofs/zdata.c clt->compressedpages = pages; pages 199 fs/erofs/zdata.c if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) pages 764 fs/erofs/zdata.c struct page **pages, **compressed_pages, *page; pages 779 fs/erofs/zdata.c pages = pages_onstack; pages 782 fs/erofs/zdata.c pages = z_pagemap_global; pages 789 fs/erofs/zdata.c pages = kvmalloc_array(nr_pages, sizeof(struct page *), pages 793 fs/erofs/zdata.c if (!pages) { pages 795 fs/erofs/zdata.c pages = z_pagemap_global; pages 800 fs/erofs/zdata.c pages[i] = NULL; pages 829 fs/erofs/zdata.c if (pages[pagenr]) { pages 831 fs/erofs/zdata.c SetPageError(pages[pagenr]); pages 832 fs/erofs/zdata.c z_erofs_onlinepage_endio(pages[pagenr]); pages 835 fs/erofs/zdata.c pages[pagenr] = page; pages 865 fs/erofs/zdata.c if (pages[pagenr]) { pages 867 fs/erofs/zdata.c SetPageError(pages[pagenr]); pages 868 fs/erofs/zdata.c z_erofs_onlinepage_endio(pages[pagenr]); pages 871 fs/erofs/zdata.c pages[pagenr] = page; pages 898 fs/erofs/zdata.c .out = pages, pages 922 fs/erofs/zdata.c page = pages[i]; pages 938 fs/erofs/zdata.c if (pages == z_pagemap_global) pages 940 fs/erofs/zdata.c else if (pages != pages_onstack) pages 941 fs/erofs/zdata.c kvfree(pages); pages 1369 fs/erofs/zdata.c struct list_head *pages, pages 1381 fs/erofs/zdata.c trace_erofs_readpages(mapping->host, lru_to_page(pages), pages 1384 fs/erofs/zdata.c f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; pages 1387 fs/erofs/zdata.c struct page *page = lru_to_page(pages); pages 32 fs/erofs/zpvec.h erofs_vtptr_t *pages; pages 44 fs/erofs/zpvec.h kunmap_atomic(ctor->pages); pages 60 fs/erofs/zpvec.h const erofs_vtptr_t t = ctor->pages[index]; pages 
80 fs/erofs/zpvec.h ctor->pages = atomic ? pages 89 fs/erofs/zpvec.h erofs_vtptr_t *pages, pages 94 fs/erofs/zpvec.h ctor->pages = pages; pages 130 fs/erofs/zpvec.h ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); pages 145 fs/erofs/zpvec.h t = ctor->pages[ctor->index]; pages 153 fs/erofs/zpvec.h ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); pages 183 fs/exec.c static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) pages 186 fs/exec.c long diff = (long)(pages - bprm->vma_pages); pages 191 fs/exec.c bprm->vma_pages = pages; pages 295 fs/exec.c static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) pages 882 fs/ext2/inode.c struct list_head *pages, unsigned nr_pages) pages 884 fs/ext2/inode.c return mpage_readpages(mapping, pages, nr_pages, ext2_get_block); pages 3240 fs/ext4/ext4.h struct list_head *pages, struct page *page, pages 1717 fs/ext4/inode.c struct page *page = pvec.pages[i]; pages 2390 fs/ext4/inode.c struct page *page = pvec.pages[i]; pages 2660 fs/ext4/inode.c struct page *page = pvec.pages[i]; pages 3343 fs/ext4/inode.c struct list_head *pages, unsigned nr_pages) pages 3351 fs/ext4/inode.c return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true); pages 226 fs/ext4/readpage.c struct list_head *pages, struct page *page, pages 255 fs/ext4/readpage.c if (pages) { pages 256 fs/ext4/readpage.c page = lru_to_page(pages); pages 416 fs/ext4/readpage.c if (pages) pages 419 fs/ext4/readpage.c BUG_ON(pages && !list_empty(pages)); pages 384 fs/f2fs/checkpoint.c struct page *page = pvec.pages[i]; pages 1766 fs/f2fs/data.c struct list_head *pages, struct page *page, pages 1785 fs/f2fs/data.c if (pages) { pages 1786 fs/f2fs/data.c page = list_last_entry(pages, struct page, lru); pages 1804 fs/f2fs/data.c if (pages) pages 1807 fs/f2fs/data.c BUG_ON(pages && !list_empty(pages)); pages 1810 fs/f2fs/data.c return pages ? 
0 : ret; pages 1831 fs/f2fs/data.c struct list_head *pages, unsigned nr_pages) pages 1834 fs/f2fs/data.c struct page *page = list_last_entry(pages, struct page, lru); pages 1842 fs/f2fs/data.c return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true); pages 2306 fs/f2fs/data.c struct page *page = pvec.pages[i]; pages 3116 fs/f2fs/data.c sis->pages = page_no - 1; pages 974 fs/f2fs/node.c struct page *pages[2]; pages 988 fs/f2fs/node.c pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]); pages 989 fs/f2fs/node.c if (IS_ERR(pages[i])) { pages 990 fs/f2fs/node.c err = PTR_ERR(pages[i]); pages 994 fs/f2fs/node.c nid[i + 1] = get_nid(pages[i], offset[i + 1], false); pages 997 fs/f2fs/node.c f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); pages 1001 fs/f2fs/node.c child_nid = get_nid(pages[idx], i, false); pages 1008 fs/f2fs/node.c if (set_nid(pages[idx], i, 0, false)) pages 1013 fs/f2fs/node.c dn->node_page = pages[idx]; pages 1019 fs/f2fs/node.c f2fs_put_page(pages[idx], 1); pages 1026 fs/f2fs/node.c f2fs_put_page(pages[i], 1); pages 1457 fs/f2fs/node.c struct page *page = pvec.pages[i]; pages 1676 fs/f2fs/node.c struct page *page = pvec.pages[i]; pages 1829 fs/f2fs/node.c struct page *page = pvec.pages[i]; pages 213 fs/fat/inode.c struct list_head *pages, unsigned nr_pages) pages 215 fs/fat/inode.c return mpage_readpages(mapping, pages, nr_pages, fat_get_block); pages 1587 fs/fs-writeback.c long pages; pages 1603 fs/fs-writeback.c pages = LONG_MAX; pages 1605 fs/fs-writeback.c pages = min(wb->avg_write_bandwidth / 2, pages 1607 fs/fs-writeback.c pages = min(pages, work->nr_pages); pages 1608 fs/fs-writeback.c pages = round_down(pages + MIN_WRITEBACK_PAGES, pages 1612 fs/fs-writeback.c return pages; pages 560 fs/fscache/page.c struct list_head *pages, pages 585 fs/fscache/page.c ASSERT(!list_empty(pages)); pages 627 fs/fscache/page.c op, pages, nr_pages, gfp); pages 632 fs/fscache/page.c op, pages, nr_pages, gfp); pages 766 fs/fscache/page.c struct list_head *pages) pages 770 fs/fscache/page.c list_for_each_entry(page, pages, lru) { pages 1205 fs/fscache/page.c fscache_mark_page_cached(op, pagevec->pages[loop]); pages 1236 fs/fscache/page.c struct page *page = pvec.pages[i]; pages 323 fs/fuse/cuse.c struct page *page = ap->pages[0]; pages 457 fs/fuse/cuse.c ap->pages = &ia->page; pages 961 fs/fuse/dev.c err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); pages 1631 fs/fuse/dev.c release_pages(ra->ap.pages, ra->ap.num_pages); pages 1663 fs/fuse/dev.c args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0])); pages 1670 fs/fuse/dev.c ap->pages = (void *) (ra + 1); pages 1671 fs/fuse/dev.c ap->descs = (void *) (ap->pages + num_pages); pages 1691 fs/fuse/dev.c ap->pages[ap->num_pages] = page; pages 1216 fs/fuse/dir.c .pages = &page, pages 25 fs/fuse/file.c struct page **pages; pages 27 fs/fuse/file.c pages = kzalloc(npages * (sizeof(struct page *) + pages 29 fs/fuse/file.c *desc = (void *) (pages + npages); pages 31 fs/fuse/file.c return pages; pages 578 fs/fuse/file.c set_page_dirty_lock(ap->pages[i]); pages 579 fs/fuse/file.c put_page(ap->pages[i]); pages 657 fs/fuse/file.c ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL, pages 659 fs/fuse/file.c if (!ia->ap.pages) { pages 669 fs/fuse/file.c kfree(ia->ap.pages); pages 772 fs/fuse/file.c zero_user_segment(ap->pages[i], off, PAGE_SIZE); pages 776 fs/fuse/file.c loff_t pos = page_offset(ap->pages[0]) + num_read; pages 791 fs/fuse/file.c .ap.pages = &page, pages 852 fs/fuse/file.c mapping 
= ap->pages[i]->mapping; pages 867 fs/fuse/file.c struct page *page = ap->pages[i]; pages 887 fs/fuse/file.c loff_t pos = page_offset(ap->pages[0]); pages 939 fs/fuse/file.c ap->pages[ap->num_pages - 1]->index + 1 != page->index)) { pages 958 fs/fuse/file.c ap->pages[ap->num_pages] = page; pages 966 fs/fuse/file.c struct list_head *pages, unsigned nr_pages) pages 987 fs/fuse/file.c err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); pages 1109 fs/fuse/file.c fuse_wait_on_page_writeback(inode, ap->pages[i]->index); pages 1121 fs/fuse/file.c struct page *page = ap->pages[i]; pages 1186 fs/fuse/file.c ap->pages[ap->num_pages] = page; pages 1233 fs/fuse/file.c ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs); pages 1234 fs/fuse/file.c if (!ap->pages) { pages 1256 fs/fuse/file.c kfree(ap->pages); pages 1390 fs/fuse/file.c ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages], pages 1599 fs/fuse/file.c __free_page(ap->pages[i]); pages 1604 fs/fuse/file.c kfree(ap->pages); pages 1620 fs/fuse/file.c dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); pages 1804 fs/fuse/file.c ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs); pages 1805 fs/fuse/file.c if (!ap->pages) { pages 1848 fs/fuse/file.c ap->pages[0] = tmp_page; pages 1912 fs/fuse/file.c struct page **pages; pages 1920 fs/fuse/file.c pages = fuse_pages_alloc(npages, GFP_NOFS, &descs); pages 1921 fs/fuse/file.c if (!pages) pages 1924 fs/fuse/file.c memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages); pages 1926 fs/fuse/file.c kfree(ap->pages); pages 1927 fs/fuse/file.c ap->pages = pages; pages 1986 fs/fuse/file.c swap(tmp->ia.ap.pages[0], new_ap->pages[0]); pages 2002 fs/fuse/file.c dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP); pages 2096 fs/fuse/file.c ap->pages[ap->num_pages] = tmp_page; pages 2747 fs/fuse/file.c ap.pages = fuse_pages_alloc(fc->max_pages, GFP_KERNEL, &ap.descs); pages 2749 fs/fuse/file.c if (!ap.pages || !iov_page) pages 2791 fs/fuse/file.c ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); pages 2792 fs/fuse/file.c if (!ap.pages[ap.num_pages]) pages 2812 fs/fuse/file.c c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii); pages 2852 fs/fuse/file.c vaddr = kmap_atomic(ap.pages[0]); pages 2881 fs/fuse/file.c c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii); pages 2889 fs/fuse/file.c __free_page(ap.pages[--ap.num_pages]); pages 2890 fs/fuse/file.c kfree(ap.pages); pages 259 fs/fuse/fuse_i.h struct page **pages; pages 337 fs/fuse/readdir.c ap->pages = &page; pages 490 fs/fuse/virtio_fs.c page = ap->pages[i]; pages 827 fs/fuse/virtio_fs.c struct page **pages, pages 838 fs/fuse/virtio_fs.c sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset); pages 864 fs/fuse/virtio_fs.c ap->pages, ap->descs, pages 255 fs/gfs2/aops.c struct page *page = pvec->pages[i]; pages 600 fs/gfs2/aops.c struct list_head *pages, unsigned nr_pages) pages 613 fs/gfs2/aops.c ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); pages 129 fs/hpfs/file.c struct list_head *pages, unsigned nr_pages) pages 131 fs/hpfs/file.c return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); pages 120 fs/hugetlbfs/inode.c put_page(pvec->pages[i]); pages 439 fs/hugetlbfs/inode.c struct page *page = pvec.pages[i]; pages 3386 fs/io_uring.c size_t pages; pages 3388 fs/io_uring.c pages = (size_t)1 << get_order( pages 3390 fs/io_uring.c pages += (size_t)1 << get_order( pages 3393 fs/io_uring.c return pages; pages 3450 fs/io_uring.c struct page **pages = NULL; pages 
3500 fs/io_uring.c if (!pages || nr_pages > got_pages) { pages 3502 fs/io_uring.c kvfree(pages); pages 3503 fs/io_uring.c pages = kvmalloc_array(nr_pages, sizeof(struct page *), pages 3508 fs/io_uring.c if (!pages || !vmas) { pages 3530 fs/io_uring.c pages, vmas); pages 3552 fs/io_uring.c put_user_pages(pages, pret); pages 3565 fs/io_uring.c imu->bvec[j].bv_page = pages[j]; pages 3578 fs/io_uring.c kvfree(pages); pages 3582 fs/io_uring.c kvfree(pages); pages 183 fs/iomap/buffered-io.c struct list_head *pages; pages 325 fs/iomap/buffered-io.c iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, pages 328 fs/iomap/buffered-io.c while (!list_empty(pages)) { pages 329 fs/iomap/buffered-io.c struct page *page = lru_to_page(pages); pages 367 fs/iomap/buffered-io.c ctx->cur_page = iomap_next_page(inode, ctx->pages, pages 381 fs/iomap/buffered-io.c iomap_readpages(struct address_space *mapping, struct list_head *pages, pages 385 fs/iomap/buffered-io.c .pages = pages, pages 388 fs/iomap/buffered-io.c loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); pages 389 fs/iomap/buffered-io.c loff_t last = page_offset(list_entry(pages->next, struct page, lru)); pages 416 fs/iomap/buffered-io.c WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); pages 96 fs/iomap/seek.c struct page *page = pvec.pages[i]; pages 174 fs/iomap/swapfile.c sis->pages = isi.nr_pages - 1; pages 42 fs/isofs/compress.c struct page **pages, unsigned poffset, pages 68 fs/isofs/compress.c if (!pages[i]) pages 70 fs/isofs/compress.c memset(page_address(pages[i]), 0, PAGE_SIZE); pages 71 fs/isofs/compress.c flush_dcache_page(pages[i]); pages 72 fs/isofs/compress.c SetPageUptodate(pages[i]); pages 122 fs/isofs/compress.c if (pages[curpage]) { pages 123 fs/isofs/compress.c stream.next_out = page_address(pages[curpage]) pages 175 fs/isofs/compress.c if (pages[curpage]) { pages 176 fs/isofs/compress.c flush_dcache_page(pages[curpage]); pages 177 fs/isofs/compress.c SetPageUptodate(pages[curpage]); pages 202 fs/isofs/compress.c struct page **pages) pages 217 fs/isofs/compress.c BUG_ON(!pages[full_page]); pages 224 fs/isofs/compress.c start_off = page_offset(pages[full_page]); pages 263 fs/isofs/compress.c pcount, pages, poffset, &err); pages 265 fs/isofs/compress.c pages += poffset >> PAGE_SHIFT; pages 285 fs/isofs/compress.c if (poffset && *pages) { pages 286 fs/isofs/compress.c memset(page_address(*pages) + poffset, 0, pages 288 fs/isofs/compress.c flush_dcache_page(*pages); pages 289 fs/isofs/compress.c SetPageUptodate(*pages); pages 309 fs/isofs/compress.c struct page **pages; pages 334 fs/isofs/compress.c pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1), pages 335 fs/isofs/compress.c sizeof(*pages), GFP_KERNEL); pages 336 fs/isofs/compress.c if (!pages) { pages 340 fs/isofs/compress.c pages[full_page] = page; pages 344 fs/isofs/compress.c pages[i] = grab_cache_page_nowait(mapping, index); pages 345 fs/isofs/compress.c if (pages[i]) { pages 346 fs/isofs/compress.c ClearPageError(pages[i]); pages 347 fs/isofs/compress.c kmap(pages[i]); pages 351 fs/isofs/compress.c err = zisofs_fill_pages(inode, full_page, pcount, pages); pages 355 fs/isofs/compress.c if (pages[i]) { pages 356 fs/isofs/compress.c flush_dcache_page(pages[i]); pages 358 fs/isofs/compress.c SetPageError(pages[i]); pages 359 fs/isofs/compress.c kunmap(pages[i]); pages 360 fs/isofs/compress.c unlock_page(pages[i]); pages 362 fs/isofs/compress.c put_page(pages[i]); pages 367 fs/isofs/compress.c kfree(pages); pages 1189 
fs/isofs/inode.c struct list_head *pages, unsigned nr_pages) pages 1191 fs/isofs/inode.c return mpage_readpages(mapping, pages, nr_pages, isofs_get_block); pages 300 fs/jfs/inode.c struct list_head *pages, unsigned nr_pages) pages 302 fs/jfs/inode.c return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); pages 385 fs/mpage.c mpage_readpages(struct address_space *mapping, struct list_head *pages, pages 395 fs/mpage.c struct page *page = lru_to_page(pages); pages 408 fs/mpage.c BUG_ON(!list_empty(pages)); pages 264 fs/nfs/blocklayout/blocklayout.c struct page **pages = header->args.pages; pages 310 fs/nfs/blocklayout/blocklayout.c zero_user_segment(pages[i], pg_offset, pg_len); pages 318 fs/nfs/blocklayout/blocklayout.c isect, pages[i], &map, &be, pages 408 fs/nfs/blocklayout/blocklayout.c struct page **pages = header->args.pages; pages 446 fs/nfs/blocklayout/blocklayout.c WRITE, isect, pages[i], &map, &be, pages 699 fs/nfs/blocklayout/blocklayout.c lgr->layoutp->pages, lgr->layoutp->len); pages 512 fs/nfs/blocklayout/dev.c xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen); pages 345 fs/nfs/dir.c int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc, pages 356 fs/nfs/dir.c error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages, pages 586 fs/nfs/dir.c void nfs_readdir_free_pages(struct page **pages, unsigned int npages) pages 590 fs/nfs/dir.c put_page(pages[i]); pages 598 fs/nfs/dir.c int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages) pages 606 fs/nfs/dir.c pages[i] = page; pages 611 fs/nfs/dir.c nfs_readdir_free_pages(pages, i); pages 618 fs/nfs/dir.c struct page *pages[NFS_MAX_READDIR_PAGES]; pages 623 fs/nfs/dir.c unsigned int array_size = ARRAY_SIZE(pages); pages 644 fs/nfs/dir.c status = nfs_readdir_alloc_pages(pages, array_size); pages 649 fs/nfs/dir.c status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode); pages 654 fs/nfs/dir.c status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen); pages 662 fs/nfs/dir.c nfs_readdir_free_pages(pages, array_size); pages 279 fs/nfs/direct.c static void nfs_direct_release_pages(struct page **pages, unsigned int npages) pages 283 fs/nfs/direct.c put_page(pages[i]); pages 400 fs/nfs/direct.c while (!list_empty(&hdr->pages)) { pages 401 fs/nfs/direct.c struct nfs_page *req = nfs_list_entry(hdr->pages.next); pages 683 fs/nfs/direct.c while (!list_empty(&data->pages)) { pages 684 fs/nfs/direct.c req = nfs_list_entry(data->pages.next); pages 761 fs/nfs/direct.c struct nfs_page *req = nfs_list_entry(hdr->pages.next); pages 790 fs/nfs/direct.c while (!list_empty(&hdr->pages)) { pages 792 fs/nfs/direct.c req = nfs_list_entry(hdr->pages.next); pages 494 fs/nfs/file.c *span = sis->pages; pages 667 fs/nfs/filelayout/filelayout.c xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len); pages 84 fs/nfs/filelayout/filelayoutdev.c xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen); pages 383 fs/nfs/flexfilelayout/flexfilelayout.c xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, pages 1680 fs/nfs/flexfilelayout/flexfilelayout.c list_for_each_entry(req, &cdata->pages, wb_list) pages 2114 fs/nfs/flexfilelayout/flexfilelayout.c .iov_base = page_address(ff_args->pages[0]), pages 2131 fs/nfs/flexfilelayout/flexfilelayout.c xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len); pages 2149 fs/nfs/flexfilelayout/flexfilelayout.c put_page(ff_args->pages[0]); pages 2167 fs/nfs/flexfilelayout/flexfilelayout.c ff_args->pages[0] = 
alloc_page(GFP_KERNEL); pages 2168 fs/nfs/flexfilelayout/flexfilelayout.c if (!ff_args->pages[0]) pages 118 fs/nfs/flexfilelayout/flexfilelayout.h struct page *pages[1]; pages 71 fs/nfs/flexfilelayout/flexfilelayoutdev.c xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen); pages 447 fs/nfs/fscache.c struct list_head *pages, pages 457 fs/nfs/fscache.c mapping, pages, nr_pages, pages 470 fs/nfs/fscache.c BUG_ON(!list_empty(pages)); pages 145 fs/nfs/fscache.h struct list_head *pages, pages 149 fs/nfs/fscache.h return __nfs_readpages_from_fscache(ctx, inode, mapping, pages, pages 224 fs/nfs/fscache.h struct list_head *pages, pages 425 fs/nfs/nfs2xdr.c static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length) pages 431 fs/nfs/nfs2xdr.c xdr_write_pages(xdr, pages, 0, length); pages 600 fs/nfs/nfs2xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 636 fs/nfs/nfs2xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 668 fs/nfs/nfs2xdr.c xdr_write_pages(xdr, args->pages, args->pgbase, count); pages 762 fs/nfs/nfs2xdr.c encode_path(xdr, args->pages, args->pathlen); pages 794 fs/nfs/nfs2xdr.c rpc_prepare_reply_pages(req, args->pages, 0, pages 50 fs/nfs/nfs3acl.c struct page *pages[NFSACL_MAXPAGES] = { }; pages 54 fs/nfs/nfs3acl.c .pages = pages, pages 100 fs/nfs/nfs3acl.c for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) pages 101 fs/nfs/nfs3acl.c __free_page(args.pages[count]); pages 163 fs/nfs/nfs3acl.c struct page *pages[NFSACL_MAXPAGES]; pages 168 fs/nfs/nfs3acl.c .pages = pages, pages 202 fs/nfs/nfs3acl.c args.pages[args.npages] = alloc_page(GFP_KERNEL); pages 203 fs/nfs/nfs3acl.c if (args.pages[args.npages] == NULL) pages 239 fs/nfs/nfs3acl.c __free_page(args.pages[args.npages]); pages 229 fs/nfs/nfs3proc.c .pages = &page pages 531 fs/nfs/nfs3proc.c data->arg.symlink.pages = &page; pages 632 fs/nfs/nfs3proc.c u64 cookie, struct page **pages, unsigned int count, bool plus) pages 642 fs/nfs/nfs3proc.c .pages = pages pages 219 fs/nfs/nfs3xdr.c static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages, pages 223 fs/nfs/nfs3xdr.c xdr_write_pages(xdr, pages, 0, length); pages 916 fs/nfs/nfs3xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 949 fs/nfs/nfs3xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 983 fs/nfs/nfs3xdr.c xdr_write_pages(xdr, args->pages, args->pgbase, args->count); pages 1084 fs/nfs/nfs3xdr.c encode_nfspath3(xdr, args->pages, args->pathlen); pages 1246 fs/nfs/nfs3xdr.c rpc_prepare_reply_pages(req, args->pages, 0, pages 1288 fs/nfs/nfs3xdr.c rpc_prepare_reply_pages(req, args->pages, 0, pages 1333 fs/nfs/nfs3xdr.c rpc_prepare_reply_pages(req, args->pages, 0, pages 1353 fs/nfs/nfs3xdr.c xdr_write_pages(xdr, args->pages, 0, args->len); pages 329 fs/nfs/nfs4proc.c start = p = kmap_atomic(*readdir->pages); pages 4419 fs/nfs/nfs4proc.c .pages = &page, pages 4791 fs/nfs/nfs4proc.c data->arg.u.symlink.pages = &page; pages 4868 fs/nfs/nfs4proc.c u64 cookie, struct page **pages, unsigned int count, bool plus) pages 4873 fs/nfs/nfs4proc.c .pages = pages, pages 4906 fs/nfs/nfs4proc.c u64 cookie, struct page **pages, unsigned int count, bool plus) pages 4914 fs/nfs/nfs4proc.c pages, count, plus); pages 5493 fs/nfs/nfs4proc.c struct page **pages) pages 5498 fs/nfs/nfs4proc.c spages = pages; pages 5509 fs/nfs/nfs4proc.c *pages++ = newpage; pages 5567 fs/nfs/nfs4proc.c static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, 
size_t acl_len) pages 5577 fs/nfs/nfs4proc.c _copy_from_pages(acl->data, pages, pgbase, acl_len); pages 5601 fs/nfs/nfs4proc.c struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; pages 5604 fs/nfs/nfs4proc.c .acl_pages = pages, pages 5618 fs/nfs/nfs4proc.c if (npages > ARRAY_SIZE(pages)) pages 5622 fs/nfs/nfs4proc.c pages[i] = alloc_page(GFP_KERNEL); pages 5623 fs/nfs/nfs4proc.c if (!pages[i]) pages 5649 fs/nfs/nfs4proc.c nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); pages 5655 fs/nfs/nfs4proc.c _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); pages 5661 fs/nfs/nfs4proc.c if (pages[i]) pages 5662 fs/nfs/nfs4proc.c __free_page(pages[i]); pages 5707 fs/nfs/nfs4proc.c struct page *pages[NFS4ACL_MAXPAGES]; pages 5710 fs/nfs/nfs4proc.c .acl_pages = pages, pages 5724 fs/nfs/nfs4proc.c if (npages > ARRAY_SIZE(pages)) pages 5737 fs/nfs/nfs4proc.c put_page(pages[i-1]); pages 1225 fs/nfs/nfs4xdr.c xdr_write_pages(xdr, create->u.symlink.pages, 0, pages 1742 fs/nfs/nfs4xdr.c xdr_write_pages(xdr, args->pages, args->pgbase, args->count); pages 2346 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0, pages 2392 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->lg_args->layout.pages, 0, pages 2504 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 2525 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 2546 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->pages, args->pgbase, pages 3022 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase, pages 3044 fs/nfs/nfs4xdr.c rpc_prepare_reply_pages(req, args->layout.pages, 0, pages 521 fs/nfs/pagelist.c INIT_LIST_HEAD(&hdr->pages); pages 576 fs/nfs/pagelist.c hdr->args.pages = hdr->page_array.pagevec; pages 764 fs/nfs/pagelist.c struct page **pages, pages 788 fs/nfs/pagelist.c pages = hdr->page_array.pagevec; pages 793 fs/nfs/pagelist.c nfs_list_move_request(req, &hdr->pages); pages 799 fs/nfs/pagelist.c *pages++ = last_page = req->wb_page; pages 1245 fs/nfs/pagelist.c LIST_HEAD(pages); pages 1249 fs/nfs/pagelist.c list_splice_init(&hdr->pages, &pages); pages 1250 fs/nfs/pagelist.c while (!list_empty(&pages)) { pages 1251 fs/nfs/pagelist.c struct nfs_page *req = nfs_list_entry(pages.next); pages 1257 fs/nfs/pagelist.c if (!list_empty(&pages)) { pages 1259 fs/nfs/pagelist.c hdr->completion_ops->error_cleanup(&pages, err); pages 971 fs/nfs/pnfs.c static void nfs4_free_pages(struct page **pages, size_t size) pages 975 fs/nfs/pnfs.c if (!pages) pages 979 fs/nfs/pnfs.c if (!pages[i]) pages 981 fs/nfs/pnfs.c __free_page(pages[i]); pages 983 fs/nfs/pnfs.c kfree(pages); pages 988 fs/nfs/pnfs.c struct page **pages; pages 991 fs/nfs/pnfs.c pages = kmalloc_array(size, sizeof(struct page *), gfp_flags); pages 992 fs/nfs/pnfs.c if (!pages) { pages 998 fs/nfs/pnfs.c pages[i] = alloc_page(gfp_flags); pages 999 fs/nfs/pnfs.c if (!pages[i]) { pages 1001 fs/nfs/pnfs.c nfs4_free_pages(pages, i); pages 1006 fs/nfs/pnfs.c return pages; pages 1033 fs/nfs/pnfs.c lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); pages 1034 fs/nfs/pnfs.c if (!lgp->args.layout.pages) { pages 1072 fs/nfs/pnfs.c nfs4_free_pages(lgp->args.layout.pages, max_pages); pages 2654 fs/nfs/pnfs.c list_splice_tail_init(&hdr->pages, &mirror->pg_list); pages 2700 fs/nfs/pnfs.c list_splice_init(&hdr->pages, &mirror->pg_list); pages 2777 fs/nfs/pnfs.c list_splice_tail_init(&hdr->pages, &mirror->pg_list); pages 2842 fs/nfs/pnfs.c 
list_splice_init(&hdr->pages, &mirror->pg_list); pages 213 fs/nfs/pnfs.h struct page **pages; pages 101 fs/nfs/pnfs_dev.c struct page **pages = NULL; pages 122 fs/nfs/pnfs_dev.c pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); pages 123 fs/nfs/pnfs_dev.c if (!pages) pages 127 fs/nfs/pnfs_dev.c pages[i] = alloc_page(gfp_flags); pages 128 fs/nfs/pnfs_dev.c if (!pages[i]) pages 134 fs/nfs/pnfs_dev.c pdev->pages = pages; pages 156 fs/nfs/pnfs_dev.c __free_page(pages[i]); pages 157 fs/nfs/pnfs_dev.c kfree(pages); pages 165 fs/nfs/pnfs_nfs.c LIST_HEAD(pages); pages 177 fs/nfs/pnfs_nfs.c list_splice_init(&bucket->committing, &pages); pages 179 fs/nfs/pnfs_nfs.c nfs_retry_commit(&pages, freeme, cinfo, i); pages 205 fs/nfs/pnfs_nfs.c list_add(&data->pages, list); pages 215 fs/nfs/pnfs_nfs.c void pnfs_fetch_commit_bucket_list(struct list_head *pages, pages 226 fs/nfs/pnfs_nfs.c list_splice_init(&bucket->committing, pages); pages 240 fs/nfs/pnfs_nfs.c pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages, pages 244 fs/nfs/pnfs_nfs.c if (list_empty(pages)) { pages 272 fs/nfs/pnfs_nfs.c list_add(&data->pages, &list); pages 283 fs/nfs/pnfs_nfs.c list_for_each_entry_safe(data, tmp, &list, pages) { pages 284 fs/nfs/pnfs_nfs.c list_del_init(&data->pages); pages 296 fs/nfs/pnfs_nfs.c LIST_HEAD(pages); pages 298 fs/nfs/pnfs_nfs.c pnfs_fetch_commit_bucket_list(&pages, data, cinfo); pages 301 fs/nfs/pnfs_nfs.c if (pnfs_generic_commit_cancel_empty_pagelist(&pages, pages 305 fs/nfs/pnfs_nfs.c nfs_init_commit(data, &pages, data->lseg, cinfo); pages 184 fs/nfs/proc.c .pages = &page pages 398 fs/nfs/proc.c .pages = &page, pages 494 fs/nfs/proc.c u64 cookie, struct page **pages, unsigned int count, bool plus) pages 501 fs/nfs/proc.c .pages = pages, pages 166 fs/nfs/read.c while (!list_empty(&hdr->pages)) { pages 167 fs/nfs/read.c struct nfs_page *req = nfs_list_entry(hdr->pages.next); pages 405 fs/nfs/read.c struct list_head *pages, unsigned nr_pages) pages 436 fs/nfs/read.c pages, &nr_pages); pages 443 fs/nfs/read.c ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); pages 94 fs/nfs/write.c INIT_LIST_HEAD(&p->pages); pages 1003 fs/nfs/write.c while (!list_empty(&hdr->pages)) { pages 1004 fs/nfs/write.c struct nfs_page *req = nfs_list_entry(hdr->pages.next); pages 1449 fs/nfs/write.c nfs_async_write_error(&hdr->pages, 0); pages 1754 fs/nfs/write.c list_splice_init(head, &data->pages); pages 1761 fs/nfs/write.c data->lwb = nfs_get_lwb(&data->pages); pages 1847 fs/nfs/write.c while (!list_empty(&data->pages)) { pages 1848 fs/nfs/write.c req = nfs_list_entry(data->pages.next); pages 200 fs/nfsd/nfs3proc.c nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, pages 288 fs/nfsd/nfs3proc.c page_address(rqstp->rq_arg.pages[0]), pages 1910 fs/nfsd/nfs4proc.c xdr->page_ptr = buf->pages - 1; pages 1911 fs/nfsd/nfs4proc.c buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages) pages 1297 fs/nfsd/nfs4xdr.c int pages; pages 1301 fs/nfsd/nfs4xdr.c pages = len >> PAGE_SHIFT; pages 1302 fs/nfsd/nfs4xdr.c argp->pagelist += pages; pages 1303 fs/nfsd/nfs4xdr.c argp->pagelen -= pages * PAGE_SIZE; pages 1304 fs/nfsd/nfs4xdr.c len -= pages * PAGE_SIZE; pages 4563 fs/nfsd/nfs4xdr.c args->pagelist = rqstp->rq_arg.pages; pages 311 fs/nfsd/nfscache.c p = page_address(buf->pages[idx]) + base; pages 223 fs/nfsd/nfsproc.c nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, pages 459 fs/nfsd/nfsproc.c page_address(rqstp->rq_arg.pages[0]), pages 2155 fs/nilfs2/btree.c bh = head = 
page_buffers(pvec.pages[i]); pages 157 fs/nilfs2/inode.c struct list_head *pages, unsigned int nr_pages) pages 159 fs/nilfs2/inode.c return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); pages 255 fs/nilfs2/page.c struct page *page = pvec.pages[i], *dpage; pages 309 fs/nilfs2/page.c struct page *page = pvec.pages[i], *dpage; pages 370 fs/nilfs2/page.c struct page *page = pvec.pages[i]; pages 513 fs/nilfs2/page.c pvec.pages); pages 517 fs/nilfs2/page.c if (length > 0 && pvec.pages[0]->index > index) pages 520 fs/nilfs2/page.c b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits); pages 523 fs/nilfs2/page.c page = pvec.pages[i]; pages 711 fs/nilfs2/segment.c struct page *page = pvec.pages[i]; pages 752 fs/nilfs2/segment.c bh = head = page_buffers(pvec.pages[i]); pages 502 fs/ntfs/compress.c struct page **pages; pages 515 fs/ntfs/compress.c pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS); pages 522 fs/ntfs/compress.c if (unlikely(!pages || !bhs || !completed_pages)) { pages 524 fs/ntfs/compress.c kfree(pages); pages 537 fs/ntfs/compress.c pages[xpage] = page; pages 551 fs/ntfs/compress.c kfree(pages); pages 563 fs/ntfs/compress.c pages[i] = grab_cache_page_nowait(mapping, offset); pages 564 fs/ntfs/compress.c page = pages[i]; pages 579 fs/ntfs/compress.c pages[i] = NULL; pages 738 fs/ntfs/compress.c page = pages[cur_page]; pages 754 fs/ntfs/compress.c pages[cur_page] = NULL; pages 763 fs/ntfs/compress.c page = pages[cur_page]; pages 795 fs/ntfs/compress.c page = pages[cur_page]; pages 806 fs/ntfs/compress.c page = pages[cur_page]; pages 817 fs/ntfs/compress.c page = pages[cur2_page]; pages 833 fs/ntfs/compress.c pages[cur2_page] = NULL; pages 845 fs/ntfs/compress.c err = ntfs_decompress(pages, completed_pages, &cur_page, pages 860 fs/ntfs/compress.c page = pages[prev_cur_page]; pages 867 fs/ntfs/compress.c pages[prev_cur_page] = NULL; pages 886 fs/ntfs/compress.c page = pages[cur_page]; pages 897 fs/ntfs/compress.c pages[cur_page] = NULL; pages 902 fs/ntfs/compress.c kfree(pages); pages 938 fs/ntfs/compress.c page = pages[i]; pages 947 fs/ntfs/compress.c kfree(pages); pages 495 fs/ntfs/file.c pgoff_t index, const unsigned nr_pages, struct page **pages, pages 503 fs/ntfs/file.c pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | pages 505 fs/ntfs/file.c if (!pages[nr]) { pages 521 fs/ntfs/file.c pages[nr] = *cached_page; pages 531 fs/ntfs/file.c unlock_page(pages[--nr]); pages 532 fs/ntfs/file.c put_page(pages[nr]); pages 570 fs/ntfs/file.c static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, pages 600 fs/ntfs/file.c BUG_ON(!pages); pages 601 fs/ntfs/file.c BUG_ON(!*pages); pages 602 fs/ntfs/file.c vi = pages[0]->mapping->host; pages 607 fs/ntfs/file.c vi->i_ino, ni->type, pages[0]->index, nr_pages, pages 613 fs/ntfs/file.c page = pages[u]; pages 641 fs/ntfs/file.c page = pages[u]; pages 1217 fs/ntfs/file.c bh = head = page_buffers(pages[u]); pages 1334 fs/ntfs/file.c page = pages[u]; pages 1360 fs/ntfs/file.c static inline void ntfs_flush_dcache_pages(struct page **pages, pages 1371 fs/ntfs/file.c flush_dcache_page(pages[nr_pages]); pages 1385 fs/ntfs/file.c struct page **pages, const unsigned nr_pages, pages 1399 fs/ntfs/file.c vi = pages[0]->mapping->host; pages 1409 fs/ntfs/file.c page = pages[u]; pages 1539 fs/ntfs/file.c static int ntfs_commit_pages_after_write(struct page **pages, pages 1556 fs/ntfs/file.c BUG_ON(!pages); pages 1557 fs/ntfs/file.c page = pages[0]; pages 1566 fs/ntfs/file.c return 
ntfs_commit_pages_after_non_resident_write(pages, pages 1685 fs/ntfs/file.c static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, pages 1688 fs/ntfs/file.c struct page **last_page = pages + nr_pages; pages 1697 fs/ntfs/file.c copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, pages 1707 fs/ntfs/file.c } while (++pages < last_page); pages 1716 fs/ntfs/file.c zero_user(*pages, copied, len); pages 1720 fs/ntfs/file.c } while (++pages < last_page); pages 1737 fs/ntfs/file.c struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER]; pages 1844 fs/ntfs/file.c pages, &cached_page); pages 1855 fs/ntfs/file.c pages, do_pages, pos, bytes); pages 1858 fs/ntfs/file.c unlock_page(pages[--do_pages]); pages 1859 fs/ntfs/file.c put_page(pages[do_pages]); pages 1864 fs/ntfs/file.c u = (pos >> PAGE_SHIFT) - pages[0]->index; pages 1865 fs/ntfs/file.c copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, pages 1867 fs/ntfs/file.c ntfs_flush_dcache_pages(pages + u, do_pages - u); pages 1870 fs/ntfs/file.c status = ntfs_commit_pages_after_write(pages, do_pages, pages 1876 fs/ntfs/file.c unlock_page(pages[--do_pages]); pages 1877 fs/ntfs/file.c put_page(pages[do_pages]); pages 6844 fs/ocfs2/alloc.c loff_t end, struct page **pages, pages 6859 fs/ocfs2/alloc.c page = pages[i]; pages 6874 fs/ocfs2/alloc.c if (pages) pages 6875 fs/ocfs2/alloc.c ocfs2_unlock_and_free_pages(pages, numpages); pages 6879 fs/ocfs2/alloc.c struct page **pages, int *num) pages 6892 fs/ocfs2/alloc.c pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); pages 6893 fs/ocfs2/alloc.c if (!pages[numpages]) { pages 6905 fs/ocfs2/alloc.c if (pages) pages 6906 fs/ocfs2/alloc.c ocfs2_unlock_and_free_pages(pages, numpages); pages 6916 fs/ocfs2/alloc.c struct page **pages, int *num) pages 6923 fs/ocfs2/alloc.c return ocfs2_grab_pages(inode, start, end, pages, num); pages 6939 fs/ocfs2/alloc.c struct page **pages = NULL; pages 6951 fs/ocfs2/alloc.c pages = kcalloc(ocfs2_pages_per_cluster(sb), pages 6953 fs/ocfs2/alloc.c if (pages == NULL) { pages 6977 fs/ocfs2/alloc.c ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, pages 6984 fs/ocfs2/alloc.c ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, pages 6998 fs/ocfs2/alloc.c kfree(pages); pages 7060 fs/ocfs2/alloc.c struct page **pages = NULL; pages 7068 fs/ocfs2/alloc.c pages = kcalloc(ocfs2_pages_per_cluster(osb->sb), pages 7070 fs/ocfs2/alloc.c if (pages == NULL) { pages 7131 fs/ocfs2/alloc.c ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); pages 7142 fs/ocfs2/alloc.c ret = ocfs2_read_inline_data(inode, pages[0], di_bh); pages 7155 fs/ocfs2/alloc.c pages[i], i > 0, &phys); pages 7186 fs/ocfs2/alloc.c if (pages) pages 7187 fs/ocfs2/alloc.c ocfs2_unlock_and_free_pages(pages, num_pages); pages 7212 fs/ocfs2/alloc.c kfree(pages); pages 260 fs/ocfs2/alloc.h struct page **pages, int *num); pages 355 fs/ocfs2/aops.c struct list_head *pages, unsigned nr_pages) pages 387 fs/ocfs2/aops.c last = lru_to_page(pages); pages 392 fs/ocfs2/aops.c err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); pages 790 fs/ocfs2/aops.c void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) pages 795 fs/ocfs2/aops.c if (pages[i]) { pages 796 fs/ocfs2/aops.c unlock_page(pages[i]); pages 797 fs/ocfs2/aops.c mark_page_accessed(pages[i]); pages 798 fs/ocfs2/aops.c put_page(pages[i]); pages 22 fs/ocfs2/aops.h void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages); pages 65 fs/ocfs2/dlm/dlmdomain.c static void 
dlm_free_pagevec(void **vec, int pages) pages 67 fs/ocfs2/dlm/dlmdomain.c while (pages--) pages 68 fs/ocfs2/dlm/dlmdomain.c free_page((unsigned long)vec[pages]); pages 72 fs/ocfs2/dlm/dlmdomain.c static void **dlm_alloc_pagevec(int pages) pages 74 fs/ocfs2/dlm/dlmdomain.c void **vec = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); pages 80 fs/ocfs2/dlm/dlmdomain.c for (i = 0; i < pages; i++) pages 85 fs/ocfs2/dlm/dlmdomain.c pages, (unsigned long)DLM_HASH_PAGES, pages 293 fs/omfs/file.c struct list_head *pages, unsigned nr_pages) pages 295 fs/omfs/file.c return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); pages 90 fs/orangefs/inode.c struct page **pages; pages 97 fs/orangefs/inode.c struct inode *inode = ow->pages[0]->mapping->host; pages 108 fs/orangefs/inode.c set_page_writeback(ow->pages[i]); pages 109 fs/orangefs/inode.c ow->bv[i].bv_page = ow->pages[i]; pages 110 fs/orangefs/inode.c ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE, pages 112 fs/orangefs/inode.c max(ow->off, page_offset(ow->pages[i])); pages 115 fs/orangefs/inode.c page_offset(ow->pages[i]); pages 132 fs/orangefs/inode.c SetPageError(ow->pages[i]); pages 133 fs/orangefs/inode.c mapping_set_error(ow->pages[i]->mapping, ret); pages 134 fs/orangefs/inode.c if (PagePrivate(ow->pages[i])) { pages 136 fs/orangefs/inode.c page_private(ow->pages[i]); pages 137 fs/orangefs/inode.c ClearPagePrivate(ow->pages[i]); pages 138 fs/orangefs/inode.c put_page(ow->pages[i]); pages 141 fs/orangefs/inode.c end_page_writeback(ow->pages[i]); pages 142 fs/orangefs/inode.c unlock_page(ow->pages[i]); pages 147 fs/orangefs/inode.c if (PagePrivate(ow->pages[i])) { pages 149 fs/orangefs/inode.c page_private(ow->pages[i]); pages 150 fs/orangefs/inode.c ClearPagePrivate(ow->pages[i]); pages 151 fs/orangefs/inode.c put_page(ow->pages[i]); pages 154 fs/orangefs/inode.c end_page_writeback(ow->pages[i]); pages 155 fs/orangefs/inode.c unlock_page(ow->pages[i]); pages 183 fs/orangefs/inode.c ow->pages[ow->npages++] = page; pages 195 fs/orangefs/inode.c ow->pages[ow->npages++] = page; pages 228 fs/orangefs/inode.c ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL); pages 229 fs/orangefs/inode.c if (!ow->pages) { pages 235 fs/orangefs/inode.c kfree(ow->pages); pages 244 fs/orangefs/inode.c kfree(ow->pages); pages 39 fs/proc/meminfo.c unsigned long pages[NR_LRU_LISTS]; pages 53 fs/proc/meminfo.c pages[lru] = global_node_page_state(NR_LRU_BASE + lru); pages 65 fs/proc/meminfo.c show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] + pages 66 fs/proc/meminfo.c pages[LRU_ACTIVE_FILE]); pages 67 fs/proc/meminfo.c show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] + pages 68 fs/proc/meminfo.c pages[LRU_INACTIVE_FILE]); pages 69 fs/proc/meminfo.c show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]); pages 70 fs/proc/meminfo.c show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]); pages 71 fs/proc/meminfo.c show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]); pages 72 fs/proc/meminfo.c show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]); pages 73 fs/proc/meminfo.c show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]); pages 1652 fs/proc/task_mmu.c unsigned long pages; pages 1672 fs/proc/task_mmu.c md->pages += nr_pages; pages 1859 fs/proc/task_mmu.c if (!md->pages) pages 1868 fs/proc/task_mmu.c if (md->pages != md->anon && md->pages != md->dirty) pages 1869 fs/proc/task_mmu.c seq_printf(m, " mapped=%lu", md->pages); pages 1877 fs/proc/task_mmu.c if (md->active < md->pages && 
!is_vm_hugetlb_page(vma)) pages 404 fs/pstore/ram_core.c struct page **pages; pages 419 fs/pstore/ram_core.c pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); pages 420 fs/pstore/ram_core.c if (!pages) { pages 428 fs/pstore/ram_core.c pages[i] = pfn_to_page(addr >> PAGE_SHIFT); pages 430 fs/pstore/ram_core.c vaddr = vmap(pages, page_count, VM_MAP, prot); pages 431 fs/pstore/ram_core.c kfree(pages); pages 103 fs/qnx6/inode.c struct list_head *pages, unsigned nr_pages) pages 105 fs/qnx6/inode.c return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); pages 65 fs/ramfs/file-nommu.c struct page *pages; pages 84 fs/ramfs/file-nommu.c pages = alloc_pages(gfp, order); pages 85 fs/ramfs/file-nommu.c if (!pages) pages 92 fs/ramfs/file-nommu.c split_page(pages, order); pages 96 fs/ramfs/file-nommu.c __free_page(pages + loop); pages 100 fs/ramfs/file-nommu.c data = page_address(pages); pages 105 fs/ramfs/file-nommu.c struct page *page = pages + loop; pages 124 fs/ramfs/file-nommu.c __free_page(pages + loop++); pages 207 fs/ramfs/file-nommu.c struct page **pages = NULL, **ptr, *page; pages 223 fs/ramfs/file-nommu.c pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL); pages 224 fs/ramfs/file-nommu.c if (!pages) pages 227 fs/ramfs/file-nommu.c nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages); pages 232 fs/ramfs/file-nommu.c ptr = pages; pages 240 fs/ramfs/file-nommu.c ret = (unsigned long) page_address(pages[0]); pages 243 fs/ramfs/file-nommu.c ptr = pages; pages 247 fs/ramfs/file-nommu.c kfree(pages); pages 1165 fs/reiserfs/inode.c struct list_head *pages, unsigned nr_pages) pages 1167 fs/reiserfs/inode.c return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block); pages 203 fs/splice.c buf->page = spd->pages[page_nr]; pages 261 fs/splice.c spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL); pages 265 fs/splice.c if (spd->pages && spd->partial) pages 268 fs/splice.c kfree(spd->pages); pages 278 fs/splice.c kfree(spd->pages); pages 371 fs/splice.c struct page **pages; pages 388 fs/splice.c res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base); pages 408 fs/splice.c vec[i].iov_base = page_address(pages[i]) + offset; pages 424 fs/splice.c put_page(pages[i]); pages 425 fs/splice.c kvfree(pages); pages 1220 fs/splice.c struct page *pages[16]; pages 1225 fs/splice.c copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start); pages 1234 fs/splice.c buf.page = pages[n]; pages 1245 fs/splice.c put_page(pages[n]); pages 206 fs/squashfs/cache.c for (j = 0; j < cache->pages; j++) pages 245 fs/squashfs/cache.c cache->pages = block_size >> PAGE_SHIFT; pages 246 fs/squashfs/cache.c cache->pages = cache->pages ? 
cache->pages : 1; pages 258 fs/squashfs/cache.c entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); pages 264 fs/squashfs/cache.c for (j = 0; j < cache->pages; j++) { pages 273 fs/squashfs/cache.c cache->pages, 0); pages 408 fs/squashfs/cache.c int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; pages 417 fs/squashfs/cache.c data = kcalloc(pages, sizeof(void *), GFP_KERNEL); pages 423 fs/squashfs/cache.c actor = squashfs_page_actor_init(data, pages, length); pages 429 fs/squashfs/cache.c for (i = 0; i < pages; i++, buffer += PAGE_SIZE) pages 22 fs/squashfs/file_direct.c int pages, struct page **page, int bytes); pages 36 fs/squashfs/file_direct.c int i, n, pages, missing_pages, bytes, res = -ENOMEM; pages 44 fs/squashfs/file_direct.c pages = end_index - start_index + 1; pages 46 fs/squashfs/file_direct.c page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); pages 54 fs/squashfs/file_direct.c actor = squashfs_page_actor_init_special(page, pages, 0); pages 59 fs/squashfs/file_direct.c for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) { pages 84 fs/squashfs/file_direct.c res = squashfs_read_cache(target_page, block, bsize, pages, pages 105 fs/squashfs/file_direct.c pageaddr = kmap_atomic(page[pages - 1]); pages 111 fs/squashfs/file_direct.c for (i = 0; i < pages; i++) { pages 128 fs/squashfs/file_direct.c for (i = 0; i < pages; i++) { pages 145 fs/squashfs/file_direct.c int pages, struct page **page, int bytes) pages 158 fs/squashfs/file_direct.c for (n = 0; n < pages && bytes > 0; n++, pages 30 fs/squashfs/page_actor.c if (actor->next_page == actor->pages) pages 42 fs/squashfs/page_actor.c int pages, int length) pages 49 fs/squashfs/page_actor.c actor->length = length ? : pages * PAGE_SIZE; pages 51 fs/squashfs/page_actor.c actor->pages = pages; pages 71 fs/squashfs/page_actor.c return actor->pageaddr = actor->next_page == actor->pages ? NULL : pages 82 fs/squashfs/page_actor.c int pages, int length) pages 89 fs/squashfs/page_actor.c actor->length = length ? : pages * PAGE_SIZE; pages 91 fs/squashfs/page_actor.c actor->pages = pages; pages 12 fs/squashfs/page_actor.h int pages; pages 18 fs/squashfs/page_actor.h int pages, int length) pages 25 fs/squashfs/page_actor.h actor->length = length ? : pages * PAGE_SIZE; pages 27 fs/squashfs/page_actor.h actor->pages = pages; pages 40 fs/squashfs/page_actor.h return actor->next_page == actor->pages ? 
NULL :
pages 58 fs/squashfs/page_actor.h int pages;
pages 23 fs/squashfs/squashfs_fs_sb.h int pages;
pages 199 fs/udf/inode.c struct list_head *pages, unsigned nr_pages)
pages 201 fs/udf/inode.c return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
pages 1171 fs/xfs/xfs_aops.c struct list_head *pages,
pages 1175 fs/xfs/xfs_aops.c return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
pages 213 include/asm-generic/tlb.h struct page *pages[0];
pages 61 include/crypto/if_alg.h struct page *pages[ALG_MAX_PAGES];
pages 31 include/drm/drm_agpsupport.h void drm_free_agp(struct agp_memory * handle, int pages);
pages 64 include/drm/drm_agpsupport.h static inline void drm_free_agp(struct agp_memory * handle, int pages)
pages 38 include/drm/drm_cache.h void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
pages 387 include/drm/drm_gem.h void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
pages 37 include/drm/drm_gem_shmem_helper.h struct page **pages;
pages 128 include/drm/drm_legacy.h int pages;
pages 93 include/drm/drm_prime.h struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
pages 106 include/drm/drm_prime.h int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
pages 70 include/drm/drm_vma_manager.h unsigned long pages);
pages 72 include/drm/drm_vma_manager.h struct drm_vma_offset_node *node, unsigned long pages);
pages 97 include/drm/drm_vma_manager.h unsigned long pages)
pages 101 include/drm/drm_vma_manager.h node = drm_vma_offset_lookup_locked(mgr, start, pages);
pages 40 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
pages 42 include/drm/ttm/ttm_set_memory.h return set_pages_array_wb(pages, addrinarray);
pages 45 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
pages 47 include/drm/ttm/ttm_set_memory.h return set_pages_array_wc(pages, addrinarray);
pages 50 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
pages 52 include/drm/ttm/ttm_set_memory.h return set_pages_array_uc(pages, addrinarray);
pages 78 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
pages 83 include/drm/ttm/ttm_set_memory.h unmap_page_from_agp(pages[i]);
pages 87 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
pages 92 include/drm/ttm/ttm_set_memory.h map_page_into_agp(pages[i]);
pages 96 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
pages 101 include/drm/ttm/ttm_set_memory.h map_page_into_agp(pages[i]);
pages 116 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
pages 121 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
pages 126 include/drm/ttm/ttm_set_memory.h static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
pages 108 include/drm/ttm/ttm_tt.h struct page **pages;
pages 73 include/linux/agp_backend.h struct page **pages;
pages 57 include/linux/balloon_compaction.h struct list_head pages; /* Pages enqueued & handled to Host */
pages 68 include/linux/balloon_compaction.h struct list_head *pages);
pages 70 include/linux/balloon_compaction.h struct list_head *pages, size_t n_req_pages);
pages 76 include/linux/balloon_compaction.h INIT_LIST_HEAD(&balloon->pages);
pages 105 include/linux/balloon_compaction.h list_add(&page->lru, &balloon->pages);
pages 149 include/linux/balloon_compaction.h list_add(&page->lru, &balloon->pages);
pages 188 include/linux/balloon_compaction.h static inline void balloon_page_push(struct list_head *pages, struct page *page)
pages 190 include/linux/balloon_compaction.h list_add(&page->lru, pages);
pages 200 include/linux/balloon_compaction.h static inline struct page *balloon_page_pop(struct list_head *pages)
pages 202 include/linux/balloon_compaction.h struct page *page = list_first_entry_or_null(pages, struct page, lru);
pages 805 include/linux/blkdev.h struct page **pages;
pages 70 include/linux/bpf.h u32 pages;
pages 644 include/linux/bpf.h int __bpf_prog_charge(struct user_struct *user, u32 pages);
pages 645 include/linux/bpf.h void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
pages 657 include/linux/bpf.h int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
pages 658 include/linux/bpf.h void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
pages 788 include/linux/bpf.h static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
pages 793 include/linux/bpf.h static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
pages 304 include/linux/ceph/libceph.h extern void ceph_release_page_vector(struct page **pages, int num_pages);
pages 305 include/linux/ceph/libceph.h extern void ceph_put_page_vector(struct page **pages, int num_pages,
pages 308 include/linux/ceph/libceph.h extern int ceph_copy_user_to_page_vector(struct page **pages,
pages 311 include/linux/ceph/libceph.h extern void ceph_copy_to_page_vector(struct page **pages,
pages 314 include/linux/ceph/libceph.h extern void ceph_copy_from_page_vector(struct page **pages,
pages 317 include/linux/ceph/libceph.h extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
pages 178 include/linux/ceph/messenger.h struct page **pages;
pages 360 include/linux/ceph/messenger.h void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
pages 67 include/linux/ceph/osd_client.h struct page **pages;
pages 407 include/linux/ceph/osd_client.h struct page **pages, u64 length,
pages 426 include/linux/ceph/osd_client.h struct page **pages, u64 length,
pages 451 include/linux/ceph/osd_client.h struct page **pages, u64 length,
pages 460 include/linux/ceph/osd_client.h struct page **pages, u64 length,
pages 517 include/linux/ceph/osd_client.h struct page **pages, int nr_pages,
pages 527 include/linux/ceph/osd_client.h struct page **pages, int nr_pages);
pages 37 include/linux/cma.h extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
pages 805 include/linux/compat.h __u32 __user *pages,
pages 213 include/linux/coresight.h struct perf_event *event, void **pages,
pages 113 include/linux/dma-contiguous.h bool dma_release_from_contiguous(struct device *dev, struct page *pages,
pages 153 include/linux/dma-contiguous.h bool dma_release_from_contiguous(struct device *dev, struct page *pages,
pages 632 include/linux/dma-mapping.h void *dma_common_pages_remap(struct page **pages, size_t size,
pages 161 include/linux/efi.h struct page **pages;
pages 1539 include/linux/efi.h phys_addr_t *pages);
pages 515 include/linux/filter.h u32 pages;
pages 521 include/linux/filter.h u16 pages; /* Number of allocated pages */
pages 776 include/linux/filter.h set_memory_ro((unsigned long)fp, fp->pages);
pages 784 include/linux/filter.h set_memory_ro((unsigned long)hdr, hdr->pages);
pages 785 include/linux/filter.h set_memory_x((unsigned long)hdr, hdr->pages);
pages 423 include/linux/firewire.h struct page **pages;
pages 15 include/linux/firmware.h struct page **pages;
pages 377 include/linux/fs.h struct list_head *pages, unsigned nr_pages);
pages 159 include/linux/fscache-cache.h struct list_head *pages,
pages 225 include/linux/fscache.h struct list_head *pages);
pages 591 include/linux/fscache.h struct list_head *pages,
pages 598 include/linux/fscache.h return __fscache_read_or_alloc_pages(cookie, mapping, pages,
pages 648 include/linux/fscache.h struct list_head *pages)
pages 651 include/linux/fscache.h __fscache_readpages_cancel(cookie, pages);
pages 155 include/linux/iomap.h int iomap_readpages(struct address_space *mapping, struct list_head *pages,
pages 55 include/linux/iova.h unsigned long pages;
pages 149 include/linux/iova.h unsigned long pfn, unsigned long pages,
pages 208 include/linux/iova.h unsigned long pfn, unsigned long pages,
pages 386 include/linux/kexec.h static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; }
pages 390 include/linux/kexec.h static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
pages 245 include/linux/kprobes.h struct list_head pages; /* list of kprobe_insn_page */
pages 703 include/linux/kvm_host.h struct page **pages, int nr_pages);
pages 1485 include/linux/lsm_hooks.h int (*vm_enough_memory)(struct mm_struct *mm, long pages);
pages 765 include/linux/mm.h void put_pages_list(struct list_head *pages);
pages 1085 include/linux/mm.h void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
pages 1088 include/linux/mm.h void put_user_pages(struct page **pages, unsigned long npages);
pages 1534 include/linux/mm.h unsigned int gup_flags, struct page **pages,
pages 1537 include/linux/mm.h unsigned int gup_flags, struct page **pages,
pages 1540 include/linux/mm.h unsigned int gup_flags, struct page **pages, int *locked);
pages 1542 include/linux/mm.h struct page **pages, unsigned int gup_flags);
pages 1545 include/linux/mm.h unsigned int gup_flags, struct page **pages);
pages 1547 include/linux/mm.h int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
pages 1548 include/linux/mm.h int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
pages 1595 include/linux/mm.h struct page **pages);
pages 1596 include/linux/mm.h int get_kernel_page(unsigned long start, int write, struct page **pages);
pages 1639 include/linux/mm.h struct page **pages);
pages 2276 include/linux/mm.h extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
pages 2336 include/linux/mm.h unsigned long flags, struct page **pages);
pages 2548 include/linux/mm.h int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
pages 2550 include/linux/mm.h int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
pages 109 include/linux/mm_types.h int pages; /* Nr of pages left */
pages 112 include/linux/mm_types.h short int pages;
pages 726 include/linux/mm_types.h struct page **pages;
pages 66 include/linux/mman.h static inline void vm_acct_memory(long pages)
pages 68 include/linux/mman.h percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
pages 71 include/linux/mman.h static inline void vm_unacct_memory(long pages)
pages 73 include/linux/mman.h vm_acct_memory(-pages);
pages 17 include/linux/mpage.h int mpage_readpages(struct address_space *mapping, struct list_head *pages,
pages 43 include/linux/mtd/bbm.h int pages[NAND_MAX_CHIPS];
pages 237 include/linux/nfs_xdr.h struct page **pages;
pages 645 include/linux/nfs_xdr.h struct page ** pages;
pages 828 include/linux/nfs_xdr.h struct page ** pages;
pages 837 include/linux/nfs_xdr.h struct page ** pages;
pages 843 include/linux/nfs_xdr.h struct page ** pages;
pages 853 include/linux/nfs_xdr.h struct page ** pages;
pages 865 include/linux/nfs_xdr.h struct page ** pages;
pages 906 include/linux/nfs_xdr.h struct page ** pages;
pages 933 include/linux/nfs_xdr.h struct page ** pages;
pages 951 include/linux/nfs_xdr.h struct page ** pages;
pages 998 include/linux/nfs_xdr.h struct page ** pages;
pages 1116 include/linux/nfs_xdr.h struct page ** pages; /* zero-copy data */
pages 1133 include/linux/nfs_xdr.h struct page ** pages; /* zero-copy data */
pages 1510 include/linux/nfs_xdr.h struct list_head pages;
pages 1572 include/linux/nfs_xdr.h struct list_head pages; /* Coalesced requests we wish to flush */
pages 121 include/linux/pagemap.h void release_pages(struct page **pages, int nr);
pages 353 include/linux/pagemap.h struct page **pages);
pages 356 include/linux/pagemap.h struct page **pages)
pages 359 include/linux/pagemap.h pages);
pages 362 include/linux/pagemap.h unsigned int nr_pages, struct page **pages);
pages 365 include/linux/pagemap.h struct page **pages);
pages 368 include/linux/pagemap.h struct page **pages)
pages 371 include/linux/pagemap.h nr_pages, pages);
pages 391 include/linux/pagemap.h struct list_head *pages, filler_t *filler, void *data);
pages 23 include/linux/pagevec.h struct page *pages[PAGEVEC_SIZE];
pages 81 include/linux/pagevec.h pvec->pages[pvec->nr++] = page;
pages 81 include/linux/pci-epc.h int pages;
pages 416 include/linux/perf_event.h void *(*setup_aux) (struct perf_event *event, void **pages,
pages 133 include/linux/pktcdvd.h struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
pages 275 include/linux/scatterlist.h int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
pages 279 include/linux/scatterlist.h int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
pages 19 include/linux/sched/numa_balancing.h extern void task_numa_fault(int last_node, int node, int pages, int flags);
pages 26 include/linux/sched/numa_balancing.h static inline void task_numa_fault(int last_node, int node, int pages,
pages 1228 include/linux/scif.h struct scif_range **pages);
pages 1249 include/linux/scif.h int scif_put_pages(struct scif_range *pages);
pages 159 include/linux/security.h extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
pages 277 include/linux/security.h int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
pages 566 include/linux/security.h static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
pages 568 include/linux/security.h return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
pages 114 include/linux/sfi.h u64 pages;
pages 113 include/linux/shmem_fs.h extern bool shmem_charge(struct inode *inode, long pages);
pages 114 include/linux/shmem_fs.h extern void shmem_uncharge(struct inode *inode, long pages);
pages 57 include/linux/splice.h struct page **pages; /* page map */
pages 179 include/linux/sunrpc/clnt.h void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
pages 85 include/linux/sunrpc/gss_krb5.h struct page **pages); /* v2 encryption function */
pages 255 include/linux/sunrpc/gss_krb5.h struct xdr_buf *outbuf, struct page **pages);
pages 272 include/linux/sunrpc/gss_krb5.h int offset, struct page **pages);
pages 312 include/linux/sunrpc/gss_krb5.h struct page **pages);
pages 521 include/linux/sunrpc/svc.h struct page **pages,
pages 57 include/linux/sunrpc/xdr.h struct page ** pages; /* Array of pages */
pages 75 include/linux/sunrpc/xdr.h buf->pages = NULL;
pages 224 include/linux/sunrpc/xdr.h extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
pages 257 include/linux/sunrpc/xdr.h extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
pages 263 include/linux/sunrpc/xdr.h struct page **pages, unsigned int len);
pages 575 include/linux/suspend.h unsigned long page_key_additional_pages(unsigned long pages);
pages 576 include/linux/suspend.h int page_key_alloc(unsigned long pages);
pages 584 include/linux/suspend.h static inline unsigned long page_key_additional_pages(unsigned long pages)
pages 589 include/linux/suspend.h static inline int page_key_alloc(unsigned long pages)
pages 246 include/linux/swap.h unsigned int pages; /* total of usable pages of swap */
pages 506 include/linux/swap.h #define free_pages_and_swap_cache(pages, nr) \
pages 507 include/linux/swap.h release_pages((pages), (nr));
pages 895 include/linux/syscalls.h const void __user * __user *pages,
pages 111 include/linux/tee_drv.h struct page **pages, size_t num_pages,
pages 195 include/linux/tee_drv.h struct page **pages;
pages 424 include/linux/tee_drv.h return shm->pages;
pages 225 include/linux/uio.h ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
pages 227 include/linux/uio.h ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
pages 100 include/linux/virtio_ring.h void *pages,
pages 46 include/linux/vmalloc.h struct page **pages;
pages 77 include/linux/vmalloc.h extern void *vm_map_ram(struct page **pages, unsigned int count,
pages 119 include/linux/vmalloc.h extern void *vmap(struct page **pages, unsigned int count,
pages 159 include/linux/vmalloc.h struct page **pages);
pages 162 include/linux/vmalloc.h pgprot_t prot, struct page **pages);
pages 175 include/linux/vmalloc.h pgprot_t prot, struct page **pages)
pages 56 include/linux/zpool.h int zpool_shrink(struct zpool *pool, unsigned int pages,
pages 100 include/linux/zpool.h int (*shrink)(void *pool, unsigned int pages,
pages 49 include/media/videobuf-dma-sg.h struct page **pages;
pages 47 include/net/xdp_sock.h struct xdp_umem_page *pages;
pages 158 include/net/xdp_sock.h page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
pages 167 include/net/xdp_sock.h return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
pages 1550 include/sound/emu10k1.h int first_page, last_page, pages, mapped_page;
pages 83 include/sound/memalloc.h int pages; /* allocated pages */
pages 69 include/sound/sof/stream.h uint32_t pages;
pages 40 include/trace/events/tlb.h TP_PROTO(int reason, unsigned long pages),
pages 41 include/trace/events/tlb.h TP_ARGS(reason, pages),
pages 45 include/trace/events/tlb.h __field(unsigned long, pages)
pages 50 include/trace/events/tlb.h __entry->pages = pages;
pages 54 include/trace/events/tlb.h __entry->pages,
pages 400 include/trace/events/writeback.h __field(long, pages)
pages 403 include/trace/events/writeback.h __entry->pages = pages_written;
pages 405 include/trace/events/writeback.h TP_printk("%ld", __entry->pages)
pages 44 include/uapi/linux/screen_info.h __u16 pages; /* 0x32 */
pages 282 include/uapi/linux/vbox_vmmdev_types.h __u64 pages[1]; /** Page addresses. */
pages 90 include/xen/arm/page.h struct page **pages, unsigned int count);
pages 94 include/xen/arm/page.h struct page **pages, unsigned int count);
pages 27 include/xen/balloon.h int alloc_xenballooned_pages(int nr_pages, struct page **pages);
pages 28 include/xen/balloon.h void free_xenballooned_pages(int nr_pages, struct page **pages);
pages 76 include/xen/grant_table.h struct page **pages;
pages 198 include/xen/grant_table.h int gnttab_alloc_pages(int nr_pages, struct page **pages);
pages 199 include/xen/grant_table.h void gnttab_free_pages(int nr_pages, struct page **pages);
pages 209 include/xen/grant_table.h struct page **pages;
pages 219 include/xen/grant_table.h int gnttab_pages_set_private(int nr_pages, struct page **pages);
pages 220 include/xen/grant_table.h void gnttab_pages_clear_private(int nr_pages, struct page **pages);
pages 224 include/xen/grant_table.h struct page **pages, unsigned int count);
pages 227 include/xen/grant_table.h struct page **pages, unsigned int count);
pages 292 include/xen/grant_table.h void gnttab_foreach_grant(struct page **pages,
pages 30 include/xen/mem-reservation.h struct page **pages,
pages 34 include/xen/mem-reservation.h struct page **pages);
pages 38 include/xen/mem-reservation.h struct page **pages,
pages 43 include/xen/mem-reservation.h __xenmem_reservation_va_mapping_update(count, pages, frames);
pages 48 include/xen/mem-reservation.h struct page **pages)
pages 52 include/xen/mem-reservation.h __xenmem_reservation_va_mapping_reset(count, pages);
pages 47 include/xen/xen-front-pgdir-shbuf.h struct page **pages;
pages 64 include/xen/xen-front-pgdir-shbuf.h struct page **pages;
pages 67 include/xen/xen-ops.h unsigned int domid, bool no_translate, struct page **pages);
pages 72 include/xen/xen-ops.h bool no_translate, struct page **pages)
pages 87 include/xen/xen-ops.h struct page **pages);
pages 89 include/xen/xen-ops.h int nr, struct page **pages);
pages 100 include/xen/xen-ops.h struct page **pages)
pages 106 include/xen/xen-ops.h int nr, struct page **pages)
pages 137 include/xen/xen-ops.h struct page **pages)
pages 141 include/xen/xen-ops.h prot, domid, pages);
pages 149 include/xen/xen-ops.h false, pages);
pages 173 include/xen/xen-ops.h struct page **pages)
pages 179 include/xen/xen-ops.h true, pages);
pages 198 include/xen/xen-ops.h struct page **pages)
pages 204 include/xen/xen-ops.h pages);
pages 208 include/xen/xen-ops.h int numpgs, struct page **pages);
pages 94 kernel/bpf/core.c fp->pages = size / PAGE_SIZE;
pages 221 kernel/bpf/core.c u32 pages, delta;
pages 227 kernel/bpf/core.c pages = size / PAGE_SIZE;
pages 228 kernel/bpf/core.c if (pages <= fp_old->pages)
pages 231 kernel/bpf/core.c delta = pages - fp_old->pages;
pages 240 kernel/bpf/core.c memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
pages 241 kernel/bpf/core.c fp->pages = pages;
pages 537 kernel/bpf/core.c *symbol_end = addr + hdr->pages * PAGE_SIZE;
pages 767 kernel/bpf/core.c static int bpf_jit_charge_modmem(u32 pages)
pages 769 kernel/bpf/core.c if (atomic_long_add_return(pages, &bpf_jit_current) >
pages 772 kernel/bpf/core.c atomic_long_sub(pages, &bpf_jit_current);
pages 780 kernel/bpf/core.c static void bpf_jit_uncharge_modmem(u32 pages)
pages 782 kernel/bpf/core.c atomic_long_sub(pages, &bpf_jit_current);
pages 801 kernel/bpf/core.c u32 size, hole, start, pages;
pages 808 kernel/bpf/core.c pages = size / PAGE_SIZE;
pages 810 kernel/bpf/core.c if (bpf_jit_charge_modmem(pages))
pages 814 kernel/bpf/core.c bpf_jit_uncharge_modmem(pages);
pages 821 kernel/bpf/core.c hdr->pages = pages;
pages 834 kernel/bpf/core.c u32 pages = hdr->pages;
pages 837 kernel/bpf/core.c bpf_jit_uncharge_modmem(pages);
pages 1033 kernel/bpf/core.c fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
pages 1039 kernel/bpf/core.c memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
pages 460 kernel/bpf/local_storage.c static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
pages 466 kernel/bpf/local_storage.c *pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
pages 470 kernel/bpf/local_storage.c *pages = round_up(round_up(size, 8) * num_possible_cpus(),
pages 484 kernel/bpf/local_storage.c u32 pages;
pages 490 kernel/bpf/local_storage.c size = bpf_cgroup_storage_calculate_size(map, &pages);
pages 492 kernel/bpf/local_storage.c if (bpf_map_charge_memlock(map, pages))
pages 518 kernel/bpf/local_storage.c bpf_map_uncharge_memlock(map, pages);
pages 545 kernel/bpf/local_storage.c u32 pages;
pages 552 kernel/bpf/local_storage.c bpf_cgroup_storage_calculate_size(map, &pages);
pages 553 kernel/bpf/local_storage.c bpf_map_uncharge_memlock(map, pages);
pages 186 kernel/bpf/syscall.c static int bpf_charge_memlock(struct user_struct *user, u32 pages)
pages 190 kernel/bpf/syscall.c if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
pages 191 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
pages 197 kernel/bpf/syscall.c static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
pages 200 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
pages 205 kernel/bpf/syscall.c u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
pages 213 kernel/bpf/syscall.c ret = bpf_charge_memlock(user, pages);
pages 219 kernel/bpf/syscall.c mem->pages = pages;
pages 227 kernel/bpf/syscall.c bpf_uncharge_memlock(mem->user, mem->pages);
pages 240 kernel/bpf/syscall.c int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
pages 244 kernel/bpf/syscall.c ret = bpf_charge_memlock(map->memory.user, pages);
pages 247 kernel/bpf/syscall.c map->memory.pages += pages;
pages 251 kernel/bpf/syscall.c void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
pages 253 kernel/bpf/syscall.c bpf_uncharge_memlock(map->memory.user, pages);
pages 254 kernel/bpf/syscall.c map->memory.pages -= pages;
pages 399 kernel/bpf/syscall.c map->memory.pages * 1ULL << PAGE_SHIFT,
pages 1242 kernel/bpf/syscall.c int __bpf_prog_charge(struct user_struct *user, u32 pages)
pages 1248 kernel/bpf/syscall.c user_bufs = atomic_long_add_return(pages, &user->locked_vm);
pages 1250 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
pages 1258 kernel/bpf/syscall.c void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
pages 1261 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
pages 1269 kernel/bpf/syscall.c ret = __bpf_prog_charge(user, prog->pages);
pages 1283 kernel/bpf/syscall.c __bpf_prog_uncharge(user, prog->pages);
pages 1421 kernel/bpf/syscall.c prog->pages * 1ULL << PAGE_SHIFT,
pages 46 kernel/dma/coherent.c int pages = size >> PAGE_SHIFT;
pages 47 kernel/dma/coherent.c int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
pages 74 kernel/dma/coherent.c dma_mem->size = pages;
pages 211 kernel/dma/contiguous.c bool dma_release_from_contiguous(struct device *dev, struct page *pages,
pages 214 kernel/dma/contiguous.c return cma_release(dev_get_cma_area(dev), pages, count);
pages 1111 kernel/dma/debug.c if (page != stack_vm_area->pages[i])
pages 20 kernel/dma/remap.c return area->pages;
pages 23 kernel/dma/remap.c static struct vm_struct *__dma_common_pages_remap(struct page **pages,
pages 32 kernel/dma/remap.c if (map_vm_area(area, prot, pages)) {
pages 44 kernel/dma/remap.c void *dma_common_pages_remap(struct page **pages, size_t size,
pages 49 kernel/dma/remap.c area = __dma_common_pages_remap(pages, size, prot, caller);
pages 53 kernel/dma/remap.c area->pages = pages;
pages 66 kernel/dma/remap.c struct page **pages;
pages 69 kernel/dma/remap.c pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
pages 70 kernel/dma/remap.c if (!pages)
pages 74 kernel/dma/remap.c pages[i] = nth_page(page, i);
pages 76 kernel/dma/remap.c area = __dma_common_pages_remap(pages, size, prot, caller);
pages 78 kernel/dma/remap.c kfree(pages);
pages 104 kernel/events/uprobes.c struct page *pages[2];
pages 1505 kernel/events/uprobes.c area->xol_mapping.pages = area->pages;
pages 1506 kernel/events/uprobes.c area->pages[0] = alloc_page(GFP_HIGHUSER);
pages 1507 kernel/events/uprobes.c if (!area->pages[0])
pages 1509 kernel/events/uprobes.c area->pages[1] = NULL;
pages 1516 kernel/events/uprobes.c arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
pages 1521 kernel/events/uprobes.c __free_page(area->pages[0]);
pages 1563 kernel/events/uprobes.c put_page(area->pages[0]);
pages 1630 kernel/events/uprobes.c arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
pages 277 kernel/fork.c mod_memcg_page_state(vm->pages[i],
pages 281 kernel/fork.c memcg_kmem_uncharge(vm->pages[i], 0);
pages 383 kernel/fork.c mod_zone_page_state(page_zone(vm->pages[i]),
pages 418 kernel/fork.c ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
pages 422 kernel/fork.c mod_memcg_page_state(vm->pages[i],
pages 301 kernel/kexec_core.c struct page *pages;
pages 305 kernel/kexec_core.c pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
pages 306 kernel/kexec_core.c if (pages) {
pages 309 kernel/kexec_core.c pages->mapping = NULL;
pages 310 kernel/kexec_core.c set_page_private(pages, order);
pages 313 kernel/kexec_core.c SetPageReserved(pages + i);
pages 315 kernel/kexec_core.c arch_kexec_post_alloc_pages(page_address(pages), count,
pages 320 kernel/kexec_core.c clear_highpage(pages + i);
pages 323 kernel/kexec_core.c return pages;
pages 367 kernel/kexec_core.c struct page *pages;
pages 379 kernel/kexec_core.c pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
pages 380 kernel/kexec_core.c if (!pages)
pages 382 kernel/kexec_core.c pfn = page_to_boot_pfn(pages);
pages 388 kernel/kexec_core.c list_add(&pages->lru, &extra_pages);
pages 389 kernel/kexec_core.c pages = NULL;
pages 391 kernel/kexec_core.c } while (!pages);
pages 393 kernel/kexec_core.c if (pages) {
pages 395 kernel/kexec_core.c list_add(&pages->lru, &image->control_pages);
pages 413 kernel/kexec_core.c return pages;
pages 441 kernel/kexec_core.c struct page *pages;
pages 443 kernel/kexec_core.c pages = NULL;
pages 469 kernel/kexec_core.c pages = pfn_to_page(hole_start >> PAGE_SHIFT);
pages 476 kernel/kexec_core.c if (pages)
pages 477 kernel/kexec_core.c arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
pages 479 kernel/kexec_core.c return pages;
pages 486 kernel/kexec_core.c struct page *pages = NULL;
pages 490 kernel/kexec_core.c pages = kimage_alloc_normal_control_pages(image, order);
pages 493 kernel/kexec_core.c pages = kimage_alloc_crash_control_pages(image, order);
pages 497 kernel/kexec_core.c return pages;
pages 121 kernel/kprobes.c .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
pages 140 kernel/kprobes.c list_for_each_entry_rcu(kip, &c->pages, list) {
pages 184 kernel/kprobes.c list_add_rcu(&kip->list, &c->pages);
pages 221 kernel/kprobes.c list_for_each_entry_safe(kip, next, &c->pages, list) {
pages 243 kernel/kprobes.c list_for_each_entry_rcu(kip, &c->pages, list) {
pages 281 kernel/kprobes.c list_for_each_entry_rcu(kip, &c->pages, list) {
pages 299 kernel/kprobes.c .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
pages 14 kernel/power/power.h unsigned long pages;
pages 506 kernel/power/snapshot.c unsigned long pages;
pages 508 kernel/power/snapshot.c pages = end - start;
pages 517 kernel/power/snapshot.c nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
pages 884 kernel/power/snapshot.c unsigned long bits, pfn, pages;
pages 888 kernel/power/snapshot.c pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
pages 889 kernel/power/snapshot.c bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
pages 1703 kernel/power/snapshot.c unsigned long saveable, size, max_size, count, highmem, pages = 0;
pages 1761 kernel/power/snapshot.c pages = preallocate_image_highmem(save_highmem);
pages 1762 kernel/power/snapshot.c pages += preallocate_image_memory(saveable - pages, avail_normal);
pages 1767 kernel/power/snapshot.c pages = minimum_image_size(saveable);
pages 1773 kernel/power/snapshot.c if (avail_normal > pages)
pages 1774 kernel/power/snapshot.c avail_normal -= pages;
pages 1777 kernel/power/snapshot.c if (size < pages)
pages 1778 kernel/power/snapshot.c size = min_t(unsigned long, pages, max_size);
pages 1801 kernel/power/snapshot.c pages = preallocate_image_memory(alloc, avail_normal);
pages 1802 kernel/power/snapshot.c if (pages < alloc) {
pages 1804 kernel/power/snapshot.c alloc -= pages;
pages 1805 kernel/power/snapshot.c pages += pages_highmem;
pages 1809 kernel/power/snapshot.c pages += pages_highmem;
pages 1814 kernel/power/snapshot.c alloc = (count - pages) - size;
pages 1815 kernel/power/snapshot.c pages += preallocate_image_highmem(alloc);
pages 1827 kernel/power/snapshot.c pages += pages_highmem + size;
pages 1835 kernel/power/snapshot.c pages -= free_unnecessary_pages();
pages 1839 kernel/power/snapshot.c pr_cont("done (allocated %lu pages)\n", pages);
pages 1840 kernel/power/snapshot.c swsusp_show_speed(start, stop, pages, "Allocated");
pages 2052 kernel/power/snapshot.c info->pages = snapshot_get_image_size();
pages 2053 kernel/power/snapshot.c info->size = info->pages;
pages 2204 kernel/power/snapshot.c nr_meta_pages = info->pages - info->image_pages - 1;
pages 906 kernel/power/swap.c unsigned long pages;
pages 909 kernel/power/swap.c pages = snapshot_get_image_size();
pages 916 kernel/power/swap.c if (!enough_swap(pages)) {
pages 934 kernel/power/swap.c save_image(&handle, &snapshot, pages - 1) :
pages 935 kernel/power/swap.c save_image_lzo(&handle, &snapshot, pages - 1);
pages 1493 kernel/power/swap.c load_image(&handle, &snapshot, header->pages - 1) :
pages 1494 kernel/power/swap.c load_image_lzo(&handle, &snapshot, header->pages - 1);
pages 1214 kernel/relay.c struct page *pages[PIPE_DEF_BUFFERS];
pages 1217 kernel/relay.c .pages = pages,
pages 1252 kernel/relay.c spd.pages[spd.nr_pages] = rbuf->page_array[pidx];
pages 2394 kernel/sched/fair.c void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
pages 2457 kernel/sched/fair.c p->numa_pages_migrated += pages;
pages 2459 kernel/sched/fair.c p->numa_faults_locality[2] += pages;
pages 2461 kernel/sched/fair.c p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
pages 2462 kernel/sched/fair.c p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
pages 2463 kernel/sched/fair.c p->numa_faults_locality[local] += pages;
pages 2493 kernel/sched/fair.c long pages, virtpages;
pages 2537 kernel/sched/fair.c pages = sysctl_numa_balancing_scan_size;
pages 2538 kernel/sched/fair.c pages <<= 20 - PAGE_SHIFT; /* MB in pages */
pages 2539 kernel/sched/fair.c virtpages = pages * 8; /* Scan up to this much virtual space */
pages 2540 kernel/sched/fair.c if (!pages)
pages 2577 kernel/sched/fair.c end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
pages 2590 kernel/sched/fair.c pages -= (end - start) >> PAGE_SHIFT;
pages 2594 kernel/sched/fair.c if (pages <= 0 || virtpages <= 0)
pages 408 kernel/trace/ftrace.c struct ftrace_profile_page *pages;
pages 576 kernel/trace/ftrace.c pg = stat->pages = stat->start;
pages 592 kernel/trace/ftrace.c int pages;
pages 596 kernel/trace/ftrace.c if (stat->pages)
pages 599 kernel/trace/ftrace.c stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
pages 600 kernel/trace/ftrace.c if (!stat->pages)
pages 616 kernel/trace/ftrace.c pg = stat->start = stat->pages;
pages 618 kernel/trace/ftrace.c pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
pages 620 kernel/trace/ftrace.c for (i = 1; i < pages; i++) {
pages 638 kernel/trace/ftrace.c stat->pages = NULL;
pages 743 kernel/trace/ftrace.c if (stat->pages->index == PROFILES_PER_PAGE) {
pages 744 kernel/trace/ftrace.c if (!stat->pages->next)
pages 746 kernel/trace/ftrace.c stat->pages = stat->pages->next;
pages 749 kernel/trace/ftrace.c rec = &stat->pages->records[stat->pages->index++];
pages 453 kernel/trace/ring_buffer.c struct list_head *pages;
pages 942 kernel/trace/ring_buffer.c rb_list_head_clear(cpu_buffer->pages);
pages 944 kernel/trace/ring_buffer.c list_for_each(hd, cpu_buffer->pages)
pages 1018 kernel/trace/ring_buffer.c list = cpu_buffer->pages;
pages 1156 kernel/trace/ring_buffer.c struct list_head *head = cpu_buffer->pages;
pages 1189 kernel/trace/ring_buffer.c static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
pages 1233 kernel/trace/ring_buffer.c list_add(&bpage->list, pages);
pages 1250 kernel/trace/ring_buffer.c list_for_each_entry_safe(bpage, tmp, pages, list) {
pages 1263 kernel/trace/ring_buffer.c LIST_HEAD(pages);
pages 1267 kernel/trace/ring_buffer.c if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
pages 1275 kernel/trace/ring_buffer.c cpu_buffer->pages = pages.next;
pages 1276 kernel/trace/ring_buffer.c list_del(&pages);
pages 1331 kernel/trace/ring_buffer.c = list_entry(cpu_buffer->pages, struct buffer_page, list);
pages 1348 kernel/trace/ring_buffer.c struct list_head *head = cpu_buffer->pages;
pages 1549 kernel/trace/ring_buffer.c cpu_buffer->pages = next_page;
pages 1566 kernel/trace/ring_buffer.c RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
pages 1609 kernel/trace/ring_buffer.c struct list_head *pages = &cpu_buffer->new_pages;
pages 1639 kernel/trace/ring_buffer.c first_page = pages->next;
pages 1640 kernel/trace/ring_buffer.c last_page = pages->prev;
pages 1663 kernel/trace/ring_buffer.c INIT_LIST_HEAD(pages);
pages 3738 kernel/trace/ring_buffer.c cpu_buffer->pages = reader->list.prev;
pages 4356 kernel/trace/ring_buffer.c = list_entry(cpu_buffer->pages, struct buffer_page, list);
pages 6106 kernel/trace/trace.c __free_page(spd->pages[idx]);
pages 6173 kernel/trace/trace.c .pages = pages_def,
pages 6210 kernel/trace/trace.c spd.pages[i] = alloc_page(GFP_KERNEL);
pages 6211 kernel/trace/trace.c if (!spd.pages[i])
pages 6218 kernel/trace/trace.c page_address(spd.pages[i]),
pages 6221 kernel/trace/trace.c __free_page(spd.pages[i]);
pages 7415 kernel/trace/trace.c .pages = pages_def,
pages 7478 kernel/trace/trace.c spd.pages[i] = page;
pages 290 kernel/trace/tracing_map.c if (!a->pages)
pages 294 kernel/trace/tracing_map.c memset(a->pages[i], 0, PAGE_SIZE);
pages 304 kernel/trace/tracing_map.c if (!a->pages)
pages 308 kernel/trace/tracing_map.c if (!a->pages[i])
pages 310 kernel/trace/tracing_map.c free_page((unsigned long)a->pages[i]);
pages 313 kernel/trace/tracing_map.c kfree(a->pages);
pages 337 kernel/trace/tracing_map.c a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
pages 338 kernel/trace/tracing_map.c if (!a->pages)
pages 342 kernel/trace/tracing_map.c a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
pages 343 kernel/trace/tracing_map.c if (!a->pages[i])
pages 170 kernel/trace/tracing_map.h void **pages;
pages 174 kernel/trace/tracing_map.h (array->pages[idx >> array->entry_shift] + \
pages 1233 lib/iov_iter.c struct page **pages,
pages 1245 lib/iov_iter.c get_page(*pages++ = pipe->bufs[idx].page);
pages 1254 lib/iov_iter.c struct page **pages, size_t maxsize, unsigned maxpages,
pages 1272 lib/iov_iter.c return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
pages 1276 lib/iov_iter.c struct page **pages, size_t maxsize, unsigned maxpages,
pages 1283 lib/iov_iter.c return pipe_get_pages(i, pages, maxsize, maxpages, start);
pages 1299 lib/iov_iter.c pages);
pages 1306 lib/iov_iter.c get_page(*pages = v.bv_page);
pages 1322 lib/iov_iter.c struct page ***pages, size_t maxsize,
pages 1349 lib/iov_iter.c *pages = p;
pages 1356 lib/iov_iter.c struct page ***pages, size_t maxsize,
pages 1365 lib/iov_iter.c return pipe_get_pages_alloc(i, pages, maxsize, start);
pages 1386 lib/iov_iter.c *pages = p;
pages 1391 lib/iov_iter.c *pages = p = get_pages_array(1);
pages 389 lib/scatterlist.c int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
pages 407 lib/scatterlist.c page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
pages 427 lib/scatterlist.c page_to_pfn(pages[j]) !=
pages 428 lib/scatterlist.c page_to_pfn(pages[j - 1]) + 1)
pages 433 lib/scatterlist.c sg_set_page(s, pages[cur_page],
pages 464 lib/scatterlist.c int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
pages 468 lib/scatterlist.c return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
pages 152 mm/backing-dev.c #define K(pages) ((pages) << (PAGE_SHIFT - 10))
pages 41 mm/balloon_compaction.c struct list_head *pages)
pages 48 mm/balloon_compaction.c list_for_each_entry_safe(page, tmp, pages, lru) {
pages 77 mm/balloon_compaction.c struct list_head *pages, size_t n_req_pages)
pages 84 mm/balloon_compaction.c list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
pages 104 mm/balloon_compaction.c list_add(&page->lru, pages);
pages 180 mm/balloon_compaction.c LIST_HEAD(pages);
pages 183 mm/balloon_compaction.c n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);
pages 194 mm/balloon_compaction.c if (unlikely(list_empty(&b_dev_info->pages) &&
pages 200 mm/balloon_compaction.c return list_first_entry(&pages, struct page, lru);
pages 226 mm/balloon_compaction.c list_add(&page->lru, &b_dev_info->pages);
pages 78 mm/cma.c unsigned long pages)
pages 80 mm/cma.c return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
pages 514 mm/cma.c bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
pages 518 mm/cma.c if (!cma || !pages)
pages 521 mm/cma.c pr_debug("%s(page %p)\n", __func__, (void *)pages);
pages 523 mm/cma.c pfn = page_to_pfn(pages);
pages 532 mm/cma.c trace_cma_release(pfn, pages, count);
pages 124 mm/cma_debug.c int pages = val;
pages 127 mm/cma_debug.c return cma_free_mem(cma, pages);
pages 156 mm/cma_debug.c int pages = val;
pages 159 mm/cma_debug.c return cma_alloc_mem(cma, pages);
pages 82 mm/dmapool.c unsigned pages = 0;
pages 87 mm/dmapool.c pages++;
pages 95 mm/dmapool.c pages * (pool->allocation / pool->size),
pages 96 mm/dmapool.c pool->size, pages);
pages 297 mm/filemap.c XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
pages 317 mm/filemap.c if (page != pvec->pages[i]) {
pages 318 mm/filemap.c VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
pages 353 mm/filemap.c trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
pages 355 mm/filemap.c unaccount_page_cache_page(mapping, pvec->pages[i]);
pages 361 mm/filemap.c page_cache_free_page(mapping, pvec->pages[i]);
pages 524 mm/filemap.c struct page *page = pvec.pages[i];
pages 1793 mm/filemap.c struct page **pages)
pages 1817 mm/filemap.c pages[ret] = find_subpage(page, xas.xa_index);
pages 1858 mm/filemap.c unsigned int nr_pages, struct page **pages)
pages 1885 mm/filemap.c pages[ret] = find_subpage(page, xas.xa_index);
pages 1915 mm/filemap.c struct page **pages)
pages 1943 mm/filemap.c pages[ret] = find_subpage(page, xas.xa_index);
pages 126 mm/frame_vector.c struct page **pages;
pages 130 mm/frame_vector.c pages = frame_vector_pages(vec);
pages 136 mm/frame_vector.c if (WARN_ON(IS_ERR(pages)))
pages 139 mm/frame_vector.c put_page(pages[i]);
pages 158 mm/frame_vector.c struct page **pages;
pages 166 mm/frame_vector.c pages = (struct page **)nums;
pages 168 mm/frame_vector.c pages[i] = pfn_to_page(nums[i]);
pages 184 mm/frame_vector.c struct page **pages;
pages 188 mm/frame_vector.c pages = (struct page **)(vec->ptrs);
pages 189 mm/frame_vector.c nums = (unsigned long *)pages;
pages 191 mm/frame_vector.c nums[i] = page_to_pfn(pages[i]);
pages 389 mm/frontswap.c unsigned long pages = 0, pages_to_unuse = 0;
pages 395 mm/frontswap.c pages = pages_to_unuse = total_pages_to_unuse;
pages 397 mm/frontswap.c pages = si_frontswap_pages;
pages 401 mm/frontswap.c if (security_vm_enough_memory_mm(current->mm, pages)) {
pages 405 mm/frontswap.c vm_unacct_memory(pages);
pages 54 mm/gup.c void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
pages 66 mm/gup.c put_user_pages(pages, npages);
pages 71 mm/gup.c struct page *page = compound_head(pages[index]);
pages 108 mm/gup.c void put_user_pages(struct page **pages, unsigned long npages)
pages 118 mm/gup.c put_user_page(pages[index]);
pages 790 mm/gup.c unsigned int gup_flags, struct page **pages,
pages 802 mm/gup.c VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
pages 823 mm/gup.c pages ? &pages[i] : NULL);
pages 837 mm/gup.c i = follow_hugetlb_page(mm, vma, pages, vmas,
pages 886 mm/gup.c if (pages) {
pages 887 mm/gup.c pages[i] = page;
pages 1017 mm/gup.c struct page **pages,
pages 1032 mm/gup.c if (pages)
pages 1038 mm/gup.c ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
pages 1069 mm/gup.c if (likely(pages))
pages 1070 mm/gup.c pages += ret;
pages 1082 mm/gup.c pages, NULL, NULL);
pages 1093 mm/gup.c if (likely(pages))
pages 1094 mm/gup.c pages++;
pages 1166 mm/gup.c unsigned int gup_flags, struct page **pages,
pages 1178 mm/gup.c return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
pages 1333 mm/gup.c unsigned long nr_pages, struct page **pages,
pages 1359 mm/gup.c if (pages) {
pages 1360 mm/gup.c pages[i] = virt_to_page(start);
pages 1361 mm/gup.c if (pages[i])
pages 1362 mm/gup.c get_page(pages[i]);
pages 1452 mm/gup.c struct page **pages,
pages 1465 mm/gup.c struct page *head = compound_head(pages[i]);
pages 1471 mm/gup.c step = compound_nr(head) - (pages[i] - head);
pages 1504 mm/gup.c put_page(pages[i]);
pages 1523 mm/gup.c pages, vmas, NULL,
pages 1539 mm/gup.c struct page **pages,
pages 1555 mm/gup.c struct page **pages,
pages 1564 mm/gup.c if (!pages)
pages 1577 mm/gup.c rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
pages 1587 mm/gup.c put_page(pages[i]);
pages 1592 mm/gup.c rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
pages 1606 mm/gup.c struct page **pages,
pages 1610 mm/gup.c return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
pages 1623 mm/gup.c unsigned int gup_flags, struct page **pages,
pages 1627 mm/gup.c pages, vmas, gup_flags | FOLL_TOUCH);
pages 1653 mm/gup.c unsigned int gup_flags, struct page **pages,
pages 1666 mm/gup.c pages, NULL, locked,
pages 1687 mm/gup.c struct page **pages, unsigned int gup_flags)
pages 1703 mm/gup.c ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
pages 1801 mm/gup.c struct page **pages)
pages 1804 mm/gup.c struct page *page = pages[--(*nr)];
pages 1827 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 1854 mm/gup.c undo_dev_pagemap(nr, nr_start, pages);
pages 1875 mm/gup.c pages[*nr] = page;
pages 1900 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 1908 mm/gup.c unsigned long end, struct page **pages, int *nr)
pages 1918 mm/gup.c undo_dev_pagemap(nr, nr_start, pages);
pages 1922 mm/gup.c pages[*nr] = page;
pages 1934 mm/gup.c unsigned long end, struct page **pages, int *nr)
pages 1940 mm/gup.c if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
pages 1944 mm/gup.c undo_dev_pagemap(nr, nr_start, pages);
pages 1951 mm/gup.c unsigned long end, struct page **pages, int *nr)
pages 1957 mm/gup.c if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
pages 1961 mm/gup.c undo_dev_pagemap(nr, nr_start, pages);
pages 1968 mm/gup.c unsigned long end, struct page **pages, int *nr)
pages 1975 mm/gup.c unsigned long end, struct page **pages, int *nr)
pages 1992 mm/gup.c struct page **pages, int *nr)
pages 2017 mm/gup.c pages[*nr] = page;
pages 2043 mm/gup.c struct page **pages, int *nr)
pages 2052 mm/gup.c if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
pages 2061 mm/gup.c struct page **pages, int *nr)
pages 2069 mm/gup.c struct page **pages, int *nr)
pages 2080 mm/gup.c return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
pages 2086 mm/gup.c pages[*nr] = page;
pages 2110 mm/gup.c unsigned long end, unsigned int flags, struct page **pages, int *nr)
pages 2121 mm/gup.c return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
pages 2127 mm/gup.c pages[*nr] = page;
pages 2152 mm/gup.c struct page **pages, int *nr)
pages 2164 mm/gup.c pages[*nr] = page;
pages 2188 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 2212 mm/gup.c pages, nr))
pages 2221 mm/gup.c PMD_SHIFT, next, flags, pages, nr))
pages 2223 mm/gup.c } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
pages 2231 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 2245 mm/gup.c pages, nr))
pages 2249 mm/gup.c PUD_SHIFT, next, flags, pages, nr))
pages 2251 mm/gup.c } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
pages 2259 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 2274 mm/gup.c P4D_SHIFT, next, flags, pages, nr))
pages 2276 mm/gup.c } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr))
pages 2284 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 2298 mm/gup.c pages, nr))
pages 2302 mm/gup.c PGDIR_SHIFT, next, flags, pages, nr))
pages 2304 mm/gup.c } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
pages 2310 mm/gup.c unsigned int flags, struct page **pages, int *nr)
pages 2340 mm/gup.c struct page **pages)
pages 2376 mm/gup.c gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
pages 2385 mm/gup.c unsigned int gup_flags, struct page **pages)
pages 2397 mm/gup.c pages, NULL, gup_flags);
pages 2401 mm/gup.c pages, gup_flags);
pages 2424 mm/gup.c unsigned int gup_flags, struct page **pages)
pages 2453 mm/gup.c gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr);
pages 2461 mm/gup.c pages += nr;
pages 2464 mm/gup.c gup_flags, pages);
pages 28 mm/gup_benchmark.c struct page **pages;
pages 35 mm/gup_benchmark.c pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
pages 36 mm/gup_benchmark.c if (!pages)
pages 55 mm/gup_benchmark.c pages + i);
pages 60 mm/gup_benchmark.c pages + i, NULL);
pages 63 mm/gup_benchmark.c nr = get_user_pages(addr, nr, gup->flags & 1, pages + i,
pages 67 mm/gup_benchmark.c kvfree(pages);
pages 83 mm/gup_benchmark.c if (!pages[i])
pages 85 mm/gup_benchmark.c put_page(pages[i]);
pages 90 mm/gup_benchmark.c kvfree(pages);
pages 116 mm/highmem.c unsigned int pages = 0;
pages 120 mm/highmem.c pages += zone_page_state(zone, NR_FREE_PAGES);
pages 123 mm/highmem.c return pages;
pages 1208 mm/huge_memory.c struct page **pages;
pages 1211 mm/huge_memory.c pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
pages 1213 mm/huge_memory.c if (unlikely(!pages)) {
pages 1219 mm/huge_memory.c pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
pages 1221 mm/huge_memory.c if (unlikely(!pages[i] ||
pages 1222 mm/huge_memory.c mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
pages 1224 mm/huge_memory.c if (pages[i])
pages 1225 mm/huge_memory.c put_page(pages[i]);
pages 1227 mm/huge_memory.c memcg = (void *)page_private(pages[i]);
pages 1228 mm/huge_memory.c set_page_private(pages[i], 0);
pages 1229 mm/huge_memory.c mem_cgroup_cancel_charge(pages[i], memcg,
pages 1231 mm/huge_memory.c put_page(pages[i]);
pages 1233 mm/huge_memory.c kfree(pages);
pages 1237 mm/huge_memory.c set_page_private(pages[i], (unsigned long)memcg);
pages 1241 mm/huge_memory.c copy_user_highpage(pages[i], page + i,
pages 1243 mm/huge_memory.c __SetPageUptodate(pages[i]);
pages 1271 mm/huge_memory.c entry = mk_pte(pages[i], vma->vm_page_prot);
pages 1273 mm/huge_memory.c memcg = (void *)page_private(pages[i]);
pages 1274 mm/huge_memory.c set_page_private(pages[i], 0);
pages 1275 mm/huge_memory.c page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
pages 1276 mm/huge_memory.c mem_cgroup_commit_charge(pages[i], memcg, false, false);
pages 1277 mm/huge_memory.c lru_cache_add_active_or_unevictable(pages[i], vma);
pages 1283 mm/huge_memory.c kfree(pages);
pages 1306 mm/huge_memory.c memcg = (void *)page_private(pages[i]);
pages 1307 mm/huge_memory.c set_page_private(pages[i], 0);
pages 1308 mm/huge_memory.c mem_cgroup_cancel_charge(pages[i], memcg, false);
pages 1309 mm/huge_memory.c put_page(pages[i]);
pages 1311 mm/huge_memory.c kfree(pages);
pages 4388 mm/hugetlb.c struct page **pages, struct vm_area_struct **vmas,
pages 4504 mm/hugetlb.c if (pages) {
pages 4512 mm/hugetlb.c if (pages) {
pages 4513 mm/hugetlb.c pages[i] = mem_map_offset(page, pfn_offset);
pages 4514 mm/hugetlb.c get_page(pages[i]);
pages 4561 mm/hugetlb.c unsigned long pages = 0;
pages 4586 mm/hugetlb.c pages++;
pages 4606 mm/hugetlb.c pages++;
pages 4618 mm/hugetlb.c pages++;
pages 4642 mm/hugetlb.c return pages << h->order;
pages 184 mm/khugepaged.c unsigned long pages;
pages 186 mm/khugepaged.c err = kstrtoul(buf, 10, &pages);
pages 187 mm/khugepaged.c if (err || !pages || pages > UINT_MAX)
pages 190 mm/khugepaged.c khugepaged_pages_to_scan = pages;
pages 1903 mm/khugepaged.c static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
pages 1913 mm/khugepaged.c VM_BUG_ON(!pages);
pages 1995 mm/khugepaged.c if (progress >= pages)
pages 2047 mm/khugepaged.c unsigned int pages = khugepaged_pages_to_scan;
pages 2052 mm/khugepaged.c while (progress < pages) {
pages 2066 mm/khugepaged.c progress += khugepaged_scan_mm_slot(pages - progress,
pages 2069 mm/khugepaged.c progress = pages;
pages 1599 mm/memblock.c unsigned long pages = 0;
pages 1608 mm/memblock.c pages += end_pfn - start_pfn;
pages 1611 mm/memblock.c return PFN_PHYS(pages);
pages 1976 mm/memblock.c unsigned long pages;
pages 1980 mm/memblock.c pages = free_low_memory_core_early();
pages 1981 mm/memblock.c totalram_pages_add(pages);
pages 1983 mm/memblock.c return pages;
pages 1511 mm/memory.c static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
pages 1527 mm/memory.c ret = vm_insert_page(vma, uaddr, pages[offset + i]);
pages 1554 mm/memory.c int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
pages 1557 mm/memory.c return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
pages 1574 mm/memory.c int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
pages 1577 mm/memory.c return __vm_map_pages(vma, pages, num, 0);
pages 1972 mm/memory.c unsigned long vm_len, pfn, pages;
pages 1984 mm/memory.c pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
pages 1985 mm/memory.c if (pfn + pages < pfn)
pages 1989 mm/memory.c if (vma->vm_pgoff > pages)
pages 1992 mm/memory.c pages -= vma->vm_pgoff;
pages 1996 mm/memory.c if (vm_len >> PAGE_SHIFT > pages)
pages 1596 mm/migrate.c const void __user * __user *pages,
pages 1613 mm/migrate.c if (get_user(p, pages + i))
pages 1714 mm/migrate.c const void __user **pages, int *status)
pages 1721 mm/migrate.c unsigned long addr = (unsigned long)(*pages);
pages 1741 mm/migrate.c pages++;
pages 1753 mm/migrate.c const void __user * __user *pages,
pages 1767 mm/migrate.c if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
pages 1775 mm/migrate.c pages += chunk_nr;
pages 1787 mm/migrate.c const void __user * __user *pages,
pages 1835 mm/migrate.c err = do_pages_move(mm, task_nodes, nr_pages, pages,
pages 1838 mm/migrate.c err = do_pages_stat(mm, nr_pages, pages, status);
pages 1849 mm/migrate.c const void __user * __user *, pages,
pages 1853 mm/migrate.c return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
pages 1863 mm/migrate.c const void __user * __user *pages;
pages 1866 mm/migrate.c pages = compat_alloc_user_space(nr_pages * sizeof(void *));
pages 1871 mm/migrate.c put_user(compat_ptr(p), pages + i))
pages 1874 mm/migrate.c return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
pages 207 mm/mincore.c static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
pages 216 mm/mincore.c end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
pages 218 mm/mincore.c unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
pages 219 mm/mincore.c memset(vec, 1, pages);
pages 220 mm/mincore.c return pages;
pages 256 mm/mincore.c unsigned long pages;
pages 270 mm/mincore.c pages = len >> PAGE_SHIFT;
pages 271 mm/mincore.c pages += (offset_in_page(len)) != 0;
pages 273 mm/mincore.c if (!access_ok(vec, pages))
pages 281 mm/mincore.c while (pages) {
pages 287 mm/mincore.c retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
pages 296 mm/mincore.c pages -= retval;
pages 303 mm/mlock.c struct page *page = pvec->pages[i];
pages 324 mm/mlock.c pagevec_add(&pvec_putback, pvec->pages[i]);
pages 325 mm/mlock.c pvec->pages[i] = NULL;
pages 335 mm/mlock.c struct page *page = pvec->pages[i];
pages 3367 mm/mmap.c struct page **pages;
pages 3370 mm/mmap.c pages = vma->vm_private_data;
pages 3377 mm/mmap.c pages = sm->pages;
pages 3380 mm/mmap.c for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pages 3383 mm/mmap.c if (*pages) {
pages 3384 mm/mmap.c struct page *page = *pages;
pages 3458 mm/mmap.c unsigned long vm_flags, struct page **pages)
pages 3461 mm/mmap.c mm, addr, len, vm_flags, (void *)pages,
pages 49 mm/mmu_gather.c free_pages_and_swap_cache(batch->pages, batch->nr);
pages 81 mm/mmu_gather.c batch->pages[batch->nr++] = page;
pages 44 mm/mprotect.c unsigned long pages = 0;
pages 124 mm/mprotect.c pages++;
pages 140 mm/mprotect.c pages++;
pages 154 mm/mprotect.c pages++;
pages 161 mm/mprotect.c return pages;
pages 195 mm/mprotect.c unsigned long pages = 0;
pages 236 mm/mprotect.c pages += HPAGE_PMD_NR;
pages 248 mm/mprotect.c pages += this_pages;
pages 258 mm/mprotect.c return pages;
pages 267 mm/mprotect.c unsigned long pages = 0;
pages 274 mm/mprotect.c pages += change_pmd_range(vma, pud, addr, next, newprot,
pages 278 mm/mprotect.c return pages;
pages 287 mm/mprotect.c unsigned long pages = 0;
pages 294 mm/mprotect.c pages += change_pud_range(vma, p4d, addr, next, newprot,
pages 298 mm/mprotect.c return pages;
pages 309 mm/mprotect.c unsigned long pages = 0;
pages 319 mm/mprotect.c pages += change_p4d_range(vma, pgd, addr, next, newprot,
pages 324 mm/mprotect.c if (pages)
pages 328 mm/mprotect.c return pages;
pages 335 mm/mprotect.c unsigned long pages;
pages 338 mm/mprotect.c pages = hugetlb_change_protection(vma, start, end, newprot);
pages 340 mm/mprotect.c pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
pages 342 mm/mprotect.c return pages;
pages 685 mm/mremap.c int pages = (new_len - old_len) >> PAGE_SHIFT;
pages 693 mm/mremap.c vm_stat_account(mm, vma->vm_flags, pages);
pages 695 mm/mremap.c mm->locked_vm += pages;
pages 330 mm/nommu.c void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
pages 343 mm/nommu.c void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
pages 393 mm/nommu.c int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
pages 400 mm/nommu.c int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
pages 1448 mm/page-writeback.c int pages; /* target nr_dirtied_pause */
pages 1481 mm/page-writeback.c pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
pages 1491 mm/page-writeback.c if (pages < DIRTY_POLL_THRESH) {
pages 1493 mm/page-writeback.c pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
pages 1494 mm/page-writeback.c if (pages > DIRTY_POLL_THRESH) {
pages 1495 mm/page-writeback.c pages = DIRTY_POLL_THRESH;
pages 1500 mm/page-writeback.c pause = HZ * pages / (task_ratelimit + 1);
pages 1503 mm/page-writeback.c pages = task_ratelimit * t / roundup_pow_of_two(HZ);
pages 1506 mm/page-writeback.c *nr_dirtied_pause = pages;
pages 1510 mm/page-writeback.c return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
pages 2201 mm/page-writeback.c struct page *page = pvec.pages[i];
pages 5103 mm/page_alloc.c unsigned long pages[NR_LRU_LISTS];
pages 5109 mm/page_alloc.c pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
pages 5125 mm/page_alloc.c pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
pages 6661 mm/page_alloc.c unsigned long pages = spanned_pages;
pages 6673 mm/page_alloc.c pages = present_pages;
pages 6675 mm/page_alloc.c return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
pages 7090 mm/page_alloc.c unsigned long pages = end_pfn - start_pfn;
pages 7092 mm/page_alloc.c totalpages += pages;
pages 7093 mm/page_alloc.c if (pages)
pages 7496 mm/page_alloc.c unsigned long pages = 0;
pages 7500 mm/page_alloc.c for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
pages 7518 mm/page_alloc.c if (pages && s)
pages 7520 mm/page_alloc.c s, pages << (PAGE_SHIFT - 10));
pages 7522 mm/page_alloc.c return pages;
pages 230 mm/page_io.c sis->pages = page_no - 1;
pages 92 mm/percpu-internal.h static inline int pcpu_nr_pages_to_map_bits(int pages)
pages 94 mm/percpu-internal.h return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
pages 51 mm/percpu-km.c struct page *pages;
pages 59 mm/percpu-km.c pages = alloc_pages(gfp, order_base_2(nr_pages));
pages 60 mm/percpu-km.c if (!pages) {
pages 66 mm/percpu-km.c pcpu_set_page_chunk(nth_page(pages, i), chunk);
pages 68 mm/percpu-km.c chunk->data = pages;
pages 69 mm/percpu-km.c chunk->base_addr = page_address(pages);
pages 33 mm/percpu-vm.c static struct page **pages;
pages 34 mm/percpu-vm.c size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
pages 38 mm/percpu-vm.c if (!pages)
pages 39 mm/percpu-vm.c pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
pages 40 mm/percpu-vm.c return pages;
pages 54 mm/percpu-vm.c struct page **pages, int page_start, int page_end)
pages 61 mm/percpu-vm.c struct page *page = pages[pcpu_page_idx(cpu, i)];
pages 82 mm/percpu-vm.c struct page **pages, int page_start, int page_end,
pages 92 mm/percpu-vm.c struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
pages 103 mm/percpu-vm.c __free_page(pages[pcpu_page_idx(cpu, i)]);
pages 109 mm/percpu-vm.c __free_page(pages[pcpu_page_idx(tcpu, i)]);
pages 153 mm/percpu-vm.c struct page **pages, int page_start, int page_end)
pages 164 mm/percpu-vm.c pages[pcpu_page_idx(cpu, i)] = page;
pages 192 mm/percpu-vm.c static int __pcpu_map_pages(unsigned long addr, struct page **pages,
pages 196 mm/percpu-vm.c PAGE_KERNEL, pages);
pages 214 mm/percpu-vm.c struct page **pages, int page_start, int page_end)
pages 221 mm/percpu-vm.c &pages[pcpu_page_idx(cpu, page_start)],
pages 227 mm/percpu-vm.c pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
pages 278 mm/percpu-vm.c struct page **pages;
pages 280 mm/percpu-vm.c pages = pcpu_get_pages();
pages 281 mm/percpu-vm.c if (!pages)
pages 284 mm/percpu-vm.c if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
pages 287 mm/percpu-vm.c if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
pages 288 mm/percpu-vm.c pcpu_free_pages(chunk, pages, page_start, page_end);
pages 311 mm/percpu-vm.c struct page **pages;
pages 318 mm/percpu-vm.c pages = pcpu_get_pages();
pages 319 mm/percpu-vm.c BUG_ON(!pages);
pages 324 mm/percpu-vm.c pcpu_unmap_pages(chunk, pages, page_start, page_end);
pages 328 mm/percpu-vm.c pcpu_free_pages(chunk, pages, page_start, page_end);
pages 2837 mm/percpu.c struct page **pages;
pages 2859 mm/percpu.c sizeof(pages[0]));
pages 2860 mm/percpu.c pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
pages 2861 mm/percpu.c if (!pages)
pages 2880 mm/percpu.c pages[j++] = virt_to_page(ptr);
pages 2897 mm/percpu.c rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
pages 2924 mm/percpu.c free_fn(page_address(pages[j]), PAGE_SIZE);
pages 2927 mm/percpu.c memblock_free_early(__pa(pages), pages_size);
pages 30 mm/process_vm_access.c static int process_vm_rw_pages(struct page **pages,
pages 38 mm/process_vm_access.c struct page *page = *pages++;
pages 87 mm/process_vm_access.c / sizeof(struct pages *);
pages 99 mm/process_vm_access.c int pages = min(nr_pages, max_pages_per_loop);
pages 109 mm/process_vm_access.c pages = get_user_pages_remote(task, mm, pa, pages, flags,
pages 113 mm/process_vm_access.c if (pages <= 0)
pages 116 mm/process_vm_access.c bytes = pages * PAGE_SIZE - start_offset;
pages 125 mm/process_vm_access.c nr_pages -= pages;
pages 126 mm/process_vm_access.c pa += pages * PAGE_SIZE;
pages 127 mm/process_vm_access.c while (pages)
pages 128 mm/process_vm_access.c put_page(process_pages[--pages]);
pages 189 mm/process_vm_access.c sizeof(struct pages *)*nr_pages),
pages 65 mm/readahead.c struct list_head *pages)
pages 69 mm/readahead.c while (!list_empty(pages)) {
pages 70 mm/readahead.c victim = lru_to_page(pages);
pages 88 mm/readahead.c int read_cache_pages(struct address_space *mapping, struct list_head *pages,
pages 94 mm/readahead.c while (!list_empty(pages)) {
pages 95 mm/readahead.c page = lru_to_page(pages);
pages 106 mm/readahead.c read_cache_pages_invalidate_pages(mapping, pages);
pages 117 mm/readahead.c struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
pages 126 mm/readahead.c ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
pages 128 mm/readahead.c put_pages_list(pages);
pages 133 mm/readahead.c struct page *page = lru_to_page(pages);
pages 200 mm/shmem.c static inline int shmem_acct_block(unsigned long flags, long pages)
pages 206 mm/shmem.c pages * VM_ACCT(PAGE_SIZE));
pages 209 mm/shmem.c static inline void shmem_unacct_blocks(unsigned long flags, long pages)
pages 212 mm/shmem.c vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
pages 215 mm/shmem.c static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
pages 220 mm/shmem.c if (shmem_acct_block(info->flags, pages))
pages 225 mm/shmem.c sbinfo->max_blocks - pages) > 0)
pages 227 mm/shmem.c percpu_counter_add(&sbinfo->used_blocks, pages);
pages 233 mm/shmem.c shmem_unacct_blocks(info->flags, pages);
pages 237 mm/shmem.c static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
pages 243 mm/shmem.c percpu_counter_sub(&sbinfo->used_blocks, pages);
pages 244 mm/shmem.c shmem_unacct_blocks(info->flags, pages);
pages 314 mm/shmem.c bool shmem_charge(struct inode *inode, long pages)
pages 319 mm/shmem.c if (!shmem_inode_acct_block(inode, pages))
pages 323 mm/shmem.c inode->i_mapping->nrpages += pages;
pages 326 mm/shmem.c info->alloced += pages;
pages 327 mm/shmem.c inode->i_blocks += pages * BLOCKS_PER_PAGE;
pages 334 mm/shmem.c void shmem_uncharge(struct inode *inode, long pages)
pages 342 mm/shmem.c info->alloced -= pages;
pages 343 mm/shmem.c inode->i_blocks -= pages * BLOCKS_PER_PAGE;
pages 347 mm/shmem.c shmem_inode_unacct_blocks(inode, pages);
pages 780 mm/shmem.c PAGEVEC_SIZE, pvec.pages, indices);
pages 818 mm/shmem.c pvec.pages, indices);
pages 822 mm/shmem.c struct page *page = pvec.pages[i];
pages 909 mm/shmem.c pvec.pages, indices);
pages 919 mm/shmem.c struct page *page = pvec.pages[i];
pages 1177 mm/shmem.c struct page *page = pvec.pages[i];
pages 1218 mm/shmem.c pvec.pages, indices,
pages 2655 mm/shmem.c pvec.nr, pvec.pages, indices);
pages 2669 mm/shmem.c page = pvec.pages[i];
pages 1713 mm/slub.c int pages = 1 << order;
pages 1729 mm/slub.c current->reclaim_state->reclaimed_slab += pages;
pages 2253 mm/slub.c int pages;
pages 2258 mm/slub.c pages = 0;
pages 2264 mm/slub.c pages = oldpage->pages;
pages 2276 mm/slub.c pages = 0;
pages 2281 mm/slub.c pages++;
pages 2284 mm/slub.c page->pages = pages;
pages 4850 mm/slub.c x = page->pages;
pages 5074 mm/slub.c int pages = 0;
pages 5084 mm/slub.c pages += page->pages;
pages 5089 mm/slub.c len = sprintf(buf, "%d(%d)", objects, pages);
pages 5099 mm/slub.c page->pobjects, page->pages);
pages 126 mm/swap.c void put_pages_list(struct list_head *pages)
pages 128 mm/swap.c while (!list_empty(pages)) {
pages 131 mm/swap.c victim = lru_to_page(pages);
pages 152 mm/swap.c struct page **pages)
pages 160 mm/swap.c pages[seg] = kmap_to_page(kiov[seg].iov_base);
pages 161 mm/swap.c get_page(pages[seg]);
pages 179 mm/swap.c int get_kernel_page(unsigned long start, int write, struct page **pages)
pages 186 mm/swap.c return get_kernel_pages(&kiov, 1, write, pages);
pages 200 mm/swap.c struct page *page = pvec->pages[i];
pages 215 mm/swap.c release_pages(pvec->pages, pvec->nr);
pages 352 mm/swap.c struct page *pagevec_page = pvec->pages[i];
pages 760 mm/swap.c void release_pages(struct page **pages, int nr)
pages 770 mm/swap.c struct page *page = pages[i];
pages 862 mm/swap.c release_pages(pvec->pages, pagevec_count(pvec));
pages 996 mm/swap.c pvec->pages, indices);
pages 1014 mm/swap.c struct page *page = pvec->pages[i];
pages 1016 mm/swap.c pvec->pages[j++] = page;
pages 1045 mm/swap.c pvec->pages);
pages 1055 mm/swap.c PAGEVEC_SIZE, pvec->pages);
pages 1065 mm/swap.c min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
pages 95 mm/swap_slots.c long pages;
pages 100 mm/swap_slots.c pages = get_nr_swap_pages();
pages 102 mm/swap_slots.c if (pages > num_online_cpus() *
pages 109 mm/swap_slots.c if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
pages 288 mm/swap_state.c void free_pages_and_swap_cache(struct page **pages, int nr)
pages 290 mm/swap_state.c struct page **pagep = pages;
pages 467 mm/swap_state.c unsigned int pages, last_ra;
pages 474 mm/swap_state.c pages = hits + 2;
pages 475 mm/swap_state.c if (pages == 2) {
pages 482 mm/swap_state.c pages = 1;
pages 485 mm/swap_state.c while (roundup < pages)
pages 487 mm/swap_state.c pages = roundup;
pages 490 mm/swap_state.c if (pages > max_pages)
pages 491 mm/swap_state.c pages = max_pages;
pages 495 mm/swap_state.c if (pages < last_ra)
pages 496 mm/swap_state.c pages = last_ra;
pages 498 mm/swap_state.c return pages;
pages 504 mm/swap_state.c unsigned int hits, pages, max_pages;
pages 512 mm/swap_state.c pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
pages 516 mm/swap_state.c atomic_set(&last_readahead_pages, pages);
pages 518 mm/swap_state.c return pages;
pages 683 mm/swapfile.c if (si->inuse_pages == si->pages) {
pages 769 mm/swapfile.c if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
pages 1832 mm/swapfile.c n = sis->pages;
pages 2390 mm/swapfile.c *span = sis->pages;
pages 2401 mm/swapfile.c *span = sis->pages;
pages 2453 mm/swapfile.c atomic_long_add(p->pages, &nr_swap_pages);
pages 2454 mm/swapfile.c total_swap_pages += p->pages;
pages 2557 mm/swapfile.c if (!security_vm_enough_memory_mm(current->mm, p->pages))
pages 2558 mm/swapfile.c vm_unacct_memory(p->pages);
pages 2581 mm/swapfile.c atomic_long_sub(p->pages, &nr_swap_pages);
pages 2582 mm/swapfile.c total_swap_pages -= p->pages;
pages 2772 mm/swapfile.c si->pages << (PAGE_SHIFT - 10),
pages 3049 mm/swapfile.c p->pages = nr_good_pages;
pages 3053 mm/swapfile.c nr_good_pages = p->pages;
pages 3298 mm/swapfile.c p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
pages 71 mm/truncate.c if (xa_is_value(pvec->pages[j]))
pages 83 mm/truncate.c struct page *page = pvec->pages[i];
pages 87 mm/truncate.c pvec->pages[j++] = page;
pages 341 mm/truncate.c struct page *page = pvec.pages[i];
pages 365 mm/truncate.c truncate_cleanup_page(mapping, locked_pvec.pages[i]);
pages 368 mm/truncate.c unlock_page(locked_pvec.pages[i]);
pages 433 mm/truncate.c struct page *page = pvec.pages[i];
pages 561 mm/truncate.c struct page *page = pvec.pages[i];
pages 704 mm/truncate.c struct page *page = pvec.pages[i];
pages 424 mm/util.c int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
pages 436 mm/util.c if (locked_vm + pages > limit)
pages 440 mm/util.c mm->locked_vm = locked_vm + pages;
pages 442 mm/util.c WARN_ON_ONCE(pages > locked_vm);
pages 443 mm/util.c mm->locked_vm = locked_vm - pages;
pages 447 mm/util.c (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
pages 467 mm/util.c int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
pages 471 mm/util.c if (pages == 0 || !mm)
pages 475 mm/util.c ret = __account_locked_vm(mm, pages, inc, current,
pages 805 mm/util.c int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
pages 813 mm/util.c vm_acct_memory(pages);
pages 822 mm/util.c if (pages > totalram_pages() + total_swap_pages)
pages 846 mm/util.c vm_unacct_memory(pages);
pages 139 mm/vmalloc.c unsigned long end, pgprot_t prot, struct page **pages, int *nr)
pages 152 mm/vmalloc.c struct page *page = pages[*nr];
pages 165 mm/vmalloc.c unsigned long end, pgprot_t prot, struct page **pages, int *nr)
pages 175 mm/vmalloc.c if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
pages 182 mm/vmalloc.c unsigned long end, pgprot_t prot, struct page **pages, int *nr)
pages 192 mm/vmalloc.c if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
pages 199 mm/vmalloc.c unsigned long end, pgprot_t prot, struct page **pages, int *nr)
pages 209 mm/vmalloc.c if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
pages 222 mm/vmalloc.c pgprot_t prot, struct page **pages)
pages 234 mm/vmalloc.c err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
pages 243 mm/vmalloc.c pgprot_t prot, struct page **pages)
pages 247 mm/vmalloc.c ret = vmap_page_range_noflush(start, end, prot, pages);
pages 1778 mm/vmalloc.c void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
pages 1799 mm/vmalloc.c if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
pages 1963 mm/vmalloc.c pgprot_t prot, struct page **pages)
pages 1965 mm/vmalloc.c return vmap_page_range_noflush(addr, addr + size, prot, pages);
pages 2006 mm/vmalloc.c int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
pages 2012 mm/vmalloc.c err = vmap_page_range(addr, end, prot, pages);
pages 2177 mm/vmalloc.c if (page_address(area->pages[i]))
pages 2178 mm/vmalloc.c set_direct_map(area->pages[i]);
pages 2210 mm/vmalloc.c unsigned long addr = (unsigned long)page_address(area->pages[i]);
pages 2255 mm/vmalloc.c struct page *page = area->pages[i];
pages 2262 mm/vmalloc.c kvfree(area->pages);
pages 2370 mm/vmalloc.c void *vmap(struct page **pages, unsigned int count,
pages 2386 mm/vmalloc.c if (map_vm_area(area, prot, pages)) {
pages 2401 mm/vmalloc.c struct page **pages;
pages 2414 mm/vmalloc.c pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
pages 2417 mm/vmalloc.c pages = kmalloc_node(array_size, nested_gfp, node);
pages 2420 mm/vmalloc.c if (!pages) {
pages 2426 mm/vmalloc.c area->pages = pages;
pages 2443 mm/vmalloc.c area->pages[i] = page;
pages 2449 mm/vmalloc.c if (map_vm_area(area, prot, pages))
pages 3466 mm/vmalloc.c counters[page_to_nid(v->pages[nr])]++;
pages 3538 mm/vmalloc.c if (is_vmalloc_addr(v->pages))
pages 4347 mm/vmscan.c struct page *page = pvec->pages[i];
pages 398 mm/workingset.c unsigned long pages;
pages 430 mm/workingset.c for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
pages 431 mm/workingset.c pages += lruvec_page_state_local(lruvec,
pages 433 mm/workingset.c pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
pages 434 mm/workingset.c pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
pages 437 mm/workingset.c pages = node_present_pages(sc->nid);
pages 439 mm/workingset.c max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
pages 1511 mm/z3fold.c static int z3fold_zpool_shrink(void *pool, unsigned int pages,
pages 1517 mm/z3fold.c while
(total < pages) { pages 170 mm/zbud.c static int zbud_zpool_shrink(void *pool, unsigned int pages, pages 176 mm/zbud.c while (total < pages) { pages 315 mm/zpool.c int zpool_shrink(struct zpool *zpool, unsigned int pages, pages 319 mm/zpool.c zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL; pages 1028 mm/zsmalloc.c struct page *pages[]) pages 1044 mm/zsmalloc.c page = pages[i]; pages 1068 mm/zsmalloc.c struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; pages 1084 mm/zsmalloc.c dec_zone_page_state(pages[i], NR_ZSPAGES); pages 1085 mm/zsmalloc.c __free_page(pages[i]); pages 1092 mm/zsmalloc.c pages[i] = page; pages 1095 mm/zsmalloc.c create_page_chain(class, zspage, pages); pages 1139 mm/zsmalloc.c struct page *pages[2], int off, int size) pages 1141 mm/zsmalloc.c BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); pages 1147 mm/zsmalloc.c struct page *pages[2], int off, int size) pages 1177 mm/zsmalloc.c struct page *pages[2], int off, int size) pages 1194 mm/zsmalloc.c addr = kmap_atomic(pages[0]); pages 1197 mm/zsmalloc.c addr = kmap_atomic(pages[1]); pages 1205 mm/zsmalloc.c struct page *pages[2], int off, int size) pages 1224 mm/zsmalloc.c addr = kmap_atomic(pages[0]); pages 1227 mm/zsmalloc.c addr = kmap_atomic(pages[1]); pages 1303 mm/zsmalloc.c struct page *pages[2]; pages 1337 mm/zsmalloc.c pages[0] = page; pages 1338 mm/zsmalloc.c pages[1] = get_next_page(page); pages 1339 mm/zsmalloc.c BUG_ON(!pages[1]); pages 1341 mm/zsmalloc.c ret = __zs_map_object(area, pages, off, class->size); pages 1373 mm/zsmalloc.c struct page *pages[2]; pages 1375 mm/zsmalloc.c pages[0] = page; pages 1376 mm/zsmalloc.c pages[1] = get_next_page(page); pages 1377 mm/zsmalloc.c BUG_ON(!pages[1]); pages 1379 mm/zsmalloc.c __zs_unmap_object(area, pages, off, class->size); pages 1896 mm/zsmalloc.c struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; pages 1902 mm/zsmalloc.c pages[idx] = newpage; pages 1904 mm/zsmalloc.c pages[idx] = page; pages 1908 mm/zsmalloc.c create_page_chain(class, zspage, pages); pages 22 net/9p/trans_common.c void p9_release_pages(struct page **pages, int nr_pages) pages 27 net/9p/trans_common.c if (pages[i]) pages 28 net/9p/trans_common.c put_page(pages[i]); pages 305 net/9p/trans_virtio.c struct page ***pages, pages 329 net/9p/trans_virtio.c n = iov_iter_get_pages_alloc(data, pages, count, offs); pages 357 net/9p/trans_virtio.c *pages = kmalloc_array(nr_pages, sizeof(struct page *), pages 359 net/9p/trans_virtio.c if (!*pages) pages 366 net/9p/trans_virtio.c (*pages)[index] = vmalloc_to_page(p); pages 368 net/9p/trans_virtio.c (*pages)[index] = kmap_to_page(p); pages 390 net/ceph/cls_lock_client.c struct page **pages; pages 405 net/ceph/cls_lock_client.c pages = ceph_alloc_page_vector(1, GFP_NOIO); pages 406 net/ceph/cls_lock_client.c if (IS_ERR(pages)) pages 407 net/ceph/cls_lock_client.c return PTR_ERR(pages); pages 409 net/ceph/cls_lock_client.c p = page_address(pages[0]); pages 421 net/ceph/cls_lock_client.c osd_req_op_cls_request_data_pages(req, which, pages, assert_op_buf_size, pages 941 net/ceph/messenger.c BUG_ON(!data->pages); pages 971 net/ceph/messenger.c return data->pages[cursor->page_index]; pages 3255 net/ceph/messenger.c ceph_release_page_vector(data->pages, num_pages); pages 3261 net/ceph/messenger.c void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, pages 3266 net/ceph/messenger.c BUG_ON(!pages); pages 3271 net/ceph/messenger.c data->pages = pages; pages 133 net/ceph/osd_client.c struct page **pages, u64 length, u32 alignment, pages 
137 net/ceph/osd_client.c osd_data->pages = pages; pages 191 net/ceph/osd_client.c unsigned int which, struct page **pages, pages 198 net/ceph/osd_client.c ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages 204 net/ceph/osd_client.c unsigned int which, struct page **pages, pages 211 net/ceph/osd_client.c ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages 291 net/ceph/osd_client.c unsigned int which, struct page **pages, u64 length, pages 297 net/ceph/osd_client.c ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages 323 net/ceph/osd_client.c unsigned int which, struct page **pages, u64 length, pages 329 net/ceph/osd_client.c ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages 362 net/ceph/osd_client.c ceph_release_page_vector(osd_data->pages, num_pages); pages 964 net/ceph/osd_client.c ceph_msg_data_add_pages(msg, osd_data->pages, pages 2947 net/ceph/osd_client.c void *p = page_address(osd_data->pages[0]); pages 4439 net/ceph/osd_client.c *lreq->preply_pages = data->pages; pages 4825 net/ceph/osd_client.c struct page **pages; pages 4861 net/ceph/osd_client.c pages = ceph_alloc_page_vector(1, GFP_NOIO); pages 4862 net/ceph/osd_client.c if (IS_ERR(pages)) { pages 4863 net/ceph/osd_client.c ret = PTR_ERR(pages); pages 4868 net/ceph/osd_client.c pages, PAGE_SIZE, 0, false, true); pages 4995 net/ceph/osd_client.c struct page **pages; pages 5006 net/ceph/osd_client.c pages = ceph_alloc_page_vector(1, GFP_NOIO); pages 5007 net/ceph/osd_client.c if (IS_ERR(pages)) { pages 5008 net/ceph/osd_client.c ret = PTR_ERR(pages); pages 5015 net/ceph/osd_client.c pages, PAGE_SIZE, 0, false, true); pages 5024 net/ceph/osd_client.c void *p = page_address(pages[0]); pages 5241 net/ceph/osd_client.c struct page **pages, int num_pages, int page_align) pages 5257 net/ceph/osd_client.c pages, *plen, page_align, false, false); pages 5281 net/ceph/osd_client.c struct page **pages, int num_pages) pages 5295 net/ceph/osd_client.c osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, pages 5321 net/ceph/osd_client.c struct page **pages; pages 5324 net/ceph/osd_client.c pages = ceph_alloc_page_vector(1, GFP_KERNEL); pages 5325 net/ceph/osd_client.c if (IS_ERR(pages)) pages 5326 net/ceph/osd_client.c return PTR_ERR(pages); pages 5334 net/ceph/osd_client.c p = page_address(pages[0]); pages 5340 net/ceph/osd_client.c ceph_osd_data_pages_init(&op->copy_from.osd_data, pages, pages 5515 net/ceph/osd_client.c struct page **pages; pages 5517 net/ceph/osd_client.c pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), pages 5519 net/ceph/osd_client.c if (IS_ERR(pages)) { pages 5524 net/ceph/osd_client.c ceph_msg_data_add_pages(m, pages, data_len, 0, true); pages 13 net/ceph/pagevec.c void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) pages 19 net/ceph/pagevec.c set_page_dirty_lock(pages[i]); pages 20 net/ceph/pagevec.c put_page(pages[i]); pages 22 net/ceph/pagevec.c kvfree(pages); pages 26 net/ceph/pagevec.c void ceph_release_page_vector(struct page **pages, int num_pages) pages 31 net/ceph/pagevec.c __free_pages(pages[i], 0); pages 32 net/ceph/pagevec.c kfree(pages); pages 41 net/ceph/pagevec.c struct page **pages; pages 44 net/ceph/pagevec.c pages = kmalloc_array(num_pages, sizeof(*pages), flags); pages 45 net/ceph/pagevec.c if (!pages) pages 48 net/ceph/pagevec.c pages[i] = __page_cache_alloc(flags); pages 49 net/ceph/pagevec.c if (pages[i] == NULL) { pages 50 net/ceph/pagevec.c ceph_release_page_vector(pages, i); pages 54 
net/ceph/pagevec.c return pages; pages 61 net/ceph/pagevec.c int ceph_copy_user_to_page_vector(struct page **pages, pages 72 net/ceph/pagevec.c bad = copy_from_user(page_address(pages[i]) + po, data, l); pages 87 net/ceph/pagevec.c void ceph_copy_to_page_vector(struct page **pages, pages 98 net/ceph/pagevec.c memcpy(page_address(pages[i]) + po, data, l); pages 110 net/ceph/pagevec.c void ceph_copy_from_page_vector(struct page **pages, pages 121 net/ceph/pagevec.c memcpy(data, page_address(pages[i]) + po, l); pages 137 net/ceph/pagevec.c void ceph_zero_page_vector_range(int off, int len, struct page **pages) pages 148 net/ceph/pagevec.c dout("zeroing %d %p head from %d\n", i, pages[i], pages 150 net/ceph/pagevec.c zero_user_segment(pages[i], off, end); pages 155 net/ceph/pagevec.c dout("zeroing %d %p len=%d\n", i, pages[i], len); pages 156 net/ceph/pagevec.c zero_user_segment(pages[i], 0, PAGE_SIZE); pages 162 net/ceph/pagevec.c dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len); pages 163 net/ceph/pagevec.c zero_user_segment(pages[i], 0, len); pages 629 net/core/datagram.c struct page *pages[MAX_SKB_FRAGS]; pages 638 net/core/datagram.c copied = iov_iter_get_pages(from, pages, length, pages 658 net/core/datagram.c skb_fill_page_desc(skb, frag++, pages[n], start, size); pages 2272 net/core/skbuff.c put_page(spd->pages[i]); pages 2299 net/core/skbuff.c spd->pages[spd->nr_pages - 1] == page && pages 2326 net/core/skbuff.c spd->pages[spd->nr_pages] = page; pages 2430 net/core/skbuff.c struct page *pages[MAX_SKB_FRAGS]; pages 2432 net/core/skbuff.c .pages = pages, pages 303 net/core/skmsg.c struct page *pages[MAX_MSG_FRAGS]; pages 315 net/core/skmsg.c copied = iov_iter_get_pages(from, pages, bytes, maxpages, pages 329 net/core/skmsg.c pages[i], use, offset); pages 32 net/ieee802154/nl-phy.c int i, pages = 0; pages 52 net/ieee802154/nl-phy.c buf[pages++] = phy->supported.channels[i] | (i << 27); pages 54 net/ieee802154/nl-phy.c if (pages && pages 56 net/ieee802154/nl-phy.c pages * sizeof(uint32_t), buf)) pages 65 net/rds/info.c struct page **pages; pages 122 net/rds/info.c iter->addr = kmap_atomic(*iter->pages); pages 127 net/rds/info.c "bytes %lu\n", *iter->pages, iter->addr, pages 140 net/rds/info.c iter->pages++; pages 167 net/rds/info.c struct page **pages = NULL; pages 191 net/rds/info.c pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); pages 192 net/rds/info.c if (!pages) { pages 196 net/rds/info.c ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages); pages 215 net/rds/info.c iter.pages = pages; pages 238 net/rds/info.c for (i = 0; pages && i < nr_pages; i++) pages 239 net/rds/info.c put_page(pages[i]); pages 240 net/rds/info.c kfree(pages); pages 389 net/rds/message.c struct page *pages; pages 393 net/rds/message.c copied = iov_iter_get_pages(from, &pages, PAGE_SIZE, pages 409 net/rds/message.c sg_set_page(sg, pages, copied, start); pages 157 net/rds/rdma.c struct page **pages, int write) pages 162 net/rds/rdma.c pages); pages 166 net/rds/rdma.c put_page(pages[ret]); pages 179 net/rds/rdma.c struct page **pages = NULL; pages 216 net/rds/rdma.c pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); pages 217 net/rds/rdma.c if (!pages) { pages 250 net/rds/rdma.c ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); pages 265 net/rds/rdma.c sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); pages 319 net/rds/rdma.c kfree(pages); pages 580 net/rds/rdma.c struct page **pages = NULL; pages 614 net/rds/rdma.c pages = kcalloc(nr_pages, sizeof(struct page *), 
GFP_KERNEL); pages 615 net/rds/rdma.c if (!pages) { pages 676 net/rds/rdma.c ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); pages 692 net/rds/rdma.c sg_set_page(sg, pages[j], pages 716 net/rds/rdma.c kfree(pages); pages 380 net/smc/smc_core.c if (buf_desc->pages) pages 381 net/smc/smc_core.c __free_pages(buf_desc->pages, buf_desc->order); pages 752 net/smc/smc_core.c buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN | pages 756 net/smc/smc_core.c if (!buf_desc->pages) { pages 760 net/smc/smc_core.c buf_desc->cpu_addr = (void *)page_address(buf_desc->pages); pages 819 net/smc/smc_core.c buf_desc->pages = virt_to_page(buf_desc->cpu_addr); pages 150 net/smc/smc_core.h struct page *pages; pages 148 net/smc/smc_rx.c put_page(spd->pages[i]); pages 170 net/smc/smc_rx.c spd.pages = &smc->conn.rmb_desc->pages; pages 178 net/smc/smc_rx.c get_page(smc->conn.rmb_desc->pages); pages 1846 net/sunrpc/auth_gss/auth_gss.c inpages = snd_buf->pages + first; pages 1847 net/sunrpc/auth_gss/auth_gss.c snd_buf->pages = rqstp->rq_enc_pages; pages 457 net/sunrpc/auth_gss/gss_krb5_crypto.c struct page **pages; pages 484 net/sunrpc/auth_gss/gss_krb5_crypto.c in_page = desc->pages[i]; pages 531 net/sunrpc/auth_gss/gss_krb5_crypto.c int offset, struct page **pages) pages 546 net/sunrpc/auth_gss/gss_krb5_crypto.c desc.pages = pages; pages 678 net/sunrpc/auth_gss/gss_krb5_crypto.c u32 offset, u8 *iv, struct page **pages, int encrypt) pages 700 net/sunrpc/auth_gss/gss_krb5_crypto.c save_pages = buf->pages; pages 702 net/sunrpc/auth_gss/gss_krb5_crypto.c buf->pages = pages; pages 705 net/sunrpc/auth_gss/gss_krb5_crypto.c buf->pages = save_pages; pages 734 net/sunrpc/auth_gss/gss_krb5_crypto.c struct xdr_buf *buf, struct page **pages) pages 793 net/sunrpc/auth_gss/gss_krb5_crypto.c save_pages = buf->pages; pages 794 net/sunrpc/auth_gss/gss_krb5_crypto.c buf->pages = pages; pages 799 net/sunrpc/auth_gss/gss_krb5_crypto.c buf->pages = save_pages; pages 817 net/sunrpc/auth_gss/gss_krb5_crypto.c desc.pages = pages; pages 837 net/sunrpc/auth_gss/gss_krb5_crypto.c desc.iv, pages, 1); pages 85 net/sunrpc/auth_gss/gss_krb5_wrap.c ptr = kmap_atomic(buf->pages[last]); pages 159 net/sunrpc/auth_gss/gss_krb5_wrap.c struct xdr_buf *buf, struct page **pages) pages 222 net/sunrpc/auth_gss/gss_krb5_wrap.c tmp_pages = buf->pages; pages 223 net/sunrpc/auth_gss/gss_krb5_wrap.c buf->pages = pages; pages 227 net/sunrpc/auth_gss/gss_krb5_wrap.c buf->pages = tmp_pages; pages 250 net/sunrpc/auth_gss/gss_krb5_wrap.c offset + headlen - conflen, pages); pages 256 net/sunrpc/auth_gss/gss_krb5_wrap.c offset + headlen - conflen, pages)) pages 447 net/sunrpc/auth_gss/gss_krb5_wrap.c struct xdr_buf *buf, struct page **pages) pages 488 net/sunrpc/auth_gss/gss_krb5_wrap.c err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); pages 599 net/sunrpc/auth_gss/gss_krb5_wrap.c struct xdr_buf *buf, struct page **pages) pages 609 net/sunrpc/auth_gss/gss_krb5_wrap.c return gss_wrap_kerberos_v1(kctx, offset, buf, pages); pages 612 net/sunrpc/auth_gss/gss_krb5_wrap.c return gss_wrap_kerberos_v2(kctx, offset, buf, pages); pages 205 net/sunrpc/auth_gss/gss_rpc_upcall.c for (i = 0; i < arg->npages && arg->pages[i]; i++) pages 206 net/sunrpc/auth_gss/gss_rpc_upcall.c __free_page(arg->pages[i]); pages 208 net/sunrpc/auth_gss/gss_rpc_upcall.c kfree(arg->pages); pages 214 net/sunrpc/auth_gss/gss_rpc_upcall.c arg->pages = kcalloc(arg->npages, sizeof(struct page *), GFP_KERNEL); pages 219 net/sunrpc/auth_gss/gss_rpc_upcall.c if (!arg->pages) pages 56 
net/sunrpc/auth_gss/gss_rpc_xdr.c xdr_write_pages(xdr, in->pages, in->page_base, in->page_len); pages 773 net/sunrpc/auth_gss/gss_rpc_xdr.c arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); pages 124 net/sunrpc/auth_gss/gss_rpc_xdr.h struct page **pages; /* Array of contiguous pages */ pages 137 net/sunrpc/auth_gss/gss_rpc_xdr.h struct page **pages; pages 1082 net/sunrpc/auth_gss/svcauth_gss.c if (in_token->pages[i]) pages 1083 net/sunrpc/auth_gss/svcauth_gss.c put_page(in_token->pages[i]); pages 1087 net/sunrpc/auth_gss/svcauth_gss.c kfree(in_token->pages); pages 1088 net/sunrpc/auth_gss/svcauth_gss.c in_token->pages = NULL; pages 1098 net/sunrpc/auth_gss/svcauth_gss.c int pages, i, res; pages 1109 net/sunrpc/auth_gss/svcauth_gss.c pages = DIV_ROUND_UP(inlen, PAGE_SIZE); pages 1110 net/sunrpc/auth_gss/svcauth_gss.c in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL); pages 1111 net/sunrpc/auth_gss/svcauth_gss.c if (!in_token->pages) pages 1115 net/sunrpc/auth_gss/svcauth_gss.c for (i = 0; i < pages; i++) { pages 1116 net/sunrpc/auth_gss/svcauth_gss.c in_token->pages[i] = alloc_page(GFP_KERNEL); pages 1117 net/sunrpc/auth_gss/svcauth_gss.c if (!in_token->pages[i]) { pages 1124 net/sunrpc/auth_gss/svcauth_gss.c memcpy(page_address(in_token->pages[0]), argv->iov_base, length); pages 1131 net/sunrpc/auth_gss/svcauth_gss.c memcpy(page_address(in_token->pages[i]), pages 1132 net/sunrpc/auth_gss/svcauth_gss.c page_address(rqstp->rq_arg.pages[i]) + page_base, pages 1725 net/sunrpc/auth_gss/svcauth_gss.c inpages = resbuf->pages; pages 1243 net/sunrpc/clnt.c void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, pages 1252 net/sunrpc/clnt.c xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len); pages 76 net/sunrpc/socklib.c struct page **ppage = xdr->pages; pages 567 net/sunrpc/svc.c unsigned int pages, arghi; pages 573 net/sunrpc/svc.c pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 
pages 577 net/sunrpc/svc.c WARN_ON_ONCE(pages > RPCSVC_MAXPAGES); pages 578 net/sunrpc/svc.c if (pages > RPCSVC_MAXPAGES) pages 579 net/sunrpc/svc.c pages = RPCSVC_MAXPAGES; pages 580 net/sunrpc/svc.c while (pages) { pages 585 net/sunrpc/svc.c pages--; pages 587 net/sunrpc/svc.c return pages == 0; pages 1514 net/sunrpc/svc.c rqstp->rq_res.pages = rqstp->rq_respages + 1; pages 1646 net/sunrpc/svc.c unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, pages 1664 net/sunrpc/svc.c vec[i].iov_base = page_address(*pages); pages 1668 net/sunrpc/svc.c ++pages; pages 635 net/sunrpc/svc_xprt.c int pages; pages 639 net/sunrpc/svc_xprt.c pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT; pages 640 net/sunrpc/svc_xprt.c if (pages > RPCSVC_MAXPAGES) { pages 642 net/sunrpc/svc_xprt.c pages, RPCSVC_MAXPAGES); pages 644 net/sunrpc/svc_xprt.c pages = RPCSVC_MAXPAGES; pages 646 net/sunrpc/svc_xprt.c for (i = 0; i < pages ; i++) pages 666 net/sunrpc/svc_xprt.c arg->pages = rqstp->rq_pages + 1; pages 669 net/sunrpc/svc_xprt.c arg->page_len = (pages-2)*PAGE_SIZE; pages 670 net/sunrpc/svc_xprt.c arg->len = (pages-1)*PAGE_SIZE; pages 186 net/sunrpc/svcsock.c struct page **ppage = xdr->pages; pages 1005 net/sunrpc/svcsock.c static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) pages 1011 net/sunrpc/svcsock.c vec[i].iov_base = page_address(pages[i]); pages 128 net/sunrpc/xdr.c kaddr = kmap_atomic(buf->pages[0]); pages 152 net/sunrpc/xdr.c buf->bvec[i].bv_page = buf->pages[i]; pages 178 net/sunrpc/xdr.c struct page **pages, unsigned int base, unsigned int len) pages 187 net/sunrpc/xdr.c xdr->pages = pages; pages 219 net/sunrpc/xdr.c _shift_data_right_pages(struct page **pages, size_t pgto_base, pages 231 net/sunrpc/xdr.c pgto = pages + (pgto_base >> PAGE_SHIFT); pages 232 net/sunrpc/xdr.c pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); pages 280 net/sunrpc/xdr.c _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) pages 286 net/sunrpc/xdr.c pgto = pages + (pgbase >> PAGE_SHIFT); pages 324 net/sunrpc/xdr.c _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) pages 330 net/sunrpc/xdr.c pgfrom = pages + (pgbase >> PAGE_SHIFT); pages 397 net/sunrpc/xdr.c buf->pages, pages 417 net/sunrpc/xdr.c _shift_data_right_pages(buf->pages, pages 424 net/sunrpc/xdr.c _copy_to_pages(buf->pages, buf->page_base, pages 478 net/sunrpc/xdr.c buf->pages, buf->page_base + pglen - len, pages 700 net/sunrpc/xdr.c xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); pages 757 net/sunrpc/xdr.c void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, pages 762 net/sunrpc/xdr.c buf->pages = pages; pages 814 net/sunrpc/xdr.c xdr->page_ptr = &xdr->buf->pages[pgnr]; pages 832 net/sunrpc/xdr.c newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; pages 886 net/sunrpc/xdr.c struct page **pages, unsigned int len) pages 889 net/sunrpc/xdr.c buf->pages = pages; pages 1128 net/sunrpc/xdr.c subbuf->pages = &buf->pages[base >> PAGE_SHIFT]; pages 1204 net/sunrpc/xdr.c _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); pages 1235 net/sunrpc/xdr.c _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); pages 1403 net/sunrpc/xdr.c ppages = buf->pages + (base >> PAGE_SHIFT); pages 1591 net/sunrpc/xdr.c sg_set_page(sg, buf->pages[i], thislen, page_offset); pages 237 net/sunrpc/xprtrdma/rpc_rdma.c ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); pages 667 net/sunrpc/xprtrdma/rpc_rdma.c ppages 
= xdr->pages + (xdr->page_base >> PAGE_SHIFT); pages 962 net/sunrpc/xprtrdma/rpc_rdma.c ppages = rqst->rq_rcv_buf.pages + pages 655 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count]; pages 378 net/sunrpc/xprtrdma/svc_rdma_rw.c page = xdr->pages + page_no; pages 620 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.pages[info->ri_pageno] = pages 826 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.pages = head->rc_pages; pages 606 net/sunrpc/xprtrdma/svc_rdma_sendto.c ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); pages 681 net/sunrpc/xprtrdma/svc_rdma_sendto.c ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); pages 717 net/sunrpc/xprtrdma/svc_rdma_sendto.c int i, pages = rqstp->rq_next_page - rqstp->rq_respages; pages 719 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_page_count += pages; pages 720 net/sunrpc/xprtrdma/svc_rdma_sendto.c for (i = 0; i < pages; i++) { pages 335 net/sunrpc/xprtsock.c if (buf->pages[i]) pages 337 net/sunrpc/xprtsock.c buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); pages 338 net/sunrpc/xprtsock.c if (!buf->pages[i]) { pages 161 net/tls/tls_sw.c unsigned int pages; pages 197 net/tls/tls_sw.c for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { pages 1331 net/tls/tls_sw.c struct page *pages[MAX_SKB_FRAGS]; pages 1343 net/tls/tls_sw.c copied = iov_iter_get_pages(from, pages, pages 1359 net/tls/tls_sw.c pages[i], use, offset); pages 1399 net/tls/tls_sw.c int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0; pages 1496 net/tls/tls_sw.c &pages, chunk, &sgout[1], pages 1508 net/tls/tls_sw.c pages = 0; pages 1520 net/tls/tls_sw.c for (; pages > 0; pages--) pages 1521 net/tls/tls_sw.c put_page(sg_page(&sgout[pages])); pages 188 net/xdp/xdp_umem.c vunmap(umem->pages[i].addr); pages 207 net/xdp/xdp_umem.c umem->pages[i].addr = addr; pages 252 net/xdp/xdp_umem.c kfree(umem->pages); pages 253 net/xdp/xdp_umem.c umem->pages = NULL; pages 415 net/xdp/xdp_umem.c umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); pages 416 net/xdp/xdp_umem.c if (!umem->pages) { pages 425 net/xdp/xdp_umem.c kfree(umem->pages); pages 128 net/xdp/xsk.c void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr; pages 599 net/xdp/xsk.c struct xdp_umem_page *pgs = umem->pages; pages 142 net/xdp/xsk_queue.h (unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr & pages 153 samples/vfio-mdev/mbochs.c struct page **pages; pages 172 samples/vfio-mdev/mbochs.c struct page **pages; pages 540 samples/vfio-mdev/mbochs.c mdev_state->pages = kcalloc(mdev_state->pagecount, pages 543 samples/vfio-mdev/mbochs.c if (!mdev_state->pages) pages 578 samples/vfio-mdev/mbochs.c kfree(mdev_state->pages); pages 705 samples/vfio-mdev/mbochs.c if (!mdev_state->pages[pgoff]) { pages 706 samples/vfio-mdev/mbochs.c mdev_state->pages[pgoff] = pages 708 samples/vfio-mdev/mbochs.c if (!mdev_state->pages[pgoff]) pages 712 samples/vfio-mdev/mbochs.c get_page(mdev_state->pages[pgoff]); pages 713 samples/vfio-mdev/mbochs.c return mdev_state->pages[pgoff]; pages 739 samples/vfio-mdev/mbochs.c if (!mdev_state->pages[i]) pages 741 samples/vfio-mdev/mbochs.c put_page(mdev_state->pages[i]); pages 742 samples/vfio-mdev/mbochs.c mdev_state->pages[i] = NULL; pages 794 samples/vfio-mdev/mbochs.c vmf->page = dmabuf->pages[vmf->pgoff]; pages 846 samples/vfio-mdev/mbochs.c if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount, pages 885 samples/vfio-mdev/mbochs.c put_page(dmabuf->pages[pg]); pages 897 samples/vfio-mdev/mbochs.c struct page 
*page = dmabuf->pages[page_num]; pages 932 samples/vfio-mdev/mbochs.c dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *), pages 934 samples/vfio-mdev/mbochs.c if (!dmabuf->pages) pages 939 samples/vfio-mdev/mbochs.c dmabuf->pages[pg] = __mbochs_get_page(mdev_state, pages 941 samples/vfio-mdev/mbochs.c if (!dmabuf->pages[pg]) pages 953 samples/vfio-mdev/mbochs.c put_page(dmabuf->pages[--pg]); pages 954 samples/vfio-mdev/mbochs.c kfree(dmabuf->pages); pages 1301 security/commoncap.c int cap_vm_enough_memory(struct mm_struct *mm, long pages) pages 27 security/keys/big_key.c struct page *pages[]; pages 149 security/keys/big_key.c if (buf->pages[i]) pages 150 security/keys/big_key.c __free_page(buf->pages[i]); pages 173 security/keys/big_key.c buf->sg = (void *)(buf->pages + npg); pages 177 security/keys/big_key.c buf->pages[i] = alloc_page(GFP_KERNEL); pages 178 security/keys/big_key.c if (!buf->pages[i]) pages 182 security/keys/big_key.c sg_set_page(&buf->sg[i], buf->pages[i], l, 0); pages 186 security/keys/big_key.c buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL); pages 752 security/security.c int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) pages 766 security/security.c rc = hp->hook.vm_enough_memory(mm, pages); pages 772 security/security.c return __vm_enough_memory(mm, pages, cap_sys_admin); pages 2247 security/selinux/hooks.c static int selinux_vm_enough_memory(struct mm_struct *mm, long pages) pages 269 sound/core/pcm_memory.c if (idx >= (unsigned int)sgbuf->pages) pages 36 sound/core/sgbuf.c for (i = 0; i < sgbuf->pages; i++) { pages 60 sound/core/sgbuf.c unsigned int i, pages, chunk, maxpages; pages 79 sound/core/sgbuf.c pages = snd_sgbuf_aligned_pages(size); pages 80 sound/core/sgbuf.c sgbuf->tblsize = sgbuf_align_table(pages); pages 92 sound/core/sgbuf.c while (pages > 0) { pages 93 sound/core/sgbuf.c chunk = pages; pages 100 sound/core/sgbuf.c if (!sgbuf->pages) pages 104 sound/core/sgbuf.c size = sgbuf->pages * PAGE_SIZE; pages 118 sound/core/sgbuf.c sgbuf->pages += chunk; pages 119 sound/core/sgbuf.c pages -= chunk; pages 125 sound/core/sgbuf.c dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot); pages 25 sound/firewire/packets-buffer.c unsigned int packets_per_page, pages; pages 42 sound/firewire/packets-buffer.c pages = DIV_ROUND_UP(count, packets_per_page); pages 45 sound/firewire/packets-buffer.c pages, direction); pages 51 sound/firewire/packets-buffer.c p = page_address(b->iso_buffer.pages[page_index]); pages 375 sound/isa/gus/interwave.c int bank_pos, pages; pages 386 sound/isa/gus/interwave.c pages = 0; pages 417 sound/isa/gus/interwave.c pages += psizes[i]; pages 420 sound/isa/gus/interwave.c pages <<= 18; pages 421 sound/isa/gus/interwave.c gus->gf1.memory = pages; pages 128 sound/pci/ctxfi/ctvmem.c unsigned i, pages; pages 141 sound/pci/ctxfi/ctvmem.c pages = block->size >> CT_PAGE_SHIFT; pages 142 sound/pci/ctxfi/ctvmem.c for (i = 0; i < pages; i++) { pages 85 sound/pci/emu10k1/memory.c blk->pages = blk->last_page - blk->first_page + 1; pages 118 sound/pci/emu10k1/memory.c page = blk->mapped_page + blk->pages; pages 139 sound/pci/emu10k1/memory.c page = search_empty_map_area(emu, blk->pages, &next); pages 174 sound/pci/emu10k1/memory.c start_page = q->mapped_page + q->pages; pages 280 sound/pci/emu10k1/memory.c if (size >= blk->pages) { pages 479 sound/pci/riptide/riptide.c unsigned int pages; pages 1096 sound/pci/riptide/riptide.c for (j = 0; j < data->pages; j++) { pages 1476 sound/pci/riptide/riptide.c 
unsigned int i, j, size, pages, f, pt, period; pages 1484 sound/pci/riptide/riptide.c pages = DIV_ROUND_UP(size, f); pages 1486 sound/pci/riptide/riptide.c data->pages = pages; pages 1489 sound/pci/riptide/riptide.c size, pages, f, period); pages 1492 sound/pci/riptide/riptide.c for (i = 0; i < pages; i++) { pages 251 sound/soc/intel/haswell/sst-haswell-ipc.c int pages; pages 435 sound/soc/intel/haswell/sst-haswell-pcm.c int i, pages, stream = substream->stream; pages 437 sound/soc/intel/haswell/sst-haswell-pcm.c pages = snd_sgbuf_aligned_pages(size); pages 440 sound/soc/intel/haswell/sst-haswell-pcm.c dma_area, size, pages); pages 442 sound/soc/intel/haswell/sst-haswell-pcm.c for (i = 0; i < pages; i++) { pages 475 sound/soc/intel/haswell/sst-haswell-pcm.c u32 rate, bits, map, pages, module_id; pages 613 sound/soc/intel/haswell/sst-haswell-pcm.c pages = (runtime->dma_bytes / PAGE_SIZE) + 1; pages 615 sound/soc/intel/haswell/sst-haswell-pcm.c pages = runtime->dma_bytes / PAGE_SIZE; pages 619 sound/soc/intel/haswell/sst-haswell-pcm.c pages, runtime->dma_bytes, 0, pages 209 sound/soc/sof/core.c int i, pages; pages 211 sound/soc/sof/core.c pages = snd_sgbuf_aligned_pages(size); pages 214 sound/soc/sof/core.c dmab->area, size, pages); pages 216 sound/soc/sof/core.c for (i = 0; i < pages; i++) { pages 253 sound/soc/sof/core.c return pages; pages 145 sound/soc/sof/pcm.c pcm.params.buffer.pages = PFN_UP(runtime->dma_bytes); pages 181 sound/soc/sof/trace.c params.buffer.pages = sdev->dma_trace_pages; pages 34 sound/xen/xen_snd_front_alsa.c struct page **pages; pages 227 sound/xen/xen_snd_front_alsa.c stream->pages = NULL; pages 237 sound/xen/xen_snd_front_alsa.c kfree(stream->pages); pages 450 sound/xen/xen_snd_front_alsa.c stream->pages = kcalloc(stream->num_pages, sizeof(struct page *), pages 452 sound/xen/xen_snd_front_alsa.c if (!stream->pages) pages 456 sound/xen/xen_snd_front_alsa.c stream->pages[i] = virt_to_page(stream->buffer + i * PAGE_SIZE); pages 482 sound/xen/xen_snd_front_alsa.c buf_cfg.pages = stream->pages; pages 52 tools/perf/arch/s390/util/auxtrace.c unsigned int pages; pages 68 tools/perf/arch/s390/util/auxtrace.c pages = DEFAULT_AUX_PAGES * factor; pages 69 tools/perf/arch/s390/util/auxtrace.c opts->auxtrace_mmap_pages = roundup_pow_of_two(pages); pages 768 tools/perf/util/evlist.c unsigned long pages; pages 782 tools/perf/util/evlist.c pages = (max * 1024) / page_size; pages 783 tools/perf/util/evlist.c if (!is_power_of_2(pages)) pages 784 tools/perf/util/evlist.c pages = rounddown_pow_of_two(pages); pages 786 tools/perf/util/evlist.c return pages; pages 789 tools/perf/util/evlist.c size_t evlist__mmap_size(unsigned long pages) pages 791 tools/perf/util/evlist.c if (pages == UINT_MAX) pages 792 tools/perf/util/evlist.c pages = perf_event_mlock_kb_in_pages(); pages 793 tools/perf/util/evlist.c else if (!is_power_of_2(pages)) pages 796 tools/perf/util/evlist.c return (pages + 1) * page_size; pages 802 tools/perf/util/evlist.c unsigned long pages, val; pages 817 tools/perf/util/evlist.c pages = PERF_ALIGN(val, page_size) / page_size; pages 821 tools/perf/util/evlist.c pages = strtoul(str, &eptr, 10); pages 826 tools/perf/util/evlist.c if (pages == 0 && min == 0) { pages 828 tools/perf/util/evlist.c } else if (!is_power_of_2(pages)) { pages 832 tools/perf/util/evlist.c pages = roundup_pow_of_two(pages); pages 833 tools/perf/util/evlist.c if (!pages) pages 836 tools/perf/util/evlist.c unit_number__scnprintf(buf, sizeof(buf), pages * page_size); pages 838 tools/perf/util/evlist.c 
buf, pages); pages 841 tools/perf/util/evlist.c if (pages > max) pages 844 tools/perf/util/evlist.c return pages; pages 850 tools/perf/util/evlist.c long pages; pages 855 tools/perf/util/evlist.c pages = parse_pages_arg(str, 1, max); pages 856 tools/perf/util/evlist.c if (pages < 0) { pages 861 tools/perf/util/evlist.c *mmap_pages = pages; pages 888 tools/perf/util/evlist.c int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, pages 912 tools/perf/util/evlist.c evlist->core.mmap_len = evlist__mmap_size(pages); pages 932 tools/perf/util/evlist.c int evlist__mmap(struct evlist *evlist, unsigned int pages) pages 934 tools/perf/util/evlist.c return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); pages 187 tools/perf/util/evlist.h int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, pages 191 tools/perf/util/evlist.h int evlist__mmap(struct evlist *evlist, unsigned int pages); pages 194 tools/perf/util/evlist.h size_t evlist__mmap_size(unsigned long pages); pages 902 tools/perf/util/python.c int pages = 128, overwrite = false; pages 905 tools/perf/util/python.c &pages, &overwrite)) pages 908 tools/perf/util/python.c if (evlist__mmap(evlist, pages) < 0) { pages 80 tools/testing/radix-tree/regression1.c unsigned int nr_pages, struct page **pages) pages 102 tools/testing/radix-tree/regression1.c pages[ret] = page; pages 157 tools/testing/radix-tree/regression1.c struct page *pages[10]; pages 159 tools/testing/radix-tree/regression1.c find_get_pages(0, 10, pages); pages 82 tools/testing/radix-tree/regression2.c struct page *pages[1]; pages 114 tools/testing/radix-tree/regression2.c radix_tree_gang_lookup_tag_slot(&mt_tree, (void ***)pages, start, end, pages 8 tools/testing/scatterlist/main.c static void set_pages(struct page **pages, const unsigned *array, unsigned num) pages 14 tools/testing/scatterlist/main.c pages[i] = (struct page *)(unsigned long) pages 56 tools/testing/scatterlist/main.c struct page *pages[MAX_PAGES]; pages 60 tools/testing/scatterlist/main.c set_pages(pages, test->pfn, test->num_pages); pages 62 tools/testing/scatterlist/main.c ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages, pages 863 tools/testing/selftests/kvm/lib/kvm_util.c uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; pages 872 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages)) pages 874 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages); pages 883 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages)) pages 886 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages); pages 895 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages)) { pages 897 tools/testing/selftests/kvm/lib/kvm_util.c vm->vpages_valid, pgidx_start, pages); pages 905 tools/testing/selftests/kvm/lib/kvm_util.c "pages: 0x%lx", pages); pages 912 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages), pages 916 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages); pages 918 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages), pages 922 tools/testing/selftests/kvm/lib/kvm_util.c pgidx_start, pages); pages 951 tools/testing/selftests/kvm/lib/kvm_util.c uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); pages 962 tools/testing/selftests/kvm/lib/kvm_util.c for (vm_vaddr_t vaddr = vaddr_start; pages > 0; pages 963 tools/testing/selftests/kvm/lib/kvm_util.c pages--, vaddr += vm->page_size) { pages 96 tools/testing/selftests/powerpc/mm/subpage_prot.c long i, j, pages, err; pages 98 
tools/testing/selftests/powerpc/mm/subpage_prot.c pages = size / 0x10000; pages 99 tools/testing/selftests/powerpc/mm/subpage_prot.c map = malloc(pages * 4); pages 106 tools/testing/selftests/powerpc/mm/subpage_prot.c for (i = 0; i < pages; i++) { pages 120 tools/testing/selftests/powerpc/mm/subpage_prot.c for (i = 0; i < pages; i++) { pages 62 tools/virtio/linux/virtio.h void *pages, pages 221 tools/vm/page-types.c static unsigned long pages2mb(unsigned long pages) pages 223 tools/vm/page-types.c return (pages * page_size) >> 20; pages 275 tools/vm/page-types.c unsigned long pages) pages 277 tools/vm/page-types.c return do_u64_read(kpageflags_fd, opt_kpageflags, buf, index, pages); pages 282 tools/vm/page-types.c unsigned long pages) pages 285 tools/vm/page-types.c return pages; pages 287 tools/vm/page-types.c return do_u64_read(kpagecgroup_fd, opt_kpageflags, buf, index, pages); pages 292 tools/vm/page-types.c unsigned long pages) pages 294 tools/vm/page-types.c return kpagecount_fd < 0 ? pages : pages 296 tools/vm/page-types.c buf, index, pages); pages 301 tools/vm/page-types.c unsigned long pages) pages 303 tools/vm/page-types.c return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); pages 670 tools/vm/page-types.c unsigned long pages; pages 685 tools/vm/page-types.c pages = kpageflags_read(buf, index, batch); pages 686 tools/vm/page-types.c if (pages == 0) pages 689 tools/vm/page-types.c if (kpagecgroup_read(cgi, index, pages) != pages) pages 692 tools/vm/page-types.c if (kpagecount_read(cnt, index, pages) != pages) pages 695 tools/vm/page-types.c for (i = 0; i < pages; i++) pages 699 tools/vm/page-types.c index += pages; pages 700 tools/vm/page-types.c count -= pages; pages 729 tools/vm/page-types.c unsigned long pages; pages 735 tools/vm/page-types.c pages = pagemap_read(buf, index, batch); pages 736 tools/vm/page-types.c if (pages == 0) pages 739 tools/vm/page-types.c for (i = 0; i < pages; i++) { pages 747 tools/vm/page-types.c index += pages; pages 748 tools/vm/page-types.c count -= pages; pages 1779 virt/kvm/kvm_main.c struct page **pages, int nr_pages) pages 1791 virt/kvm/kvm_main.c return __get_user_pages_fast(addr, nr_pages, 1, pages);
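The net/rds/rdma.c and net/rds/info.c entries above pin user pages with get_user_pages_fast() before touching them, and drop them with put_page() afterwards. A minimal sketch of that pin-or-unwind pattern; the pin_user_buffer() helper name and shape are invented for illustration, not from the tree:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Pin nr_pages user pages starting at 'start' for writing. */
    static int pin_user_buffer(unsigned long start, int nr_pages,
                               struct page ***pagesp)
    {
            struct page **pages;
            int ret;

            pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
            if (ret >= 0 && ret < nr_pages) {
                    /* Partial pin: release what we did get, then fail. */
                    while (ret--)
                            put_page(pages[ret]);
                    ret = -EFAULT;
            }
            if (ret < 0) {
                    kfree(pages);
                    return ret;
            }
            *pagesp = pages;
            return 0;
    }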
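The mm/util.c entries above (__account_locked_vm, account_locked_vm) implement RLIMIT_MEMLOCK accounting for pinned memory. A sketch of the usual charge-before-pin, uncharge-on-teardown usage; the demo function name is invented:

    #include <linux/mm.h>

    static int demo_pin_accounted(struct mm_struct *mm, unsigned long npages)
    {
            int ret;

            ret = account_locked_vm(mm, npages, true);      /* charge */
            if (ret)
                    return ret;     /* would exceed RLIMIT_MEMLOCK */

            /* ... pin npages pages; on error fall through to uncharge ... */

            account_locked_vm(mm, npages, false);           /* uncharge */
            return 0;
    }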
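Several clusters above (mm/vmalloc.c vmap(), security/keys/big_key.c, sound/core/sgbuf.c, sound/xen/xen_snd_front_alsa.c) map an array of pages into one contiguous kernel virtual range. A minimal sketch with invented helper names and error handling trimmed to the essentials; note that vmap() itself can return NULL:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    #define NPAGES 4

    static void *map_demo_pages(struct page **pages)
    {
            int i;

            for (i = 0; i < NPAGES; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto err;
            }
            /* VM_MAP marks the area as coming from vmap(); PAGE_KERNEL is RW. */
            return vmap(pages, NPAGES, VM_MAP, PAGE_KERNEL);
    err:
            while (i--)
                    __free_page(pages[i]);
            return NULL;
    }

    static void unmap_demo_pages(void *addr, struct page **pages)
    {
            int i;

            vunmap(addr);           /* tears down the mapping, not the pages */
            for (i = 0; i < NPAGES; i++)
                    __free_page(pages[i]);
    }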
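The mm/shmem.c entries above (shmem_inode_acct_block and friends) gate allocations on a per-superblock percpu counter. A simplified stand-in for that limit check, with the shmem structures replaced by bare parameters; shmem_inode_unacct_blocks() is the mirror image, percpu_counter_sub() plus the VM uncharge:

    #include <linux/percpu_counter.h>

    static bool demo_acct_blocks(struct percpu_counter *used,
                                 long max_blocks, long pages)
    {
            if (percpu_counter_compare(used, max_blocks - pages) > 0)
                    return false;   /* would exceed the mount's size= limit */
            percpu_counter_add(used, pages);
            return true;
    }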
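The mm/zpool.c shrink entries above are a thin dispatch through a driver ops table, with zbud and z3fold supplying the .shrink backends shown earlier in the listing. The same pattern with simplified stand-in types (not the real zpool structures):

    #include <linux/errno.h>

    struct demo_driver {
            int (*shrink)(void *pool, unsigned int pages,
                          unsigned int *reclaimed);
    };

    struct demo_zpool {
            const struct demo_driver *driver;
            void *pool;
    };

    static int demo_zpool_shrink(struct demo_zpool *zpool, unsigned int pages,
                                 unsigned int *reclaimed)
    {
            /* Only call through if the backend implements shrinking. */
            return zpool->driver->shrink ?
                    zpool->driver->shrink(zpool->pool, pages, reclaimed) :
                    -EINVAL;
    }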
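The mm/zsmalloc.c __zs_map_object()/__zs_unmap_object() entries above copy objects that straddle a page boundary through two kmap_atomic() windows. A sketch of the read direction, assuming off + size > PAGE_SIZE so the object really does span both pages; names are invented:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void demo_copy_straddling(char *dst, struct page *pages[2],
                                     int off, int size)
    {
            size_t first = PAGE_SIZE - off; /* bytes available in page 0 */
            void *addr;

            addr = kmap_atomic(pages[0]);
            memcpy(dst, addr + off, first);
            kunmap_atomic(addr);

            addr = kmap_atomic(pages[1]);
            memcpy(dst + first, addr, size - first);
            kunmap_atomic(addr);
    }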
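The net/core/datagram.c, net/core/skmsg.c and net/tls/tls_sw.c entries above all follow the same iov_iter_get_pages() pattern: take page references covering the next chunk of the iterator, attach (page, offset, length) triples to the consumer, then drop the iterator's references. A sketch that only inspects and releases, with the consumer step elided:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <linux/uio.h>

    static ssize_t demo_iter_pages(struct iov_iter *from, size_t length)
    {
            struct page *pages[MAX_SKB_FRAGS];
            size_t start;           /* offset into pages[0] */
            ssize_t copied;
            int i, used;

            copied = iov_iter_get_pages(from, pages, length,
                                        MAX_SKB_FRAGS, &start);
            if (copied < 0)
                    return copied;
            iov_iter_advance(from, copied);

            used = DIV_ROUND_UP(start + copied, PAGE_SIZE);
            /* ... hand (pages[i], offset, len) to the consumer here ... */
            for (i = 0; i < used; i++)
                    put_page(pages[i]);
            return copied;
    }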
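The net/ceph/pagevec.c entries above define the libceph page-vector helpers used throughout net/ceph/osd_client.c. A usage sketch; ceph_alloc_page_vector() returns an ERR_PTR() on failure, which is why callers test it with IS_ERR():

    #include <linux/ceph/libceph.h>
    #include <linux/err.h>

    static int demo_page_vector(int num_pages)
    {
            struct page **pages;

            pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
            if (IS_ERR(pages))
                    return PTR_ERR(pages);

            /* ... fill the pages, hand them to an OSD request ... */

            /* frees each page and the array itself */
            ceph_release_page_vector(pages, num_pages);
            return 0;
    }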
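The samples/vfio-mdev/mbochs.c entry above builds a scatterlist over its dma-buf page array with sg_alloc_table_from_pages(), the same call exercised by tools/testing/scatterlist. A minimal sketch, assuming the pages describe a buffer of size bytes starting at offset 0; physically contiguous neighbours get merged into single entries:

    #include <linux/scatterlist.h>

    static int demo_build_sgt(struct sg_table *sgt, struct page **pages,
                              unsigned int n_pages, size_t size)
    {
            return sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
                                             GFP_KERNEL);
    }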
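On the userspace side, the tools/perf/util/evlist.c entries above size the mmap ring as (pages + 1) * page_size (one extra page for the control header), with pages a power of two. A standalone sketch; the rounding mirrors perf_event_mlock_kb_in_pages() shown earlier in the listing, while evlist__mmap_size() itself may simply reject non-power-of-two values (that branch is truncated here):

    #include <stddef.h>
    #include <unistd.h>

    static size_t mmap_size_for(unsigned long pages)
    {
            size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

            /* round down to a power of two by clearing low set bits */
            while (pages & (pages - 1))
                    pages &= pages - 1;

            return (pages + 1) * page_size;
    }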