max_pages 559 arch/x86/xen/setup.c unsigned long max_pages, limit;
max_pages 564 arch/x86/xen/setup.c max_pages = limit;
max_pages 578 arch/x86/xen/setup.c max_pages = ret;
max_pages 581 arch/x86/xen/setup.c return min(max_pages, limit);
max_pages 747 arch/x86/xen/setup.c unsigned long max_pages;
max_pages 796 arch/x86/xen/setup.c max_pages = xen_get_max_pages();
max_pages 799 arch/x86/xen/setup.c max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
max_pages 801 arch/x86/xen/setup.c if (max_pages > max_pfn)
max_pages 802 arch/x86/xen/setup.c extra_pages += max_pages - max_pfn;
max_pages 819 arch/x86/xen/setup.c extra_pages, max_pages - max_pfn);
max_pages 66 drivers/gpu/drm/ati_pcigart.c int max_pages;
max_pages 76 drivers/gpu/drm/ati_pcigart.c max_pages = (gart_info->table_size / sizeof(u32));
max_pages 77 drivers/gpu/drm/ati_pcigart.c pages = (entry->pages <= max_pages)
max_pages 78 drivers/gpu/drm/ati_pcigart.c ? entry->pages : max_pages;
max_pages 602 drivers/gpu/drm/i915/gem/selftests/huge_pages.c unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
max_pages 609 drivers/gpu/drm/i915/gem/selftests/huge_pages.c for_each_prime_number_from(page_num, 1, max_pages) {
max_pages 349 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c const unsigned long max_pages =
max_pages 351 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
max_pages 364 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (npages = 1; npages <= max_pages; npages *= prime) {
max_pages 560 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c const unsigned long max_pages =
max_pages 571 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for_each_prime_number_from(size, 1, max_pages) {
max_pages 501 drivers/gpu/drm/i915/selftests/i915_vma.c const unsigned int max_pages = 64;
max_pages 508 drivers/gpu/drm/i915/selftests/i915_vma.c obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
max_pages 520 drivers/gpu/drm/i915/selftests/i915_vma.c GEM_BUG_ON(max_offset > max_pages);
max_pages 521 drivers/gpu/drm/i915/selftests/i915_vma.c max_offset = max_pages - max_offset;
max_pages 954 drivers/gpu/drm/ttm/ttm_page_alloc.c int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
max_pages 993 drivers/gpu/drm/ttm/ttm_page_alloc.c _manager->options.max_size = max_pages;
max_pages 1161 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
max_pages 1176 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c _manager->options.max_size = max_pages;
max_pages 87 drivers/infiniband/core/fmr_pool.c int max_pages;
max_pages 244 drivers/infiniband/core/fmr_pool.c pool->max_pages = params->max_pages_per_fmr;
max_pages 265 drivers/infiniband/core/fmr_pool.c .max_pages = params->max_pages_per_fmr,
max_pages 402 drivers/infiniband/core/fmr_pool.c if (list_len < 1 || list_len > pool->max_pages)
max_pages 57 drivers/infiniband/core/rw.c u32 max_pages;
max_pages 60 drivers/infiniband/core/rw.c max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
max_pages 62 drivers/infiniband/core/rw.c max_pages = dev->attrs.max_fast_reg_page_list_len;
max_pages 65 drivers/infiniband/core/rw.c return min_t(u32, max_pages, 256);
max_pages 138 drivers/infiniband/hw/mlx4/mlx4_ib.h u32 max_pages;
max_pages 552 drivers/infiniband/hw/mlx4/mr.c int max_pages)
max_pages 561 drivers/infiniband/hw/mlx4/mr.c mr->page_map_size = roundup(max_pages * sizeof(u64),
max_pages 681 drivers/infiniband/hw/mlx4/mr.c mr->max_pages = max_num_sg;
max_pages 713 drivers/infiniband/hw/mlx4/mr.c fmr_attr->max_pages, fmr_attr->max_maps,
max_pages 798 drivers/infiniband/hw/mlx4/mr.c if (unlikely(mr->npages == mr->max_pages))
max_pages 574 drivers/infiniband/hw/mthca/mthca_mr.c int list_len = mr->attr.max_pages;
max_pages 583 drivers/infiniband/hw/mthca/mthca_mr.c mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
max_pages 694 drivers/infiniband/hw/mthca/mthca_mr.c if (list_len > fmr->attr.max_pages)
max_pages 147 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h u32 max_pages;
max_pages 252 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c mr->max_pages = max_num_sg;
max_pages 305 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c if (mr->npages == mr->max_pages)
max_pages 733 drivers/infiniband/sw/rdmavt/mr.c m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
max_pages 738 drivers/infiniband/sw/rdmavt/mr.c rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
max_pages 757 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.max_segs = fmr_attr->max_pages;
max_pages 114 drivers/infiniband/sw/rxe/rxe_loc.h int max_pages, struct rxe_mem *mem);
max_pages 239 drivers/infiniband/sw/rxe/rxe_mr.c int max_pages, struct rxe_mem *mem)
max_pages 248 drivers/infiniband/sw/rxe/rxe_mr.c err = rxe_mem_alloc(mem, max_pages);
max_pages 253 drivers/infiniband/sw/rxe/rxe_mr.c mem->max_buf = max_pages;
max_pages 1506 drivers/md/dm-writecache.c unsigned max_pages;
max_pages 1513 drivers/md/dm-writecache.c max_pages = e->wc_list_contiguous;
max_pages 1515 drivers/md/dm-writecache.c bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
max_pages 1521 drivers/md/dm-writecache.c if (max_pages <= WB_LIST_INLINE ||
max_pages 1522 drivers/md/dm-writecache.c unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
max_pages 1526 drivers/md/dm-writecache.c max_pages = WB_LIST_INLINE;
max_pages 1534 drivers/md/dm-writecache.c while (wbl->size && wb->wc_list_n < max_pages) {
max_pages 817 drivers/net/ethernet/emulex/benet/be_cmds.c static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
max_pages 820 drivers/net/ethernet/emulex/benet/be_cmds.c int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
max_pages 447 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c int max_pages)
max_pages 453 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (num_q_pages > max_pages) {
max_pages 974 drivers/net/ethernet/mellanox/mlx4/mr.c if (npages > fmr->max_pages)
max_pages 1043 drivers/net/ethernet/mellanox/mlx4/mr.c int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
max_pages 1056 drivers/net/ethernet/mellanox/mlx4/mr.c if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE)
max_pages 1060 drivers/net/ethernet/mellanox/mlx4/mr.c fmr->max_pages = max_pages;
max_pages 1064 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
max_pages 694 drivers/scsi/be2iscsi/be_cmds.c static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
max_pages 700 drivers/scsi/be2iscsi/be_cmds.c buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
max_pages 4898 drivers/scsi/st.c const unsigned int max_pages, unsigned long uaddr,
max_pages 4913 drivers/scsi/st.c if (nr_pages > max_pages)
max_pages 4920 drivers/scsi/st.c pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
max_pages 2913 fs/btrfs/ctree.h u64 newer_than, unsigned long max_pages);
max_pages 860 fs/ceph/addr.c unsigned i, pvec_pages, max_pages, locked_pages = 0;
max_pages 867 fs/ceph/addr.c max_pages = wsize >> PAGE_SHIFT;
max_pages 872 fs/ceph/addr.c max_pages - locked_pages);
max_pages 876 fs/ceph/addr.c for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
max_pages 959 fs/ceph/addr.c max_pages = calc_pages_for(0, (u64)len);
max_pages 960 fs/ceph/addr.c pages = kmalloc_array(max_pages,
max_pages 1018 fs/ceph/addr.c locked_pages < max_pages) {
max_pages 849 fs/cifs/misc.c unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
max_pages 860 fs/cifs/misc.c if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
max_pages 861 fs/cifs/misc.c bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
max_pages 865 fs/cifs/misc.c bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
max_pages 870 fs/cifs/misc.c if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
max_pages 871 fs/cifs/misc.c pages = kmalloc_array(max_pages, sizeof(struct page *),
max_pages 875 fs/cifs/misc.c pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
max_pages 884 fs/cifs/misc.c while (count && npages < max_pages) {
max_pages 885 fs/cifs/misc.c rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
max_pages 902 fs/cifs/misc.c if (npages + cur_npages > max_pages) {
max_pages 904 fs/cifs/misc.c npages + cur_npages, max_pages);
max_pages 359 fs/cramfs/inode.c unsigned int pages, max_pages, offset;
max_pages 378 fs/cramfs/inode.c max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
max_pages 380 fs/cramfs/inode.c if (pgoff >= max_pages)
max_pages 382 fs/cramfs/inode.c pages = min(vma_pages(vma), max_pages - pgoff);
max_pages 394 fs/cramfs/inode.c if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
max_pages 461 fs/cramfs/inode.c unsigned int pages, block_pages, max_pages, offset;
max_pages 464 fs/cramfs/inode.c max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
max_pages 465 fs/cramfs/inode.c if (pgoff >= max_pages || pages > max_pages - pgoff)
max_pages 1661 fs/fuse/dev.c num_pages = min(num_pages, fc->max_pages);
max_pages 923 fs/fuse/file.c unsigned int max_pages;
max_pages 937 fs/fuse/file.c (ap->num_pages == fc->max_pages ||
max_pages 940 fs/fuse/file.c data->max_pages = min_t(unsigned int, data->nr_pages,
max_pages 941 fs/fuse/file.c fc->max_pages);
max_pages 943 fs/fuse/file.c data->ia = ia = fuse_io_alloc(NULL, data->max_pages);
max_pages 951 fs/fuse/file.c if (WARN_ON(ap->num_pages >= data->max_pages)) {
max_pages 980 fs/fuse/file.c data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
max_pages 982 fs/fuse/file.c data.ia = fuse_io_alloc(NULL, data.max_pages);
max_pages 1142 fs/fuse/file.c unsigned int max_pages)
max_pages 1199 fs/fuse/file.c ap->num_pages < max_pages && offset == 0);
max_pages 1205 fs/fuse/file.c unsigned int max_pages)
max_pages 1210 fs/fuse/file.c max_pages);
max_pages 1231 fs/fuse/file.c fc->max_pages);
max_pages 1367 fs/fuse/file.c unsigned int max_pages)
max_pages 1387 fs/fuse/file.c while (nbytes < *nbytesp && ap->num_pages < max_pages) {
max_pages 1392 fs/fuse/file.c max_pages - ap->num_pages,
max_pages 1438 fs/fuse/file.c unsigned int max_pages;
max_pages 1440 fs/fuse/file.c max_pages = iov_iter_npages(iter, fc->max_pages);
max_pages 1441 fs/fuse/file.c ia = fuse_io_alloc(io, max_pages);
max_pages 1461 fs/fuse/file.c max_pages);
max_pages 1494 fs/fuse/file.c max_pages = iov_iter_npages(iter, fc->max_pages);
max_pages 1495 fs/fuse/file.c ia = fuse_io_alloc(io, max_pages);
max_pages 1905 fs/fuse/file.c unsigned int max_pages;
max_pages 1915 fs/fuse/file.c max_t(unsigned int, data->max_pages * 2,
max_pages 1917 fs/fuse/file.c fc->max_pages);
max_pages 1918 fs/fuse/file.c WARN_ON(npages <= data->max_pages);
max_pages 1929 fs/fuse/file.c data->max_pages = npages;
max_pages 2039 fs/fuse/file.c (is_writeback || ap->num_pages == fc->max_pages ||
max_pages 2044 fs/fuse/file.c } else if (wpa && ap->num_pages == data->max_pages) {
max_pages 2076 fs/fuse/file.c data->max_pages = 1;
max_pages 2142 fs/fuse/file.c data.orig_pages = kcalloc(fc->max_pages,
max_pages 2618 fs/fuse/file.c u32 max = fc->max_pages << PAGE_SHIFT;
max_pages 2724 fs/fuse/file.c unsigned int in_iovs = 0, out_iovs = 0, max_pages;
max_pages 2747 fs/fuse/file.c ap.pages = fuse_pages_alloc(fc->max_pages, GFP_KERNEL, &ap.descs);
max_pages 2752 fs/fuse/file.c fuse_page_descs_length_init(ap.descs, 0, fc->max_pages);
max_pages 2784 fs/fuse/file.c max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
max_pages 2788 fs/fuse/file.c if (max_pages > fc->max_pages)
max_pages 2790 fs/fuse/file.c while (ap.num_pages < max_pages) {
max_pages 3058 fs/fuse/file.c return round_up(off, fc->max_pages << PAGE_SHIFT);
max_pages 531 fs/fuse/fuse_i.h unsigned int max_pages;
max_pages 629 fs/fuse/inode.c fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
max_pages 950 fs/fuse/inode.c fc->max_pages =
max_pages 952 fs/fuse/inode.c max_t(unsigned int, arg->max_pages, 1));
max_pages 1018 fs/nfs/pnfs.c size_t max_pages = max_response_pages(server);
max_pages 1029 fs/nfs/pnfs.c if (npages < max_pages)
max_pages 1030 fs/nfs/pnfs.c max_pages = npages;
max_pages 1033 fs/nfs/pnfs.c lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
max_pages 1038 fs/nfs/pnfs.c lgp->args.layout.pglen = max_pages * PAGE_SIZE;
max_pages 1070 fs/nfs/pnfs.c size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;
max_pages 1072 fs/nfs/pnfs.c nfs4_free_pages(lgp->args.layout.pages, max_pages);
max_pages 103 fs/nfs/pnfs_dev.c int max_pages;
max_pages 114 fs/nfs/pnfs_dev.c max_pages = nfs_page_array_len(0, max_resp_sz);
max_pages 116 fs/nfs/pnfs_dev.c __func__, server, max_resp_sz, max_pages);
max_pages 122 fs/nfs/pnfs_dev.c pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
max_pages 126 fs/nfs/pnfs_dev.c for (i = 0; i < max_pages; i++) {
max_pages 155 fs/nfs/pnfs_dev.c for (i = 0; i < max_pages; i++)
max_pages 25 fs/nilfs2/segbuf.c int max_pages;
max_pages 365 fs/nilfs2/segbuf.c wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
max_pages 406 fs/nilfs2/segbuf.c wi->max_pages = BIO_MAX_PAGES;
max_pages 407 fs/nilfs2/segbuf.c wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
max_pages 107 include/drm/drm_prime.h dma_addr_t *addrs, int max_pages);
max_pages 37 include/drm/ttm/ttm_page_alloc.h int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
max_pages 81 include/drm/ttm/ttm_page_alloc.h int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
max_pages 99 include/drm/ttm/ttm_page_alloc.h unsigned max_pages)
max_pages 715 include/linux/mlx4/device.h int max_pages;
max_pages 1417 include/linux/mlx4/device.h int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
max_pages 48 include/linux/pagevec.h xa_mark_t tag, unsigned max_pages);
max_pages 14 include/linux/swap_cgroup.h extern int swap_cgroup_swapon(int type, unsigned long max_pages);
max_pages 33 include/linux/swap_cgroup.h swap_cgroup_swapon(int type, unsigned long max_pages)
max_pages 1430 include/rdma/ib_verbs.h int max_pages;
max_pages 701 include/uapi/linux/fuse.h uint16_t max_pages;
max_pages 227 mm/readahead.c unsigned long max_pages;
max_pages 236 mm/readahead.c max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
max_pages 237 mm/readahead.c nr_to_read = min(nr_to_read, max_pages);
max_pages 388 mm/readahead.c unsigned long max_pages = ra->ra_pages;
max_pages 396 mm/readahead.c if (req_size > max_pages && bdi->io_pages > max_pages)
max_pages 397 mm/readahead.c max_pages = min(req_size, bdi->io_pages);
max_pages 412 mm/readahead.c ra->size = get_next_ra_size(ra, max_pages);
max_pages 427 mm/readahead.c start = page_cache_next_miss(mapping, offset + 1, max_pages);
max_pages 430 mm/readahead.c if (!start || start - offset > max_pages)
max_pages 436 mm/readahead.c ra->size = get_next_ra_size(ra, max_pages);
max_pages 444 mm/readahead.c if (req_size > max_pages)
max_pages 460 mm/readahead.c if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
max_pages 471 mm/readahead.c ra->size = get_init_ra_size(req_size, max_pages);
max_pages 482 mm/readahead.c add_pages = get_next_ra_size(ra, max_pages);
max_pages 483 mm/readahead.c if (ra->size + add_pages <= max_pages) {
max_pages 487 mm/readahead.c ra->size = max_pages;
max_pages 488 mm/readahead.c ra->async_size = max_pages >> 1;
max_pages 1062 mm/swap.c xa_mark_t tag, unsigned max_pages)
max_pages 1065 mm/swap.c min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
max_pages 167 mm/swap_cgroup.c int swap_cgroup_swapon(int type, unsigned long max_pages)
max_pages 177 mm/swap_cgroup.c length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
max_pages 464 mm/swap_state.c int max_pages,
max_pages 490 mm/swap_state.c if (pages > max_pages)
max_pages 491 mm/swap_state.c pages = max_pages;
max_pages 504 mm/swap_state.c unsigned int hits, pages, max_pages;
max_pages 507 mm/swap_state.c max_pages = 1 << READ_ONCE(page_cluster);
max_pages 508 mm/swap_state.c if (max_pages <= 1)
max_pages 512 mm/swap_state.c pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
max_pages 148 net/rds/ib_fmr.c if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
max_pages 79 net/rds/ib_frmr.c pool->fmr_attr.max_pages);
max_pages 243 net/rds/ib_frmr.c if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
max_pages 177 net/rds/ib_rdma.c iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
max_pages 187 net/rds/ib_rdma.c iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
max_pages 599 net/rds/ib_rdma.c pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
max_pages 603 net/rds/ib_rdma.c pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
max_pages 607 net/rds/ib_rdma.c pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
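
A pattern that recurs across the references above is clamping a requested page count to max_pages before sizing a page-pointer array, e.g. nr_to_read = min(nr_to_read, max_pages) in mm/readahead.c and kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL) in drivers/scsi/st.c. The following is a minimal userspace C sketch of that pattern, not code from any of the files listed; the names clamp_and_alloc_pages and nr_requested are illustrative only, and in-kernel code would use min_t() and kmalloc_array() rather than the stdlib calls shown here.

#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace sketch of the recurring max_pages pattern: clamp a requested
 * page count to an upper bound, then allocate a pointer array of that
 * (bounded) size.  Names are illustrative; kernel code uses min_t() and
 * kmalloc_array() instead of the stdlib calls below.
 */
static void **clamp_and_alloc_pages(size_t nr_requested, size_t max_pages,
				    size_t *nr_alloced)
{
	size_t nr = nr_requested;

	if (nr > max_pages)	/* cf. nr_to_read = min(nr_to_read, max_pages) in mm/readahead.c */
		nr = max_pages;

	*nr_alloced = nr;
	/* cf. kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL) in drivers/scsi/st.c */
	return calloc(nr, sizeof(void *));
}

int main(void)
{
	size_t got;
	void **pages = clamp_and_alloc_pages(1000, 256, &got);

	if (!pages)
		return 1;
	printf("allocated room for %zu page pointers\n", got);
	free(pages);
	return 0;
}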