/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_user_pages.c | 40 static void __ipath_release_user_pages(struct page **p, size_t num_pages, in __ipath_release_user_pages() argument 45 for (i = 0; i < num_pages; i++) { in __ipath_release_user_pages() 47 (unsigned long) num_pages, p[i]); in __ipath_release_user_pages() 55 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, in __ipath_get_user_pages() argument 64 if (num_pages > lock_limit) { in __ipath_get_user_pages() 70 (unsigned long) num_pages, start_page); in __ipath_get_user_pages() 72 for (got = 0; got < num_pages; got += ret) { in __ipath_get_user_pages() 75 num_pages - got, 1, 1, in __ipath_get_user_pages() 81 current->mm->pinned_vm += num_pages; in __ipath_get_user_pages() 160 int ipath_get_user_pages(unsigned long start_page, size_t num_pages, in ipath_get_user_pages() argument [all …]
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | user_pages.c | 56 static void __hfi1_release_user_pages(struct page **p, size_t num_pages, in __hfi1_release_user_pages() argument 61 for (i = 0; i < num_pages; i++) { in __hfi1_release_user_pages() 71 static int __hfi1_get_user_pages(unsigned long start_page, size_t num_pages, in __hfi1_get_user_pages() argument 80 if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) { in __hfi1_get_user_pages() 85 for (got = 0; got < num_pages; got += ret) { in __hfi1_get_user_pages() 88 num_pages - got, 1, 1, in __hfi1_get_user_pages() 94 current->mm->pinned_vm += num_pages; in __hfi1_get_user_pages() 131 int hfi1_get_user_pages(unsigned long start_page, size_t num_pages, in hfi1_get_user_pages() argument 138 ret = __hfi1_get_user_pages(start_page, num_pages, p); in hfi1_get_user_pages() 145 void hfi1_release_user_pages(struct page **p, size_t num_pages) in hfi1_release_user_pages() argument [all …]
|
D | user_sdma.c | 87 #define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT)) macro
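The num_pages(x) macro above turns a byte length into the number of pages it occupies; for any x >= 1 it is equivalent to DIV_ROUND_UP(x, PAGE_SIZE). Below is a standalone sketch that checks this equivalence, assuming 4 KiB pages; PAGE_SHIFT, PAGE_SIZE and PAGE_MASK are defined locally for illustration rather than taken from kernel headers.

/*
 * Standalone illustration (not kernel code): the num_pages(x) macro from
 * user_sdma.c rounds a byte length x up to a page count.  With 4 KiB pages
 * it matches DIV_ROUND_UP(x, PAGE_SIZE) for any x >= 1.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long x;

	for (x = 1; x < 10 * PAGE_SIZE; x++)
		assert(num_pages(x) == DIV_ROUND_UP(x, PAGE_SIZE));

	printf("1 byte -> %lu page(s), 4097 bytes -> %lu page(s)\n",
	       num_pages(1UL), num_pages(PAGE_SIZE + 1));
	return 0;
}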
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_user_pages.c | 39 static void __qib_release_user_pages(struct page **p, size_t num_pages, in __qib_release_user_pages() argument 44 for (i = 0; i < num_pages; i++) { in __qib_release_user_pages() 54 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, in __qib_get_user_pages() argument 63 if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) { in __qib_get_user_pages() 68 for (got = 0; got < num_pages; got += ret) { in __qib_get_user_pages() 71 num_pages - got, 1, 1, in __qib_get_user_pages() 77 current->mm->pinned_vm += num_pages; in __qib_get_user_pages() 132 int qib_get_user_pages(unsigned long start_page, size_t num_pages, in qib_get_user_pages() argument 139 ret = __qib_get_user_pages(start_page, num_pages, p); in qib_get_user_pages() 146 void qib_release_user_pages(struct page **p, size_t num_pages) in qib_release_user_pages() argument [all …]
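The ipath, hfi1 and qib entries above all implement the same user-page pinning pattern: compare num_pages against the RLIMIT_MEMLOCK-derived lock_limit, pin in a get_user_pages() loop, then account the pages in current->mm->pinned_vm. The sketch below condenses that pattern; it assumes the 4.4-era eight-argument get_user_pages() (also visible in the radeon/amdgpu entries further down), assumes the caller holds mmap_sem, and simplifies error unwinding.

/*
 * Condensed sketch of the pinning pattern shared by the __ipath/__hfi1/
 * __qib_get_user_pages() helpers in this tree; not a drop-in replacement.
 * Caller is assumed to hold current->mm->mmap_sem for writing.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/capability.h>

static int pin_user_page_range(unsigned long start_page, size_t num_pages,
			       struct page **p)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	size_t got;
	int ret;

	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK))
		return -ENOMEM;

	for (got = 0; got < num_pages; got += ret) {
		/* 4.4-era signature: (tsk, mm, start, nr, write, force, pages, vmas) */
		ret = get_user_pages(current, current->mm,
				     start_page + got * PAGE_SIZE,
				     num_pages - got, 1, 1, p + got, NULL);
		if (ret < 0)
			goto bail_release;
	}

	current->mm->pinned_vm += num_pages;
	return 0;

bail_release:
	/* the real drivers also set_page_dirty_lock() where appropriate */
	while (got--)
		put_page(p[got]);
	return ret;
}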
|
/linux-4.4.14/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_gmr.c | 40 unsigned long num_pages, in vmw_gmr2_bind() argument 48 uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); in vmw_gmr2_bind() 49 uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; in vmw_gmr2_bind() 59 define_cmd.numPages = num_pages; in vmw_gmr2_bind() 74 while (num_pages > 0) { in vmw_gmr2_bind() 75 unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); in vmw_gmr2_bind() 95 num_pages -= nr; in vmw_gmr2_bind() 130 unsigned long num_pages, in vmw_gmr_bind() argument 143 return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); in vmw_gmr_bind()
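vmw_gmr2_bind() above splits num_pages into REMAP commands of at most VMW_PPN_PER_REMAP page numbers each; remap_num is a ceiling division written out long-hand. The standalone sketch below reproduces that chunking arithmetic; the batch size it uses is an illustrative stand-in, not the real VMW_PPN_PER_REMAP value from the vmwgfx headers.

/*
 * Standalone illustration of the chunking done in vmw_gmr2_bind(): split
 * num_pages into batches of at most PPN_PER_REMAP entries each.
 */
#include <stdio.h>

#define PPN_PER_REMAP 64	/* illustrative stand-in for VMW_PPN_PER_REMAP */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long num_pages = 1000;
	/* ceiling division, written the way vmw_gmr2_bind() writes it */
	unsigned long remap_num = num_pages / PPN_PER_REMAP +
				  ((num_pages % PPN_PER_REMAP) > 0);
	unsigned long batches = 0;

	while (num_pages > 0) {
		unsigned long nr = min_ul(num_pages, PPN_PER_REMAP);

		/* a real driver would emit one REMAP command for 'nr' pages here */
		num_pages -= nr;
		batches++;
	}

	printf("emitted %lu batches, remap_num predicted %lu\n",
	       batches, remap_num);
	return 0;
}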
|
D | vmwgfx_buffer.c | 241 return ++(viter->i) < viter->num_pages; in __vmw_piter_non_sg_next() 309 viter->num_pages = vsgt->num_pages; in vmw_piter_start() 408 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; in vmw_ttm_map_dma() 419 vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages; in vmw_ttm_map_dma() 426 vsgt->num_pages, 0, in vmw_ttm_map_dma() 428 vsgt->num_pages << PAGE_SHIFT, in vmw_ttm_map_dma() 433 if (vsgt->num_pages > vmw_tt->sgt.nents) { in vmw_ttm_map_dma() 435 sgl_size * (vsgt->num_pages - in vmw_ttm_map_dma() 576 ttm->num_pages, vmw_be->gmr_id); in vmw_ttm_bind() 580 vmw_mob_create(ttm->num_pages); in vmw_ttm_bind() [all …]
|
D | vmwgfx_gmrid_manager.c | 62 gman->used_gmr_pages += bo->num_pages; in vmw_gmrid_man_get_node() 86 mem->num_pages = bo->num_pages; in vmw_gmrid_man_get_node() 96 gman->used_gmr_pages -= bo->num_pages; in vmw_gmrid_man_get_node() 110 gman->used_gmr_pages -= mem->num_pages; in vmw_gmrid_man_put_node()
|
D | vmwgfx_reg.h | 43 u32 num_pages; member
|
D | vmwgfx_dmabuf.c | 166 place.lpfn = bo->num_pages; in vmw_dmabuf_pin_in_start_of_vram() 187 bo->mem.start < bo->num_pages && in vmw_dmabuf_pin_in_start_of_vram()
|
D | vmwgfx_mob.c | 58 unsigned long num_pages; member 414 mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); in vmw_mob_create() 436 ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, in vmw_mob_pt_populate()
|
D | vmwgfx_drv.h | 251 unsigned long num_pages; member 273 unsigned long num_pages; member 591 unsigned long num_pages,
|
D | vmwgfx_resource.c | 376 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in vmw_dmabuf_acc_size() local 377 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); in vmw_dmabuf_acc_size() 390 ttm_round_pot(num_pages * sizeof(dma_addr_t)); in vmw_dmabuf_acc_size() 1090 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); in vmw_resource_buf_alloc()
|
D | vmwgfx_surface.c | 1332 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < in vmw_gb_surface_define_ioctl() 1371 rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; in vmw_gb_surface_define_ioctl() 1448 rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; in vmw_gb_surface_reference_ioctl()
|
D | vmwgfx_cotable.c | 436 for (i = 0; i < old_bo->num_pages; ++i) { in vmw_cotable_resize()
|
D | vmwgfx_shader.c | 865 if ((u64)buffer->base.num_pages * PAGE_SIZE < in vmw_shader_define()
|
D | vmwgfx_fb.c | 532 par->vmw_bo->base.num_pages, &par->map); in vmw_fb_kms_framebuffer()
|
D | vmwgfx_stdu.c | 804 suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE; in vmw_stdu_dmabuf_fifo_commit()
|
D | vmwgfx_execbuf.c | 1114 if (unlikely(new_query_bo->base.num_pages > 4)) { in vmw_query_bo_switch_prepare() 1693 bo_size = vmw_bo->base.num_pages * PAGE_SIZE; in vmw_cmd_dma()
|
D | vmwgfx_kms.c | 840 if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { in vmw_kms_new_framebuffer_dmabuf()
|
/linux-4.4.14/net/ceph/ |
D | pagevec.c | 16 int num_pages, bool write_page) in ceph_get_direct_page_vector() argument 22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); in ceph_get_direct_page_vector() 26 while (got < num_pages) { in ceph_get_direct_page_vector() 29 num_pages - got, write_page, 0, pages + got); in ceph_get_direct_page_vector() 45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument 49 for (i = 0; i < num_pages; i++) { in ceph_put_page_vector() 58 void ceph_release_page_vector(struct page **pages, int num_pages) in ceph_release_page_vector() argument 62 for (i = 0; i < num_pages; i++) in ceph_release_page_vector() 71 struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags) in ceph_alloc_page_vector() argument 76 pages = kmalloc(sizeof(*pages) * num_pages, flags); in ceph_alloc_page_vector() [all …]
|
D | osd_client.c | 270 int num_pages; in ceph_osd_data_release() local 272 num_pages = calc_pages_for((u64)osd_data->alignment, in ceph_osd_data_release() 274 ceph_release_page_vector(osd_data->pages, num_pages); in ceph_osd_data_release() 2696 struct page **pages, int num_pages, int page_align) in ceph_osdc_readpages() argument 2739 struct page **pages, int num_pages) in ceph_osdc_writepages() argument
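osd_client.c above, and the fs/ceph entries later in this listing, derive num_pages from a byte range with calc_pages_for(offset, length), which counts every page touched by [off, off + len). The standalone sketch below shows that calculation under the assumption of 4 KiB pages; the helper name mirrors the kernel's ceph helper, but this is an illustration rather than the kernel definition copied verbatim.

/*
 * Standalone illustration of what calc_pages_for(off, len) computes in the
 * ceph code: the number of PAGE_SIZE pages touched by [off, off + len).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int calc_pages_for(uint64_t off, uint64_t len)
{
	return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (off >> PAGE_SHIFT);
}

int main(void)
{
	/* 100 bytes that straddle a page boundary still need two pages */
	printf("%d\n", calc_pages_for(4090, 100));	/* -> 2 */
	/* a page-aligned 8 KiB range needs exactly two pages */
	printf("%d\n", calc_pages_for(8192, 8192));	/* -> 2 */
	return 0;
}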
|
/linux-4.4.14/drivers/misc/ |
D | vmw_balloon.c | 259 int (*lock)(struct vmballoon *b, unsigned int num_pages, 261 int (*unlock)(struct vmballoon *b, unsigned int num_pages, 467 unsigned int num_pages, bool is_2m_pages, unsigned int *target) in vmballoon_send_batched_lock() argument 475 status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages, in vmballoon_send_batched_lock() 478 status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages, in vmballoon_send_batched_lock() 515 unsigned int num_pages, bool is_2m_pages, unsigned int *target) in vmballoon_send_batched_unlock() argument 523 status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages, in vmballoon_send_batched_unlock() 526 status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages, in vmballoon_send_batched_unlock() 595 static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, in vmballoon_lock_page() argument 639 unsigned int num_pages, bool is_2m_pages, unsigned int *target) in vmballoon_lock_batched_page() argument [all …]
|
/linux-4.4.14/drivers/gpu/drm/ttm/ |
D | ttm_tt.c | 53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*)); in ttm_tt_alloc_page_directory() 58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, in ttm_dma_tt_alloc_page_directory() 62 ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); in ttm_dma_tt_alloc_page_directory() 63 ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages); in ttm_dma_tt_alloc_page_directory() 123 drm_clflush_pages(ttm->pages, ttm->num_pages); in ttm_tt_set_caching() 125 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_set_caching() 193 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in ttm_tt_init() 225 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in ttm_dma_tt_init() 303 for (i = 0; i < ttm->num_pages; ++i) { in ttm_tt_swapin() 341 ttm->num_pages << PAGE_SHIFT, in ttm_tt_swapout() [all …]
|
D | ttm_bo_util.c | 359 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); in ttm_bo_move_memcpy() 378 add = new_mem->num_pages - 1; in ttm_bo_move_memcpy() 381 for (i = 0; i < new_mem->num_pages; ++i) { in ttm_bo_move_memcpy() 531 unsigned long num_pages, in ttm_bo_kmap_ttm() argument 546 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { in ttm_bo_kmap_ttm() 562 map->virtual = vmap(ttm->pages + start_page, num_pages, in ttm_bo_kmap_ttm() 569 unsigned long start_page, unsigned long num_pages, in ttm_bo_kmap() argument 580 if (num_pages > bo->num_pages) in ttm_bo_kmap() 582 if (start_page > bo->num_pages) in ttm_bo_kmap() 585 if (num_pages > 1 && !capable(CAP_SYS_ADMIN)) in ttm_bo_kmap() [all …]
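ttm_bo_kmap() above validates start_page and num_pages against the object size and vmap()s the backing pages; most drivers later in this listing (radeon, amdgpu, nouveau, qxl, mgag200, ast, cirrus, bochs, virtio) call it to map the whole object as ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &kmap). A minimal caller-side sketch of that pattern, based on the ttm_bo_api.h declarations listed further down; error handling is kept minimal.

/*
 * Caller-side sketch of the ttm_bo_kmap() pattern used by the drivers in
 * this listing: map the whole object (num_pages pages starting at page 0),
 * use the virtual address, then unmap.
 */
#include <drm/ttm/ttm_bo_api.h>

static void *map_whole_bo(struct ttm_buffer_object *bo,
			  struct ttm_bo_kmap_obj *map)
{
	bool is_iomem;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, map);
	if (ret)
		return NULL;

	/* ttm_kmap_obj_virtual() also reports whether the mapping is I/O memory */
	return ttm_kmap_obj_virtual(map, &is_iomem);
}

static void unmap_whole_bo(struct ttm_bo_kmap_obj *map)
{
	ttm_bo_kunmap(map);
}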
|
D | ttm_bo.c | 91 bo, bo->mem.num_pages, bo->mem.size >> 10, in ttm_bo_mem_space_debug() 252 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm() 258 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm() 987 mem.num_pages = bo->num_pages; in ttm_bo_move_buffer() 988 mem.size = mem.num_pages << PAGE_SHIFT; in ttm_bo_move_buffer() 1017 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) in ttm_bo_mem_compat() 1030 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) in ttm_bo_mem_compat() 1093 unsigned long num_pages; in ttm_bo_init() local 1107 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in ttm_bo_init() 1108 if (num_pages == 0) { in ttm_bo_init() [all …]
|
D | ttm_agp_backend.c | 59 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); in ttm_agp_bind() 64 for (i = 0; i < ttm->num_pages; i++) { in ttm_agp_bind()
|
D | ttm_bo_manager.c | 77 ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages, in ttm_bo_man_get_node()
|
D | ttm_page_alloc.c | 873 for (i = 0; i < ttm->num_pages; ++i) { in ttm_pool_populate() 907 for (i = 0; i < ttm->num_pages; ++i) { in ttm_pool_unpopulate()
|
D | ttm_bo_vm.c | 182 if (unlikely(page_offset >= bo->num_pages)) { in ttm_bo_vm_fault()
|
D | ttm_page_alloc_dma.c | 906 for (i = 0; i < ttm->num_pages; ++i) { in ttm_dma_populate() 990 for (i = 0; i < ttm->num_pages; i++) { in ttm_dma_unpopulate()
|
/linux-4.4.14/drivers/firmware/efi/ |
D | fake_mem.c | 74 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_fake_memmap() 119 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_fake_memmap() 134 md->num_pages = (m_end - md->phys_addr + 1) >> in efi_fake_memmap() 141 md->num_pages = (end - md->phys_addr + 1) >> in efi_fake_memmap() 147 md->num_pages = (m_start - md->phys_addr) >> in efi_fake_memmap() 155 md->num_pages = (m_end - m_start + 1) >> in efi_fake_memmap() 162 md->num_pages = (end - m_end) >> in efi_fake_memmap() 169 md->num_pages = (m_start - md->phys_addr) >> in efi_fake_memmap() 176 md->num_pages = (end - md->phys_addr + 1) >> in efi_fake_memmap()
|
D | runtime-map.c | 53 EFI_RUNTIME_U64_ATTR_SHOW(num_pages); 73 static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
|
D | efi.c | 304 size = md->num_pages << EFI_PAGE_SHIFT; in efi_mem_desc_lookup() 323 u64 size = md->num_pages << EFI_PAGE_SHIFT; in efi_mem_desc_end() 344 u64 size = md->num_pages << EFI_PAGE_SHIFT; in efi_lookup_mapped_addr() 661 (md->num_pages << EFI_PAGE_SHIFT)))) in efi_mem_attributes()
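Every EFI consumer in this listing (fake_mem.c and efi.c above, plus the x86/ia64/arm64 arch code further down) converts md->num_pages into a byte size or an end address with num_pages << EFI_PAGE_SHIFT; EFI pages are fixed at 4 KiB regardless of the kernel's PAGE_SIZE. Below is a standalone sketch of that arithmetic, using a trimmed stand-in for the kernel's efi_memory_desc_t (see include/linux/efi.h later in this listing).

/*
 * Standalone illustration of the md->num_pages arithmetic used throughout
 * the EFI code in this listing.  EFI_PAGE_SHIFT is 12 by specification,
 * independent of the kernel page size.
 */
#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SHIFT 12

struct efi_md {		/* simplified stand-in for efi_memory_desc_t */
	uint64_t phys_addr;
	uint64_t num_pages;
};

int main(void)
{
	struct efi_md md = { .phys_addr = 0x100000, .num_pages = 256 };

	uint64_t size = md.num_pages << EFI_PAGE_SHIFT;	/* bytes     */
	uint64_t end  = md.phys_addr + size - 1;		/* last byte */

	printf("region: [%#llx-%#llx], %llu KiB\n",
	       (unsigned long long)md.phys_addr,
	       (unsigned long long)end,
	       (unsigned long long)(size >> 10));
	return 0;
}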
|
/linux-4.4.14/drivers/media/v4l2-core/ |
D | videobuf2-dma-sg.c | 52 unsigned int num_pages; member 109 int num_pages; in vb2_dma_sg_alloc() local 125 buf->num_pages = size >> PAGE_SHIFT; in vb2_dma_sg_alloc() 128 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), in vb2_dma_sg_alloc() 138 buf->num_pages, 0, size, GFP_KERNEL); in vb2_dma_sg_alloc() 162 __func__, buf->num_pages); in vb2_dma_sg_alloc() 169 num_pages = buf->num_pages; in vb2_dma_sg_alloc() 170 while (num_pages--) in vb2_dma_sg_alloc() 171 __free_page(buf->pages[num_pages]); in vb2_dma_sg_alloc() 183 int i = buf->num_pages; in vb2_dma_sg_put() [all …]
|
D | videobuf2-vmalloc.c | 211 int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE; in vb2_vmalloc_dmabuf_ops_attach() local 223 ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL); in vb2_vmalloc_dmabuf_ops_attach()
|
/linux-4.4.14/drivers/gpu/drm/ |
D | drm_cache.c | 59 unsigned long num_pages) in drm_cache_flush_clflush() argument 64 for (i = 0; i < num_pages; i++) in drm_cache_flush_clflush() 71 drm_clflush_pages(struct page *pages[], unsigned long num_pages) in drm_clflush_pages() argument 76 drm_cache_flush_clflush(pages, num_pages); in drm_clflush_pages() 85 for (i = 0; i < num_pages; i++) { in drm_clflush_pages()
|
D | drm_memory.c | 56 unsigned long i, num_pages = in agp_remap() local 83 page_map = vmalloc(num_pages * sizeof(struct page *)); in agp_remap() 88 for (i = 0; i < num_pages; ++i) in agp_remap() 90 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); in agp_remap()
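agp_remap() above shows a pattern that recurs later in this listing (tegra's fb.c, powerpc's e500_mmu.c): collect num_pages struct page pointers into an array and hand it to vmap() for one contiguous kernel virtual mapping. A minimal kernel-style sketch of that call, assuming the caller already owns the pinned or allocated pages.

/*
 * Sketch of the "array of pages -> vmap()" pattern seen in agp_remap(),
 * tegra_fbdev_probe() and kvm_vcpu_ioctl_config_tlb() in this listing.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_page_array(struct page **pages, unsigned long num_pages)
{
	/* one contiguous kernel virtual range covering all num_pages pages */
	void *addr = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);

	return addr;	/* NULL on failure; release with vunmap(addr) */
}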
|
D | drm_agpsupport.c | 471 unsigned long num_pages, in drm_agp_bind_pages() argument 480 mem = agp_allocate_memory(dev->agp->bridge, num_pages, in drm_agp_bind_pages() 484 num_pages); in drm_agp_bind_pages() 488 for (i = 0; i < num_pages; i++) in drm_agp_bind_pages() 490 mem->page_count = num_pages; in drm_agp_bind_pages()
|
/linux-4.4.14/drivers/virtio/ |
D | virtio_balloon.c | 62 unsigned int num_pages; member 159 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; in fill_balloon() 201 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; in leak_balloon() 291 u32 num_pages; in towards_target() local 293 virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, in towards_target() 294 &num_pages); in towards_target() 298 num_pages = le32_to_cpu((__force __le32)num_pages); in towards_target() 300 target = num_pages; in towards_target() 301 return target - vb->num_pages; in towards_target() 306 u32 actual = vb->num_pages; in update_balloon_size() [all …]
|
/linux-4.4.14/drivers/gpu/drm/gma500/ |
D | mmu.c | 510 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_flush_ptes() argument 528 rows = num_pages / desired_tile_stride; in psb_mmu_flush_ptes() 530 desired_tile_stride = num_pages; in psb_mmu_flush_ptes() 558 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_flush_ptes() argument 566 unsigned long address, uint32_t num_pages) in psb_mmu_remove_pfn_sequence() argument 577 end = addr + (num_pages << PAGE_SHIFT); in psb_mmu_remove_pfn_sequence() 594 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence() 605 uint32_t num_pages, uint32_t desired_tile_stride, in psb_mmu_remove_pages() argument 619 rows = num_pages / desired_tile_stride; in psb_mmu_remove_pages() 621 desired_tile_stride = num_pages; in psb_mmu_remove_pages() [all …]
|
D | mmu.h | 76 uint32_t num_pages); 80 uint32_t num_pages, int type); 85 unsigned long address, uint32_t num_pages, 89 unsigned long address, uint32_t num_pages,
|
D | gtt.c | 419 unsigned i, num_pages; in psb_gtt_init() local 536 num_pages = vram_stolen_size >> PAGE_SHIFT; in psb_gtt_init() 538 num_pages, pfn_base << PAGE_SHIFT, 0); in psb_gtt_init() 539 for (i = 0; i < num_pages; ++i) { in psb_gtt_init()
|
/linux-4.4.14/drivers/hv/ |
D | hv_balloon.c | 285 __u32 num_pages; member 446 __u32 num_pages; member 1074 int num_pages = range_array->finfo.page_cnt; in free_balloon_pages() local 1079 for (i = 0; i < num_pages; i++) { in free_balloon_pages() 1089 unsigned int num_pages, in alloc_balloon_pages() argument 1096 if (num_pages < alloc_unit) in alloc_balloon_pages() 1099 for (i = 0; (i * alloc_unit) < num_pages; i++) { in alloc_balloon_pages() 1133 return num_pages; in alloc_balloon_pages() 1140 unsigned int num_pages = dm_device.balloon_wrk.num_pages; in balloon_up() local 1151 WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0); in balloon_up() [all …]
|
/linux-4.4.14/fs/fuse/ |
D | file.c | 352 if (idx_from < curr_index + req->num_pages && in fuse_range_is_writeback() 523 for (i = 0; i < req->num_pages; i++) { in fuse_release_user_pages() 689 for (i = start_idx; i < req->num_pages; i++) { in fuse_short_read() 726 req->num_pages = 1; in fuse_do_readpage() 770 for (i = 0; mapping == NULL && i < req->num_pages; i++) in fuse_readpages_end() 785 for (i = 0; i < req->num_pages; i++) { in fuse_readpages_end() 803 size_t count = req->num_pages << PAGE_CACHE_SHIFT; in fuse_send_readpages() 837 if (req->num_pages && in fuse_readpages_fill() 838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || in fuse_readpages_fill() 839 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || in fuse_readpages_fill() [all …]
|
D | dev.c | 1014 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { in fuse_copy_pages() 1684 release_pages(req->pages, req->num_pages, false); in fuse_retrieve_end() 1698 int num_pages; in fuse_retrieve() local 1709 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; in fuse_retrieve() 1710 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ); in fuse_retrieve() 1712 req = fuse_get_req(fc, num_pages); in fuse_retrieve() 1725 while (num && req->num_pages < num_pages) { in fuse_retrieve() 1734 req->pages[req->num_pages] = page; in fuse_retrieve() 1735 req->page_descs[req->num_pages].length = this_num; in fuse_retrieve() 1736 req->num_pages++; in fuse_retrieve()
|
D | fuse_i.h | 367 unsigned num_pages; member
|
D | cuse.c | 455 req->num_pages = 1; in cuse_send_init()
|
D | dir.c | 1337 req->num_pages = 1; in fuse_readdir()
|
/linux-4.4.14/drivers/misc/mic/scif/ |
D | scif_rma.c | 160 window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages)); in scif_create_window() 161 if (!window->num_pages) in scif_create_window() 221 scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages)); in scif_destroy_incomplete_window() 250 window->num_pages[j] << in scif_unmap_window() 351 scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages)); in scif_destroy_window() 405 vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]); in scif_create_remote_lookup() 418 vmalloc_to_page(&window->num_pages[i]) : in scif_create_remote_lookup() 419 virt_to_page(&window->num_pages[i]), in scif_create_remote_lookup() 496 window->num_pages = scif_zalloc(nr_pages * in scif_create_remote_window() 497 sizeof(*window->num_pages)); in scif_create_remote_window() [all …]
|
D | scif_debugfs.c | 82 window->dma_addr[j], window->num_pages[j]); in scif_display_window()
|
D | scif_rma.h | 276 u64 *num_pages; member
|
D | scif_dma.c | 803 end = start + (window->num_pages[i] << PAGE_SHIFT); in scif_off_to_dma_addr() 813 start += (window->num_pages[i] << PAGE_SHIFT); in scif_off_to_dma_addr()
|
/linux-4.4.14/drivers/gpu/drm/radeon/ |
D | radeon_ttm.c | 261 unsigned num_pages; in radeon_move_blit() local 298 num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); in radeon_move_blit() 299 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); in radeon_move_blit() 445 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); in radeon_bo_move() 456 mem->bus.size = mem->num_pages << PAGE_SHIFT; in radeon_ttm_io_mem_reserve() 547 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; in radeon_ttm_tt_pin_userptr() 555 unsigned num_pages = ttm->num_pages - pinned; in radeon_ttm_tt_pin_userptr() local 559 r = get_user_pages(current, current->mm, userptr, num_pages, in radeon_ttm_tt_pin_userptr() 566 } while (pinned < ttm->num_pages); in radeon_ttm_tt_pin_userptr() 568 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, in radeon_ttm_tt_pin_userptr() [all …]
|
D | radeon_prime.c | 35 int npages = bo->tbo.num_pages; in radeon_gem_prime_get_sg_table() 45 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, in radeon_gem_prime_vmap()
|
D | radeon_object.h | 98 return bo->tbo.num_pages << PAGE_SHIFT; in radeon_bo_size() 103 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; in radeon_bo_ngpu_pages()
|
D | radeon_object.c | 54 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; in radeon_update_memory_usage() 285 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in radeon_bo_kmap() 642 bo->tbo.num_pages << PAGE_SHIFT); in radeon_bo_get_surface_reg() 797 size = bo->mem.num_pages << PAGE_SHIFT; in radeon_bo_fault_reserve_notify()
|
D | radeon_trace.h | 24 __entry->pages = bo->tbo.num_pages;
|
D | radeon_cs.c | 385 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; in cmp_size_smaller_first()
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | i915_gem_userptr.c | 479 st_set_pages(struct sg_table **st, struct page **pvec, int num_pages) in st_set_pages() argument 489 ret = sg_alloc_table(*st, num_pages, GFP_KERNEL); in st_set_pages() 493 for_each_sg((*st)->sgl, sg, num_pages, n) in st_set_pages() 496 ret = sg_alloc_table_from_pages(*st, pvec, num_pages, in st_set_pages() 497 0, num_pages << PAGE_SHIFT, in st_set_pages() 513 struct page **pvec, int num_pages) in __i915_gem_userptr_set_pages() argument 517 ret = st_set_pages(&obj->pages, pvec, num_pages); in __i915_gem_userptr_set_pages() 679 const int num_pages = obj->base.size >> PAGE_SHIFT; in i915_gem_userptr_get_pages() local 718 pvec = kmalloc(num_pages*sizeof(struct page *), in i915_gem_userptr_get_pages() 721 pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); in i915_gem_userptr_get_pages() [all …]
|
D | i915_gpu_error.c | 596 int num_pages; in i915_error_object_create() local 604 num_pages = src->base.size >> PAGE_SHIFT; in i915_error_object_create() 606 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); in i915_error_object_create() 620 reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end); in i915_error_object_create() 630 if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end) in i915_error_object_create() 638 dst->page_count = num_pages; in i915_error_object_create() 639 while (num_pages--) { in i915_error_object_create()
|
/linux-4.4.14/include/linux/ceph/ |
D | libceph.h | 211 extern void ceph_release_page_vector(struct page **pages, int num_pages); 214 int num_pages, 216 extern void ceph_put_page_vector(struct page **pages, int num_pages, 218 extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
|
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_ttm.c | 269 new_mem->num_pages * PAGE_SIZE, /* bytes */ in amdgpu_move_blit() 415 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); in amdgpu_bo_move() 426 mem->bus.size = mem->num_pages << PAGE_SHIFT; in amdgpu_ttm_io_mem_reserve() 508 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; in amdgpu_ttm_tt_pin_userptr() 517 unsigned num_pages = ttm->num_pages - pinned; in amdgpu_ttm_tt_pin_userptr() local 521 r = get_user_pages(current, current->mm, userptr, num_pages, in amdgpu_ttm_tt_pin_userptr() 528 } while (pinned < ttm->num_pages); in amdgpu_ttm_tt_pin_userptr() 530 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, in amdgpu_ttm_tt_pin_userptr() 531 ttm->num_pages << PAGE_SHIFT, in amdgpu_ttm_tt_pin_userptr() 542 gtt->ttm.dma_address, ttm->num_pages); in amdgpu_ttm_tt_pin_userptr() [all …]
|
D | amdgpu_prime.c | 35 int npages = bo->tbo.num_pages; in amdgpu_gem_prime_get_sg_table() 45 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, in amdgpu_gem_prime_vmap()
|
D | amdgpu_object.h | 104 return bo->tbo.num_pages << PAGE_SHIFT; in amdgpu_bo_size() 109 return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; in amdgpu_bo_ngpu_pages()
|
D | amdgpu_object.c | 322 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in amdgpu_bo_kmap() 621 size = bo->mem.num_pages << PAGE_SHIFT; in amdgpu_bo_fault_reserve_notify()
|
D | amdgpu_trace.h | 24 __entry->pages = bo->tbo.num_pages;
|
D | amdgpu_cs.c | 462 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; in cmp_size_smaller_first()
|
/linux-4.4.14/drivers/scsi/bfa/ |
D | bfa_fcbuild.c | 675 int num_pages = 0; in fc_logout_params_pages() local 681 num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16; in fc_logout_params_pages() 684 num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; in fc_logout_params_pages() 686 return num_pages; in fc_logout_params_pages() 691 u32 d_id, u32 s_id, __be16 ox_id, int num_pages) in fc_tprlo_acc_build() argument 697 memset(tprlo_acc, 0, (num_pages * 16) + 4); in fc_tprlo_acc_build() 701 tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); in fc_tprlo_acc_build() 703 for (page = 0; page < num_pages; page++) { in fc_tprlo_acc_build() 715 u32 s_id, __be16 ox_id, int num_pages) in fc_prlo_acc_build() argument 721 memset(prlo_acc, 0, (num_pages * 16) + 4); in fc_prlo_acc_build() [all …]
|
D | bfa_fcbuild.h | 294 u32 d_id, u32 s_id, __be16 ox_id, int num_pages); 297 u32 d_id, u32 s_id, __be16 ox_id, int num_pages); 308 u16 ox_id, int num_pages); 313 u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
|
/linux-4.4.14/arch/x86/platform/efi/ |
D | efi.c | 128 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; in efi_find_mirror() 154 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; in do_add_efi_memmap() 241 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), in efi_print_memmap() 242 (md->num_pages >> (20 - EFI_PAGE_SHIFT))); in efi_print_memmap() 542 npages = md->num_pages; in efi_set_executable() 585 size = md->num_pages << PAGE_SHIFT; in old_map_region() 625 prev_size = prev_md->num_pages << EFI_PAGE_SHIFT; in efi_merge_regions() 628 prev_md->num_pages += md->num_pages; in efi_merge_regions() 642 size = md->num_pages << EFI_PAGE_SHIFT; in get_systab_virt_addr() 1014 (md->num_pages << EFI_PAGE_SHIFT)))) in efi_mem_type()
|
D | efi_32.c | 43 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) in efi_setup_page_tables() argument 47 void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) in efi_cleanup_page_tables() argument
|
D | quirks.c | 148 u64 size = md->num_pages << EFI_PAGE_SHIFT; in efi_reserve_boot_services() 164 md->num_pages = 0; in efi_reserve_boot_services() 179 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; in efi_free_boot_services()
|
D | efi_64.c | 144 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) in efi_setup_page_tables() argument 163 if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) { in efi_setup_page_tables() 197 void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) in efi_cleanup_page_tables() argument 201 kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages); in efi_cleanup_page_tables() 212 if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) in __map_region() 219 unsigned long size = md->num_pages << PAGE_SHIFT; in efi_map_region()
|
/linux-4.4.14/fs/ceph/ |
D | file.c | 67 size_t *page_align, int *num_pages) in dio_get_pages_alloc() argument 97 *num_pages = npages; in dio_get_pages_alloc() 413 struct page **pages, int num_pages, in striped_read() argument 433 pages_left = num_pages; in striped_read() 504 int num_pages, ret; in ceph_sync_read() local 530 pages = dio_get_pages_alloc(i, n, &start, &num_pages); in ceph_sync_read() 535 pages, num_pages, checkeof, in ceph_sync_read() 538 ceph_put_page_vector(pages, num_pages, true); in ceph_sync_read() 548 num_pages = calc_pages_for(off, len); in ceph_sync_read() 549 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); in ceph_sync_read() [all …]
|
D | addr.c | 266 int num_pages; in finish_read() local 274 num_pages = calc_pages_for((u64)osd_data->alignment, in finish_read() 276 for (i = 0; i < num_pages; i++) { in finish_read() 299 static void ceph_unlock_page_vector(struct page **pages, int num_pages) in ceph_unlock_page_vector() argument 303 for (i = 0; i < num_pages; i++) in ceph_unlock_page_vector() 611 int num_pages; in writepages_finish() local 623 num_pages = calc_pages_for((u64)osd_data->alignment, in writepages_finish() 632 wrote = num_pages; in writepages_finish() 641 for (i = 0; i < num_pages; i++) { in writepages_finish() 671 ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); in writepages_finish() [all …]
|
/linux-4.4.14/drivers/scsi/be2iscsi/ |
D | be_cmds.c | 844 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in beiscsi_cmd_eq_create() 968 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in beiscsi_cmd_cq_create() 1046 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); in beiscsi_cmd_mccq_create() 1166 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); in be_cmd_create_default_pdu_queue() 1265 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); in be_cmd_wrbq_create() 1308 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); in be_cmd_iscsi_post_template_hdr() 1340 u32 page_offset, u32 num_pages) in be_cmd_iscsi_post_sgl_pages() argument 1348 u32 temp_num_pages = num_pages; in be_cmd_iscsi_post_sgl_pages() 1350 if (num_pages == 0xff) in be_cmd_iscsi_post_sgl_pages() 1351 num_pages = 1; in be_cmd_iscsi_post_sgl_pages() [all …]
|
D | be_cmds.h | 271 u16 num_pages; /* sword */ member 557 u16 num_pages; member 592 u16 num_pages; member 771 u32 num_pages); 821 u16 num_pages; member 842 u16 num_pages; member 859 u16 num_pages; member 868 u16 num_pages; member
|
/linux-4.4.14/drivers/gpu/drm/nouveau/ |
D | nouveau_bo.c | 222 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; in nouveau_bo_new() 263 nvbo->bo.mem.num_pages < vram_pages / 4) { in set_placement_range() 432 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, in nouveau_bo_map() 468 for (i = 0; i < ttm_dma->ttm.num_pages; i++) in nouveau_bo_sync_for_device() 488 for (i = 0; i < ttm_dma->ttm.num_pages; i++) in nouveau_bo_sync_for_cpu() 703 OUT_RING (chan, new_mem->num_pages); in nve0_bo_move_copy() 727 u32 page_count = new_mem->num_pages; in nvc0_bo_move_copy() 730 page_count = new_mem->num_pages; in nvc0_bo_move_copy() 765 u32 page_count = new_mem->num_pages; in nvc0_bo_move_m2mf() 768 page_count = new_mem->num_pages; in nvc0_bo_move_m2mf() [all …]
|
D | nouveau_prime.c | 34 int npages = nvbo->bo.num_pages; in nouveau_gem_prime_get_sg_table() 44 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, in nouveau_gem_prime_vmap()
|
D | nouveau_sgdma.c | 39 node->size = (mem->num_pages << PAGE_SHIFT) >> 12; in nv04_sgdma_bind() 74 node->size = (mem->num_pages << PAGE_SHIFT) >> 12; in nv50_sgdma_bind()
|
D | nouveau_ttm.c | 94 ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT, in nouveau_vram_manager_new() 241 ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift, in nv04_gart_manager_new()
|
D | nouveau_gem.c | 247 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; in nouveau_gem_info() 623 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { in nouveau_gem_pushbuf_reloc_apply() 630 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, in nouveau_gem_pushbuf_reloc_apply() 803 num_pages, in nouveau_gem_ioctl_pushbuf()
|
/linux-4.4.14/arch/m68k/sun3/ |
D | config.c | 48 unsigned long num_pages; variable 121 max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT; in sun3_bootmem_alloc() 127 availmem += init_bootmem(start_page, num_pages); in sun3_bootmem_alloc()
|
/linux-4.4.14/drivers/virt/ |
D | fsl_hypervisor.c | 155 unsigned int num_pages; in ioctl_memcpy() local 218 num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT; in ioctl_memcpy() 226 pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL); in ioctl_memcpy() 236 sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) + in ioctl_memcpy() 248 param.local_vaddr - lb_offset, num_pages, in ioctl_memcpy() 253 if (num_pinned != num_pages) { in ioctl_memcpy() 276 for (i = 1; i < num_pages; i++) { in ioctl_memcpy() 293 virt_to_phys(sg_list), num_pages); in ioctl_memcpy() 297 for (i = 0; i < num_pages; i++) in ioctl_memcpy()
|
/linux-4.4.14/arch/ia64/kernel/ |
D | efi.c | 273 u64 num_pages; member 278 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT) 283 return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT)); in kmd_end() 315 end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK; in walk() 573 size = md->num_pages << EFI_PAGE_SHIFT; in efi_init() 714 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) in kern_memory_descriptor() 1072 k->num_pages = md->num_pages; in efi_memmap_init() 1105 (k-1)->num_pages += in efi_memmap_init() 1111 k->num_pages = (lim - md->phys_addr) in efi_memmap_init() 1126 (k-1)->num_pages += md->num_pages; in efi_memmap_init() [all …]
|
/linux-4.4.14/arch/mips/kernel/ |
D | vdso.c | 41 unsigned long num_pages, i; in init_vdso_image() local 46 num_pages = image->size / PAGE_SIZE; in init_vdso_image() 48 for (i = 0; i < num_pages; i++) { in init_vdso_image()
|
/linux-4.4.14/arch/m68k/mm/ |
D | sun3mmu.c | 31 extern unsigned long num_pages; 57 size = num_pages * sizeof(pte_t); in paging_init()
|
D | mcfmmu.c | 30 extern unsigned long num_pages; 51 size = num_pages * sizeof(pte_t); in paging_init() 80 zones_size[ZONE_DMA] = num_pages; in paging_init()
|
/linux-4.4.14/include/drm/ttm/ |
D | ttm_bo_api.h | 122 unsigned long num_pages; member 204 unsigned long num_pages; member 633 unsigned long num_pages, struct ttm_bo_kmap_obj *map);
|
D | ttm_bo_driver.h | 120 unsigned long num_pages; member
|
/linux-4.4.14/fs/btrfs/ |
D | check-integrity.c | 335 char **datav, unsigned int num_pages); 338 unsigned int num_pages, 1614 unsigned int num_pages; in btrfsic_release_block_ctx() local 1618 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> in btrfsic_release_block_ctx() 1620 while (num_pages > 0) { in btrfsic_release_block_ctx() 1621 num_pages--; in btrfsic_release_block_ctx() 1622 if (block_ctx->datav[num_pages]) { in btrfsic_release_block_ctx() 1623 kunmap(block_ctx->pagev[num_pages]); in btrfsic_release_block_ctx() 1624 block_ctx->datav[num_pages] = NULL; in btrfsic_release_block_ctx() 1626 if (block_ctx->pagev[num_pages]) { in btrfsic_release_block_ctx() [all …]
|
D | file.c | 409 static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, in btrfs_copy_from_user() argument 464 static void btrfs_drop_pages(struct page **pages, size_t num_pages) in btrfs_drop_pages() argument 467 for (i = 0; i < num_pages; i++) { in btrfs_drop_pages() 489 struct page **pages, size_t num_pages, in btrfs_dirty_pages() argument 510 for (i = 0; i < num_pages; i++) { in btrfs_dirty_pages() 1322 size_t num_pages, loff_t pos, in prepare_pages() argument 1331 for (i = 0; i < num_pages; i++) { in prepare_pages() 1344 if (!err && i == num_pages - 1) in prepare_pages() 1382 size_t num_pages, loff_t pos, in lock_and_cleanup_extent_if_need() argument 1392 last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1; in lock_and_cleanup_extent_if_need() [all …]
|
D | extent_io.c | 2156 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); in repair_eb_io_failure() local 2162 for (i = 0; i < num_pages; i++) { in repair_eb_io_failure() 3683 unsigned long i, num_pages; in lock_extent_buffer_for_io() local 3733 num_pages = num_extent_pages(eb->start, eb->len); in lock_extent_buffer_for_io() 3734 for (i = 0; i < num_pages; i++) { in lock_extent_buffer_for_io() 3856 unsigned long i, num_pages; in write_one_eb() local 3862 num_pages = num_extent_pages(eb->start, eb->len); in write_one_eb() 3863 atomic_set(&eb->io_pages, num_pages); in write_one_eb() 3867 for (i = 0; i < num_pages; i++) { in write_one_eb() 3880 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) in write_one_eb() [all …]
|
D | free-space-cache.c | 310 int num_pages; in io_ctl_init() local 313 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); in io_ctl_init() 320 (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) in io_ctl_init() 325 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); in io_ctl_init() 329 io_ctl->num_pages = num_pages; in io_ctl_init() 353 ASSERT(io_ctl->index < io_ctl->num_pages); in io_ctl_map_page() 368 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_drop_pages() 384 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages() 403 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages() 422 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation() [all …]
|
D | ioctl.c | 1105 unsigned long num_pages) in cluster_pages_for_defrag() argument 1124 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); in cluster_pages_for_defrag() 2817 int num_pages, u64 off) in gather_extent_pages() argument 2822 for (i = 0; i < num_pages; i++) { in gather_extent_pages() 2915 int num_pages; member 2925 for (i = 0; i < cmp->num_pages; i++) { in btrfs_cmp_data_free() 2946 int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; in btrfs_cmp_data_prepare() local 2955 src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS); in btrfs_cmp_data_prepare() 2956 dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS); in btrfs_cmp_data_prepare() 2962 cmp->num_pages = num_pages; in btrfs_cmp_data_prepare() [all …]
|
D | raid56.c | 966 int num_pages = rbio_nr_pages(stripe_len, real_stripes); in alloc_rbio() local 970 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + in alloc_rbio() 984 rbio->nr_pages = num_pages; in alloc_rbio() 999 rbio->bio_pages = p + sizeof(struct page *) * num_pages; in alloc_rbio() 1000 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; in alloc_rbio()
|
D | extent-tree.c | 3323 u64 num_pages = 0; in cache_save_setup() local 3431 num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024); in cache_save_setup() 3432 if (!num_pages) in cache_save_setup() 3433 num_pages = 1; in cache_save_setup() 3435 num_pages *= 16; in cache_save_setup() 3436 num_pages *= PAGE_CACHE_SIZE; in cache_save_setup() 3438 ret = btrfs_check_data_free_space(inode, 0, num_pages); in cache_save_setup() 3442 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, in cache_save_setup() 3443 num_pages, num_pages, in cache_save_setup() 3457 btrfs_free_reserved_data_space(inode, 0, num_pages); in cache_save_setup()
|
D | ctree.h | 1307 int num_pages; member 4054 struct page **pages, size_t num_pages,
|
/linux-4.4.14/arch/m68k/coldfire/ |
D | m54xx.c | 83 unsigned long num_pages; variable 95 num_pages = PFN_DOWN(_ramend - _rambase); in mcf54xx_bootmem_alloc()
|
/linux-4.4.14/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.c | 153 size_t num_pages; /* Number of pages incl. header. */ member 301 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; in qp_alloc_queue() local 303 if (num_pages > in qp_alloc_queue() 309 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); in qp_alloc_queue() 310 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); in qp_alloc_queue() 321 queue->kernel_if->num_pages = num_pages; in qp_alloc_queue() 327 for (i = 0; i < num_pages; i++) { in qp_alloc_queue() 627 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; in qp_host_alloc_queue() local 630 if (num_pages > (SIZE_MAX - queue_size) / in qp_host_alloc_queue() 634 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); in qp_host_alloc_queue() [all …]
|
/linux-4.4.14/drivers/gpu/drm/vgem/ |
D | vgem_drv.c | 93 loff_t num_pages; in vgem_gem_fault() local 101 num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); in vgem_gem_fault() 103 if (page_offset > num_pages) in vgem_gem_fault()
|
/linux-4.4.14/include/drm/ |
D | drm_agpsupport.h | 35 unsigned long num_pages, 84 unsigned long num_pages, in drm_agp_bind_pages() argument
|
D | drm_cache.h | 36 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
|
D | drmP.h | 930 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
|
/linux-4.4.14/include/trace/events/ |
D | hswadsp.h | 161 __field( int, num_pages ) 170 __entry->num_pages = stream->request.ringinfo.num_pages; 178 (int)__entry->num_pages, (int)__entry->ring_size,
|
/linux-4.4.14/drivers/misc/genwqe/ |
D | card_utils.c | 237 int num_pages) in genwqe_unmap_pages() argument 242 for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) { in genwqe_unmap_pages() 250 struct page **page_list, int num_pages, in genwqe_map_pages() argument 257 for (i = 0; i < num_pages; i++) { in genwqe_map_pages() 278 genwqe_unmap_pages(cd, dma_list, num_pages); in genwqe_map_pages() 282 static int genwqe_sgl_size(int num_pages) in genwqe_sgl_size() argument 284 int len, num_tlb = num_pages / 7; in genwqe_sgl_size() 286 len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1); in genwqe_sgl_size()
|
/linux-4.4.14/arch/arm64/kernel/ |
D | efi.c | 76 (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) in efi_to_phys() 173 npages = md->num_pages; in reserve_regions() 266 md->num_pages << EFI_PAGE_SHIFT, in efi_virtmap_init()
|
/linux-4.4.14/arch/x86/include/asm/ |
D | efi.h | 114 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); 115 extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
|
/linux-4.4.14/include/uapi/linux/ |
D | virtio_balloon.h | 43 __u32 num_pages; member
|
/linux-4.4.14/arch/powerpc/kvm/ |
D | e500_mmu.c | 749 int num_pages, ret, i; in kvm_vcpu_ioctl_config_tlb() local 780 num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) - in kvm_vcpu_ioctl_config_tlb() 782 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); in kvm_vcpu_ioctl_config_tlb() 786 ret = get_user_pages_fast(cfg->array, num_pages, 1, pages); in kvm_vcpu_ioctl_config_tlb() 790 if (ret != num_pages) { in kvm_vcpu_ioctl_config_tlb() 791 num_pages = ret; in kvm_vcpu_ioctl_config_tlb() 796 virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); in kvm_vcpu_ioctl_config_tlb() 838 vcpu_e500->num_shared_tlb_pages = num_pages; in kvm_vcpu_ioctl_config_tlb() 854 for (i = 0; i < num_pages; i++) in kvm_vcpu_ioctl_config_tlb()
|
/linux-4.4.14/drivers/gpu/drm/virtio/ |
D | virtgpu_object.c | 110 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in virtio_gpu_object_kmap() 124 int nr_pages = bo->tbo.num_pages; in virtio_gpu_object_get_sg_table()
|
D | virtgpu_ttm.c | 264 mem->bus.size = mem->num_pages << PAGE_SHIFT; in virtio_gpu_ttm_io_mem_reserve() 300 if (!ttm->num_pages) in virtio_gpu_ttm_backend_bind() 302 ttm->num_pages, bo_mem, ttm); in virtio_gpu_ttm_backend_bind()
|
/linux-4.4.14/drivers/firmware/efi/libstub/ |
D | efi-stub-helper.c | 176 if (desc->num_pages < nr_pages) in efi_high_alloc() 180 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); in efi_high_alloc() 261 if (desc->num_pages < nr_pages) in efi_low_alloc() 265 end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); in efi_low_alloc()
|
D | arm-stub.c | 328 left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; in regions_are_adjacent() 381 size = in->num_pages * EFI_PAGE_SIZE; in efi_get_virtmap()
|
/linux-4.4.14/drivers/gpu/drm/via/ |
D | via_dmablit.c | 187 for (i = 0; i < vsg->num_pages; ++i) { in via_free_sg_info() 235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - in via_lock_all_dma_pages() 238 vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages); in via_lock_all_dma_pages() 244 vsg->num_pages, in via_lock_all_dma_pages() 249 if (ret != vsg->num_pages) { in via_lock_all_dma_pages()
|
D | via_dmablit.h | 42 unsigned long num_pages; member
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | mem.c | 151 int page_shift, size_t offset, size_t num_pages, in __mlx5_ib_populate_pas() argument 170 for (i = 0; i < num_pages; ++i) { in __mlx5_ib_populate_pas()
|
D | mlx5_ib.h | 595 int page_shift, size_t offset, size_t num_pages,
|
/linux-4.4.14/fs/exofs/ |
D | ore_raid.c | 639 unsigned num_pages; in _ore_add_parity_unit() local 645 num_pages = _sp2d_max_pg(sp2d) + 1 - si->cur_pg; in _ore_add_parity_unit() 660 for (i = 0; i < num_pages; i++) { in _ore_add_parity_unit() 669 BUG_ON(si->cur_pg + num_pages > sp2d->pages_in_unit); in _ore_add_parity_unit() 672 per_dev, num_pages * PAGE_SIZE); in _ore_add_parity_unit()
|
/linux-4.4.14/drivers/gpu/drm/tegra/ |
D | gem.h | 42 unsigned long num_pages; member
|
D | gem.c | 198 bo->num_pages = bo->gem.size >> PAGE_SHIFT; in tegra_bo_get_pages() 200 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); in tegra_bo_get_pages() 522 if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) in tegra_gem_prime_map_dma_buf() 525 for_each_sg(sgt->sgl, sg, bo->num_pages, i) in tegra_gem_prime_map_dma_buf()
|
D | fb.c | 258 bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, in tegra_fbdev_probe()
|
/linux-4.4.14/drivers/gpu/drm/qxl/ |
D | qxl_ttm.c | 224 mem->bus.size = mem->num_pages << PAGE_SHIFT; in qxl_ttm_io_mem_reserve() 269 if (!ttm->num_pages) { in qxl_ttm_backend_bind() 271 ttm->num_pages, bo_mem, ttm); in qxl_ttm_backend_bind()
|
D | qxl_object.h | 57 return bo->tbo.num_pages << PAGE_SHIFT; in qxl_bo_size()
|
D | qxl_object.c | 135 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); in qxl_bo_kmap()
|
/linux-4.4.14/drivers/block/xen-blkback/ |
D | blkback.c | 152 unsigned int num_pages = 0; in shrink_free_pagepool() local 158 page[num_pages] = list_first_entry(&blkif->free_pages, in shrink_free_pagepool() 160 list_del(&page[num_pages]->lru); in shrink_free_pagepool() 162 if (++num_pages == NUM_BATCH_FREE_PAGES) { in shrink_free_pagepool() 164 gnttab_free_pages(num_pages, page); in shrink_free_pagepool() 166 num_pages = 0; in shrink_free_pagepool() 170 if (num_pages != 0) in shrink_free_pagepool() 171 gnttab_free_pages(num_pages, page); in shrink_free_pagepool()
|
/linux-4.4.14/arch/x86/mm/ |
D | pageattr.c | 907 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) in populate_pte() argument 913 while (num_pages-- && start < end) { in populate_pte() 929 unsigned num_pages, pud_t *pud, pgprot_t pgprot) in populate_pmd() argument 939 unsigned long pre_end = start + (num_pages << PAGE_SHIFT); in populate_pmd() 944 cur_pages = min_t(unsigned int, num_pages, cur_pages); in populate_pmd() 962 if (num_pages == cur_pages) in populate_pmd() 995 populate_pte(cpa, start, end, num_pages - cur_pages, in populate_pmd() 998 return num_pages; in populate_pmd()
|
/linux-4.4.14/drivers/gpu/drm/mgag200/ |
D | mgag200_cursor.c | 119 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in mga_crtc_cursor_set() 183 pixels_prev->bo.num_pages, in mga_crtc_cursor_set()
|
D | mgag200_fb.c | 80 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in mga_dirty_update()
|
D | mgag200_ttm.c | 164 mem->bus.size = mem->num_pages << PAGE_SHIFT; in mgag200_ttm_io_mem_reserve()
|
D | mgag200_mode.c | 862 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in mga_crtc_do_set_base()
|
/linux-4.4.14/drivers/scsi/bnx2fc/ |
D | bnx2fc_tgt.c | 667 int num_pages; in bnx2fc_alloc_session_resc() local 725 num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; in bnx2fc_alloc_session_resc() 729 while (num_pages--) { in bnx2fc_alloc_session_resc() 780 num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; in bnx2fc_alloc_session_resc() 784 while (num_pages--) { in bnx2fc_alloc_session_resc()
|
/linux-4.4.14/fs/ocfs2/ |
D | aops.h | 36 void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
|
D | aops.c | 1272 void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) in ocfs2_unlock_and_free_pages() argument 1276 for(i = 0; i < num_pages; i++) { in ocfs2_unlock_and_free_pages()
|
D | alloc.c | 6854 int ret, i, has_data, num_pages = 0; in ocfs2_convert_inline_data_to_extents() local 6934 ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); in ocfs2_convert_inline_data_to_extents() 6956 for (i = 0; i < num_pages; i++) in ocfs2_convert_inline_data_to_extents() 6990 ocfs2_unlock_and_free_pages(pages, num_pages); in ocfs2_convert_inline_data_to_extents()
|
/linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_abi.h | 85 u32 num_pages; member
|
D | ocrdma_hw.c | 441 cmd->num_pages = 4; in ocrdma_mbx_create_eq() 445 ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma, in ocrdma_mbx_create_eq() 565 int num_pages, status; in ocrdma_mbx_create_mq() local 571 num_pages = PAGES_4K_SPANNED(mq->va, mq->size); in ocrdma_mbx_create_mq() 576 cmd->cqid_pages = num_pages; in ocrdma_mbx_create_mq() 591 ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); in ocrdma_mbx_create_mq() 1616 int *num_pages, int *page_size) in ocrdma_build_q_conf() argument 1632 *num_pages = in ocrdma_build_q_conf()
|
D | ocrdma_sli.h | 316 u32 num_pages; member
|
D | ocrdma_verbs.c | 1043 uresp.num_pages = 1; in ocrdma_copy_cq_uresp()
|
/linux-4.4.14/drivers/net/ethernet/8390/ |
D | smc-ultra.c | 212 unsigned char num_pages, irqreg, addr, piomode; in ultra_probe1() local 290 num_pages = num_pages_tbl[(addr >> 4) & 3]; in ultra_probe1() 297 ei_status.stop_page = num_pages; in ultra_probe1()
|
/linux-4.4.14/fs/cifs/ |
D | file.c | 2351 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages) in cifs_write_allocate_pages() argument 2356 for (i = 0; i < num_pages; i++) { in cifs_write_allocate_pages() 2363 num_pages = i; in cifs_write_allocate_pages() 2370 for (i = 0; i < num_pages; i++) in cifs_write_allocate_pages() 2379 size_t num_pages; in get_numpages() local 2383 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE); in get_numpages() 2388 return num_pages; in get_numpages() 2424 size_t *len, unsigned long *num_pages) in wdata_fill_from_iovec() argument 2427 unsigned long i, nr_pages = *num_pages; in wdata_fill_from_iovec() 2460 *num_pages = i + 1; in wdata_fill_from_iovec() [all …]
|
/linux-4.4.14/Documentation/ABI/testing/ |
D | sysfs-firmware-efi-runtime-map | 28 num_pages : The size of the memory range in pages.
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci.c | 1282 u16 num_pages) in mlxsw_pci_fw_area_init() argument 1289 mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item), in mlxsw_pci_fw_area_init() 1293 mlxsw_pci->fw_area.count = num_pages; in mlxsw_pci_fw_area_init() 1296 for (i = 0; i < num_pages; i++) { in mlxsw_pci_fw_area_init() 1399 u16 num_pages; in mlxsw_pci_init() local 1444 num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox); in mlxsw_pci_init() 1445 err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); in mlxsw_pci_init()
|
/linux-4.4.14/Documentation/device-mapper/ |
D | kcopyd.txt | 12 int kcopyd_client_create(unsigned int num_pages,
|
/linux-4.4.14/sound/soc/intel/haswell/ |
D | sst-haswell-ipc.h | 277 u32 num_pages; member 436 u32 ring_pt_address, u32 num_pages,
|
D | sst-haswell-ipc.c | 1141 u32 ring_pt_address, u32 num_pages, in sst_hsw_stream_buffer() argument 1150 stream->request.ringinfo.num_pages = num_pages; in sst_hsw_stream_buffer()
|
/linux-4.4.14/drivers/block/zram/ |
D | zram_drv.c | 484 size_t num_pages = disksize >> PAGE_SHIFT; in zram_meta_free() local 488 for (index = 0; index < num_pages; index++) { in zram_meta_free() 504 size_t num_pages; in zram_meta_alloc() local 510 num_pages = disksize >> PAGE_SHIFT; in zram_meta_alloc() 511 meta->table = vzalloc(num_pages * sizeof(*meta->table)); in zram_meta_alloc()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | pagealloc.c | 78 __be32 num_pages; member 184 *npages = be32_to_cpu(out.num_pages); in mlx5_cmd_query_pages()
|
D | eq.c | 276 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); in mlx5_eq_int()
|
/linux-4.4.14/drivers/s390/char/ |
D | sclp_vt220.c | 689 static int __init __sclp_vt220_init(int num_pages) in __sclp_vt220_init() argument 709 for (i = 0; i < num_pages; i++) { in __sclp_vt220_init()
|
D | zcore.c | 190 u32 num_pages; member 592 hdr->num_pages = mem_size / PAGE_SIZE; in zcore_header_init()
|
/linux-4.4.14/drivers/gpu/drm/bochs/ |
D | bochs_fbdev.c | 96 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, in bochsfb_create()
|
D | bochs_mm.c | 142 mem->bus.size = mem->num_pages << PAGE_SHIFT; in bochs_ttm_io_mem_reserve()
|
/linux-4.4.14/drivers/net/ethernet/emulex/benet/ |
D | be_cmds.h | 348 u16 num_pages; /* sword */ member 450 u16 num_pages; member 513 u16 num_pages; member 521 u16 num_pages; member 540 u8 num_pages; member 566 u8 num_pages; member
|
D | be_cmds.c | 954 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in be_cmd_eq_create() 1116 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in be_cmd_cq_create() 1194 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in be_cmd_mccq_ext_create() 1259 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in be_cmd_mccq_org_create() 1321 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); in be_cmd_txq_create() 1368 req->num_pages = 2; in be_cmd_rxq_create()
|
/linux-4.4.14/drivers/net/ethernet/smsc/ |
D | smc91c92_cs.c | 1200 u_short num_pages; in smc_start_xmit() local 1217 num_pages = skb->len >> 8; in smc_start_xmit() 1219 if (num_pages > 7) { in smc_start_xmit() 1220 netdev_err(dev, "Far too big packet error: %d pages\n", num_pages); in smc_start_xmit() 1239 outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD); in smc_start_xmit()
|
/linux-4.4.14/drivers/char/agp/ |
D | generic.c | 1195 …t agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages) in agp_generic_alloc_pages() argument 1200 for (i = 0; i < num_pages; i++) { in agp_generic_alloc_pages() 1217 set_pages_array_uc(mem->pages, num_pages); in agp_generic_alloc_pages()
|
/linux-4.4.14/drivers/md/ |
D | bitmap.c | 757 unsigned long num_pages; in bitmap_storage_alloc() local 764 num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); in bitmap_storage_alloc() 765 offset = slot_number * (num_pages - 1); in bitmap_storage_alloc() 768 * num_pages, GFP_KERNEL); in bitmap_storage_alloc() 785 for ( ; pnum < num_pages; pnum++) { in bitmap_storage_alloc() 798 roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), in bitmap_storage_alloc()
|
/linux-4.4.14/drivers/scsi/bnx2i/ |
D | bnx2i_hwi.c | 952 int num_pages; in setup_qp_page_tables() local 964 num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; in setup_qp_page_tables() 971 while (num_pages--) { in setup_qp_page_tables() 992 num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; in setup_qp_page_tables() 999 while (num_pages--) { in setup_qp_page_tables() 1020 num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; in setup_qp_page_tables() 1027 while (num_pages--) { in setup_qp_page_tables()
|
/linux-4.4.14/arch/ia64/hp/sim/boot/ |
D | fw-emu.c | 252 md->num_pages = (end - start) >> 12; \ in sys_fw_init()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/ |
D | cnic.h | 132 int num_pages; member
|
D | cnic.c | 739 for (i = 0; i < dma->num_pages; i++) { in cnic_free_dma() 753 dma->num_pages = 0; in cnic_free_dma() 761 for (i = 0; i < dma->num_pages; i++) { in cnic_setup_page_tbl() 775 for (i = 0; i < dma->num_pages; i++) { in cnic_setup_page_tbl_le() 796 dma->num_pages = pages; in cnic_alloc_dma() 2323 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; in cnic_bnx2x_fcoe_init1()
|
/linux-4.4.14/drivers/gpu/drm/ast/ |
D | ast_mode.c | 547 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in ast_crtc_do_set_base() 933 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap); in ast_cursor_init() 1171 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map); in ast_cursor_set()
|
D | ast_fb.c | 104 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in ast_dirty_update()
|
D | ast_ttm.c | 164 mem->bus.size = mem->num_pages << PAGE_SHIFT; in ast_ttm_io_mem_reserve()
|
/linux-4.4.14/drivers/gpu/drm/cirrus/ |
D | cirrus_fbdev.c | 77 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in cirrus_dirty_update()
|
D | cirrus_ttm.c | 164 mem->bus.size = mem->num_pages << PAGE_SHIFT; in cirrus_ttm_io_mem_reserve()
|
D | cirrus_mode.c | 169 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); in cirrus_crtc_do_set_base()
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_bitmap.c | 1041 int num_pages, i, count = 0; in bm_rw() local 1084 num_pages = b->bm_number_of_pages; in bm_rw() 1089 for (i = 0; i < num_pages; i++) { in bm_rw()
|
/linux-4.4.14/drivers/staging/slicoss/ |
D | slic.h | 65 u32 num_pages; member
|
D | slicoss.c | 1189 for (i = 0; i < rspq->num_pages; i++) { in slic_rspqueue_free() 1211 rspq->num_pages = SLIC_RSPQ_PAGES_GB; in slic_rspqueue_init() 1213 for (i = 0; i < rspq->num_pages; i++) { in slic_rspqueue_init() 1256 rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages; in slic_rspqueue_getnext()
|
/linux-4.4.14/drivers/gpu/drm/omapdrm/ |
D | omap_dmm_tiler.c | 397 int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in tiler_reserve_1d() local 405 if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages, in tiler_reserve_1d()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_mrmw.c | 1899 int num_pages, in ehca_check_kpages_per_ate() argument 1902 for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) { in ehca_check_kpages_per_ate() 1910 pgaddr, *prev_pgaddr, num_pages); in ehca_check_kpages_per_ate()
|
/linux-4.4.14/mm/ |
D | memory_hotplug.c | 339 unsigned long start_pfn, unsigned long num_pages) in ensure_zone_is_initialized() argument 342 return init_currently_empty_zone(zone, start_pfn, num_pages); in ensure_zone_is_initialized()
|
/linux-4.4.14/drivers/staging/android/ion/ |
D | ion.c | 222 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; in ion_buffer_create() local 226 buffer->pages = vmalloc(sizeof(struct page *) * num_pages); in ion_buffer_create()
|
/linux-4.4.14/arch/x86/boot/compressed/ |
D | eboot.c | 1258 prev->size += d->num_pages << 12; in setup_e820() 1274 e820_map->size = d->num_pages << PAGE_SHIFT; in setup_e820()
|
/linux-4.4.14/include/linux/mlx5/ |
D | device.h | 509 __be32 num_pages; member
|
D | mlx5_ifc.h | 3324 u8 num_pages[0x20]; member 6579 u8 num_pages[0x20]; member
|
/linux-4.4.14/include/linux/ |
D | efi.h | 114 u64 num_pages; member
|
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_ethtool.c | 838 int num_pages = __bnx2x_get_page_reg_num(bp); in bnx2x_read_pages_regs() local 849 for (i = 0; i < num_pages; i++) { in bnx2x_read_pages_regs()
|
/linux-4.4.14/kernel/ |
D | module.c | 1880 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) in set_page_attributes() argument
|