/linux-4.1.27/drivers/media/pci/ivtv/ |
D | ivtv-udma.c |
    33 dma_page->page_count = dma_page->last - dma_page->first + 1; in ivtv_udma_get_page_info()
    34 if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset; in ivtv_udma_get_page_info()
    48 for (i = 0; i < dma_page->page_count; i++) { in ivtv_udma_fill_sg_list()
    49 unsigned int len = (i == dma_page->page_count - 1) ? in ivtv_udma_fill_sg_list()
    112 if (dma->SG_length || dma->page_count) { in ivtv_udma_setup()
    114 dma->SG_length, dma->page_count); in ivtv_udma_setup()
    120 if (user_dma.page_count <= 0) { in ivtv_udma_setup()
    122 user_dma.page_count, size_in_bytes, user_dma.offset); in ivtv_udma_setup()
    128 user_dma.uaddr, user_dma.page_count, 0, 1, dma->map); in ivtv_udma_setup()
    130 if (user_dma.page_count != err) { in ivtv_udma_setup()
    [all …]
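
The first two hits (file lines 33-34) show how the driver sizes a user-space DMA transfer: the page count is the distance between the page frame numbers of the first and last byte, plus one, and for a single-page buffer the tail must not double-count the leading offset. A minimal userspace model of that arithmetic, assuming 4 KiB pages (struct and function names here are illustrative, not the driver's):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct page_span {
        unsigned long first, last;   /* first and last page frame touched */
        unsigned long offset, tail;  /* bytes used in the first/last page */
        int page_count;
    };

    static void get_page_span(struct page_span *s, unsigned long uaddr,
                              unsigned long size)
    {
        s->offset = uaddr & ~PAGE_MASK;                    /* into first page */
        s->tail   = 1 + ((uaddr + size - 1) & ~PAGE_MASK); /* used in last page */
        s->first  = uaddr >> PAGE_SHIFT;
        s->last   = (uaddr + size - 1) >> PAGE_SHIFT;
        s->page_count = s->last - s->first + 1;
        if (s->page_count == 1)   /* one page: tail currently includes offset */
            s->tail -= s->offset;
    }

    int main(void)
    {
        struct page_span s;
        get_page_span(&s, 0x1ff8, 16);  /* 16 bytes straddling a page boundary */
        printf("pages=%d offset=%lu tail=%lu\n", s.page_count, s.offset, s.tail);
        return 0;
    }

For the straddling example this prints pages=2 offset=4088 tail=8: eight bytes in the first page, eight in the second.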
|
D | ivtv-yuv.c |
    67 if (dma->SG_length || dma->page_count) { in ivtv_yuv_prep_user_dma()
    70 dma->SG_length, dma->page_count); in ivtv_yuv_prep_user_dma()
    79 …y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], … in ivtv_yuv_prep_user_dma()
    81 if (y_pages == y_dma.page_count) { in ivtv_yuv_prep_user_dma()
    83 uv_dma.uaddr, uv_dma.page_count, 0, 1, in ivtv_yuv_prep_user_dma()
    88 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { in ivtv_yuv_prep_user_dma()
    91 if (y_pages == y_dma.page_count) { in ivtv_yuv_prep_user_dma()
    94 "expecting %d\n", uv_pages, uv_dma.page_count); in ivtv_yuv_prep_user_dma()
    106 "expecting %d\n", y_pages, y_dma.page_count); in ivtv_yuv_prep_user_dma()
    123 dma->page_count = y_pages + uv_pages; in ivtv_yuv_prep_user_dma()
    [all …]
|
D | ivtv-driver.h | 285 int page_count; member 305 int page_count; member
|
D | ivtvfb.c | 297 size_in_bytes, itv->udma.page_count); in ivtvfb_prep_dec_dma_to_device() 304 size_in_bytes, itv->udma.page_count); in ivtvfb_prep_dec_dma_to_device()
|
/linux-4.1.27/drivers/firewire/ |
D | core-iso.c |
    42 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) in fw_iso_buffer_alloc() argument
    46 buffer->page_count = 0; in fw_iso_buffer_alloc()
    48 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), in fw_iso_buffer_alloc()
    53 for (i = 0; i < page_count; i++) { in fw_iso_buffer_alloc()
    58 buffer->page_count = i; in fw_iso_buffer_alloc()
    59 if (i < page_count) { in fw_iso_buffer_alloc()
    75 for (i = 0; i < buffer->page_count; i++) { in fw_iso_buffer_map_dma()
    84 if (i < buffer->page_count) in fw_iso_buffer_map_dma()
    91 int page_count, enum dma_data_direction direction) in fw_iso_buffer_init() argument
    95 ret = fw_iso_buffer_alloc(buffer, page_count); in fw_iso_buffer_init()
    [all …]
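
fw_iso_buffer_alloc() (file lines 46, 53, 58-59) shows a common partial-failure idiom: page_count records how many pages were actually obtained, not how many were requested, so cleanup walks exactly the allocated prefix. A hedged userspace sketch of the same idiom, with plain malloc standing in for alloc_page (names are illustrative):

    #include <stdlib.h>

    struct iso_buffer {
        void **pages;
        int page_count;     /* pages actually allocated, not pages requested */
    };

    /* On a mid-loop failure, record i (pages obtained so far) in
     * page_count so the free path releases exactly what exists. */
    static int buffer_alloc(struct iso_buffer *b, int page_count)
    {
        int i;

        b->page_count = 0;
        b->pages = malloc(page_count * sizeof(b->pages[0]));
        if (b->pages == NULL)
            return -1;

        for (i = 0; i < page_count; i++) {
            b->pages[i] = malloc(4096);
            if (b->pages[i] == NULL)
                break;
        }
        b->page_count = i;                 /* may be < page_count on failure */
        return (i < page_count) ? -1 : 0;
    }

    static void buffer_free(struct iso_buffer *b)
    {
        for (int i = 0; i < b->page_count; i++)
            free(b->pages[i]);
        free(b->pages);
        b->pages = NULL;
        b->page_count = 0;
    }

The kernel version cleans up internally on failure (file line 59); in this sketch the caller is expected to call buffer_free() instead.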
|
D | core.h | 157 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
|
D | core-cdev.c | 1086 buffer_end = client->buffer.page_count << PAGE_SHIFT; in ioctl_queue_iso() 1675 int page_count, ret; in fw_device_op_mmap() local 1692 page_count = size >> PAGE_SHIFT; in fw_device_op_mmap() 1696 ret = fw_iso_buffer_alloc(&client->buffer, page_count); in fw_device_op_mmap()
|
D | ohci.c | 3402 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) in queue_iso_buffer_fill()
|
/linux-4.1.27/drivers/char/agp/ |
D | generic.c |
    186 if (curr->page_count != 0) { in agp_free_memory()
    191 for (i = 0; i < curr->page_count; i++) { in agp_free_memory()
    196 for (i = 0; i < curr->page_count; i++) { in agp_free_memory()
    223 size_t page_count, u32 type) in agp_allocate_memory() argument
    234 if ((cur_memory + page_count > bridge->max_memory_agp) || in agp_allocate_memory()
    235 (cur_memory + page_count < page_count)) in agp_allocate_memory()
    239 new = agp_generic_alloc_user(page_count, type); in agp_allocate_memory()
    246 new = bridge->driver->alloc_by_type(page_count, type); in agp_allocate_memory()
    252 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; in agp_allocate_memory()
    260 if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { in agp_allocate_memory()
    [all …]
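
The agp_allocate_memory() test at file lines 234-235 is an overflow-safe bounds check: if cur_memory + page_count wraps around, the unsigned sum becomes smaller than page_count, so the second clause rejects oversized requests that would otherwise slip past the limit test. A small model of the guard, assuming size_t arithmetic as in the kernel code:

    #include <stdbool.h>
    #include <stddef.h>

    /* Reject the request if it exceeds the limit or if the unsigned sum
     * wrapped; a wrapped sum is smaller than either operand. */
    static bool request_fits(size_t cur_memory, size_t page_count,
                             size_t max_memory)
    {
        if (cur_memory + page_count > max_memory ||
            cur_memory + page_count < page_count)
            return false;
        return true;
    }

An equivalent wrap-free form is page_count <= max_memory - cur_memory, valid whenever cur_memory <= max_memory. The scratch_pages computation at file line 252 is the standard round-up division idiom, (n + d - 1) / d, the same DIV_ROUND_UP pattern the pstore entry below uses.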
|
D | i460-agp.c |
    311 if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { in i460_insert_memory_small_io_page()
    317 while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { in i460_insert_memory_small_io_page()
    327 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { in i460_insert_memory_small_io_page()
    346 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) in i460_remove_memory_small_io_page()
    415 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_insert_memory_large_io_page()
    417 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_insert_memory_large_io_page()
    473 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_remove_memory_large_io_page()
    475 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_remove_memory_large_io_page()
|
D | sgi-agp.c | 173 if ((pg_start + mem->page_count) > num_entries) in sgi_tioca_insert_memory() 178 while (j < (pg_start + mem->page_count)) { in sgi_tioca_insert_memory() 189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in sgi_tioca_insert_memory() 217 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in sgi_tioca_remove_memory()
|
D | nvidia-agp.c | 210 if (mem->page_count == 0) in nvidia_insert_memory() 213 if ((pg_start + mem->page_count) > in nvidia_insert_memory() 217 for (j = pg_start; j < (pg_start + mem->page_count); j++) { in nvidia_insert_memory() 226 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in nvidia_insert_memory() 250 if (mem->page_count == 0) in nvidia_remove_memory() 253 for (i = pg_start; i < (mem->page_count + pg_start); i++) in nvidia_remove_memory()
|
D | intel-gtt.c |
    126 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); in intel_gtt_unmap_memory()
    216 if ((pg_start + mem->page_count) in i810_insert_dcache_entries()
    223 for (i = pg_start; i < (pg_start + mem->page_count); i++) { in i810_insert_dcache_entries()
    268 new->page_count = pg_count; in alloc_agpphysmem_i8xx()
    279 if (curr->page_count == 4) in intel_i810_free_by_type()
    896 if (mem->page_count == 0) in intel_fake_agp_insert_entries()
    899 if (pg_start + mem->page_count > intel_private.gtt_total_entries) in intel_fake_agp_insert_entries()
    914 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); in intel_fake_agp_insert_entries()
    922 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, in intel_fake_agp_insert_entries()
    949 if (mem->page_count == 0) in intel_fake_agp_remove_entries()
    [all …]
|
D | ati-agp.c | 280 if (mem->page_count == 0) in ati_insert_memory() 283 if ((pg_start + mem->page_count) > num_entries) in ati_insert_memory() 287 while (j < (pg_start + mem->page_count)) { in ati_insert_memory() 301 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in ati_insert_memory() 326 if (mem->page_count == 0) in ati_remove_memory() 329 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in ati_remove_memory()
|
D | uninorth-agp.c | 165 if (mem->page_count == 0) in uninorth_insert_memory() 171 if ((pg_start + mem->page_count) > num_entries) in uninorth_insert_memory() 175 for (i = 0; i < mem->page_count; ++i) { in uninorth_insert_memory() 184 for (i = 0; i < mem->page_count; i++) { in uninorth_insert_memory() 214 if (mem->page_count == 0) in uninorth_remove_memory() 218 for (i = 0; i < mem->page_count; ++i) { in uninorth_remove_memory()
|
D | agp.h | 199 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type); 203 struct agp_memory *memory, size_t page_count); 220 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
|
D | efficeon-agp.c | 240 int i, count = mem->page_count, num_entries; in efficeon_insert_memory() 248 if ((pg_start + mem->page_count) > num_entries) in efficeon_insert_memory() 289 int i, count = mem->page_count, num_entries; in efficeon_remove_memory() 295 if ((pg_start + mem->page_count) > num_entries) in efficeon_remove_memory()
|
D | ali-agp.c | 128 int i, page_count; in m1541_cache_flush() local 133 page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; in m1541_cache_flush() 134 for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { in m1541_cache_flush()
|
D | sworks-agp.c | 331 if ((pg_start + mem->page_count) > num_entries) { in serverworks_insert_memory() 336 while (j < (pg_start + mem->page_count)) { in serverworks_insert_memory() 349 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in serverworks_insert_memory() 374 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in serverworks_remove_memory()
|
D | amd-k7-agp.c | 295 if ((pg_start + mem->page_count) > num_entries) in amd_insert_memory() 299 while (j < (pg_start + mem->page_count)) { in amd_insert_memory() 312 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in amd_insert_memory() 335 for (i = pg_start; i < (mem->page_count + pg_start); i++) { in amd_remove_memory()
|
D | parisc-agp.c | 138 io_pg_count = info->io_pages_per_kpage * mem->page_count; in parisc_agp_insert_memory() 155 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { in parisc_agp_insert_memory() 185 io_pg_count = info->io_pages_per_kpage * mem->page_count; in parisc_agp_remove_memory()
|
D | hp-agp.c | 345 io_pg_count = hp->io_pages_per_kpage * mem->page_count; in hp_zx1_insert_memory() 363 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { in hp_zx1_insert_memory() 390 io_pg_count = hp->io_pages_per_kpage * mem->page_count; in hp_zx1_remove_memory()
|
D | amd64-agp.c | 63 if (((unsigned long)pg_start + mem->page_count) > num_entries) in amd64_insert_memory() 69 while (j < (pg_start + mem->page_count)) { in amd64_insert_memory() 80 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in amd64_insert_memory()
|
D | alpha-agp.c | 98 if ((pg_start + mem->page_count) > num_entries) in alpha_core_agp_insert_memory()
|
/linux-4.1.27/drivers/target/ |
D | target_core_rd.c |
    87 u32 i, j, page_count = 0, sg_per_table; in rd_release_sgl_table() local
    97 page_count++; in rd_release_sgl_table()
    104 return page_count; in rd_release_sgl_table()
    109 u32 page_count; in rd_release_device_space() local
    114 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, in rd_release_device_space()
    119 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, in rd_release_device_space()
    120 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); in rd_release_device_space()
    250 u32 page_count; in rd_release_prot_space() local
    255 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, in rd_release_prot_space()
    260 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, in rd_release_prot_space()
    [all …]
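
Here rd_release_sgl_table() frees every page behind an array of scatter-gather tables and returns how many it freed, purely so the callers (file lines 119-120 and 260) can log page_count and page_count * PAGE_SIZE. A hedged userspace sketch of the walk, with flat arrays standing in for the kernel's struct scatterlist:

    #include <stdlib.h>

    struct sg_entry { void *page; };
    struct sg_table_desc {
        struct sg_entry *sg;
        unsigned int nents;
    };

    /* Free every page behind every table; the return value exists only
     * so the caller can log pages freed and total bytes released. */
    static unsigned int release_sgl_tables(struct sg_table_desc *tables,
                                           unsigned int table_count)
    {
        unsigned int i, j, page_count = 0;

        for (i = 0; i < table_count; i++) {
            for (j = 0; j < tables[i].nents; j++) {
                free(tables[i].sg[j].page);
                page_count++;
            }
            free(tables[i].sg);
        }
        return page_count;
    }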
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | lloop.c |
    194 u32 page_count = 0; in do_bio_lustrebacked() local
    224 pages[page_count] = bvec.bv_page; in do_bio_lustrebacked()
    225 offsets[page_count] = offset; in do_bio_lustrebacked()
    226 page_count++; in do_bio_lustrebacked()
    229 LASSERT(page_count <= LLOOP_MAX_SEGMENTS); in do_bio_lustrebacked()
    234 page_count); in do_bio_lustrebacked()
    236 pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; in do_bio_lustrebacked()
    237 pvec->ldp_nr = page_count; in do_bio_lustrebacked()
    295 unsigned int page_count = 0; in loop_get_bio() local
    314 page_count, (*bio)->bi_vcnt); in loop_get_bio()
    [all …]
|
D | rw26.c | 139 if (page_count(vmpage) > 3) in ll_releasepage() 239 int page_count = pv->ldp_nr; in ll_direct_rw_pages() local 247 for (i = 0; i < page_count; i++) { in ll_direct_rw_pages() 337 struct page **pages, int page_count) in ll_direct_IO_26_seg() argument 340 .ldp_nr = page_count, in ll_direct_IO_26_seg()
|
D | vvp_dev.c | 420 page_count(vmpage)); in vvp_pgcache_page_show()
|
D | vvp_page.c | 384 (long)vmpage->flags, page_count(vmpage), in vvp_page_print()
|
D | vvp_io.c | 619 (long)vmf->page->flags, page_count(vmf->page), in vvp_io_kernel_fault()
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | osc_cache.c |
    178 int page_count; in osc_extent_sanity_check0() local
    275 page_count = 0; in osc_extent_sanity_check0()
    278 ++page_count; in osc_extent_sanity_check0()
    284 if (page_count != ext->oe_nr_pages) { in osc_extent_sanity_check0()
    1069 int page_count = 0; in osc_extent_make_ready() local
    1081 ++page_count; in osc_extent_make_ready()
    1104 LASSERT(page_count == ext->oe_nr_pages); in osc_extent_make_ready()
    1883 int page_count = 0; in get_write_extents() local
    1891 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, in get_write_extents()
    1893 return page_count; in get_write_extents()
    [all …]
|
D | osc_request.c |
    1094 static void handle_short_read(int nob_read, u32 page_count, in handle_short_read() argument
    1102 LASSERT (page_count > 0); in handle_short_read()
    1110 page_count--; in handle_short_read()
    1116 page_count--; in handle_short_read()
    1121 while (page_count-- > 0) { in handle_short_read()
    1131 u32 page_count, struct brw_page **pga) in check_write_rcs() argument
    1221 (long)pga[i]->pg->flags, page_count(pga[i]->pg), in osc_checksum_bulk()
    1246 struct lov_stripe_md *lsm, u32 page_count, in osc_brw_prep_request() argument
    1279 for (niocount = i = 1; i < page_count; i++) { in osc_brw_prep_request()
    1302 desc = ptlrpc_prep_bulk_imp(req, page_count, in osc_brw_prep_request()
    [all …]
|
/linux-4.1.27/fs/btrfs/ |
D | scrub.c |
    106 int page_count; member
    113 int page_count; member
    426 for (i = 0; i < sbio->page_count; i++) { in scrub_free_ctx()
    490 sbio->page_count = 0; in scrub_setup_ctx()
    625 WARN_ON(sblock->page_count < 1); in scrub_print_warning()
    909 BUG_ON(sblock_to_check->page_count < 1); in scrub_handle_errored_block()
    922 length = sblock_to_check->page_count * PAGE_SIZE; in scrub_handle_errored_block()
    1098 sblocks_for_recheck[mirror_index].page_count > 0; in scrub_handle_errored_block()
    1154 for (page_num = 0; page_num < sblock_bad->page_count; in scrub_handle_errored_block()
    1167 sblocks_for_recheck[mirror_index].page_count > 0; in scrub_handle_errored_block()
    [all …]
|
/linux-4.1.27/drivers/gpu/drm/nouveau/ |
D | nouveau_bo.c |
    733 u32 page_count = new_mem->num_pages; in nvc0_bo_move_copy() local
    736 page_count = new_mem->num_pages; in nvc0_bo_move_copy()
    737 while (page_count) { in nvc0_bo_move_copy()
    738 int line_count = (page_count > 8191) ? 8191 : page_count; in nvc0_bo_move_copy()
    756 page_count -= line_count; in nvc0_bo_move_copy()
    771 u32 page_count = new_mem->num_pages; in nvc0_bo_move_m2mf() local
    774 page_count = new_mem->num_pages; in nvc0_bo_move_m2mf()
    775 while (page_count) { in nvc0_bo_move_m2mf()
    776 int line_count = (page_count > 2047) ? 2047 : page_count; in nvc0_bo_move_m2mf()
    795 page_count -= line_count; in nvc0_bo_move_m2mf()
    [all …]
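
Both move paths split a buffer move into hardware-sized bursts: the copy engine in nvc0_bo_move_copy() takes at most 8191 lines (one page per line) per command, the M2MF path 2047, so the loop clamps and decrements until page_count is exhausted. The shape of that loop as a standalone sketch, where submit_copy() is a stand-in for the real command emission:

    #include <stdio.h>

    static void submit_copy(unsigned int line_count)
    {
        printf("copy %u pages in one command\n", line_count);  /* stand-in */
    }

    /* Split a page_count-sized move into chunks the engine accepts. */
    static void copy_pages_chunked(unsigned int page_count,
                                   unsigned int max_lines)
    {
        while (page_count) {
            unsigned int line_count =
                (page_count > max_lines) ? max_lines : page_count;

            submit_copy(line_count);
            page_count -= line_count;
        }
    }

    int main(void)
    {
        copy_pages_chunked(20000, 8191);   /* emits 8191 + 8191 + 3618 */
        return 0;
    }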
|
/linux-4.1.27/drivers/iommu/ |
D | tegra-gart.c | 58 u32 page_count; /* total remappable size */ member 90 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \ 157 gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1; in gart_iova_range_valid() 240 gart->page_count * GART_PAGE_SIZE - 1; in gart_iommu_domain_alloc() 413 gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT); in tegra_gart_probe() 415 gart->savedata = vmalloc(sizeof(u32) * gart->page_count); in tegra_gart_probe()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_buf.c |
    217 int page_count) in _xfs_buf_get_pages() argument
    221 bp->b_page_count = page_count; in _xfs_buf_get_pages()
    222 if (page_count <= XB_PAGES) { in _xfs_buf_get_pages()
    226 page_count, KM_NOFS); in _xfs_buf_get_pages()
    230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); in _xfs_buf_get_pages()
    293 unsigned short page_count, i; in xfs_buf_allocate_memory() local
    329 page_count = end - start; in xfs_buf_allocate_memory()
    330 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_allocate_memory()
    777 int page_count; in xfs_buf_associate_memory() local
    782 page_count = buflen >> PAGE_SHIFT; in xfs_buf_associate_memory()
    [all …]
|
/linux-4.1.27/fs/pstore/ |
D | ram_core.c |
    388 unsigned int page_count; in persistent_ram_vmap() local
    394 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); in persistent_ram_vmap()
    401 pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); in persistent_ram_vmap()
    404 __func__, page_count); in persistent_ram_vmap()
    408 for (i = 0; i < page_count; i++) { in persistent_ram_vmap()
    412 vaddr = vmap(pages, page_count, VM_MAP, prot); in persistent_ram_vmap()
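
persistent_ram_vmap() (file line 394) computes how many pages cover `size` bytes starting at a possibly unaligned physical address: the leading misalignment is added in before rounding up, so a range that spills over a trailing page boundary is counted correctly. A runnable model of the computation, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
    #define offset_in_page(addr) ((unsigned long)(addr) & (PAGE_SIZE - 1))

    /* Pages needed to map `size` bytes at physical address `start`. */
    static unsigned long pages_to_map(unsigned long start, unsigned long size)
    {
        return DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
    }

    int main(void)
    {
        /* 32 bytes beginning near the end of a page still need two pages */
        printf("%lu\n", pages_to_map(0x1ff0, 0x20));
        return 0;
    }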
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
D | ttm_agp_backend.c | 63 mem->page_count = 0; in ttm_agp_bind() 70 mem->pages[mem->page_count++] = page; in ttm_agp_bind()
|
D | ttm_page_alloc.c | 687 if (page_count(pages[i]) != 1) in ttm_put_pages() 699 if (page_count(pages[i]) != 1) in ttm_put_pages()
|
/linux-4.1.27/drivers/gpu/drm/ |
D | drm_bufs.c |
    742 dma->page_count += byte_count >> PAGE_SHIFT; in drm_legacy_addbufs_agp()
    777 int page_count; in drm_legacy_addbufs_pci() local
    845 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * in drm_legacy_addbufs_pci()
    855 dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); in drm_legacy_addbufs_pci()
    857 dma->page_count + (count << page_order)); in drm_legacy_addbufs_pci()
    862 page_count = 0; in drm_legacy_addbufs_pci()
    881 dma->page_count + page_count, in drm_legacy_addbufs_pci()
    883 temp_pagelist[dma->page_count + page_count++] in drm_legacy_addbufs_pci()
    942 if (dma->page_count) { in drm_legacy_addbufs_pci()
    949 dma->page_count += entry->seg_count << page_order; in drm_legacy_addbufs_pci()
    [all …]
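
drm_legacy_addbufs_pci() (file lines 845-857) grows the DMA page list by hand: allocate the larger array first, copy the old entries, and only then replace the pointer, so the existing list stays intact if the allocation fails. A hedged userspace version of just that grow step (the function name is illustrative):

    #include <stdlib.h>
    #include <string.h>

    /* Grow *pagelist from page_count to page_count + extra entries.
     * The old array is replaced only after the new one exists, so a
     * failed allocation leaves the current list fully usable. */
    static int pagelist_grow(unsigned long **pagelist, int page_count, int extra)
    {
        unsigned long *tmp = malloc((page_count + extra) * sizeof(*tmp));

        if (tmp == NULL)
            return -1;
        if (page_count)
            memcpy(tmp, *pagelist, page_count * sizeof(*tmp));
        free(*pagelist);
        *pagelist = tmp;
        return 0;
    }

This is the manual equivalent of realloc(), chosen so failure cannot disturb the list a running driver may still be using.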
|
D | drm_vm.c | 165 page_count(page)); in drm_do_vm_fault() 490 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { in drm_mmap_dma()
|
D | drm_agpsupport.c | 492 mem->page_count = num_pages; in drm_agp_bind_pages()
|
/linux-4.1.27/drivers/block/ |
D | rbd.c |
    267 u32 page_count; member
    2070 obj_request->page_count); in rbd_obj_request_destroy()
    2293 obj_request->page_count = 0; in rbd_img_obj_end_request()
    2489 unsigned int page_count; in rbd_img_request_fill() local
    2492 page_count = (u32)calc_pages_for(offset, length); in rbd_img_request_fill()
    2493 obj_request->page_count = page_count; in rbd_img_request_fill()
    2495 page_count--; /* more on last page */ in rbd_img_request_fill()
    2496 pages += page_count; in rbd_img_request_fill()
    2532 u32 page_count; in rbd_osd_copyup_callback() local
    2548 page_count = obj_request->copyup_page_count; in rbd_osd_copyup_callback()
    [all …]
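
rbd_img_request_fill() (file line 2492) sizes its page array with libceph's calc_pages_for(): the number of pages spanned by `len` bytes starting `off` bytes into a page. The helper amounts to the following userspace restatement, assuming 4 KiB pages; the same helper appears again in net/ceph/messenger.c further down:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* pages touched by [off, off + len): index of the page one past the
     * end, minus the index of the first page */
    static int calc_pages_for(uint64_t off, uint64_t len)
    {
        return (int)(((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                     (off >> PAGE_SHIFT));
    }

    int main(void)
    {
        printf("%d\n", calc_pages_for(4095, 2));   /* straddles: 2 pages */
        printf("%d\n", calc_pages_for(0, 4096));   /* exactly one page  */
        return 0;
    }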
|
D | ps3vram.c | 61 unsigned int page_count; member 363 for (i = 0; i < cache->page_count; i++) { in ps3vram_cache_flush() 383 for (i = 0; i < cache->page_count; i++) { in ps3vram_cache_match() 394 i = (jiffies + (counter++)) % cache->page_count; in ps3vram_cache_match() 408 priv->cache.page_count = CACHE_PAGE_COUNT; in ps3vram_cache_init()
|
/linux-4.1.27/drivers/gpu/drm/udl/ |
D | udl_dmabuf.c | 82 int page_count; in udl_map_dma_buf() local 100 page_count = obj->base.size / PAGE_SIZE; in udl_map_dma_buf() 101 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); in udl_map_dma_buf()
|
D | udl_gem.c | 160 int page_count = obj->base.size / PAGE_SIZE; in udl_gem_vmap() local 174 obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL); in udl_gem_vmap()
|
/linux-4.1.27/arch/xtensa/mm/ |
D | tlb.c | 244 page_count(p), in check_tlb_entry() 246 if (!page_count(p)) in check_tlb_entry()
|
/linux-4.1.27/mm/ |
D | migrate.c |
    318 if (page_count(page) != expected_count) in migrate_page_move_mapping()
    329 if (page_count(page) != expected_count || in migrate_page_move_mapping()
    404 if (page_count(page) != 1) in migrate_huge_page_move_mapping()
    415 if (page_count(page) != expected_count || in migrate_huge_page_move_mapping()
    931 if (page_count(page) == 1) { in unmap_and_move()
    1631 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
    1767 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { in migrate_misplaced_transhuge_page()
    1810 if (page_count(page) != 2) { in migrate_misplaced_transhuge_page()
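
These hits are all reference-count gates: migrate_page_move_mapping() (file line 318) refuses to move a page unless page_count() equals expected_count, the number of references the kernel can account for at that moment. Any surplus means an unaccounted user could still touch the old page. A sketch of the gate with a C11 atomic standing in for the kernel's atomic_t (names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Migration proceeds only when every reference to the page is
     * accounted for; a concurrent get_page() makes the count differ
     * from expected_count and the move is abandoned. */
    static bool can_migrate(atomic_int *page_refcount, int expected_count)
    {
        return atomic_load(page_refcount) == expected_count;
    }

The transparent-hugepage paths (file lines 1631, 1767, 1810) use the same idea with hard-coded expected counts of 2 or 3 references.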
|
D | page_isolation.c | 243 else if (page_count(page) == 0 && in __test_page_isolated_in_pageblock()
|
D | memory-failure.c | 898 count = page_count(p) - 1; in page_action() 1492 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) in unpoison_memory() 1703 pfn, ret, page_count(page), page->flags); in __soft_offline_page()
|
D | hugetlb.c |
    801 if (page_count(page) > 0) in pfn_range_valid_gigantic()
    971 BUG_ON(page_count(page)); in free_huge_page()
    1178 if (PageHuge(page) && !page_count(page)) { in dissolve_free_huge_page()
    1375 VM_BUG_ON_PAGE(page_count(page), page); in gather_surplus_pages()
    1595 WARN_ON(page_count(page) != 1); in gather_bootmem_prealloc()
    3788 BUG_ON(page_count(virt_to_page(ptep)) == 0); in huge_pmd_unshare()
    3789 if (page_count(virt_to_page(ptep)) == 1) in huge_pmd_unshare()
    3926 if (!page_huge_active(hpage) && !page_count(hpage)) { in dequeue_hwpoisoned_huge_page()
|
D | huge_memory.c | 1732 BUG_ON(page_count(page_tail) <= 0); in __split_huge_page_refcount() 1747 BUG_ON(page_count(page) <= 0); in __split_huge_page_refcount() 2165 if (page_count(page) != 1 + !!PageSwapCache(page)) { in __collapse_huge_page_isolate() 2619 if (page_count(page) != 1 + !!PageSwapCache(page)) in khugepaged_scan_pmd() 2911 VM_BUG_ON_PAGE(!page_count(page), page); in __split_huge_page_pmd()
|
D | vmscan.c | 452 return page_count(page) - page_has_private(page) == 2; in is_page_cache_freeable() 1113 if (!mapping && page_count(page) == 1) { in shrink_page_list() 1380 VM_BUG_ON_PAGE(!page_count(page), page); in isolate_lru_page()
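
is_page_cache_freeable() (file line 452) encodes the same reference accounting in a fixed form: a reclaimable page-cache page should hold exactly two references, the page cache's and the isolating caller's, plus one more when buffer heads hang off page->private, which subtracting page_has_private() cancels out. Restated as a standalone predicate over the two counts:

    #include <stdbool.h>

    /* Freeable: only the page cache and the isolating caller hold it;
     * page_has_private is 1 when buffer heads add a third reference. */
    static bool is_page_cache_freeable(int page_count, int page_has_private)
    {
        return page_count - page_has_private == 2;
    }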
|
D | nommu.c | 670 if (page_count(page) != 1) in free_page_series() 672 page, page_count(page)); in free_page_series()
|
D | page_alloc.c | 527 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); in page_is_buddy() 541 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); in page_is_buddy() 1636 VM_BUG_ON_PAGE(!page_count(page), page); in split_page() 6548 count += page_count(page) != 1; in free_contig_range() 6627 BUG_ON(page_count(page)); in __offline_isolated_pages()
|
D | compaction.c | 756 page_count(page) > page_mapcount(page)) in isolate_migratepages_block()
|
D | kmemleak.c | 1360 if (page_count(page) == 0) in kmemleak_scan()
|
D | memory_hotplug.c | 1438 if (page_count(page)) { in do_migrate_range()
|
D | shmem.c | 1852 } else if (page_count(page) - page_mapcount(page) > 1) { in shmem_tag_pins() 1912 page_count(page) - page_mapcount(page) != 1) { in shmem_wait_for_pins()
|
D | ksm.c | 900 if (page_mapcount(page) + 1 + swapped != page_count(page)) { in write_protect_page()
|
D | memcontrol.c | 5644 VM_BUG_ON_PAGE(page_count(page), page); in uncharge_list() 5826 VM_BUG_ON_PAGE(page_count(page), page); in mem_cgroup_swapout()
|
D | memory.c | 1507 if (!page_count(page)) in vm_insert_page()
|
/linux-4.1.27/include/linux/ |
D | agp_backend.h | 74 size_t page_count; member
|
D | pagemap.h | 160 VM_BUG_ON_PAGE(page_count(page) == 0, page); in page_cache_get_speculative() 189 VM_BUG_ON_PAGE(page_count(page) == 0, page); in page_cache_add_speculative() 208 VM_BUG_ON_PAGE(page_count(page) != 0, page); in page_unfreeze_refs()
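
The VM_BUG_ON_PAGE() assertions here guard the lockless page-cache protocol: page_cache_get_speculative() may only take a reference on a page whose count is still nonzero, because under RCU a looked-up page can be freed and reused at any moment. The underlying primitive is an increment-unless-zero; a userspace model with C11 atomics (the kernel's version is get_page_unless_zero()):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if at least one still exists; if the count
     * hit zero the page is being freed and the lookup must be retried. */
    static bool get_page_unless_zero_model(atomic_int *count)
    {
        int old = atomic_load(count);

        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return true;    /* reference taken */
            /* old was reloaded by the failed CAS; loop and retest */
        }
        return false;
    }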
|
D | firewire.h | 423 int page_count; member 428 int page_count, enum dma_data_direction direction);
|
D | relay.h | 44 unsigned int page_count; /* number of current buffer pages */ member
|
D | mm.h | 495 static inline int page_count(struct page *page) in page_count() function
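
This is the definition all the other hits resolve to. In this kernel version, page_count() reads the _count field of the page's compound head, so a tail page of a huge page reports the head's reference count. A simplified model (the real compound_head() follows the first_page pointer only when PageTail is set):

    #include <stdatomic.h>
    #include <stddef.h>

    struct page_model {
        atomic_int _count;
        struct page_model *first_page;  /* non-NULL only on tail pages here */
    };

    /* Simplified page_count(): resolve to the compound head, then read. */
    static int page_count_model(struct page_model *page)
    {
        struct page_model *head = page->first_page ? page->first_page : page;

        return atomic_load(&head->_count);
    }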
|
/linux-4.1.27/drivers/net/ethernet/brocade/bna/ |
D | bna_tx_rx.c |
    1483 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
    2077 u32 page_count, in bna_rxq_qpt_setup() argument
    2091 rxq->qpt.page_count = page_count; in bna_rxq_qpt_setup()
    2100 for (i = 0; i < rxq->qpt.page_count; i++) { in bna_rxq_qpt_setup()
    2115 u32 page_count, in bna_rxp_cqpt_setup() argument
    2129 rxp->cq.qpt.page_count = page_count; in bna_rxp_cqpt_setup()
    2138 for (i = 0; i < rxp->cq.qpt.page_count; i++) { in bna_rxp_cqpt_setup()
    2531 u32 page_count; in bna_rx_create() local
    2551 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / in bna_rx_create()
    2715 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, in bna_rx_create()
    [all …]
|
D | bna_types.h | 315 u32 page_count; member
|
/linux-4.1.27/arch/unicore32/mm/ |
D | init.c | 88 else if (!page_count(page)) in show_mem() 91 shared += page_count(page) - 1; in show_mem()
|
/linux-4.1.27/drivers/hv/ |
D | hv_balloon.c | 380 __u32 page_count; member 911 resp.page_count = process_hot_add(pg_start, pfn_cnt, in hot_add_req() 914 dm->num_pages_added += resp.page_count; in hot_add_req() 933 if (resp.page_count > 0) in hot_add_req() 940 if (!do_hot_add || (resp.page_count == 0)) in hot_add_req()
|
/linux-4.1.27/include/drm/ |
D | drm_legacy.h | 105 int page_count; /**< number of pages */ member
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_gem_tiling.c | 531 int page_count = obj->base.size >> PAGE_SHIFT; in i915_gem_object_save_bit_17_swizzle() local 535 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), in i915_gem_object_save_bit_17_swizzle()
|
D | i915_gpu_error.c | 321 for (page = offset = 0; page < obj->page_count; page++) { in print_error_obj() 542 for (page = 0; page < obj->page_count; page++) in i915_error_object_free() 625 dst->page_count = num_pages; in i915_error_object_create()
|
D | i915_gem.c | 2038 int page_count, i; in i915_gem_object_get_pages_gtt() local 2058 page_count = obj->base.size / PAGE_SIZE; in i915_gem_object_get_pages_gtt() 2059 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { in i915_gem_object_get_pages_gtt() 2075 for (i = 0; i < page_count; i++) { in i915_gem_object_get_pages_gtt() 2079 page_count, in i915_gem_object_get_pages_gtt()
|
D | i915_drv.h | 477 int page_count; member
|
/linux-4.1.27/fs/ceph/ |
D | super.c | 505 int page_count; in create_fs_client() local 550 page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; in create_fs_client() 551 size = sizeof (struct page *) * (page_count ? page_count : 1); in create_fs_client()
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/ |
D | tracefile.h | 335 __LASSERT(page_count(tage->page) > 0); \
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c | 232 static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) in mlx4_UNMAP_ICM() argument 234 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, in mlx4_UNMAP_ICM()
|
D | en_rx.c | 198 i, page_count(page_alloc->page)); in mlx4_en_destroy_allocator()
|
/linux-4.1.27/drivers/net/ethernet/sfc/ |
D | siena_sriov.c | 809 u64 page_count = req->u.set_status_page.peer_page_count; in efx_vfdi_set_status_page() local 815 if (!req->u.set_status_page.dma_addr || page_count > max_page_count) { in efx_vfdi_set_status_page() 831 if (page_count) { in efx_vfdi_set_status_page() 832 vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), in efx_vfdi_set_status_page() 837 page_count * sizeof(u64)); in efx_vfdi_set_status_page() 838 vf->peer_page_count = page_count; in efx_vfdi_set_status_page()
|
D | rx.c | 127 if (page_count(page) == 1) { in efx_reuse_page()
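
efx_reuse_page() keys on page_count(page) == 1: once the network stack drops its reference, the driver is the page's sole owner and can hand it straight back to the RX ring, skipping a free/alloc/DMA-map cycle. The Intel drivers further down (fm10k_main.c, igbvf/netdev.c, ixgbevf_main.c, igb_main.c, ixgbe_main.c, i40e, i40evf) apply the same test, inverted, to decide when a page cannot be reused. As a predicate:

    #include <stdbool.h>

    /* RX page recycling: refcount 1 means the stack returned the page
     * and the driver may reuse it for DMA; anything higher means it is
     * still shared and a fresh page must be allocated instead. */
    static bool can_recycle_rx_page(int page_count)
    {
        return page_count == 1;
    }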
|
/linux-4.1.27/include/linux/ceph/ |
D | messenger.h | 134 unsigned short page_count; /* pages in array */ member
|
/linux-4.1.27/arch/arm/include/asm/ |
D | kvm_mmu.h | 157 return page_count(ptr_page) == 1; in kvm_page_empty()
|
/linux-4.1.27/arch/arm64/include/asm/ |
D | kvm_mmu.h | 204 return page_count(ptr_page) == 1;
|
/linux-4.1.27/drivers/scsi/lpfc/ |
D | lpfc_sli4.h | 141 uint32_t page_count; /* Number of pages allocated for this queue */ member 643 uint32_t page_count; member
|
D | lpfc_sli.c |
    12795 queue->page_count = (ALIGN(entry_size * entry_count, in lpfc_sli4_queue_alloc()
    12800 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { in lpfc_sli4_queue_alloc()
    13002 eq->page_count); in lpfc_eq_create()
    13122 cq->page_count); in lpfc_cq_create()
    13228 mq->page_count); in lpfc_mq_create_fb_init()
    13310 &mq_create_ext->u.request, mq->page_count); in lpfc_mq_create()
    13467 wq->page_count); in lpfc_wq_create()
    13754 hrq->page_count); in lpfc_rq_create()
    13885 drq->page_count); in lpfc_rq_create()
    15664 hdr_tmpl, rpi_page->page_count); in lpfc_sli4_post_rpi_hdr()
|
D | lpfc_init.c | 5996 rpi_hdr->page_count = 1; in lpfc_sli4_create_rpi_hdr()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | vdso.c | 147 page_count(pg), in dump_one_vdso_page() 152 page_count(upg), in dump_one_vdso_page()
|
/linux-4.1.27/drivers/mtd/nand/ |
D | denali.c | 752 uint32_t page_count = 1; in denali_send_pipeline_cmd() local 793 PIPELINE_ACCESS | op | page_count); in denali_send_pipeline_cmd() 1027 const int page_count = 1; in denali_setup_dma() local 1035 index_addr(denali, mode | denali->page, 0x2000 | op | page_count); in denali_setup_dma()
|
/linux-4.1.27/arch/mips/mm/ |
D | gup.c | 66 VM_BUG_ON(page_count(page) == 0); in get_head_page_multiple()
|
/linux-4.1.27/net/ceph/ |
D | messenger.c |
    595 if (page_count(page) >= 1) in ceph_tcp_sendpage()
    928 int page_count; in ceph_msg_data_pages_cursor_init() local
    936 page_count = calc_pages_for(data->alignment, (u64)data->length); in ceph_msg_data_pages_cursor_init()
    939 BUG_ON(page_count > (int)USHRT_MAX); in ceph_msg_data_pages_cursor_init()
    940 cursor->page_count = (unsigned short)page_count; in ceph_msg_data_pages_cursor_init()
    953 BUG_ON(cursor->page_index >= cursor->page_count); in ceph_msg_data_pages_next()
    984 BUG_ON(cursor->page_index >= cursor->page_count); in ceph_msg_data_pages_advance()
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_cmd.h | 279 int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count);
|
D | mthca_cmd.c | 1563 int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count) in mthca_UNMAP_ICM() argument 1566 page_count, (unsigned long long) virt); in mthca_UNMAP_ICM() 1568 return mthca_cmd(dev, virt, page_count, 0, in mthca_UNMAP_ICM()
|
/linux-4.1.27/arch/alpha/kernel/ |
D | core_titan.c | 683 mem->page_count, mem->pages); in titan_agp_bind_memory() 691 mem->page_count); in titan_agp_unbind_memory()
|
D | core_marvel.c | 1019 mem->page_count, mem->pages); in marvel_agp_bind_memory() 1027 mem->page_count); in marvel_agp_unbind_memory()
|
/linux-4.1.27/arch/x86/mm/ |
D | gup.c | 112 VM_BUG_ON_PAGE(page_count(page) == 0, page); in get_head_page_multiple()
|
/linux-4.1.27/drivers/infiniband/hw/nes/ |
D | nes_verbs.c |
    286 u32 stag, u32 page_count) in alloc_fast_reg_mr() argument
    295 u64 region_length = page_count * PAGE_SIZE; in alloc_fast_reg_mr()
    305 page_count, region_length); in alloc_fast_reg_mr()
    345 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8)); in alloc_fast_reg_mr()
    2320 int page_count = 0; in nes_reg_user_mr() local
    2411 …if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->len… in nes_reg_user_mr()
    2413 if ((page_count&0x01FF) == 0) { in nes_reg_user_mr()
    2414 if (page_count >= 1024 * 512) { in nes_reg_user_mr()
    2479 if (page_count != 0) { in nes_reg_user_mr()
    2500 page_count++; in nes_reg_user_mr()
|
/linux-4.1.27/arch/tile/mm/ |
D | homecache.c | 368 BUG_ON(page_count(page) > 1); in homecache_change_page_home()
|
/linux-4.1.27/fs/cachefiles/ |
D | rdwr.c | 238 netpage, netpage->index, page_count(netpage)); in cachefiles_read_backing_file_one() 481 netpage, netpage->index, page_count(netpage)); in cachefiles_read_backing_file()
|
/linux-4.1.27/fs/ |
D | pipe.c | 128 if (page_count(page) == 1 && !pipe->tmp_page) in anon_pipe_buf_release() 156 if (page_count(page) == 1) { in generic_pipe_buf_steal()
|
D | aio.c | 297 page_count(ctx->ring_pages[i])); in aio_free_ring() 474 current->pid, i, page_count(page)); in aio_setup_ring()
|
/linux-4.1.27/drivers/net/hyperv/ |
D | netvsc.c | 706 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : in netvsc_copy_to_send_buf() local 717 for (i = 0; i < page_count; i++) { in netvsc_copy_to_send_buf()
|
/linux-4.1.27/kernel/ |
D | relay.c | 149 buf->page_count = n_pages; in relay_alloc_buf() 216 for (i = 0; i < buf->page_count; i++) in relay_destroy_buf()
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_mrmw.c | 1541 int page_count; in ehca_reg_mr_section() local 1549 page_count = EHCA_SECTSIZE / pginfo->hwpage_size; in ehca_reg_mr_section() 1551 while (page < page_count) { in ehca_reg_mr_section() 1553 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); in ehca_reg_mr_section()
|
/linux-4.1.27/drivers/xen/ |
D | grant-table.c | 849 if (page_count(item->pages[pc]) > 1) { in __gnttab_unmap_refs_async()
|
/linux-4.1.27/drivers/android/ |
D | binder.c | 3068 active_transactions, page_count; in binder_deferred_release() local 3136 page_count = 0; in binder_deferred_release() 3152 page_count++; in binder_deferred_release() 3163 outgoing_refs, active_transactions, buffers, page_count); in binder_deferred_release()
|
/linux-4.1.27/drivers/scsi/ |
D | libiscsi_tcp.c | 133 if (page_count(sg_page(sg)) >= 1 && !recv) in iscsi_tcp_segment_map()
|
/linux-4.1.27/fs/fuse/ |
D | dev.c | 841 page_count(page) != 1 || in fuse_check_page() 850 … count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapc… in fuse_check_page()
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | cassini.c | 587 if (page_count(page->buffer) > 1) in cas_spare_recover() 1375 if (page_count(page->buffer) == 1) in cas_page_spare() 1395 if (page_count(page0[index]->buffer) > 1) { in cas_page_swap() 2234 if (page_count(page[entry]->buffer) > 1) { in cas_post_rxds_ringN()
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c | 232 if (unlikely(page_count(page) != 1)) in fm10k_can_reuse_rx_page()
|
/linux-4.1.27/drivers/block/drbd/ |
D | drbd_int.h | 1690 if (page_count(page) > 1) in drbd_peer_req_has_active_page()
|
D | drbd_main.c | 1513 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) in _drbd_send_page()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c | 1104 if ((page_count(rx_bi->page) == 1) && in i40e_clean_rx_irq_ps()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c | 1629 if ((page_count(rx_bi->page) == 1) && in i40e_clean_rx_irq_ps()
|
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/ |
D | netdev.c | 337 (page_count(buffer_info->page) != 1)) in igbvf_clean_rx_irq()
|
/linux-4.1.27/drivers/s390/net/ |
D | qeth_core_main.c | 2794 if (page_count(virt_to_page(entry->elements[i])) > 1) { in qeth_find_free_buffer_pool_entry() 2809 if (page_count(virt_to_page(entry->elements[i])) > 1) { in qeth_find_free_buffer_pool_entry()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c | 822 if (unlikely(page_count(page) != 1)) in ixgbevf_add_rx_frag()
|
/linux-4.1.27/arch/x86/kvm/ |
D | mmu.c | 608 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); in mmu_spte_clear_track_bits()
|
/linux-4.1.27/drivers/net/ethernet/intel/igb/ |
D | igb_main.c | 6612 if (unlikely(page_count(page) != 1)) in igb_can_reuse_rx_page()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c | 1888 if (unlikely(page_count(page) != 1)) in ixgbe_add_rx_frag()
|