Searched refs:page_count (Results 1 - 122 of 122) sorted by relevance

/linux-4.4.14/drivers/media/pci/ivtv/
ivtv-udma.c
33 dma_page->page_count = dma_page->last - dma_page->first + 1; ivtv_udma_get_page_info()
34 if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset; ivtv_udma_get_page_info()
48 for (i = 0; i < dma_page->page_count; i++) { ivtv_udma_fill_sg_list()
49 unsigned int len = (i == dma_page->page_count - 1) ? ivtv_udma_fill_sg_list()
112 if (dma->SG_length || dma->page_count) { ivtv_udma_setup()
113 IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n", ivtv_udma_setup()
114 dma->SG_length, dma->page_count); ivtv_udma_setup()
120 if (user_dma.page_count <= 0) { ivtv_udma_setup()
121 IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n", ivtv_udma_setup()
122 user_dma.page_count, size_in_bytes, user_dma.offset); ivtv_udma_setup()
128 user_dma.uaddr, user_dma.page_count, 0, 1, dma->map); ivtv_udma_setup()
130 if (user_dma.page_count != err) { ivtv_udma_setup()
132 err, user_dma.page_count); ivtv_udma_setup()
141 dma->page_count = user_dma.page_count; ivtv_udma_setup()
145 for (i = 0; i < dma->page_count; i++) { ivtv_udma_setup()
148 dma->page_count = 0; ivtv_udma_setup()
153 dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE); ivtv_udma_setup()
162 return dma->page_count; ivtv_udma_setup()
173 if (dma->page_count == 0) ivtv_udma_unmap()
178 pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE); ivtv_udma_unmap()
185 for (i = 0; i < dma->page_count; i++) { ivtv_udma_unmap()
188 dma->page_count = 0; ivtv_udma_unmap()
203 pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE); ivtv_udma_free()
ivtv-yuv.c
67 if (dma->SG_length || dma->page_count) { ivtv_yuv_prep_user_dma()
69 ("prep_user_dma: SG_length %d page_count %d still full?\n", ivtv_yuv_prep_user_dma()
70 dma->SG_length, dma->page_count); ivtv_yuv_prep_user_dma()
79 y_dma.uaddr, y_dma.page_count, 0, 1, ivtv_yuv_prep_user_dma()
82 if (y_pages == y_dma.page_count) { ivtv_yuv_prep_user_dma()
84 uv_dma.uaddr, uv_dma.page_count, 0, 1, ivtv_yuv_prep_user_dma()
88 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { ivtv_yuv_prep_user_dma()
91 if (y_pages == y_dma.page_count) { ivtv_yuv_prep_user_dma()
94 "expecting %d\n", uv_pages, uv_dma.page_count); ivtv_yuv_prep_user_dma()
106 "expecting %d\n", y_pages, y_dma.page_count); ivtv_yuv_prep_user_dma()
123 dma->page_count = y_pages + uv_pages; ivtv_yuv_prep_user_dma()
128 for (i = 0; i < dma->page_count; i++) { ivtv_yuv_prep_user_dma()
131 dma->page_count = 0; ivtv_yuv_prep_user_dma()
134 dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE); ivtv_yuv_prep_user_dma()
ivtv-driver.h
285 int page_count; member in struct:ivtv_user_dma
305 int page_count; member in struct:ivtv_dma_page_info
ivtvfb.c
298 size_in_bytes, itv->udma.page_count); ivtvfb_prep_dec_dma_to_device()
305 size_in_bytes, itv->udma.page_count); ivtvfb_prep_dec_dma_to_device()
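Note: the ivtv entries above derive page_count from the first and last page indices spanned by a user buffer (ivtv-udma.c line 33) and then use it to bound the SG-list fill and the pci_map_sg() call. A minimal userspace sketch of that span arithmetic, assuming 4 KiB pages; the function and demo values are illustrative, not taken from the driver:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

/* Pages spanned by a user buffer: page of last byte - page of first byte + 1,
 * the same arithmetic as dma_page->last - dma_page->first + 1 above. */
static unsigned long span_page_count(unsigned long uaddr, unsigned long size)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + size - 1) >> PAGE_SHIFT;

	return last - first + 1;
}

int main(void)
{
	/* 100 bytes straddling one page boundary span two pages */
	printf("%lu\n", span_page_count(0xfff0, 100)); /* prints 2 */
	return 0;
}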
/linux-4.4.14/drivers/firewire/
core-iso.c
42 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) fw_iso_buffer_alloc() argument
46 buffer->page_count = 0; fw_iso_buffer_alloc()
48 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), fw_iso_buffer_alloc()
53 for (i = 0; i < page_count; i++) { fw_iso_buffer_alloc()
58 buffer->page_count = i; fw_iso_buffer_alloc()
59 if (i < page_count) { fw_iso_buffer_alloc()
75 for (i = 0; i < buffer->page_count; i++) { fw_iso_buffer_map_dma()
84 if (i < buffer->page_count) fw_iso_buffer_map_dma()
91 int page_count, enum dma_data_direction direction) fw_iso_buffer_init()
95 ret = fw_iso_buffer_alloc(buffer, page_count); fw_iso_buffer_init()
114 for (i = 0; i < buffer->page_count; i++) { fw_iso_buffer_map_vma()
136 for (i = 0; i < buffer->page_count; i++) fw_iso_buffer_destroy()
141 buffer->page_count = 0; fw_iso_buffer_destroy()
153 for (i = 0; i < buffer->page_count; i++) { fw_iso_buffer_lookup()
90 fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, int page_count, enum dma_data_direction direction) fw_iso_buffer_init() argument
core.h
157 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
core-cdev.c
1086 buffer_end = client->buffer.page_count << PAGE_SHIFT; ioctl_queue_iso()
1675 int page_count, ret; fw_device_op_mmap() local
1692 page_count = size >> PAGE_SHIFT; fw_device_op_mmap()
1696 ret = fw_iso_buffer_alloc(&client->buffer, page_count); fw_device_op_mmap()
ohci.c
3402 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) queue_iso_buffer_fill()
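Note: core-iso.c shows a partial-failure idiom worth calling out: fw_iso_buffer_alloc() records how many pages actually succeeded (buffer->page_count = i), so the teardown path frees exactly that many. A self-contained userspace sketch of the same shape; the struct and names are illustrative, not the kernel's:

#include <stdlib.h>

struct buf {
	void **pages;
	int page_count; /* entries successfully allocated so far */
};

static int buf_alloc(struct buf *buffer, int page_count)
{
	int i;

	buffer->page_count = 0;
	buffer->pages = malloc(page_count * sizeof(buffer->pages[0]));
	if (!buffer->pages)
		return -1;
	for (i = 0; i < page_count; i++) {
		buffer->pages[i] = malloc(4096);
		if (!buffer->pages[i])
			break;
	}
	buffer->page_count = i; /* may be short of page_count on failure */
	return i < page_count ? -1 : 0;
}

static void buf_destroy(struct buf *buffer)
{
	/* frees only what buf_alloc() managed to get */
	for (int i = 0; i < buffer->page_count; i++)
		free(buffer->pages[i]);
	free(buffer->pages);
	buffer->page_count = 0;
}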
/linux-4.4.14/drivers/char/agp/
generic.c
186 if (curr->page_count != 0) { agp_free_memory()
191 for (i = 0; i < curr->page_count; i++) { agp_free_memory()
196 for (i = 0; i < curr->page_count; i++) { agp_free_memory()
214 * @page_count: size_t argument of the number of pages
223 size_t page_count, u32 type) agp_allocate_memory()
234 if ((cur_memory + page_count > bridge->max_memory_agp) || agp_allocate_memory()
235 (cur_memory + page_count < page_count)) agp_allocate_memory()
239 new = agp_generic_alloc_user(page_count, type); agp_allocate_memory()
246 new = bridge->driver->alloc_by_type(page_count, type); agp_allocate_memory()
252 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; agp_allocate_memory()
260 if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { agp_allocate_memory()
268 for (i = 0; i < page_count; i++) { agp_allocate_memory()
276 new->page_count++; agp_allocate_memory()
1046 if (mem->page_count == 0) agp_generic_insert_memory()
1084 if (((pg_start + mem->page_count) > num_entries) || agp_generic_insert_memory()
1085 ((pg_start + mem->page_count) < pg_start)) agp_generic_insert_memory()
1090 while (j < (pg_start + mem->page_count)) { agp_generic_insert_memory()
1101 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { agp_generic_insert_memory()
1125 if (mem->page_count == 0) agp_generic_remove_memory()
1132 if (((pg_start + mem->page_count) > num_entries) || agp_generic_remove_memory()
1133 ((pg_start + mem->page_count) < pg_start)) agp_generic_remove_memory()
1143 for (i = pg_start; i < (mem->page_count + pg_start); i++) { agp_generic_remove_memory()
1153 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) agp_generic_alloc_by_type() argument
1167 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) agp_generic_alloc_user() argument
1173 pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; agp_generic_alloc_user()
1174 new = agp_create_user_memory(page_count); agp_generic_alloc_user()
1178 for (i = 0; i < page_count; i++) agp_generic_alloc_user()
1180 new->page_count = 0; agp_generic_alloc_user()
1213 mem->page_count++; agp_generic_alloc_pages()
1250 set_pages_array_wb(mem->pages, mem->page_count); agp_generic_destroy_pages()
1253 for (i = 0; i < mem->page_count; i++) { agp_generic_destroy_pages()
222 agp_allocate_memory(struct agp_bridge_data *bridge, size_t page_count, u32 type) agp_allocate_memory() argument
ati-agp.c
280 if (mem->page_count == 0) ati_insert_memory()
283 if ((pg_start + mem->page_count) > num_entries) ati_insert_memory()
287 while (j < (pg_start + mem->page_count)) { ati_insert_memory()
301 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { ati_insert_memory()
326 if (mem->page_count == 0) ati_remove_memory()
329 for (i = pg_start; i < (mem->page_count + pg_start); i++) { ati_remove_memory()
nvidia-agp.c
210 if (mem->page_count == 0) nvidia_insert_memory()
213 if ((pg_start + mem->page_count) > nvidia_insert_memory()
217 for (j = pg_start; j < (pg_start + mem->page_count); j++) { nvidia_insert_memory()
226 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { nvidia_insert_memory()
250 if (mem->page_count == 0) nvidia_remove_memory()
253 for (i = pg_start; i < (mem->page_count + pg_start); i++) nvidia_remove_memory()
sgi-agp.c
173 if ((pg_start + mem->page_count) > num_entries) sgi_tioca_insert_memory()
178 while (j < (pg_start + mem->page_count)) { sgi_tioca_insert_memory()
189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { sgi_tioca_insert_memory()
217 for (i = pg_start; i < (mem->page_count + pg_start); i++) { sgi_tioca_remove_memory()
uninorth-agp.c
165 if (mem->page_count == 0) uninorth_insert_memory()
171 if ((pg_start + mem->page_count) > num_entries) uninorth_insert_memory()
175 for (i = 0; i < mem->page_count; ++i) { uninorth_insert_memory()
184 for (i = 0; i < mem->page_count; i++) { uninorth_insert_memory()
214 if (mem->page_count == 0) uninorth_remove_memory()
218 for (i = 0; i < mem->page_count; ++i) { uninorth_remove_memory()
i460-agp.c
311 if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { i460_insert_memory_small_io_page()
317 while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { i460_insert_memory_small_io_page()
327 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { i460_insert_memory_small_io_page()
346 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) i460_remove_memory_small_io_page()
415 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; i460_insert_memory_large_io_page()
417 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; i460_insert_memory_large_io_page()
473 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; i460_remove_memory_large_io_page()
475 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; i460_remove_memory_large_io_page()
efficeon-agp.c
240 int i, count = mem->page_count, num_entries; efficeon_insert_memory()
248 if ((pg_start + mem->page_count) > num_entries) efficeon_insert_memory()
289 int i, count = mem->page_count, num_entries; efficeon_remove_memory()
295 if ((pg_start + mem->page_count) > num_entries) efficeon_remove_memory()
sworks-agp.c
331 if ((pg_start + mem->page_count) > num_entries) { serverworks_insert_memory()
336 while (j < (pg_start + mem->page_count)) { serverworks_insert_memory()
349 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { serverworks_insert_memory()
374 for (i = pg_start; i < (mem->page_count + pg_start); i++) { serverworks_remove_memory()
agp.h
199 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
203 struct agp_memory *memory, size_t page_count);
220 struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
ali-agp.c
128 int i, page_count; m1541_cache_flush() local
133 page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order; m1541_cache_flush()
134 for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { m1541_cache_flush()
intel-gtt.c
126 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); intel_gtt_unmap_memory()
216 if ((pg_start + mem->page_count) i810_insert_dcache_entries()
223 for (i = pg_start; i < (pg_start + mem->page_count); i++) { i810_insert_dcache_entries()
268 new->page_count = pg_count; alloc_agpphysmem_i8xx()
279 if (curr->page_count == 4) intel_i810_free_by_type()
896 if (mem->page_count == 0) intel_fake_agp_insert_entries()
899 if (pg_start + mem->page_count > intel_private.gtt_total_entries) intel_fake_agp_insert_entries()
914 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); intel_fake_agp_insert_entries()
922 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, intel_fake_agp_insert_entries()
949 if (mem->page_count == 0) intel_fake_agp_remove_entries()
952 intel_gtt_clear_range(pg_start, mem->page_count); intel_fake_agp_remove_entries()
977 new->page_count = pg_count; intel_fake_agp_alloc_by_type()
parisc-agp.c
138 io_pg_count = info->io_pages_per_kpage * mem->page_count; parisc_agp_insert_memory()
155 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { parisc_agp_insert_memory()
185 io_pg_count = info->io_pages_per_kpage * mem->page_count; parisc_agp_remove_memory()
amd-k7-agp.c
295 if ((pg_start + mem->page_count) > num_entries) amd_insert_memory()
299 while (j < (pg_start + mem->page_count)) { amd_insert_memory()
312 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { amd_insert_memory()
335 for (i = pg_start; i < (mem->page_count + pg_start); i++) { amd_remove_memory()
hp-agp.c
345 io_pg_count = hp->io_pages_per_kpage * mem->page_count; hp_zx1_insert_memory()
363 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { hp_zx1_insert_memory()
390 io_pg_count = hp->io_pages_per_kpage * mem->page_count; hp_zx1_remove_memory()
alpha-agp.c
98 if ((pg_start + mem->page_count) > num_entries) alpha_core_agp_insert_memory()
amd64-agp.c
63 if (((unsigned long)pg_start + mem->page_count) > num_entries) amd64_insert_memory()
69 while (j < (pg_start + mem->page_count)) { amd64_insert_memory()
80 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { amd64_insert_memory()
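Note: nearly every insert/remove routine in this directory repeats one guard, e.g. agp_generic_insert_memory() lines 1084-1085: reject the range when it runs past the GATT or when the unsigned sum wraps. For unsigned types, a + b < a is the standard overflow test. Illustrative standalone form:

#include <stdbool.h>
#include <stddef.h>

static bool agp_range_ok(size_t pg_start, size_t page_count,
			 size_t num_entries)
{
	if (pg_start + page_count > num_entries) /* past end of table */
		return false;
	if (pg_start + page_count < pg_start)    /* unsigned wraparound */
		return false;
	return true;
}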
/linux-4.4.14/drivers/target/
target_core_rd.c
81 u32 i, j, page_count = 0, sg_per_table; rd_release_sgl_table() local
91 page_count++; rd_release_sgl_table()
98 return page_count; rd_release_sgl_table()
103 u32 page_count; rd_release_device_space() local
108 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, rd_release_device_space()
113 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_release_device_space()
114 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); rd_release_device_space()
236 u32 page_count; rd_release_prot_space() local
241 page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, rd_release_prot_space()
246 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_release_prot_space()
247 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); rd_release_prot_space()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
lloop.c
194 u32 page_count = 0; do_bio_lustrebacked() local
224 pages[page_count] = bvec.bv_page; bio_for_each_segment()
225 offsets[page_count] = offset; bio_for_each_segment()
226 page_count++; bio_for_each_segment()
229 LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
234 page_count);
236 pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
237 pvec->ldp_nr = page_count;
252 * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
295 unsigned int page_count = 0; loop_get_bio() local
314 page_count, (*bio)->bi_vcnt); loop_get_bio()
315 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) loop_get_bio()
318 page_count += (*bio)->bi_vcnt; loop_get_bio()
rw26.c
139 if (page_count(vmpage) > 3) ll_releasepage()
239 int page_count = pv->ldp_nr; ll_direct_rw_pages() local
247 for (i = 0; i < page_count; i++) { ll_direct_rw_pages()
337 struct page **pages, int page_count) ll_direct_IO_26_seg()
340 .ldp_nr = page_count, ll_direct_IO_26_seg()
333 ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, int rw, struct inode *inode, struct address_space *mapping, size_t size, loff_t file_offset, struct page **pages, int page_count) ll_direct_IO_26_seg() argument
vvp_dev.c
417 page_count(vmpage)); vvp_pgcache_page_show()
vvp_page.c
390 (long)vmpage->flags, page_count(vmpage), vvp_page_print()
vvp_io.c
617 (long)vmf->page->flags, page_count(vmf->page), vvp_io_kernel_fault()
/linux-4.4.14/drivers/iommu/
tegra-gart.c
58 u32 page_count; /* total remappable size */ member in struct:gart_device
90 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
157 gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1; gart_iova_range_valid()
240 gart->page_count * GART_PAGE_SIZE - 1; gart_iommu_domain_alloc()
413 gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT); tegra_gart_probe()
415 gart->savedata = vmalloc(sizeof(u32) * gart->page_count); tegra_gart_probe()
/linux-4.4.14/drivers/gpu/drm/nouveau/
nouveau_bo.c
727 u32 page_count = new_mem->num_pages; nvc0_bo_move_copy() local
730 page_count = new_mem->num_pages; nvc0_bo_move_copy()
731 while (page_count) { nvc0_bo_move_copy()
732 int line_count = (page_count > 8191) ? 8191 : page_count; nvc0_bo_move_copy()
750 page_count -= line_count; nvc0_bo_move_copy()
765 u32 page_count = new_mem->num_pages; nvc0_bo_move_m2mf() local
768 page_count = new_mem->num_pages; nvc0_bo_move_m2mf()
769 while (page_count) { nvc0_bo_move_m2mf()
770 int line_count = (page_count > 2047) ? 2047 : page_count; nvc0_bo_move_m2mf()
789 page_count -= line_count; nvc0_bo_move_m2mf()
804 u32 page_count = new_mem->num_pages; nva3_bo_move_copy() local
807 page_count = new_mem->num_pages; nva3_bo_move_copy()
808 while (page_count) { nva3_bo_move_copy()
809 int line_count = (page_count > 8191) ? 8191 : page_count; nva3_bo_move_copy()
827 page_count -= line_count; nva3_bo_move_copy()
989 u32 page_count = new_mem->num_pages; nv04_bo_move_m2mf() local
1000 page_count = new_mem->num_pages; nv04_bo_move_m2mf()
1001 while (page_count) { nv04_bo_move_m2mf()
1002 int line_count = (page_count > 2047) ? 2047 : page_count; nv04_bo_move_m2mf()
1021 page_count -= line_count; nv04_bo_move_m2mf()
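Note: the four nouveau move routines share one loop shape: the copy of page_count pages is issued in bursts capped by a per-engine limit (8191 lines for the nvc0/nva3 copy paths, 2047 for the m2mf paths). Sketch of that chunking, with MAX_LINES and submit_copy() as invented stand-ins:

#define MAX_LINES 8191 /* assumed per-submission engine limit */

static void submit_copy(int line_count)
{
	(void)line_count; /* stand-in: queue one burst to the engine */
}

static void copy_in_chunks(unsigned int page_count)
{
	while (page_count) {
		int line_count = (page_count > MAX_LINES) ? MAX_LINES
							  : page_count;

		submit_copy(line_count);
		page_count -= line_count;
	}
}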
/linux-4.4.14/drivers/staging/lustre/lustre/osc/
osc_cache.c
178 int page_count; osc_extent_sanity_check0() local
276 page_count = 0; osc_extent_sanity_check0()
279 ++page_count; osc_extent_sanity_check0()
285 if (page_count != ext->oe_nr_pages) { osc_extent_sanity_check0()
1070 int page_count = 0; osc_extent_make_ready() local
1082 ++page_count; osc_extent_make_ready()
1105 LASSERT(page_count == ext->oe_nr_pages); osc_extent_make_ready()
1880 int page_count = 0; get_write_extents() local
1888 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, get_write_extents()
1890 return page_count; get_write_extents()
1893 if (page_count == max_pages) get_write_extents()
1894 return page_count; get_write_extents()
1899 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, get_write_extents()
1901 return page_count; get_write_extents()
1913 &page_count, &max_pages)) get_write_extents()
1914 return page_count; get_write_extents()
1917 if (page_count == max_pages) get_write_extents()
1918 return page_count; get_write_extents()
1929 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, get_write_extents()
1931 return page_count; get_write_extents()
1935 return page_count; get_write_extents()
1946 u32 page_count = 0; osc_send_write_rpc() local
1952 page_count = get_write_extents(osc, &rpclist); osc_send_write_rpc()
1953 LASSERT(equi(page_count == 0, list_empty(&rpclist))); osc_send_write_rpc()
1958 osc_update_pending(osc, OBD_BRW_WRITE, -page_count); osc_send_write_rpc()
1991 LASSERT(page_count > 0); osc_send_write_rpc()
2017 int page_count = 0; osc_send_read_rpc() local
2025 if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count, osc_send_read_rpc()
2031 LASSERT(page_count <= max_pages); osc_send_read_rpc()
2033 osc_update_pending(osc, OBD_BRW_READ, -page_count); osc_send_read_rpc()
2038 LASSERT(page_count > 0); osc_send_read_rpc()
2565 int page_count = 0; osc_queue_sync_pages() local
2577 ++page_count; list_for_each_entry()
2578 mppr <<= (page_count > mppr); list_for_each_entry()
2596 ext->oe_nr_pages = page_count;
2605 osc_update_pending(obj, OBD_BRW_WRITE, page_count);
2608 osc_update_pending(obj, OBD_BRW_READ, page_count);
osc_request.c
1063 static void handle_short_read(int nob_read, u32 page_count, handle_short_read() argument
1071 LASSERT(page_count > 0); handle_short_read()
1079 page_count--; handle_short_read()
1085 page_count--; handle_short_read()
1090 while (page_count-- > 0) { handle_short_read()
1100 u32 page_count, struct brw_page **pga) check_write_rcs()
1191 (long)pga[i]->pg->flags, page_count(pga[i]->pg), osc_checksum_bulk()
1216 struct lov_stripe_md *lsm, u32 page_count, osc_brw_prep_request()
1249 for (niocount = i = 1; i < page_count; i++) { osc_brw_prep_request()
1271 desc = ptlrpc_prep_bulk_imp(req, page_count, osc_brw_prep_request()
1297 LASSERT(page_count > 0); osc_brw_prep_request()
1299 for (requested_nob = i = 0; i < page_count; i++, niobuf++) { osc_brw_prep_request()
1305 LASSERTF(page_count == 1 || osc_brw_prep_request()
1307 ergo(i > 0 && i < page_count - 1, osc_brw_prep_request()
1309 ergo(i == page_count - 1, poff == 0)), osc_brw_prep_request()
1311 i, page_count, pg, pg->off, pg->count); osc_brw_prep_request()
1314 i, page_count, osc_brw_prep_request()
1367 page_count, pga, osc_brw_prep_request()
1400 aa->aa_page_count = page_count; osc_brw_prep_request()
1416 u32 page_count, struct brw_page **pga, check_write_checksum()
1430 new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE, check_write_checksum()
1452 pga[page_count-1]->off + pga[page_count-1]->count - 1); check_write_checksum()
1835 int page_count = 0; osc_build_rpc() local
1848 ++page_count; list_for_each_entry()
1872 pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
1923 sort_brw_pages(pga, page_count);
1924 rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
1976 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
1982 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
1990 page_count, aa, cli->cl_r_in_flight,
1098 check_write_rcs(struct ptlrpc_request *req, int requested_nob, int niocount, u32 page_count, struct brw_page **pga) check_write_rcs() argument
1214 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa, struct lov_stripe_md *lsm, u32 page_count, struct brw_page **pga, struct ptlrpc_request **reqp, int reserve, int resend) osc_brw_prep_request() argument
1414 check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, __u32 client_cksum, __u32 server_cksum, int nob, u32 page_count, struct brw_page **pga, cksum_type_t client_cksum_type) check_write_checksum() argument
/linux-4.4.14/fs/btrfs/
scrub.c
106 int page_count; member in struct:scrub_bio
113 int page_count; member in struct:scrub_block
431 for (i = 0; i < sbio->page_count; i++) { scrub_free_ctx()
482 sbio->page_count = 0; scrub_setup_ctx()
617 WARN_ON(sblock->page_count < 1); scrub_print_warning()
899 BUG_ON(sblock_to_check->page_count < 1); scrub_handle_errored_block()
912 length = sblock_to_check->page_count * PAGE_SIZE; scrub_handle_errored_block()
1085 sblocks_for_recheck[mirror_index].page_count > 0; scrub_handle_errored_block()
1139 for (page_num = 0; page_num < sblock_bad->page_count; scrub_handle_errored_block()
1152 sblocks_for_recheck[mirror_index].page_count > 0; scrub_handle_errored_block()
1242 for (page_index = 0; page_index < sblock->page_count; scrub_handle_errored_block()
1305 u64 length = original_sblock->page_count * PAGE_SIZE; scrub_setup_recheck_block()
1399 BUG_ON(page_index >= original_sblock->page_count); scrub_setup_recheck_block()
1405 sblock->page_count++; scrub_setup_recheck_block()
1482 for (page_num = 0; page_num < sblock->page_count; page_num++) { scrub_recheck_block()
1549 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { scrub_repair_block_from_good_copy()
1621 for (page_num = 0; page_num < sblock->page_count; page_num++) { scrub_write_block_to_dev_replace()
1665 wr_ctx->wr_curr_bio->page_count = 0; scrub_add_page_to_wr_bio()
1668 if (sbio->page_count == 0) { scrub_add_page_to_wr_bio()
1689 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != scrub_add_page_to_wr_bio()
1691 sbio->logical + sbio->page_count * PAGE_SIZE != scrub_add_page_to_wr_bio()
1699 if (sbio->page_count < 1) { scrub_add_page_to_wr_bio()
1709 sbio->pagev[sbio->page_count] = spage; scrub_add_page_to_wr_bio()
1711 sbio->page_count++; scrub_add_page_to_wr_bio()
1712 if (sbio->page_count == wr_ctx->pages_per_wr_bio) scrub_add_page_to_wr_bio()
1757 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); scrub_wr_bio_end_io_worker()
1762 for (i = 0; i < sbio->page_count; i++) { scrub_wr_bio_end_io_worker()
1771 for (i = 0; i < sbio->page_count; i++) scrub_wr_bio_end_io_worker()
1796 WARN_ON(sblock->page_count < 1); scrub_checksum()
1824 BUG_ON(sblock->page_count < 1); scrub_checksum_data()
1843 BUG_ON(index >= sblock->page_count); scrub_checksum_data()
1872 BUG_ON(sblock->page_count < 1); scrub_checksum_tree_block()
1911 BUG_ON(index >= sblock->page_count); scrub_checksum_tree_block()
1942 BUG_ON(sblock->page_count < 1); scrub_checksum_super()
1970 BUG_ON(index >= sblock->page_count); scrub_checksum_super()
2015 for (i = 0; i < sblock->page_count; i++) scrub_block_put()
2065 sctx->bios[sctx->curr]->page_count = 0; scrub_add_page_to_rd_bio()
2073 if (sbio->page_count == 0) { scrub_add_page_to_rd_bio()
2092 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != scrub_add_page_to_rd_bio()
2094 sbio->logical + sbio->page_count * PAGE_SIZE != scrub_add_page_to_rd_bio()
2101 sbio->pagev[sbio->page_count] = spage; scrub_add_page_to_rd_bio()
2104 if (sbio->page_count < 1) { scrub_add_page_to_rd_bio()
2115 sbio->page_count++; scrub_add_page_to_rd_bio()
2116 if (sbio->page_count == sctx->pages_per_rd_bio) scrub_add_page_to_rd_bio()
2180 u64 length = sblock->page_count * PAGE_SIZE; scrub_missing_raid56_pages()
2216 for (i = 0; i < sblock->page_count; i++) { scrub_missing_raid56_pages()
2290 sblock->page_count++; scrub_pages()
2300 WARN_ON(sblock->page_count == 0); scrub_pages()
2308 for (index = 0; index < sblock->page_count; index++) { scrub_pages()
2345 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); scrub_bio_end_io_worker()
2347 for (i = 0; i < sbio->page_count; i++) { scrub_bio_end_io_worker()
2356 for (i = 0; i < sbio->page_count; i++) { scrub_bio_end_io_worker()
2441 u64 end = sblock->pagev[sblock->page_count - 1]->logical + scrub_block_complete()
2595 sblock->page_count++; scrub_pages_for_parity()
2604 WARN_ON(sblock->page_count == 0); scrub_pages_for_parity()
2605 for (index = 0; index < sblock->page_count; index++) { scrub_pages_for_parity()
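Note: the scrub_add_page_to_rd_bio()/..._wr_bio() hits above (lines 1689-1691 and 2092-2094) grow an in-flight bio only while the next page sits exactly page_count pages past the bio's start, both physically and logically; otherwise the bio is submitted and a new one begun. The physical half of that contiguity test, in illustrative form:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

struct sbio {
	uint64_t physical; /* disk offset the bio starts at */
	int page_count;    /* pages queued into it so far */
};

static bool page_extends_bio(const struct sbio *sbio, uint64_t spage_physical)
{
	return sbio->physical + sbio->page_count * PAGE_SIZE == spage_physical;
}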
/linux-4.4.14/fs/pstore/
ram_core.c
388 unsigned int page_count; persistent_ram_vmap() local
394 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); persistent_ram_vmap()
401 pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); persistent_ram_vmap()
404 __func__, page_count); persistent_ram_vmap()
408 for (i = 0; i < page_count; i++) { persistent_ram_vmap()
412 vaddr = vmap(pages, page_count, VM_MAP, prot); persistent_ram_vmap()
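Note: persistent_ram_vmap() (line 394) rounds up over size plus the sub-page offset of the start address, so a region that begins mid-page still counts its first, partially used page. Userspace check of just that computation; the example addresses are made up:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define offset_in_page(addr) ((unsigned long)(addr) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x1000800; /* begins 0x800 bytes into a page */
	unsigned long size = PAGE_SIZE;  /* one page of data... */

	/* ...that straddles two pages, so two page pointers are needed */
	printf("%lu\n", DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE));
	return 0; /* prints 2 */
}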
/linux-4.4.14/drivers/gpu/drm/ttm/
ttm_agp_backend.c
63 mem->page_count = 0; ttm_agp_bind()
70 mem->pages[mem->page_count++] = page; ttm_agp_bind()
ttm_page_alloc.c
687 if (page_count(pages[i]) != 1) ttm_put_pages()
699 if (page_count(pages[i]) != 1) ttm_put_pages()
/linux-4.4.14/drivers/gpu/drm/udl/
udl_gem.c
160 int page_count = obj->base.size / PAGE_SIZE; udl_gem_vmap() local
174 obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL); udl_gem_vmap()
udl_dmabuf.c
82 int page_count; udl_map_dma_buf() local
100 page_count = obj->base.size / PAGE_SIZE; udl_map_dma_buf()
101 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); udl_map_dma_buf()
/linux-4.4.14/fs/xfs/
xfs_buf.c
217 int page_count) _xfs_buf_get_pages()
221 bp->b_page_count = page_count; _xfs_buf_get_pages()
222 if (page_count <= XB_PAGES) { _xfs_buf_get_pages()
226 page_count, KM_NOFS); _xfs_buf_get_pages()
230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); _xfs_buf_get_pages()
293 unsigned short page_count, i; xfs_buf_allocate_memory() local
329 page_count = end - start; xfs_buf_allocate_memory()
330 error = _xfs_buf_get_pages(bp, page_count); xfs_buf_allocate_memory()
776 int page_count; xfs_buf_associate_memory() local
781 page_count = buflen >> PAGE_SHIFT; xfs_buf_associate_memory()
790 rval = _xfs_buf_get_pages(bp, page_count); xfs_buf_associate_memory()
813 unsigned long page_count; xfs_buf_get_uncached() local
822 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; xfs_buf_get_uncached()
823 error = _xfs_buf_get_pages(bp, page_count); xfs_buf_get_uncached()
827 for (i = 0; i < page_count; i++) { xfs_buf_get_uncached()
215 _xfs_buf_get_pages( xfs_buf_t *bp, int page_count) _xfs_buf_get_pages() argument
/linux-4.4.14/drivers/block/
rbd.c
267 u32 page_count; member in struct:rbd_obj_request::__anon3652::__anon3653
2091 obj_request->page_count); rbd_obj_request_destroy()
2314 obj_request->page_count = 0; rbd_img_obj_end_request()
2513 unsigned int page_count; rbd_img_request_fill() local
2516 page_count = (u32)calc_pages_for(offset, length); rbd_img_request_fill()
2517 obj_request->page_count = page_count; rbd_img_request_fill()
2519 page_count--; /* more on last page */ rbd_img_request_fill()
2520 pages += page_count; rbd_img_request_fill()
2556 u32 page_count; rbd_osd_copyup_callback() local
2572 page_count = obj_request->copyup_page_count; rbd_osd_copyup_callback()
2573 rbd_assert(page_count); rbd_osd_copyup_callback()
2575 ceph_release_page_vector(pages, page_count); rbd_osd_copyup_callback()
2598 u32 page_count; rbd_img_obj_parent_read_full_callback() local
2609 page_count = img_request->copyup_page_count; rbd_img_obj_parent_read_full_callback()
2610 rbd_assert(page_count); rbd_img_obj_parent_read_full_callback()
2633 ceph_release_page_vector(pages, page_count); rbd_img_obj_parent_read_full_callback()
2656 orig_request->copyup_page_count = page_count; rbd_img_obj_parent_read_full_callback()
2706 u32 page_count; rbd_img_obj_parent_read_full() local
2738 page_count = (u32)calc_pages_for(0, length); rbd_img_obj_parent_read_full()
2739 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); rbd_img_obj_parent_read_full()
2756 parent_request->copyup_page_count = page_count; rbd_img_obj_parent_read_full()
2769 ceph_release_page_vector(pages, page_count); rbd_img_obj_parent_read_full()
2852 u32 page_count; rbd_img_obj_exists_submit() local
2865 page_count = (u32)calc_pages_for(0, size); rbd_img_obj_exists_submit()
2866 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); rbd_img_obj_exists_submit()
2879 stat_request->page_count = page_count; rbd_img_obj_exists_submit()
3286 u32 page_count; rbd_obj_method_sync() local
3296 page_count = (u32)calc_pages_for(0, inbound_size); rbd_obj_method_sync()
3297 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); rbd_obj_method_sync()
3308 obj_request->page_count = page_count; rbd_obj_method_sync()
3352 ceph_release_page_vector(pages, page_count); rbd_obj_method_sync()
3508 u32 page_count; rbd_obj_read_sync() local
3512 page_count = (u32) calc_pages_for(offset, length); rbd_obj_read_sync()
3513 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); rbd_obj_read_sync()
3524 obj_request->page_count = page_count; rbd_obj_read_sync()
3560 ceph_release_page_vector(pages, page_count); rbd_obj_read_sync()
ps3vram.c
61 unsigned int page_count; member in struct:ps3vram_cache
363 for (i = 0; i < cache->page_count; i++) { ps3vram_cache_flush()
383 for (i = 0; i < cache->page_count; i++) { ps3vram_cache_match()
394 i = (jiffies + (counter++)) % cache->page_count; ps3vram_cache_match()
408 priv->cache.page_count = CACHE_PAGE_COUNT; ps3vram_cache_init()
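Note: rbd.c sizes every page vector with calc_pages_for(offset, length). The libceph helper boils down to "index of the page after the last byte, minus index of the first page"; the version below is re-derived from that description rather than quoted, so treat it as a sketch:

#define PAGE_SHIFT 12
typedef unsigned long long u64;

static u64 calc_pages_for(u64 off, u64 len)
{
	/* (exclusive) last page index minus first page index */
	return ((off + len + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT) -
	       (off >> PAGE_SHIFT);
}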
/linux-4.4.14/drivers/gpu/drm/
drm_bufs.c
742 dma->page_count += byte_count >> PAGE_SHIFT; drm_legacy_addbufs_agp()
777 int page_count; drm_legacy_addbufs_pci() local
845 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * drm_legacy_addbufs_pci()
855 dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); drm_legacy_addbufs_pci()
857 dma->page_count + (count << page_order)); drm_legacy_addbufs_pci()
862 page_count = 0; drm_legacy_addbufs_pci()
881 dma->page_count + page_count, drm_legacy_addbufs_pci()
883 temp_pagelist[dma->page_count + page_count++] drm_legacy_addbufs_pci()
942 if (dma->page_count) { drm_legacy_addbufs_pci()
949 dma->page_count += entry->seg_count << page_order; drm_legacy_addbufs_pci()
1105 dma->page_count += byte_count >> PAGE_SHIFT; drm_legacy_addbufs_sg()
drm_vm.c
165 page_count(page)); drm_do_vm_fault()
490 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { drm_mmap_dma()
drm_agpsupport.c
490 mem->page_count = num_pages; drm_agp_bind_pages()
/linux-4.4.14/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
1305 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1863 u32 page_count, bna_rxq_qpt_setup()
1877 rxq->qpt.page_count = page_count; bna_rxq_qpt_setup()
1886 for (i = 0; i < rxq->qpt.page_count; i++) { bna_rxq_qpt_setup()
1901 u32 page_count, bna_rxp_cqpt_setup()
1915 rxp->cq.qpt.page_count = page_count; bna_rxp_cqpt_setup()
1924 for (i = 0; i < rxp->cq.qpt.page_count; i++) { bna_rxp_cqpt_setup()
2286 u32 page_count; bna_rx_create() local
2306 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / bna_rx_create()
2472 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, bna_rx_create()
3169 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, bna_txq_qpt_setup() argument
3182 txq->qpt.page_count = page_count; bna_txq_qpt_setup()
3191 for (i = 0; i < page_count; i++) { bna_txq_qpt_setup()
3327 u32 page_count; bna_tx_res_req() local
3338 page_count = q_size >> PAGE_SHIFT; bna_tx_res_req()
3343 mem_info->len = page_count * sizeof(struct bna_dma_addr); bna_tx_res_req()
3349 mem_info->len = page_count * sizeof(void *); bna_tx_res_req()
3355 mem_info->len = PAGE_SIZE * page_count; bna_tx_res_req()
3380 int page_count; bna_tx_create() local
3384 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / bna_tx_create()
3487 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, bna_tx_create()
1861 bna_rxq_qpt_setup(struct bna_rxq *rxq, struct bna_rxp *rxp, u32 page_count, u32 page_size, struct bna_mem_descr *qpt_mem, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) bna_rxq_qpt_setup() argument
1900 bna_rxp_cqpt_setup(struct bna_rxp *rxp, u32 page_count, u32 page_size, struct bna_mem_descr *qpt_mem, struct bna_mem_descr *swqpt_mem, struct bna_mem_descr *page_mem) bna_rxp_cqpt_setup() argument
bna_types.h
308 u32 page_count; member in struct:bna_qpt
/linux-4.4.14/include/linux/
agp_backend.h
74 size_t page_count; member in struct:agp_memory
pagemap.h
116 * of the allocator must be considered unstable. page_count may return higher
167 VM_BUG_ON_PAGE(page_count(page) == 0, page); page_cache_get_speculative()
196 VM_BUG_ON_PAGE(page_count(page) == 0, page); page_cache_add_speculative()
215 VM_BUG_ON_PAGE(page_count(page) != 0, page); page_unfreeze_refs()
firewire.h
423 int page_count; member in struct:fw_iso_buffer
428 int page_count, enum dma_data_direction direction);
relay.h
44 unsigned int page_count; /* number of current buffer pages */ member in struct:rchan_buf
mm.h
449 static inline int page_count(struct page *page) page_count() function
589 * For the non-reserved pages, page_count(page) denotes a reference count.
590 * page_count() == 0 means the page is free. page->lru is then used for
592 * page_count() > 0 means the page has been allocated.
601 * In this case, page_count still tracks the references, and should only
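Note: mm.h line 449 is the definition site the rest of these hits resolve to. In this kernel generation the accessor is, from memory (verify against the tree before relying on it), a read of the compound head's _count:

/* Recollection of the 4.4-era definition; not copied from the source. */
static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}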
/linux-4.4.14/arch/xtensa/mm/
tlb.c
244 page_count(p), check_tlb_entry()
246 if (!page_count(p)) check_tlb_entry()
/linux-4.4.14/drivers/net/fjes/
fjes_hw.c
468 int page_count; fjes_hw_register_buff_addr() local
487 page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE; fjes_hw_register_buff_addr()
488 for (i = 0; i < page_count; i++) { fjes_hw_register_buff_addr()
497 page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE; fjes_hw_register_buff_addr()
498 for (i = 0; i < page_count; i++) { fjes_hw_register_buff_addr()
/linux-4.4.14/fs/ceph/
super.c
518 int page_count; create_fs_client() local
563 page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; create_fs_client()
564 size = sizeof (struct page *) * (page_count ? page_count : 1); create_fs_client()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
icm.c
232 static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) mlx4_UNMAP_ICM() argument
234 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, mlx4_UNMAP_ICM()
en_rx.c
198 i, page_count(page_alloc->page)); mlx4_en_destroy_allocator()
/linux-4.4.14/arch/unicore32/mm/
init.c
88 else if (!page_count(page)) for_each_bank()
91 shared += page_count(page) - 1; for_each_bank()
/linux-4.4.14/drivers/hv/
hv_balloon.c
363 * If page_count is less than the requested page count, then the host should
374 * page_count: number of pages that were successfully hot added.
382 __u32 page_count; member in struct:dm_hot_add_response
915 resp.page_count = process_hot_add(pg_start, pfn_cnt, hot_add_req()
918 dm->num_pages_added += resp.page_count; hot_add_req()
937 if (resp.page_count > 0) hot_add_req()
944 if (!do_hot_add || (resp.page_count == 0)) hot_add_req()
/linux-4.4.14/mm/
migrate.c
223 * Once radix-tree replacement of page migration started, page_count __migration_entry_wait()
324 if (page_count(page) != expected_count) migrate_page_move_mapping()
346 if (page_count(page) != expected_count || migrate_page_move_mapping()
449 if (page_count(page) != expected_count || migrate_huge_page_move_mapping()
941 if (page_count(page) == 1) { unmap_and_move()
1647 * check on page_count(), so we must do it here, now that the page numamigrate_isolate_page()
1652 if (PageTransHuge(page) && page_count(page) != 3) { numamigrate_isolate_page()
1788 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { migrate_misplaced_transhuge_page()
1830 if (page_count(page) != 2) { migrate_misplaced_transhuge_page()
memory-failure.c
855 count = page_count(p) - 1; page_action()
1162 VM_BUG_ON_PAGE(!page_count(p), p); memory_failure()
1439 if (page_count(page) > 1) { unpoison_memory()
1507 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) unpoison_memory()
1714 pfn, ret, page_count(page), page->flags); __soft_offline_page()
hugetlb.c
1049 if (page_count(page) > 0) pfn_range_valid_gigantic()
1218 BUG_ON(page_count(page)); free_huge_page()
1418 if (PageHuge(page) && !page_count(page)) { dissolve_free_huge_page()
1710 VM_BUG_ON_PAGE(page_count(page), page); gather_surplus_pages()
2009 WARN_ON(page_count(page) != 1); gather_bootmem_prealloc()
4242 * indicated by page_count > 1, unmap is achieved by clearing pud and
4255 BUG_ON(page_count(virt_to_page(ptep)) == 0); huge_pmd_unshare()
4256 if (page_count(virt_to_page(ptep)) == 1) huge_pmd_unshare()
4398 if (!page_huge_active(hpage) && !page_count(hpage)) { dequeue_hwpoisoned_huge_page()
huge_memory.c
1805 BUG_ON(page_count(page_tail) <= 0); __split_huge_page_refcount()
1820 BUG_ON(page_count(page) <= 0); __split_huge_page_refcount()
2239 if (page_count(page) != 1 + !!PageSwapCache(page)) { __collapse_huge_page_isolate()
2697 if (page_count(page) != 1 + !!PageSwapCache(page)) khugepaged_scan_pmd()
2986 VM_BUG_ON_PAGE(!page_count(page), page); __split_huge_page_pmd()
page_alloc.c
610 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); page_is_buddy()
624 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); page_is_buddy()
2118 VM_BUG_ON_PAGE(!page_count(page), page); split_page()
6560 * We can't use page_count without pin a page has_unmovable_pages()
6573 * page_count() is not 0. has_unmovable_pages()
6586 * If the page is not RAM, page_count()should be 0. has_unmovable_pages()
6815 count += page_count(page) != 1; free_contig_range()
6886 * page_count() is not 0. __offline_isolated_pages()
6894 BUG_ON(page_count(page)); __offline_isolated_pages()
vmscan.c
483 return page_count(page) - page_has_private(page) == 2; is_page_cache_freeable()
625 * drop the reference. So if PageDirty is tested before page_count __remove_mapping()
634 * !page_count(page) [good, discard it] __remove_mapping()
1153 * process address space (page_count == 1) it can be freed. shrink_page_list()
1159 if (!mapping && page_count(page) == 1) { shrink_page_list()
1428 VM_BUG_ON_PAGE(!page_count(page), page); isolate_lru_page()
ksm.c
901 if (page_mapcount(page) + 1 + swapped != page_count(page)) { write_protect_page()
1044 * case, we need to lock and check page_count is not raised. try_to_merge_one_page()
shmem.c
1868 } else if (page_count(page) - page_mapcount(page) > 1) { shmem_tag_pins()
1928 page_count(page) - page_mapcount(page) != 1) { shmem_wait_for_pins()
compaction.c
777 page_count(page) > page_mapcount(page)) isolate_migratepages_block()
kmemleak.c
1394 if (page_count(page) == 0) for_each_online_node()
memory_hotplug.c
1484 if (page_count(page)) { do_migrate_range()
memcontrol.c
5507 VM_BUG_ON_PAGE(page_count(page), page); uncharge_list()
5677 VM_BUG_ON_PAGE(page_count(page), page); mem_cgroup_swapout()
memory.c
1548 if (!page_count(page)) vm_insert_page()
/linux-4.4.14/drivers/net/ethernet/sfc/
siena_sriov.c
811 u64 page_count = req->u.set_status_page.peer_page_count; efx_vfdi_set_status_page() local
817 if (!req->u.set_status_page.dma_addr || page_count > max_page_count) { efx_vfdi_set_status_page()
833 if (page_count) { efx_vfdi_set_status_page()
834 vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), efx_vfdi_set_status_page()
839 page_count * sizeof(u64)); efx_vfdi_set_status_page()
840 vf->peer_page_count = page_count; efx_vfdi_set_status_page()
rx.c
126 /* If page_count is 1 then we hold the only reference to this page. */ efx_reuse_page()
127 if (page_count(page) == 1) { efx_reuse_page()
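Note: the sfc comment above states the rule that the Ethernet drivers further down (fm10k, igb, ixgbe, igbvf, ixgbevf, gianfar, i40e) all apply: page_count() == 1 means the driver holds the only reference, so the page can be recycled into the RX ring instead of freed and reallocated. Schematic kernel-style fragment (compiles only in-tree; the helper name is invented):

#include <linux/mm.h>

/* Invented helper illustrating the recycle-vs-release decision. */
static struct page *try_reuse_rx_page(struct page *page)
{
	if (page_count(page) == 1)
		return page;    /* sole owner: recycle into the ring */
	put_page(page);         /* the stack still holds a reference */
	return NULL;            /* caller allocates a fresh page */
}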
/linux-4.4.14/include/drm/
drm_legacy.h
105 int page_count; /**< number of pages */ member in struct:drm_device_dma
/linux-4.4.14/include/linux/ceph/
messenger.h
130 unsigned short page_count; /* pages in array */ member in struct:ceph_msg_data_cursor::__anon12152::__anon12154
/linux-4.4.14/arch/mips/mm/
gup.c
66 VM_BUG_ON(page_count(page) == 0); get_head_page_multiple()
/linux-4.4.14/arch/arm64/include/asm/
kvm_mmu.h
204 return page_count(ptr_page) == 1;
/linux-4.4.14/arch/arm/include/asm/
kvm_mmu.h
157 return page_count(ptr_page) == 1; kvm_page_empty()
/linux-4.4.14/sound/soc/intel/skylake/
skl-sst-cldma.h
209 unsigned int max_size, u32 page_count);
/linux-4.4.14/net/ceph/
messenger.c
594 /* sendpage cannot properly handle pages with page_count == 0, ceph_tcp_sendpage()
596 if (page_count(page) >= 1) ceph_tcp_sendpage()
925 int page_count; ceph_msg_data_pages_cursor_init() local
933 page_count = calc_pages_for(data->alignment, (u64)data->length); ceph_msg_data_pages_cursor_init()
936 BUG_ON(page_count > (int)USHRT_MAX); ceph_msg_data_pages_cursor_init()
937 cursor->page_count = (unsigned short)page_count; ceph_msg_data_pages_cursor_init()
950 BUG_ON(cursor->page_index >= cursor->page_count); ceph_msg_data_pages_next()
981 BUG_ON(cursor->page_index >= cursor->page_count); ceph_msg_data_pages_advance()
/linux-4.4.14/drivers/scsi/lpfc/
lpfc_sli4.h
141 uint32_t page_count; /* Number of pages allocated for this queue */ member in struct:lpfc_queue
643 uint32_t page_count; member in struct:lpfc_rpi_hdr
lpfc_sli.c
12775 queue->page_count = (ALIGN(entry_size * entry_count, lpfc_sli4_queue_alloc()
12780 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { lpfc_sli4_queue_alloc()
12978 eq->page_count); lpfc_eq_create()
13098 cq->page_count); lpfc_cq_create()
13204 mq->page_count); lpfc_mq_create_fb_init()
13286 &mq_create_ext->u.request, mq->page_count); lpfc_mq_create()
13443 wq->page_count); lpfc_wq_create()
13730 hrq->page_count); lpfc_rq_create()
13861 drq->page_count); lpfc_rq_create()
15640 hdr_tmpl, rpi_page->page_count); lpfc_sli4_post_rpi_hdr()
lpfc_init.c
6015 rpi_hdr->page_count = 1; lpfc_sli4_create_rpi_hdr()
/linux-4.4.14/arch/alpha/kernel/
core_titan.c
688 mem->page_count, mem->pages); titan_agp_unbind_memory()
696 mem->page_count); titan_agp_translate()
core_marvel.c
1030 mem->page_count, mem->pages); marvel_agp_translate()
1038 mem->page_count); marvel_agp_translate()
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/
tracefile.h
331 __LASSERT(page_count(tage->page) > 0); \
/linux-4.4.14/drivers/mtd/nand/
denali.c
761 uint32_t page_count = 1; denali_send_pipeline_cmd() local
802 PIPELINE_ACCESS | op | page_count); denali_send_pipeline_cmd()
1036 const int page_count = 1; denali_setup_dma() local
1044 index_addr(denali, mode | denali->page, 0x2000 | op | page_count); denali_setup_dma()
/linux-4.4.14/drivers/infiniband/hw/mthca/
mthca_cmd.h
279 int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count);
mthca_cmd.c
1563 int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count) mthca_UNMAP_ICM() argument
1566 page_count, (unsigned long long) virt); mthca_UNMAP_ICM()
1568 return mthca_cmd(dev, virt, page_count, 0, mthca_UNMAP_ICM()
/linux-4.4.14/arch/tile/mm/
homecache.c
368 BUG_ON(page_count(page) > 1); homecache_change_page_home()
pgtable.c
248 * Make every page have a page_count() of one, not just the first. pgtable_alloc_one()
/linux-4.4.14/drivers/infiniband/hw/nes/
nes_verbs.c
287 u32 stag, u32 page_count) alloc_fast_reg_mr()
296 u64 region_length = page_count * PAGE_SIZE; alloc_fast_reg_mr()
304 nes_debug(NES_DBG_MR, "alloc_fast_reg_mr: page_count = %d, " alloc_fast_reg_mr()
306 page_count, region_length); alloc_fast_reg_mr()
346 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8)); alloc_fast_reg_mr()
2305 int page_count = 0; nes_reg_user_mr() local
2396 if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length) nes_reg_user_mr()
2398 if ((page_count&0x01FF) == 0) { nes_reg_user_mr()
2399 if (page_count >= 1024 * 512) { nes_reg_user_mr()
2464 if (page_count != 0) { nes_reg_user_mr()
2485 page_count++; nes_reg_user_mr()
286 alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, u32 stag, u32 page_count) alloc_fast_reg_mr() argument
/linux-4.4.14/drivers/gpu/drm/i915/
i915_gpu_error.c
323 for (page = offset = 0; page < obj->page_count; page++) { print_error_obj()
554 for (page = 0; page < obj->page_count; page++) i915_error_object_free()
638 dst->page_count = num_pages; i915_error_object_create()
i915_gem_fence.c
779 int page_count = obj->base.size >> PAGE_SHIFT; i915_gem_object_save_bit_17_swizzle() local
783 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), i915_gem_object_save_bit_17_swizzle()
i915_gem.c
2245 int page_count, i; i915_gem_object_get_pages_gtt() local
2266 page_count = obj->base.size / PAGE_SIZE; i915_gem_object_get_pages_gtt()
2267 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { i915_gem_object_get_pages_gtt()
2282 for (i = 0; i < page_count; i++) { i915_gem_object_get_pages_gtt()
2286 page_count, i915_gem_object_get_pages_gtt()
i915_drv.h
561 int page_count; member in struct:drm_i915_error_state::drm_i915_error_ring::drm_i915_error_object
/linux-4.4.14/drivers/staging/rdma/ehca/
ehca_mrmw.c
1541 int page_count; ehca_reg_mr_section() local
1549 page_count = EHCA_SECTSIZE / pginfo->hwpage_size; ehca_reg_mr_section()
1551 while (page < page_count) { ehca_reg_mr_section()
1553 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); ehca_reg_mr_section()
/linux-4.4.14/arch/x86/mm/
gup.c
112 VM_BUG_ON_PAGE(page_count(page) == 0, page); get_head_page_multiple()
/linux-4.4.14/drivers/net/hyperv/
netvsc.c
717 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : netvsc_copy_to_send_buf() local
728 for (i = 0; i < page_count; i++) { netvsc_copy_to_send_buf()
/linux-4.4.14/fs/cachefiles/
rdwr.c
238 netpage, netpage->index, page_count(netpage)); cachefiles_read_backing_file_one()
478 netpage, netpage->index, page_count(netpage)); list_for_each_entry_safe()
/linux-4.4.14/kernel/
relay.c
146 buf->page_count = n_pages; relay_alloc_buf()
213 for (i = 0; i < buf->page_count; i++) relay_destroy_buf()
/linux-4.4.14/fs/
pipe.c
134 if (page_count(page) == 1 && !pipe->tmp_page) anon_pipe_buf_release()
162 if (page_count(page) == 1) { generic_pipe_buf_steal()
aio.c
297 page_count(ctx->ring_pages[i])); aio_free_ring()
483 current->pid, i, page_count(page)); aio_setup_ring()
/linux-4.4.14/drivers/android/
binder.c
3068 active_transactions, page_count; binder_deferred_release() local
3136 page_count = 0; binder_deferred_release()
3152 page_count++; binder_deferred_release()
3163 outgoing_refs, active_transactions, buffers, page_count); binder_deferred_release()
/linux-4.4.14/fs/fuse/
dev.c
827 page_count(page) != 1 || fuse_check_page()
836 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); fuse_check_page()
/linux-4.4.14/drivers/scsi/
libiscsi_tcp.c
133 if (page_count(sg_page(sg)) >= 1 && !recv) iscsi_tcp_segment_map()
/linux-4.4.14/drivers/net/ethernet/sun/
cassini.c
587 if (page_count(page->buffer) > 1) cas_spare_recover()
1375 if (page_count(page->buffer) == 1) cas_page_spare()
1395 if (page_count(page0[index]->buffer) > 1) { cas_page_swap()
2234 if (page_count(page[entry]->buffer) > 1) { cas_post_rxds_ringN()
/linux-4.4.14/drivers/block/drbd/
drbd_main.c
1479 * XFS seems to have problems, still, it submits pages with page_count == 0!
1481 * with page_count == 0 or PageSlab.
1508 * page_count of 0 and/or have PageSlab() set. _drbd_send_page()
1513 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) _drbd_send_page()
drbd_int.h
1691 if (page_count(page) > 1) page_chain_for_each()
/linux-4.4.14/drivers/xen/
grant-table.c
896 if (page_count(item->pages[pc]) > 1) { __gnttab_unmap_refs_async()
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/
fm10k_main.c
232 if (unlikely(page_count(page) != 1)) fm10k_can_reuse_rx_page()
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/
i40e_txrx.c
1044 if ((page_count(rx_bi->page) == 1) && i40e_clean_rx_irq_ps()
/linux-4.4.14/drivers/s390/net/
qeth_core_main.c
2746 if (page_count(virt_to_page(entry->elements[i])) > 1) { qeth_find_free_buffer_pool_entry()
2761 if (page_count(virt_to_page(entry->elements[i])) > 1) { qeth_find_free_buffer_pool_entry()
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
i40e_txrx.c
1579 if ((page_count(rx_bi->page) == 1) && i40e_clean_rx_irq_ps()
/linux-4.4.14/drivers/net/ethernet/intel/igbvf/
netdev.c
338 (page_count(buffer_info->page) != 1)) igbvf_clean_rx_irq()
/linux-4.4.14/drivers/net/ethernet/freescale/
gianfar.c
2942 if (unlikely(page_count(page) != 1)) gfar_add_rx_frag()
/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c
821 if (unlikely(page_count(page) != 1)) ixgbevf_add_rx_frag()
/linux-4.4.14/arch/x86/kvm/
mmu.c
608 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); mmu_spte_clear_track_bits()
/linux-4.4.14/drivers/net/ethernet/intel/igb/
igb_main.c
6609 if (unlikely(page_count(page) != 1)) igb_can_reuse_rx_page()
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
ixgbe_main.c
1928 if (unlikely(page_count(page) != 1)) ixgbe_add_rx_frag()

Completed in 6001 milliseconds