/linux-4.1.27/drivers/char/agp/ |
D | i460-agp.c |
    294   off_t pg_start, int type) in i460_insert_memory_small_io_page() argument
    301   mem, pg_start, type, page_to_phys(mem->pages[0])); in i460_insert_memory_small_io_page()
    306   io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; in i460_insert_memory_small_io_page()
    337   off_t pg_start, int type) in i460_remove_memory_small_io_page() argument
    342   mem, pg_start, type); in i460_remove_memory_small_io_page()
    344   pg_start = I460_IOPAGES_PER_KPAGE * pg_start; in i460_remove_memory_small_io_page()
    346   for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) in i460_remove_memory_small_io_page()
    401   off_t pg_start, int type) in i460_insert_memory_large_io_page() argument
    414   start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; in i460_insert_memory_large_io_page()
    415   end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_insert_memory_large_io_page()
    [all …]
|
D | sgi-agp.c |
    127   static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start, in sgi_tioca_insert_memory() argument
    173   if ((pg_start + mem->page_count) > num_entries) in sgi_tioca_insert_memory()
    176   j = pg_start; in sgi_tioca_insert_memory()
    178   while (j < (pg_start + mem->page_count)) { in sgi_tioca_insert_memory()
    189   for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in sgi_tioca_insert_memory()
    200   static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start, in sgi_tioca_remove_memory()
    217   for (i = pg_start; i < (mem->page_count + pg_start); i++) { in sgi_tioca_remove_memory()
|
D | efficeon-agp.c |
    238   static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type) in efficeon_insert_memory() argument
    245   printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count); in efficeon_insert_memory()
    248   if ((pg_start + mem->page_count) > num_entries) in efficeon_insert_memory()
    260   int index = pg_start + i; in efficeon_insert_memory()
    287   static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type) in efficeon_remove_memory() argument
    291   printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count); in efficeon_remove_memory()
    295   if ((pg_start + mem->page_count) > num_entries) in efficeon_remove_memory()
    301   int index = pg_start + i; in efficeon_remove_memory()
|
D | alpha-agp.c |
    86    static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start, in alpha_core_agp_insert_memory() argument
    98    if ((pg_start + mem->page_count) > num_entries) in alpha_core_agp_insert_memory()
    101   status = agp->ops->bind(agp, pg_start, mem); in alpha_core_agp_insert_memory()
    108   static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start, in alpha_core_agp_remove_memory() argument
    114   status = agp->ops->unbind(agp, pg_start, mem); in alpha_core_agp_remove_memory()
|
D | generic.c |
    411   int agp_bind_memory(struct agp_memory *curr, off_t pg_start) in agp_bind_memory() argument
    427   ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); in agp_bind_memory()
    433   curr->pg_start = pg_start; in agp_bind_memory()
    463   ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); in agp_unbind_memory()
    469   curr->pg_start = 0; in agp_unbind_memory()
    1033  int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) in agp_generic_insert_memory() argument
    1084  if (((pg_start + mem->page_count) > num_entries) || in agp_generic_insert_memory()
    1085  ((pg_start + mem->page_count) < pg_start)) in agp_generic_insert_memory()
    1088  j = pg_start; in agp_generic_insert_memory()
    1090  while (j < (pg_start + mem->page_count)) { in agp_generic_insert_memory()
    [all …]
|
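The agp_generic_insert_memory() hits at lines 1084-1085 above show the range validation that most of the bridge drivers in this directory repeat: the requested window must fit inside the GATT and the index arithmetic must not wrap. A minimal user-space sketch of that check, assuming a made-up table size (NUM_ENTRIES, range_ok and the test values are illustrative stand-ins, not the driver's real state):

    #include <stdio.h>
    #include <sys/types.h>

    /* Stand-in for the GATT size a bridge driver would read from its hardware. */
    #define NUM_ENTRIES 4096

    /* Mirrors the check at generic.c:1084-1085: reject ranges that run past the
     * end of the table and ranges where pg_start + page_count wraps around. */
    static int range_ok(off_t pg_start, size_t page_count)
    {
            if ((pg_start + (off_t)page_count) > NUM_ENTRIES)
                    return 0;               /* runs past the aperture */
            if ((pg_start + (off_t)page_count) < pg_start)
                    return 0;               /* arithmetic wrapped */
            return 1;
    }

    int main(void)
    {
            printf("%d\n", range_ok(100, 16));      /* fits: prints 1 */
            printf("%d\n", range_ok(4090, 16));     /* past the end: prints 0 */
            return 0;
    }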
D | nvidia-agp.c |
    201   static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type) in nvidia_insert_memory() argument
    213   if ((pg_start + mem->page_count) > in nvidia_insert_memory()
    217   for (j = pg_start; j < (pg_start + mem->page_count); j++) { in nvidia_insert_memory()
    226   for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in nvidia_insert_memory()
    240   static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type) in nvidia_remove_memory() argument
    253   for (i = pg_start; i < (mem->page_count + pg_start); i++) in nvidia_remove_memory()
|
D | sworks-agp.c |
    320   off_t pg_start, int type) in serverworks_insert_memory() argument
    331   if ((pg_start + mem->page_count) > num_entries) { in serverworks_insert_memory()
    335   j = pg_start; in serverworks_insert_memory()
    336   while (j < (pg_start + mem->page_count)) { in serverworks_insert_memory()
    349   for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in serverworks_insert_memory()
    360   static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start, in serverworks_remove_memory() argument
    374   for (i = pg_start; i < (mem->page_count + pg_start); i++) { in serverworks_remove_memory()
|
D | ati-agp.c |
    267   off_t pg_start, int type) in ati_insert_memory() argument
    283   if ((pg_start + mem->page_count) > num_entries) in ati_insert_memory()
    286   j = pg_start; in ati_insert_memory()
    287   while (j < (pg_start + mem->page_count)) { in ati_insert_memory()
    301   for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in ati_insert_memory()
    314   static int ati_remove_memory(struct agp_memory * mem, off_t pg_start, in ati_remove_memory() argument
    329   for (i = pg_start; i < (mem->page_count + pg_start); i++) { in ati_remove_memory()
|
D | amd-k7-agp.c |
    283   static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) in amd_insert_memory() argument
    295   if ((pg_start + mem->page_count) > num_entries) in amd_insert_memory()
    298   j = pg_start; in amd_insert_memory()
    299   while (j < (pg_start + mem->page_count)) { in amd_insert_memory()
    312   for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in amd_insert_memory()
    325   static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type) in amd_remove_memory() argument
    335   for (i = pg_start; i < (mem->page_count + pg_start); i++) { in amd_remove_memory()
|
D | intel-gtt.c |
    211   static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, in i810_insert_dcache_entries() argument
    216   if ((pg_start + mem->page_count) in i810_insert_dcache_entries()
    223   for (i = pg_start; i < (pg_start + mem->page_count); i++) { in i810_insert_dcache_entries()
    842   unsigned int pg_start, in intel_gtt_insert_sg_entries() argument
    849   j = pg_start; in intel_gtt_insert_sg_entries()
    882   off_t pg_start, int type) in intel_fake_agp_insert_entries() argument
    894   return i810_insert_dcache_entries(mem, pg_start, type); in intel_fake_agp_insert_entries()
    899   if (pg_start + mem->page_count > intel_private.gtt_total_entries) in intel_fake_agp_insert_entries()
    918   intel_gtt_insert_sg_entries(&st, pg_start, type); in intel_fake_agp_insert_entries()
    922   intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, in intel_fake_agp_insert_entries()
    [all …]
|
D | compat_ioctl.h |
    59    compat_off_t pg_start; /* starting page to populate */ member
    82    compat_off_t pg_start; /* starting page to populate */ member
|
D | parisc-agp.c |
    125   parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) in parisc_agp_insert_memory() argument
    137   io_pg_start = info->io_pages_per_kpage * pg_start; in parisc_agp_insert_memory()
    174   parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) in parisc_agp_remove_memory() argument
    184   io_pg_start = info->io_pages_per_kpage * pg_start; in parisc_agp_remove_memory()
|
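parisc-agp.c:137, hp-agp.c:344/389 and i460-agp.c:306 all scale pg_start by an io_pages_per_kpage factor, because the IOMMU maps pages that are smaller than the kernel's. A sketch of that conversion with assumed sizes (a 4 KiB I/O page under a 16 KiB kernel page; the constants are illustrative, not taken from any of those drivers):

    #include <stdio.h>

    /* Assumed sizes for illustration only. */
    #define KPAGE_SIZE          16384UL
    #define IOPAGE_SIZE          4096UL
    #define IO_PAGES_PER_KPAGE  (KPAGE_SIZE / IOPAGE_SIZE)

    int main(void)
    {
            unsigned long pg_start = 10;    /* GATT offset in kernel pages */
            unsigned long page_count = 3;   /* kernel pages to map */

            /* Same scaling as parisc-agp.c:137 / hp-agp.c:344: express the
             * offset and length in IOMMU-page units before touching the table. */
            unsigned long io_pg_start = IO_PAGES_PER_KPAGE * pg_start;
            unsigned long io_pg_count = IO_PAGES_PER_KPAGE * page_count;

            printf("io_pg_start=%lu io_pg_count=%lu\n", io_pg_start, io_pg_count);
            return 0;
    }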
D | compat_ioctl.c |
    119   ksegment[seg].pg_start = usegment[seg].pg_start; in compat_agpioc_reserve_wrap()
    186   return agp_bind_memory(memory, bind_info.pg_start); in compat_agpioc_bind_wrap()
|
D | uninorth-agp.c |
    149   static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type) in uninorth_insert_memory() argument
    171   if ((pg_start + mem->page_count) > num_entries) in uninorth_insert_memory()
    174   gp = (u32 *) &agp_bridge->gatt_table[pg_start]; in uninorth_insert_memory()
    199   int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) in uninorth_remove_memory() argument
    217   gp = (u32 *) &agp_bridge->gatt_table[pg_start]; in uninorth_remove_memory()
|
D | frontend.c |
    106   off_t pg_start; in agp_find_seg_in_client() local
    109   pg_start = offset / 4096; in agp_find_seg_in_client()
    115   if ((seg[i].pg_start == pg_start) && in agp_find_seg_in_client()
    179   seg[i].pg_start = user_seg[i].pg_start; in agp_create_segment()
    929   return agp_bind_memory(memory, bind_info.pg_start); in agpioc_bind_wrap()
|
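agp_find_seg_in_client() (frontend.c:106-115 above) converts a user-supplied byte offset into a page index and then matches it against the client's reserved segments. A simplified user-space sketch of that lookup, assuming a cut-down segment struct and comparing only pg_start (the real code also checks pg_count and protection; struct seg and find_seg are hypothetical names):

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/types.h>

    /* Cut-down stand-in for struct agp_segment_priv. */
    struct seg {
            off_t  pg_start;
            size_t pg_count;
    };

    /* Mirrors the core of agp_find_seg_in_client(): byte offset -> page index
     * (offset / 4096), then scan for a reserved segment starting there. */
    static struct seg *find_seg(struct seg *segs, size_t n, off_t offset)
    {
            off_t pg_start = offset / 4096;
            size_t i;

            for (i = 0; i < n; i++)
                    if (segs[i].pg_start == pg_start)
                            return &segs[i];
            return NULL;
    }

    int main(void)
    {
            struct seg segs[] = { { 0, 16 }, { 64, 8 } };

            printf("%s\n", find_seg(segs, 2, 64 * 4096) ? "found" : "missing");
            return 0;
    }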
D | amd64-agp.c |
    44    static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) in amd64_insert_memory() argument
    63    if (((unsigned long)pg_start + mem->page_count) > num_entries) in amd64_insert_memory()
    66    j = pg_start; in amd64_insert_memory()
    69    while (j < (pg_start + mem->page_count)) { in amd64_insert_memory()
    80    for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { in amd64_insert_memory()
|
D | hp-agp.c |
    332   hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type) in hp_zx1_insert_memory() argument
    344   io_pg_start = hp->io_pages_per_kpage * pg_start; in hp_zx1_insert_memory()
    379   hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type) in hp_zx1_remove_memory() argument
    389   io_pg_start = hp->io_pages_per_kpage * pg_start; in hp_zx1_remove_memory()
|
D | agp.h |
    197   int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
    198   int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
|
/linux-4.1.27/include/linux/ |
D | agpgart.h |
    54    off_t pg_start; /* starting page to populate */ member
    60    off_t pg_start; member
    83    off_t pg_start; /* starting page to populate */ member
|
D | agp_backend.h | 77 off_t pg_start; member
|
/linux-4.1.27/arch/alpha/kernel/ |
D | core_titan.c |
    581   long pg_start; member
    599   aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in titan_agp_setup()
    601   if (aper->pg_start < 0) { in titan_agp_setup()
    608   aper->arena->dma_base + aper->pg_start * PAGE_SIZE; in titan_agp_setup()
    621   status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
    625   iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
    626   status = iommu_release(aper->arena, aper->pg_start, in titan_agp_cleanup()
    679   titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) in titan_agp_bind_memory() argument
    682   return iommu_bind(aper->arena, aper->pg_start + pg_start, in titan_agp_bind_memory()
    687   titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) in titan_agp_unbind_memory() argument
    [all …]
|
D | core_marvel.c |
    895   long pg_start; member
    912   aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in marvel_agp_setup()
    915   if (aper->pg_start < 0) { in marvel_agp_setup()
    922   aper->arena->dma_base + aper->pg_start * PAGE_SIZE; in marvel_agp_setup()
    935   status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
    939   iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
    940   status = iommu_release(aper->arena, aper->pg_start, in marvel_agp_cleanup()
    1015  marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) in marvel_agp_bind_memory() argument
    1018  return iommu_bind(aper->arena, aper->pg_start + pg_start, in marvel_agp_bind_memory()
    1023  marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) in marvel_agp_unbind_memory() argument
    [all …]
|
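In both titan_agp_setup() and marvel_agp_setup() the pages handed back by iommu_reserve() become the AGP aperture's bus address (core_titan.c:608, core_marvel.c:922): the arena's dma_base plus the reserved page offset in bytes. A sketch of that arithmetic only, with an assumed 8 KiB PAGE_SIZE and made-up base and offset values:

    #include <stdio.h>

    #define PAGE_SIZE 8192UL   /* assumed page size, for illustration */

    int main(void)
    {
            unsigned long dma_base = 0x80000000UL;  /* illustrative arena base */
            long pg_start = 256;                    /* offset iommu_reserve() returned */

            /* Same arithmetic as core_titan.c:608 / core_marvel.c:922: the
             * aperture sits pg_start pages into the arena's DMA window. */
            unsigned long aper_base = dma_base + pg_start * PAGE_SIZE;

            printf("aper_base = 0x%lx\n", aper_base);
            return 0;
    }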
D | pci_iommu.c |
    875   iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_release() argument
    885   for(i = pg_start; i < pg_start + pg_count; i++) in iommu_release()
    889   iommu_arena_free(arena, pg_start, pg_count); in iommu_release()
    894   iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, in iommu_bind() argument
    907   for(j = pg_start; j < pg_start + pg_count; j++) { in iommu_bind()
    914   for(i = 0, j = pg_start; i < pg_count; i++, j++) in iommu_bind()
    923   iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_unbind() argument
    930   p = arena->ptes + pg_start; in iommu_unbind()
|
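iommu_bind() (pci_iommu.c:894-914 above) uses two passes over the arena: first it checks that none of the pg_count PTE slots starting at pg_start are already occupied, then it fills them; iommu_unbind() simply clears the same range. A user-space model of that two-pass pattern over a plain array (ARENA_SIZE, RESERVED_PTE and the PTE encoding are placeholders, not the Alpha arena's real format):

    #include <stdio.h>
    #include <string.h>

    #define ARENA_SIZE   64
    #define RESERVED_PTE 1UL   /* placeholder "slot in use" marker */

    static unsigned long ptes[ARENA_SIZE];

    /* Two-pass bind, as at pci_iommu.c:907 and :914: refuse if any target
     * slot is busy, otherwise write every translation. */
    static int bind(long pg_start, long pg_count, const unsigned long *phys)
    {
            long i, j;

            for (j = pg_start; j < pg_start + pg_count; j++)
                    if (ptes[j])
                            return -1;      /* slot already bound */

            for (i = 0, j = pg_start; i < pg_count; i++, j++)
                    ptes[j] = phys[i] | RESERVED_PTE;
            return 0;
    }

    static void unbind(long pg_start, long pg_count)
    {
            /* pci_iommu.c:930 walks arena->ptes + pg_start; here we just clear. */
            memset(&ptes[pg_start], 0, pg_count * sizeof(ptes[0]));
    }

    int main(void)
    {
            unsigned long phys[2] = { 0x1000, 0x2000 };

            printf("bind:   %d\n", bind(4, 2, phys));   /* 0  */
            printf("rebind: %d\n", bind(5, 1, phys));   /* -1, slot 5 busy */
            unbind(4, 2);
            return 0;
    }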
/linux-4.1.27/fs/f2fs/ |
D | file.c |
    689   int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) in truncate_hole() argument
    694   for (index = pg_start; index < pg_end; index++) { in truncate_hole()
    714   pgoff_t pg_start, pg_end; in punch_hole() local
    731   pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; in punch_hole()
    737   if (pg_start == pg_end) { in punch_hole()
    738   fill_zero(inode, pg_start, off_start, in punch_hole()
    742   fill_zero(inode, pg_start++, off_start, in punch_hole()
    747   if (pg_start < pg_end) { in punch_hole()
    754   blk_start = pg_start << PAGE_CACHE_SHIFT; in punch_hole()
    760   ret = truncate_hole(inode, pg_start, pg_end); in punch_hole()
    [all …]
|
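punch_hole() (file.c:714-760 above) splits a byte range into a partial first page, whole middle pages and a partial last page: pg_start and pg_end come from shifting the byte offsets by PAGE_CACHE_SHIFT, the partial edges are zeroed in place, and only the whole pages in between go to truncate_hole(). A sketch of that split, assuming a 4 KiB page (PAGE_SHIFT = 12) and made-up offset/length values:

    #include <stdio.h>

    #define PAGE_SHIFT 12                 /* assumed 4 KiB page cache pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    int main(void)
    {
            unsigned long long offset = 5000, len = 20000;

            /* Same conversion as file.c:731: byte offsets to page indices. */
            unsigned long pg_start = offset >> PAGE_SHIFT;
            unsigned long pg_end   = (offset + len) >> PAGE_SHIFT;

            /* Offsets of the partial head and tail inside their pages. */
            unsigned long off_start = offset & PAGE_MASK;
            unsigned long off_end   = (offset + len) & PAGE_MASK;

            if (pg_start == pg_end) {
                    printf("zero bytes %lu..%lu of page %lu\n",
                           off_start, off_end, pg_start);
            } else {
                    if (off_start)
                            printf("zero tail of page %lu from byte %lu\n",
                                   pg_start++, off_start);
                    if (off_end)
                            printf("zero head of page %lu up to byte %lu\n",
                                   pg_end, off_end);
                    if (pg_start < pg_end)
                            printf("drop whole pages %lu..%lu\n",
                                   pg_start, pg_end - 1);
            }
            return 0;
    }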
/linux-4.1.27/include/uapi/linux/ |
D | agpgart.h |
    80    __kernel_off_t pg_start; /* starting page to populate */ member
    103   __kernel_off_t pg_start;/* starting page to populate */ member
|
/linux-4.1.27/arch/arc/include/asm/ |
D | ptrace.h |
    96    unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
    97    (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
|
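The ARC macro at ptrace.h:96-97 masks the kernel stack pointer down to the start of its THREAD_SIZE-aligned stack page and takes the pt_regs frame parked at the very top of that page. A sketch of the same pointer arithmetic with placeholder sizes and a dummy register frame (THREAD_SIZE, struct pt_regs and task_pt_regs here are illustrative, not ARC's real definitions):

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL               /* assumed kernel stack size */

    struct pt_regs { unsigned long r[32]; }; /* dummy frame layout */

    /* Same idea as ptrace.h:96-97: round sp down to the stack base, then
     * point at the pt_regs frame sitting at the top of the stack. */
    static struct pt_regs *task_pt_regs(uintptr_t sp)
    {
            uintptr_t pg_start = sp & ~(THREAD_SIZE - 1);

            return (struct pt_regs *)(pg_start + THREAD_SIZE) - 1;
    }

    int main(void)
    {
            static unsigned char stack[2 * THREAD_SIZE];
            /* Pretend sp points somewhere into an aligned kernel stack. */
            uintptr_t base = ((uintptr_t)stack + THREAD_SIZE - 1) & ~(THREAD_SIZE - 1);
            uintptr_t sp = base + 1234;

            printf("regs at %p, stack base 0x%lx\n",
                   (void *)task_pt_regs(sp), (unsigned long)base);
            return 0;
    }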
/linux-4.1.27/drivers/hv/ |
D | hv_balloon.c |
    746   static unsigned long handle_pg_range(unsigned long pg_start, in handle_pg_range() argument
    749   unsigned long start_pfn = pg_start; in handle_pg_range()
    825   static unsigned long process_hot_add(unsigned long pg_start, in process_hot_add() argument
    836   if (pfn_covered(pg_start, pfn_cnt)) in process_hot_add()
    853   ha_region->covered_end_pfn = pg_start; in process_hot_add()
    862   return handle_pg_range(pg_start, pfn_cnt); in process_hot_add()
    871   unsigned long pg_start, pfn_cnt; in hot_add_req() local
    882   pg_start = dm->ha_wrk.ha_page_range.finfo.start_page; in hot_add_req()
    899   region_start = pg_start; in hot_add_req()
    904   region_start = (pg_start / HA_CHUNK) * HA_CHUNK; in hot_add_req()
    [all …]
|
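hot_add_req() (hv_balloon.c:899-904 above) either starts hot-add exactly at the requested pg_start or, when no existing region covers it, rounds the start down to an HA_CHUNK boundary so memory is added in whole chunks. A sketch of that rounding with an assumed chunk size and made-up PFN (HA_CHUNK's real value comes from the memory hot-add section size):

    #include <stdio.h>

    #define HA_CHUNK 32768UL   /* assumed hot-add granularity, in pages */

    int main(void)
    {
            unsigned long pg_start = 100000; /* PFN from the host's request */
            int covered = 0;                 /* pretend no region covers it */
            unsigned long region_start;

            /* Same choice as hv_balloon.c:899/:904: reuse pg_start when an
             * existing region already covers it, otherwise align the new
             * region's start down to a chunk boundary. */
            if (covered)
                    region_start = pg_start;
            else
                    region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

            printf("region_start = %lu\n", region_start);
            return 0;
    }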
/linux-4.1.27/include/drm/ |
D | intel-gtt.h | 17 unsigned int pg_start,
|
/linux-4.1.27/fs/9p/ |
D | vfs_file.c |
    422   unsigned long pg_start, pg_end; in v9fs_file_write_iter() local
    423   pg_start = origin >> PAGE_CACHE_SHIFT; in v9fs_file_write_iter()
    427   pg_start, pg_end); in v9fs_file_write_iter()
|
/linux-4.1.27/tools/vm/ |
D | page-types.c |
    178   static unsigned long pg_start[MAX_VMAS]; variable
    651   if (pg_start[i] >= end) in walk_task()
    654   start = max_t(unsigned long, pg_start[i], index); in walk_task()
    803   pg_start[nr_vmas] = vm_start / page_size; in parse_pid()
|
/linux-4.1.27/fs/fuse/ |
D | inode.c |
    333   pgoff_t pg_start; in fuse_reverse_inval_inode() local
    342   pg_start = offset >> PAGE_CACHE_SHIFT; in fuse_reverse_inval_inode()
    348   pg_start, pg_end); in fuse_reverse_inval_inode()
|