/linux-4.1.27/drivers/char/agp/ |
H A D | i460-agp.c |
    294 off_t pg_start, int type) i460_insert_memory_small_io_page()
    300 pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", i460_insert_memory_small_io_page()
    301 mem, pg_start, type, page_to_phys(mem->pages[0])); i460_insert_memory_small_io_page()
    306 io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; i460_insert_memory_small_io_page()
    337 off_t pg_start, int type) i460_remove_memory_small_io_page()
    341 pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n", i460_remove_memory_small_io_page()
    342 mem, pg_start, type); i460_remove_memory_small_io_page()
    344 pg_start = I460_IOPAGES_PER_KPAGE * pg_start; i460_remove_memory_small_io_page()
    346 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) i460_remove_memory_small_io_page()
    401 off_t pg_start, int type) i460_insert_memory_large_io_page()
    413 /* Figure out what pg_start means in terms of our large GART pages */ i460_insert_memory_large_io_page()
    414 start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; i460_insert_memory_large_io_page()
    415 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; i460_insert_memory_large_io_page()
    416 start_offset = pg_start % I460_KPAGES_PER_IOPAGE; i460_insert_memory_large_io_page()
    417 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; i460_insert_memory_large_io_page()
    462 off_t pg_start, int type) i460_remove_memory_large_io_page()
    471 /* Figure out what pg_start means in terms of our large GART pages */ i460_remove_memory_large_io_page()
    472 start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; i460_remove_memory_large_io_page()
    473 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; i460_remove_memory_large_io_page()
    474 start_offset = pg_start % I460_KPAGES_PER_IOPAGE; i460_remove_memory_large_io_page()
    475 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; i460_remove_memory_large_io_page()
    501 off_t pg_start, int type) i460_insert_memory()
    504 return i460_insert_memory_small_io_page(mem, pg_start, type); i460_insert_memory()
    506 return i460_insert_memory_large_io_page(mem, pg_start, type); i460_insert_memory()
    510 off_t pg_start, int type) i460_remove_memory()
    513 return i460_remove_memory_small_io_page(mem, pg_start, type); i460_remove_memory()
    515 return i460_remove_memory_large_io_page(mem, pg_start, type); i460_remove_memory()
    293 i460_insert_memory_small_io_page(struct agp_memory *mem, off_t pg_start, int type) i460_insert_memory_small_io_page() argument
    336 i460_remove_memory_small_io_page(struct agp_memory *mem, off_t pg_start, int type) i460_remove_memory_small_io_page() argument
    400 i460_insert_memory_large_io_page(struct agp_memory *mem, off_t pg_start, int type) i460_insert_memory_large_io_page() argument
    461 i460_remove_memory_large_io_page(struct agp_memory *mem, off_t pg_start, int type) i460_remove_memory_large_io_page() argument
    500 i460_insert_memory(struct agp_memory *mem, off_t pg_start, int type) i460_insert_memory() argument
    509 i460_remove_memory(struct agp_memory *mem, off_t pg_start, int type) i460_remove_memory() argument
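
Note: i460-agp.c has two code paths because the chipset's GART I/O page size need not equal the kernel page size. With small I/O pages each kernel page covers I460_IOPAGES_PER_KPAGE GART entries, so the driver scales the caller's pg_start up (line 306); with large I/O pages several kernel pages share one GART entry, so it divides and takes the remainder (lines 414-417). A standalone sketch of just that index arithmetic, with made-up ratios (the real values depend on the configured I/O page size):

    #include <stdio.h>
    #include <sys/types.h>

    /* Hypothetical ratios, stand-ins for I460_IOPAGES_PER_KPAGE / I460_KPAGES_PER_IOPAGE. */
    #define IOPAGES_PER_KPAGE 4   /* small I/O pages: 4 GART entries per kernel page */
    #define KPAGES_PER_IOPAGE 4   /* large I/O pages: 4 kernel pages per GART entry  */

    int main(void)
    {
            off_t pg_start = 10, page_count = 7;

            /* Small I/O pages: scale the kernel-page index up to a GART-entry index. */
            off_t io_pg_start = IOPAGES_PER_KPAGE * pg_start;

            /* Large I/O pages: which large page does the range start/end in,
             * and at what kernel-page offset inside those large pages? */
            off_t start_lp     = pg_start / KPAGES_PER_IOPAGE;
            off_t end_lp       = (pg_start + page_count - 1) / KPAGES_PER_IOPAGE;
            off_t start_offset = pg_start % KPAGES_PER_IOPAGE;
            off_t end_offset   = (pg_start + page_count - 1) % KPAGES_PER_IOPAGE;

            printf("small: io_pg_start=%ld\n", (long)io_pg_start);
            printf("large: lp %ld..%ld, offsets %ld..%ld\n",
                   (long)start_lp, (long)end_lp, (long)start_offset, (long)end_offset);
            return 0;
    }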
|
H A D | sgi-agp.c |
    127 static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start, sgi_tioca_insert_memory() argument
    173 if ((pg_start + mem->page_count) > num_entries) sgi_tioca_insert_memory()
    176 j = pg_start; sgi_tioca_insert_memory()
    178 while (j < (pg_start + mem->page_count)) { sgi_tioca_insert_memory()
    189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { sgi_tioca_insert_memory()
    200 static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start, sgi_tioca_remove_memory() argument
    217 for (i = pg_start; i < (mem->page_count + pg_start); i++) { sgi_tioca_remove_memory()
|
H A D | alpha-agp.c |
    86 static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start, alpha_core_agp_insert_memory() argument
    98 if ((pg_start + mem->page_count) > num_entries) alpha_core_agp_insert_memory()
    101 status = agp->ops->bind(agp, pg_start, mem); alpha_core_agp_insert_memory()
    108 static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start, alpha_core_agp_remove_memory() argument
    114 status = agp->ops->unbind(agp, pg_start, mem); alpha_core_agp_remove_memory()
|
H A D | ati-agp.c |
    267 off_t pg_start, int type) ati_insert_memory()
    283 if ((pg_start + mem->page_count) > num_entries) ati_insert_memory()
    286 j = pg_start; ati_insert_memory()
    287 while (j < (pg_start + mem->page_count)) { ati_insert_memory()
    301 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { ati_insert_memory()
    314 static int ati_remove_memory(struct agp_memory * mem, off_t pg_start, ati_remove_memory() argument
    329 for (i = pg_start; i < (mem->page_count + pg_start); i++) { ati_remove_memory()
    266 ati_insert_memory(struct agp_memory * mem, off_t pg_start, int type) ati_insert_memory() argument
|
H A D | efficeon-agp.c |
    238 static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type) efficeon_insert_memory() argument
    245 printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count); efficeon_insert_memory()
    248 if ((pg_start + mem->page_count) > num_entries) efficeon_insert_memory()
    260 int index = pg_start + i; efficeon_insert_memory()
    287 static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type) efficeon_remove_memory() argument
    291 printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count); efficeon_remove_memory()
    295 if ((pg_start + mem->page_count) > num_entries) efficeon_remove_memory()
    301 int index = pg_start + i; efficeon_remove_memory()
|
H A D | nvidia-agp.c |
    201 static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type) nvidia_insert_memory() argument
    213 if ((pg_start + mem->page_count) > nvidia_insert_memory()
    217 for (j = pg_start; j < (pg_start + mem->page_count); j++) { nvidia_insert_memory()
    226 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { nvidia_insert_memory()
    240 static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type) nvidia_remove_memory() argument
    253 for (i = pg_start; i < (mem->page_count + pg_start); i++) nvidia_remove_memory()
|
H A D | sworks-agp.c |
    320 off_t pg_start, int type) serverworks_insert_memory()
    331 if ((pg_start + mem->page_count) > num_entries) { serverworks_insert_memory()
    335 j = pg_start; serverworks_insert_memory()
    336 while (j < (pg_start + mem->page_count)) { serverworks_insert_memory()
    349 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { serverworks_insert_memory()
    360 static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start, serverworks_remove_memory() argument
    374 for (i = pg_start; i < (mem->page_count + pg_start); i++) { serverworks_remove_memory()
    319 serverworks_insert_memory(struct agp_memory *mem, off_t pg_start, int type) serverworks_insert_memory() argument
|
H A D | generic.c |
    406 * @pg_start: an offset into the graphics aperture translation table
    411 int agp_bind_memory(struct agp_memory *curr, off_t pg_start) agp_bind_memory() argument
    427 ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); agp_bind_memory()
    433 curr->pg_start = pg_start; agp_bind_memory()
    463 ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); agp_unbind_memory()
    469 curr->pg_start = 0; agp_unbind_memory()
    1033 int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) agp_generic_insert_memory() argument
    1084 if (((pg_start + mem->page_count) > num_entries) || agp_generic_insert_memory()
    1085 ((pg_start + mem->page_count) < pg_start)) agp_generic_insert_memory()
    1088 j = pg_start; agp_generic_insert_memory()
    1090 while (j < (pg_start + mem->page_count)) { agp_generic_insert_memory()
    1101 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { agp_generic_insert_memory()
    1115 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) agp_generic_remove_memory() argument
    1132 if (((pg_start + mem->page_count) > num_entries) || agp_generic_remove_memory()
    1133 ((pg_start + mem->page_count) < pg_start)) agp_generic_remove_memory()
    1143 for (i = pg_start; i < (mem->page_count + pg_start); i++) { agp_generic_remove_memory()
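
Note: the agp_generic_insert_memory() hits above show the shape shared by most of the per-chipset insert_memory backends in this directory: reject a range whose end overflows the GATT or wraps back past pg_start, verify the target entries are unused, then write one translation entry per page starting at pg_start. A minimal userspace sketch of that shape over a plain array; gatt, num_entries and make_pte are hypothetical stand-ins, not the kernel functions themselves:

    #include <errno.h>      /* EINVAL, EBUSY */
    #include <stddef.h>     /* size_t */
    #include <sys/types.h>  /* off_t */

    /* Hypothetical PTE encoding; a real driver also masks in caching/valid bits. */
    static unsigned long make_pte(unsigned long phys) { return phys | 1UL; }

    static int sketch_insert(unsigned long *gatt, size_t num_entries,
                             const unsigned long *page_phys, size_t page_count,
                             off_t pg_start)
    {
            size_t i, j;

            /* Range must fit in the table and must not wrap around. */
            if ((pg_start + page_count) > num_entries ||
                (pg_start + page_count) < (size_t)pg_start)
                    return -EINVAL;

            /* Every target slot must currently be empty. */
            for (j = pg_start; j < pg_start + page_count; j++)
                    if (gatt[j] != 0)
                            return -EBUSY;

            /* Write one translation entry per page, starting at pg_start. */
            for (i = 0, j = pg_start; i < page_count; i++, j++)
                    gatt[j] = make_pte(page_phys[i]);

            return 0;
    }

The remove path is the mirror image: the same bounds check, then clearing the entries from pg_start to pg_start + page_count.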
|
H A D | amd-k7-agp.c |
    283 static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type) amd_insert_memory() argument
    295 if ((pg_start + mem->page_count) > num_entries) amd_insert_memory()
    298 j = pg_start; amd_insert_memory()
    299 while (j < (pg_start + mem->page_count)) { amd_insert_memory()
    312 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { amd_insert_memory()
    325 static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type) amd_remove_memory() argument
    335 for (i = pg_start; i < (mem->page_count + pg_start); i++) { amd_remove_memory()
|
H A D | parisc-agp.c |
    125 parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) parisc_agp_insert_memory() argument
    137 io_pg_start = info->io_pages_per_kpage * pg_start; parisc_agp_insert_memory()
    174 parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) parisc_agp_remove_memory() argument
    184 io_pg_start = info->io_pages_per_kpage * pg_start; parisc_agp_remove_memory()
|
H A D | intel-gtt.c |
    211 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, i810_insert_dcache_entries() argument
    216 if ((pg_start + mem->page_count) i810_insert_dcache_entries()
    223 for (i = pg_start; i < (pg_start + mem->page_count); i++) { i810_insert_dcache_entries()
    842 unsigned int pg_start, intel_gtt_insert_sg_entries()
    849 j = pg_start; intel_gtt_insert_sg_entries()
    882 off_t pg_start, int type) intel_fake_agp_insert_entries()
    894 return i810_insert_dcache_entries(mem, pg_start, type); intel_fake_agp_insert_entries()
    899 if (pg_start + mem->page_count > intel_private.gtt_total_entries) intel_fake_agp_insert_entries()
    918 intel_gtt_insert_sg_entries(&st, pg_start, type); intel_fake_agp_insert_entries()
    922 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, intel_fake_agp_insert_entries()
    947 off_t pg_start, int type) intel_fake_agp_remove_entries()
    952 intel_gtt_clear_range(pg_start, mem->page_count); intel_fake_agp_remove_entries()
    841 intel_gtt_insert_sg_entries(struct sg_table *st, unsigned int pg_start, unsigned int flags) intel_gtt_insert_sg_entries() argument
    881 intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) intel_fake_agp_insert_entries() argument
    946 intel_fake_agp_remove_entries(struct agp_memory *mem, off_t pg_start, int type) intel_fake_agp_remove_entries() argument
|
H A D | compat_ioctl.h |
    59 compat_off_t pg_start; /* starting page to populate */ member in struct:agp_segment32
    82 compat_off_t pg_start; /* starting page to populate */ member in struct:agp_bind32
|
H A D | compat_ioctl.c |
    119 ksegment[seg].pg_start = usegment[seg].pg_start; compat_agpioc_reserve_wrap()
    186 return agp_bind_memory(memory, bind_info.pg_start); compat_agpioc_bind_wrap()
|
H A D | uninorth-agp.c |
    149 static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type) uninorth_insert_memory() argument
    171 if ((pg_start + mem->page_count) > num_entries) uninorth_insert_memory()
    174 gp = (u32 *) &agp_bridge->gatt_table[pg_start]; uninorth_insert_memory()
    199 int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) uninorth_remove_memory() argument
    217 gp = (u32 *) &agp_bridge->gatt_table[pg_start]; uninorth_remove_memory()
|
H A D | frontend.c |
    106 off_t pg_start; agp_find_seg_in_client() local
    109 pg_start = offset / 4096; agp_find_seg_in_client()
    115 if ((seg[i].pg_start == pg_start) && agp_find_seg_in_client()
    179 seg[i].pg_start = user_seg[i].pg_start; agp_create_segment()
    929 return agp_bind_memory(memory, bind_info.pg_start); agpioc_bind_wrap()
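
Note: in agp_find_seg_in_client() a userspace byte offset is turned into a page index with pg_start = offset / 4096 (line 109) before the client's segment list is searched, i.e. the frontend hard-codes the 4 KiB AGP page granularity. A trivial worked example of that conversion:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            off_t offset   = 0x23000;           /* byte offset supplied by userspace */
            off_t pg_start = offset / 4096;     /* 0x23000 / 4096 = 35 */

            printf("pg_start = %ld\n", (long)pg_start);
            return 0;
    }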
|
H A D | hp-agp.c |
    332 hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type) hp_zx1_insert_memory() argument
    344 io_pg_start = hp->io_pages_per_kpage * pg_start; hp_zx1_insert_memory()
    379 hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type) hp_zx1_remove_memory() argument
    389 io_pg_start = hp->io_pages_per_kpage * pg_start; hp_zx1_remove_memory()
|
H A D | agp.h |
    197 int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
    198 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
|
H A D | amd64-agp.c |
    44 static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) amd64_insert_memory() argument
    63 if (((unsigned long)pg_start + mem->page_count) > num_entries) amd64_insert_memory()
    66 j = pg_start; amd64_insert_memory()
    69 while (j < (pg_start + mem->page_count)) { amd64_insert_memory()
    80 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { amd64_insert_memory()
|
/linux-4.1.27/include/drm/ |
H A D | intel-gtt.h | 17 unsigned int pg_start,
|
/linux-4.1.27/include/linux/ |
H A D | agpgart.h |
    54 off_t pg_start; /* starting page to populate */ member in struct:agp_segment
    60 off_t pg_start; member in struct:agp_segment_priv
    83 off_t pg_start; /* starting page to populate */ member in struct:agp_bind
|
H A D | agp_backend.h | 77 off_t pg_start; member in struct:agp_memory
|
/linux-4.1.27/arch/arc/include/asm/ |
H A D | ptrace.h |
    96 unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
    97 (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
|
/linux-4.1.27/arch/alpha/kernel/ |
H A D | core_titan.c |
    586 long pg_start; titan_agp_setup()
    604 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, titan_agp_setup()
    606 if (aper->pg_start < 0) { titan_agp_setup()
    613 aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
    626 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); titan_agp_cleanup()
    630 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); titan_agp_cleanup()
    631 status = iommu_release(aper->arena, aper->pg_start, titan_agp_cleanup()
    684 titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
    687 return iommu_bind(aper->arena, aper->pg_start + pg_start, titan_agp_unbind_memory() argument
    692 titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
    695 return iommu_unbind(aper->arena, aper->pg_start + pg_start, titan_agp_translate()
    581 long pg_start; global() member in struct:titan_agp_aperture
    679 titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) titan_agp_bind_memory() argument
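
Note: two different pg_start values are combined here (and in the marvel backend below). The AGP pg_start passed to the bind/unbind hooks is relative to the start of the AGP aperture, while aper->pg_start records where that aperture was reserved inside the IOMMU arena (line 604), so the hooks add the two to get an absolute arena page index (line 687). A tiny illustration with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            long aperture_arena_start = 4096;  /* like aper->pg_start: aperture begins at arena page 4096 */
            long pg_start             = 16;    /* AGP page index requested through agp_bind_memory()      */

            /* Absolute page index inside the IOMMU arena, as passed to iommu_bind(). */
            long arena_page = aperture_arena_start + pg_start;

            printf("arena page = %ld\n", arena_page);   /* prints 4112 */
            return 0;
    }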
|
H A D | core_marvel.c |
    906 long pg_start; marvel_agp_setup()
    923 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, marvel_agp_setup()
    926 if (aper->pg_start < 0) { marvel_agp_setup()
    933 aper->arena->dma_base + aper->pg_start * PAGE_SIZE; marvel_agp_cleanup()
    946 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); marvel_agp_cleanup()
    950 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); marvel_agp_configure()
    951 status = iommu_release(aper->arena, aper->pg_start, marvel_agp_configure()
    1026 marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) marvel_agp_unbind_memory()
    1029 return iommu_bind(aper->arena, aper->pg_start + pg_start,
    1034 marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) marvel_agp_translate()
    1037 return iommu_unbind(aper->arena, aper->pg_start + pg_start, marvel_agp_translate()
    895 long pg_start; global() member in struct:marvel_agp_aperture
    1015 marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) marvel_agp_bind_memory() argument
    1023 marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) marvel_agp_unbind_memory() argument
|
H A D | pci_iommu.c |
    877 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_release()
    887 for(i = pg_start; i < pg_start + pg_count; i++) iommu_release()
    891 iommu_arena_free(arena, pg_start, pg_count);
    896 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, iommu_bind()
    909 for(j = pg_start; j < pg_start + pg_count; j++) { iommu_bind()
    916 for(i = 0, j = pg_start; i < pg_count; i++, j++) iommu_bind()
    925 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_unbind()
    932 p = arena->ptes + pg_start; iommu_unbind()
    875 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_release() argument
    894 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, struct page **pages) iommu_bind() argument
    923 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_unbind() argument
|
/linux-4.1.27/include/uapi/linux/ |
H A D | agpgart.h |
    80 __kernel_off_t pg_start; /* starting page to populate */ member in struct:_agp_segment
    103 __kernel_off_t pg_start;/* starting page to populate */ member in struct:_agp_bind
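
Note: these are the userspace-visible structures; struct _agp_segment describes a reserved aperture range starting at pg_start, and struct _agp_bind names previously allocated AGP memory by key and the pg_start at which to bind it. A hedged sketch of how a client might fill the bind structure, assuming the usual /dev/agpgart + AGPIOC_BIND flow from this header (the acquire/setup/allocate steps that normally precede it are omitted):

    /* Sketch only: bind previously allocated AGP memory 'key' at page index pg_start. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <linux/agpgart.h>

    int bind_at(int key, off_t pg_start)
    {
            struct _agp_bind bind;
            int fd = open("/dev/agpgart", O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&bind, 0, sizeof(bind));
            bind.key      = key;        /* tag returned when the memory was allocated */
            bind.pg_start = pg_start;   /* starting page to populate */

            if (ioctl(fd, AGPIOC_BIND, &bind) < 0)
                    perror("AGPIOC_BIND");

            close(fd);
            return 0;
    }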
|
/linux-4.1.27/fs/f2fs/ |
H A D | file.c |
    689 int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) truncate_hole() argument
    694 for (index = pg_start; index < pg_end; index++) { truncate_hole()
    714 pgoff_t pg_start, pg_end; punch_hole() local
    731 pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; punch_hole()
    737 if (pg_start == pg_end) { punch_hole()
    738 fill_zero(inode, pg_start, off_start, punch_hole()
    742 fill_zero(inode, pg_start++, off_start, punch_hole()
    747 if (pg_start < pg_end) { punch_hole()
    754 blk_start = pg_start << PAGE_CACHE_SHIFT; punch_hole()
    760 ret = truncate_hole(inode, pg_start, pg_end); punch_hole()
    772 pgoff_t index, pg_start, pg_end; expand_inode_data() local
    789 pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; expand_inode_data()
    797 for (index = pg_start; index <= pg_end; index++) { expand_inode_data()
    808 if (pg_start == pg_end) expand_inode_data()
    810 else if (index == pg_start && off_start) expand_inode_data()
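
Note: in f2fs punch_hole() the byte range is split into page-cache coordinates: pg_start and pg_end are the start and end offsets shifted down by PAGE_CACHE_SHIFT (line 731), and as the lines above suggest, a hole contained in a single page (pg_start == pg_end) is simply zeroed, while otherwise the partial head and tail pages are zeroed and the whole pages in between are truncated. A worked example of that split, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long long offset = 5000, len = 10000;     /* punch bytes [5000, 15000) */

            unsigned long long pg_start  = offset >> PAGE_SHIFT;               /* 1    */
            unsigned long long pg_end    = (offset + len) >> PAGE_SHIFT;       /* 3    */
            unsigned long long off_start = offset & (PAGE_SIZE - 1);           /* 904  */
            unsigned long long off_end   = (offset + len) & (PAGE_SIZE - 1);   /* 2712 */

            printf("zero tail of page %llu from offset %llu, zero head of page %llu up to %llu,\n"
                   "drop whole pages %llu..%llu\n",
                   pg_start, off_start, pg_end, off_end, pg_start + 1, pg_end - 1);
            return 0;
    }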
|
/linux-4.1.27/drivers/hv/ |
H A D | hv_balloon.c |
    746 static unsigned long handle_pg_range(unsigned long pg_start, handle_pg_range() argument
    749 unsigned long start_pfn = pg_start; handle_pg_range()
    825 static unsigned long process_hot_add(unsigned long pg_start, process_hot_add() argument
    836 if (pfn_covered(pg_start, pfn_cnt)) process_hot_add()
    853 ha_region->covered_end_pfn = pg_start; process_hot_add()
    862 return handle_pg_range(pg_start, pfn_cnt); process_hot_add()
    871 unsigned long pg_start, pfn_cnt; hot_add_req() local
    882 pg_start = dm->ha_wrk.ha_page_range.finfo.start_page; hot_add_req()
    899 region_start = pg_start; hot_add_req()
    904 region_start = (pg_start / HA_CHUNK) * HA_CHUNK; hot_add_req()
    911 resp.page_count = process_hot_add(pg_start, pfn_cnt, hot_add_req()
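
Note: here pg_start is the first PFN of the range the host asks to hot-add; when no existing hot-add region covers it, hot_add_req() rounds the region start down to a HA_CHUNK boundary (line 904), apparently because memory is onlined in HA_CHUNK-sized blocks. A small illustration of that rounding, with an assumed chunk size:

    #include <stdio.h>

    #define HA_CHUNK (32 * 1024)   /* assumed value: 32K pages, i.e. 128 MiB with 4 KiB pages */

    int main(void)
    {
            unsigned long pg_start     = 1081344 + 500;   /* some PFN inside a chunk      */
            unsigned long region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

            printf("pg_start=%lu rounds down to region_start=%lu\n", pg_start, region_start);
            return 0;
    }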
|
/linux-4.1.27/tools/vm/ |
H A D | page-types.c |
    178 static unsigned long pg_start[MAX_VMAS]; variable
    651 if (pg_start[i] >= end) walk_task()
    654 start = max_t(unsigned long, pg_start[i], index); walk_task()
    803 pg_start[nr_vmas] = vm_start / page_size; parse_pid()
|
/linux-4.1.27/fs/9p/ |
H A D | vfs_file.c |
    422 unsigned long pg_start, pg_end; v9fs_file_write_iter() local
    423 pg_start = origin >> PAGE_CACHE_SHIFT; v9fs_file_write_iter()
    427 pg_start, pg_end); v9fs_file_write_iter()
|
/linux-4.1.27/fs/fuse/ |
H A D | inode.c |
    333 pgoff_t pg_start; fuse_reverse_inval_inode() local
    342 pg_start = offset >> PAGE_CACHE_SHIFT; fuse_reverse_inval_inode()
    348 pg_start, pg_end); fuse_reverse_inval_inode()
|