Searched refs:pages (Results 1 - 200 of 2041) sorted by relevance


/linux-4.1.27/net/ceph/
pagevec.c
13 * build a vector of user pages
18 struct page **pages; ceph_get_direct_page_vector() local
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); ceph_get_direct_page_vector()
23 if (!pages) ceph_get_direct_page_vector()
29 num_pages - got, write_page, 0, pages + got); ceph_get_direct_page_vector()
37 return pages; ceph_get_direct_page_vector()
40 ceph_put_page_vector(pages, got, false); ceph_get_direct_page_vector()
45 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) ceph_put_page_vector() argument
51 set_page_dirty_lock(pages[i]); ceph_put_page_vector()
52 put_page(pages[i]); ceph_put_page_vector()
54 if (is_vmalloc_addr(pages)) ceph_put_page_vector()
55 vfree(pages); ceph_put_page_vector()
57 kfree(pages); ceph_put_page_vector()
61 void ceph_release_page_vector(struct page **pages, int num_pages) ceph_release_page_vector() argument
66 __free_pages(pages[i], 0); ceph_release_page_vector()
67 kfree(pages); ceph_release_page_vector()
72 * allocate a vector new pages
76 struct page **pages; ceph_alloc_page_vector() local
79 pages = kmalloc(sizeof(*pages) * num_pages, flags); ceph_alloc_page_vector()
80 if (!pages) ceph_alloc_page_vector()
83 pages[i] = __page_cache_alloc(flags); ceph_alloc_page_vector()
84 if (pages[i] == NULL) { ceph_alloc_page_vector()
85 ceph_release_page_vector(pages, i); ceph_alloc_page_vector()
89 return pages; ceph_alloc_page_vector()
96 int ceph_copy_user_to_page_vector(struct page **pages, ceph_copy_user_to_page_vector() argument
107 bad = copy_from_user(page_address(pages[i]) + po, data, l); ceph_copy_user_to_page_vector()
122 void ceph_copy_to_page_vector(struct page **pages, ceph_copy_to_page_vector() argument
133 memcpy(page_address(pages[i]) + po, data, l); ceph_copy_to_page_vector()
145 void ceph_copy_from_page_vector(struct page **pages, ceph_copy_from_page_vector() argument
156 memcpy(data, page_address(pages[i]) + po, l); ceph_copy_from_page_vector()
172 void ceph_zero_page_vector_range(int off, int len, struct page **pages) ceph_zero_page_vector_range() argument
183 dout("zeroing %d %p head from %d\n", i, pages[i], ceph_zero_page_vector_range()
185 zero_user_segment(pages[i], off, end); ceph_zero_page_vector_range()
190 dout("zeroing %d %p len=%d\n", i, pages[i], len); ceph_zero_page_vector_range()
191 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); ceph_zero_page_vector_range()
197 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len); ceph_zero_page_vector_range()
198 zero_user_segment(pages[i], 0, len); ceph_zero_page_vector_range()
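Taken together, the pagevec.c excerpts describe a small allocate/use/free API. A minimal sketch of the intended usage, assuming the v4.1 prototypes in include/linux/ceph/libceph.h and that ceph_alloc_page_vector() reports failure via ERR_PTR() as in that release; the surrounding I/O code is elided and num_pages is an arbitrary example value:

/* Allocate a kernel page vector, use it, then free pages and array together. */
static int example_use_page_vector(void)
{
        int num_pages = 4;
        struct page **pages;

        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* ... fill or read the pages, e.g. with ceph_copy_to_page_vector() ... */

        ceph_release_page_vector(pages, num_pages);   /* frees the pages and the array */
        return 0;
}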
pagelist.c
77 /* Allocate enough pages for a pagelist to append the given amount
86 space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */ ceph_pagelist_reserve()
99 /* Free any pages that have been preallocated. */ ceph_pagelist_free_reserve()
124 /* Truncate a pagelist to the given point. Move extra pages to reserve.
/linux-4.1.27/include/trace/events/
tlb.h
38 TP_PROTO(int reason, unsigned long pages),
39 TP_ARGS(reason, pages),
45 __field(unsigned long, pages)
50 __entry->pages = pages;
53 TP_printk("pages:%ld reason:%s (%d)",
54 __entry->pages,
/linux-4.1.27/arch/powerpc/perf/
hv-24x7-catalog.h
11 __be32 length; /* In 4096 byte pages */
15 __be16 schema_data_offs; /* in 4096 byte pages */
16 __be16 schema_data_len; /* in 4096 byte pages */
23 __be16 group_data_offs; /* in 4096 byte pages */
24 __be16 group_data_len; /* in 4096 byte pages */
27 __be16 formula_data_offs; /* in 4096 byte pages */
28 __be16 formula_data_len; /* in 4096 byte pages */
/linux-4.1.27/lib/
show_mem.c
39 printk("%lu pages RAM\n", total);
40 printk("%lu pages HighMem/MovableOnly\n", highmem);
42 printk("%lu pages reserved\n", (reserved - totalcma_pages));
43 printk("%lu pages cma reserved\n", totalcma_pages);
45 printk("%lu pages reserved\n", reserved);
48 printk("%lu pages in pagetable cache\n",
52 printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
/linux-4.1.27/mm/
percpu-vm.c
23 * pcpu_get_pages - get temp pages array
31 * Pointer to temp pages array on success.
35 static struct page **pages; pcpu_get_pages() local
36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); pcpu_get_pages()
40 if (!pages) pcpu_get_pages()
41 pages = pcpu_mem_zalloc(pages_size); pcpu_get_pages()
42 return pages; pcpu_get_pages()
46 * pcpu_free_pages - free pages which were allocated for @chunk
47 * @chunk: chunk pages were allocated for
48 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
52 * Free pages [@page_start and @page_end) in @pages for all units.
53 * The pages were allocated for @chunk.
56 struct page **pages, int page_start, int page_end) pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; for_each_possible_cpu()
72 * pcpu_alloc_pages - allocates pages for @chunk
74 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
78 * Allocate pages [@page_start,@page_end) into @pages for all units.
80 * content of @pages and will pass it verbatim to pcpu_map_pages().
83 struct page **pages, int page_start, int page_end) pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; for_each_possible_cpu()
102 __free_page(pages[pcpu_page_idx(cpu, i)]);
108 __free_page(pages[pcpu_page_idx(tcpu, i)]); for_each_possible_cpu()
139 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
141 * @pages: pages array which can be used to pass information to free
145 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
146 * Corresponding elements in @pages were cleared by the caller and can
152 struct page **pages, int page_start, int page_end) pcpu_unmap_pages()
163 pages[pcpu_page_idx(cpu, i)] = page; for_each_possible_cpu()
191 static int __pcpu_map_pages(unsigned long addr, struct page **pages, __pcpu_map_pages() argument
195 PAGE_KERNEL, pages); __pcpu_map_pages()
199 * pcpu_map_pages - map pages into a pcpu_chunk
201 * @pages: pages array containing pages to be mapped
205 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
213 struct page **pages, int page_start, int page_end) pcpu_map_pages()
220 &pages[pcpu_page_idx(cpu, page_start)], for_each_possible_cpu()
226 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], for_each_possible_cpu()
267 * For each cpu, populate and map pages [@page_start,@page_end) into
276 struct page **pages; pcpu_populate_chunk() local
278 pages = pcpu_get_pages(chunk); pcpu_populate_chunk()
279 if (!pages) pcpu_populate_chunk()
282 if (pcpu_alloc_pages(chunk, pages, page_start, page_end)) pcpu_populate_chunk()
285 if (pcpu_map_pages(chunk, pages, page_start, page_end)) { pcpu_populate_chunk()
286 pcpu_free_pages(chunk, pages, page_start, page_end); pcpu_populate_chunk()
300 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
309 struct page **pages; pcpu_depopulate_chunk() local
313 * successful population attempt so the temp pages array must pcpu_depopulate_chunk()
316 pages = pcpu_get_pages(chunk); pcpu_depopulate_chunk()
317 BUG_ON(!pages); pcpu_depopulate_chunk()
322 pcpu_unmap_pages(chunk, pages, page_start, page_end); pcpu_depopulate_chunk()
326 pcpu_free_pages(chunk, pages, page_start, page_end); pcpu_depopulate_chunk()
55 pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) pcpu_free_pages() argument
82 pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) pcpu_alloc_pages() argument
151 pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) pcpu_unmap_pages() argument
212 pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) pcpu_map_pages() argument
kmemcheck.c
11 int pages; kmemcheck_alloc_shadow() local
14 pages = 1 << order; kmemcheck_alloc_shadow()
28 for(i = 0; i < pages; ++i) kmemcheck_alloc_shadow()
36 kmemcheck_hide_pages(page, pages); kmemcheck_alloc_shadow()
42 int pages; kmemcheck_free_shadow() local
48 pages = 1 << order; kmemcheck_free_shadow()
50 kmemcheck_show_pages(page, pages); kmemcheck_free_shadow()
54 for(i = 0; i < pages; ++i) kmemcheck_free_shadow()
103 int pages; kmemcheck_pagealloc_alloc() local
108 pages = 1 << order; kmemcheck_pagealloc_alloc()
111 * NOTE: We choose to track GFP_ZERO pages too; in fact, they kmemcheck_pagealloc_alloc()
120 kmemcheck_mark_initialized_pages(page, pages); kmemcheck_pagealloc_alloc()
122 kmemcheck_mark_uninitialized_pages(page, pages); kmemcheck_pagealloc_alloc()
percpu-km.c
52 struct page *pages; pcpu_create_chunk() local
59 pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages)); pcpu_create_chunk()
60 if (!pages) { pcpu_create_chunk()
66 pcpu_set_page_chunk(nth_page(pages, i), chunk); pcpu_create_chunk()
68 chunk->data = pages; pcpu_create_chunk()
69 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0]; pcpu_create_chunk()
106 printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n", pcpu_verify_alloc_info()
process_vm_access.c
25 * process_vm_rw_pages - read/write pages from task specified
26 * @pages: array of pointers to pages we want to copy
33 static int process_vm_rw_pages(struct page **pages, process_vm_rw_pages() argument
41 struct page *page = *pages++; process_vm_rw_pages()
62 /* Maximum number of pages kmalloc'd to hold struct page's during copy */
66 * process_vm_rw_single_vec - read/write pages from task specified
70 * @process_pages: struct pages area that can store at least
90 / sizeof(struct pages *); process_vm_rw_single_vec()
98 int pages = min(nr_pages, max_pages_per_loop); process_vm_rw_single_vec() local
101 /* Get the pages we're interested in */ process_vm_rw_single_vec()
102 pages = get_user_pages_unlocked(task, mm, pa, pages, process_vm_rw_single_vec()
104 if (pages <= 0) process_vm_rw_single_vec()
107 bytes = pages * PAGE_SIZE - start_offset; process_vm_rw_single_vec()
116 nr_pages -= pages; process_vm_rw_single_vec()
117 pa += pages * PAGE_SIZE; process_vm_rw_single_vec()
118 while (pages) process_vm_rw_single_vec()
119 put_page(process_pages[--pages]); process_vm_rw_single_vec()
125 /* Maximum number of entries for process pages array
130 * process_vm_rw_core - core of reading/writing pages from task specified
158 * Work out how many pages of struct pages we're going to need process_vm_rw_core()
177 2 pages worth */ process_vm_rw_core()
179 sizeof(struct pages *)*nr_pages), process_vm_rw_core()
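The process_vm_access.c excerpts are the kernel side of the process_vm_readv(2)/process_vm_writev(2) syscalls. A hedged userspace sketch (glibc 2.15 or later); pid and remote_addr are caller-supplied assumptions:

#include <sys/types.h>
#include <sys/uio.h>

/* Copy len bytes from another process's address space into buf. */
static ssize_t read_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
        struct iovec local  = { .iov_base = buf,         .iov_len = len };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

        return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}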
gup.c
24 * has touched so far, we don't want to allocate unnecessary pages or no_page_table()
106 lru_add_drain(); /* push cached pages to LRU */ follow_page_pte()
222 /* user gate pages are read-only */ get_gate_page()
339 * Anon pages in shared mappings are surprising: now check_vma_flags()
361 * __get_user_pages() - pin user pages in memory
365 * @nr_pages: number of pages from start to pin
367 * @pages: array that receives pointers to the pages pinned.
369 * only intends to ensure the pages are faulted in.
374 * Returns number of pages pinned. This may be fewer than the number
375 * requested. If nr_pages is 0 or negative, returns 0. If no pages
402 * or mmap_sem contention, and if waiting is needed to pin all pages,
418 unsigned int gup_flags, struct page **pages, __get_user_pages()
428 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); __get_user_pages()
450 pages ? &pages[i] : NULL); __get_user_pages()
460 i = follow_hugetlb_page(mm, vma, pages, vmas, __get_user_pages()
468 * If we have a pending SIGKILL, don't keep faulting pages and __get_user_pages()
495 if (pages) { __get_user_pages()
496 pages[i] = page; __get_user_pages()
583 struct page **pages, __get_user_pages_locked()
598 if (pages) __get_user_pages_locked()
608 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, __get_user_pages_locked()
620 if (!pages) __get_user_pages_locked()
637 pages += ret; __get_user_pages_locked()
649 pages, NULL, NULL); __get_user_pages_locked()
660 pages++; __get_user_pages_locked()
683 * get_user_pages(tsk, mm, ..., pages, NULL);
691 * get_user_pages_locked(tsk, mm, ..., pages, &locked);
697 int write, int force, struct page **pages, get_user_pages_locked()
701 pages, NULL, locked, true, FOLL_TOUCH); get_user_pages_locked()
712 * according to the parameters "pages", "write", "force"
717 int write, int force, struct page **pages, __get_user_pages_unlocked()
724 pages, NULL, &locked, false, gup_flags); __get_user_pages_unlocked()
735 * get_user_pages(tsk, mm, ..., pages, NULL);
740 * get_user_pages_unlocked(tsk, mm, ..., pages);
750 int write, int force, struct page **pages) get_user_pages_unlocked()
753 force, pages, FOLL_TOUCH); get_user_pages_unlocked()
758 * get_user_pages() - pin user pages in memory
763 * @nr_pages: number of pages from start to pin
764 * @write: whether pages will be written to by the caller
767 * @pages: array that receives pointers to the pages pinned.
769 * only intends to ensure the pages are faulted in.
773 * Returns number of pages pinned. This may be fewer than the number
774 * requested. If nr_pages is 0 or negative, returns 0. If no pages
801 * addresses. The pages may be submitted for DMA to devices or accessed via
814 int force, struct page **pages, struct vm_area_struct **vmas) get_user_pages()
817 pages, vmas, NULL, false, FOLL_TOUCH); get_user_pages()
822 * populate_vma_page_range() - populate a range of pages in the vma.
828 * This takes care of mlocking the pages too if VM_LOCKED is set.
878 * __mm_populate - populate and/or mlock pages within a range of address space.
898 * We want to fault in pages for [nstart; end) address range. __mm_populate()
919 * Now fault in a range of pages. populate_vma_page_range() __mm_populate()
920 * double checks the vma flags, so that it won't mlock pages __mm_populate()
971 * get_user_pages_fast attempts to pin user pages by walking the page
973 * protected from page table pages being freed from under it, and should
978 * pages are freed. This is unsuitable for architectures that do not need
981 * Another way to achieve this is to batch up page table containing pages
983 * pages. Disabling interrupts will allow the fast_gup walker to both block
991 * pages containing page tables.
1008 int write, struct page **pages, int *nr) gup_pte_range()
1044 pages[*nr] = page; gup_pte_range()
1063 * __get_user_pages_fast implementation that can pin pages. Thus it's still
1067 int write, struct page **pages, int *nr) gup_pte_range()
1074 unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd()
1088 pages[*nr] = page; gup_huge_pmd()
1107 * Any tail pages need their mapcount reference taken before we gup_huge_pmd()
1109 * they are split into base pages). gup_huge_pmd()
1121 unsigned long end, int write, struct page **pages, int *nr) gup_huge_pud()
1135 pages[*nr] = page; gup_huge_pud()
1164 struct page **pages, int *nr) gup_huge_pgd()
1178 pages[*nr] = page; gup_huge_pgd()
1206 int write, struct page **pages, int *nr) gup_pmd_range()
1229 pages, nr)) gup_pmd_range()
1238 PMD_SHIFT, next, write, pages, nr)) gup_pmd_range()
1240 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) gup_pmd_range()
1248 int write, struct page **pages, int *nr) gup_pud_range()
1262 pages, nr)) gup_pud_range()
1266 PUD_SHIFT, next, write, pages, nr)) gup_pud_range()
1268 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) gup_pud_range()
1280 struct page **pages) __get_user_pages_fast()
1301 * With interrupts disabled, we block page table pages from being __get_user_pages_fast()
1319 pages, &nr)) __get_user_pages_fast()
1323 PGDIR_SHIFT, next, write, pages, &nr)) __get_user_pages_fast()
1325 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) __get_user_pages_fast()
1334 * get_user_pages_fast() - pin user pages in memory
1336 * @nr_pages: number of pages from start to pin
1337 * @write: whether pages will be written to
1338 * @pages: array that receives pointers to the pages pinned.
1341 * Attempt to pin user pages in memory without taking mm->mmap_sem.
1345 * Returns number of pages pinned. This may be fewer than the number
1346 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1350 struct page **pages) get_user_pages_fast()
1356 nr = __get_user_pages_fast(start, nr_pages, write, pages); get_user_pages_fast()
1360 /* Try to get the remaining pages with get_user_pages */ get_user_pages_fast()
1362 pages += nr; get_user_pages_fast()
1365 nr_pages - nr, write, 0, pages); get_user_pages_fast()
416 __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) __get_user_pages() argument
578 __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas, int *locked, bool notify_drop, unsigned int flags) __get_user_pages_locked() argument
695 get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked) get_user_pages_locked() argument
715 __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, unsigned int gup_flags) __get_user_pages_unlocked() argument
748 get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages) get_user_pages_unlocked() argument
812 get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) get_user_pages() argument
1007 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
1066 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
1073 gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd() argument
1120 gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pud() argument
1162 gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pgd() argument
1205 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range() argument
1247 gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pud_range() argument
1279 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) __get_user_pages_fast() argument
1349 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) get_user_pages_fast() argument
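The gup.c excerpts document the pin/unpin contract. A minimal in-kernel sketch against the v4.1 prototype shown above, get_user_pages_fast(start, nr_pages, write, pages); the user address and the page count of 16 are example values, and error handling is reduced to the essentials:

#include <linux/mm.h>

/* Pin up to 16 user pages read-only, then drop every reference taken. */
static int example_pin_user_range(unsigned long user_addr)
{
        struct page *pages[16];
        int i, pinned;

        pinned = get_user_pages_fast(user_addr, 16, 0 /* read-only */, pages);
        if (pinned <= 0)
                return pinned ? pinned : -EFAULT;

        for (i = 0; i < pinned; i++) {
                /* ... access page contents, e.g. via kmap(pages[i]) ... */
                put_page(pages[i]);   /* release the pin taken above */
        }
        return pinned;
}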
page_counter.c
16 * page_counter_cancel - take pages out of the local counter
18 * @nr_pages: number of pages to cancel
30 * page_counter_charge - hierarchically charge pages
32 * @nr_pages: number of pages to charge
54 * page_counter_try_charge - try to hierarchically charge pages
56 * @nr_pages: number of pages to charge
112 * page_counter_uncharge - hierarchically uncharge pages
114 * @nr_pages: number of pages to uncharge
125 * page_counter_limit - limit the number of pages allowed
129 * Returns 0 on success, -EBUSY if the current number of pages on the
170 * @nr_pages: returns the result in number of pages
page_isolation.c
33 * number of pages in a range that are held by the balloon set_migratetype_isolate()
34 * driver to shrink memory. If all the pages are accounted for set_migratetype_isolate()
37 * pages reported as "can be isolated" should be isolated(freed) set_migratetype_isolate()
46 * We just check MOVABLE pages. set_migratetype_isolate()
54 * removable-by-driver pages reported by notifier, we'll fail. set_migratetype_isolate()
95 * these pages to be merged. unset_migratetype_isolate()
144 * start_isolate_page_range() -- make page-allocation-type of range of pages
150 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
151 * the range will never be allocated. Any free pages and pages freed in the
188 * Make isolated pages available again.
208 * Test all pages in the range is free(means isolated) or not.
209 * all pages in [start_pfn...end_pfn) must be in the same zone.
212 * Returns 1 if all pages in the range are isolated.
229 * some free pages could be in MIGRATE_MOVABLE list __test_page_isolated_in_pageblock()
271 * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages test_pages_isolated()
283 /* Check all pages are free or marked as ISOLATED */ test_pages_isolated()
mprotect.c
67 unsigned long pages = 0; change_pte_range() local
82 * pages. See similar comment in change_huge_pmd. change_pte_range()
101 /* Avoid taking write faults for known dirty pages */ change_pte_range()
108 pages++; change_pte_range()
124 pages++; change_pte_range()
131 return pages; change_pte_range()
141 unsigned long pages = 0; change_pmd_range() local
168 pages += HPAGE_PMD_NR; change_pmd_range()
180 pages += this_pages; change_pmd_range()
188 return pages; change_pmd_range()
197 unsigned long pages = 0; change_pud_range() local
204 pages += change_pmd_range(vma, pud, addr, next, newprot, change_pud_range()
208 return pages; change_pud_range()
219 unsigned long pages = 0; change_protection_range() local
229 pages += change_pud_range(vma, pgd, addr, next, newprot, change_protection_range()
234 if (pages) change_protection_range()
238 return pages; change_protection_range()
245 unsigned long pages; change_protection() local
248 pages = hugetlb_change_protection(vma, start, end, newprot); change_protection()
250 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); change_protection()
252 return pages; change_protection()
cma.h
8 unsigned int order_per_bit; /* Order of pages represented by one bit */
workingset.c
19 * Per zone, two clock lists are maintained for file pages: the
20 * inactive and the active list. Freshly faulted pages start out at
21 * the head of the inactive list and page reclaim scans pages from the
24 * whereas active pages are demoted to the inactive list when the
38 * A workload is thrashing when its pages are frequently used but they
42 * In cases where the average access distance between thrashing pages
50 * active pages - which may be used more, hopefully less frequently:
59 * of pages. But a reasonable approximation can be made to measure
60 * thrashing on the inactive list, after which refaulting pages can be
61 * activated optimistically to compete with the existing active pages.
72 * also slides all inactive pages that were faulted into the cache
79 * time indicate the minimum number of inactive pages accessed in
88 * inactive pages accessed while the page was in cache is at least
118 * the only thing eating into inactive list space is active pages.
121 * Activating refaulting pages
123 * All that is known about the active list is that the pages have been
125 * time there is actually a good chance that pages on the active list
129 * least (R - E) active pages, the refaulting page is activated
130 * optimistically in the hope that (R - E) active pages are actually
134 * If this is wrong and demotion kicks in, the pages which are truly
138 * But if this is right, the stale pages will be pushed out of memory
139 * and the used pages get to stay in cache.
274 unsigned long pages; count_shadow_nodes() local
281 pages = node_present_pages(sc->nid); count_shadow_nodes()
283 * Active cache pages are limited to 50% of memory, and shadow count_shadow_nodes()
287 * cache pages, assuming a worst-case node population density count_shadow_nodes()
296 max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3); count_shadow_nodes()
341 * no pages, so we expect to be able to remove them all and shadow_lru_isolate()
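The workingset.c excerpts describe the refault-distance rule in prose. An illustrative-only restatement in code; the names evict_time, fault_time and nr_active_file are hypothetical, not the actual mm/workingset.c symbols:

#include <linux/types.h>

/* A refaulting page is treated as working set when the distance between
 * its eviction and its refault (R - E) fits within the active file list. */
static bool refault_is_workingset(unsigned long evict_time,
                                  unsigned long fault_time,
                                  unsigned long nr_active_file)
{
        unsigned long refault_distance = fault_time - evict_time;

        return refault_distance <= nr_active_file;
}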
swap.c
41 /* How many pages do we try to swap or page in/out together? */
49 * This path almost never happens for VM activity - pages are normally
348 * put_pages_list() - release a list of pages
349 * @pages: list of pages threaded on page->lru
351 * Release a list of pages which are strung together on page.lru. Currently
354 void put_pages_list(struct list_head *pages) put_pages_list() argument
356 while (!list_empty(pages)) { put_pages_list()
359 victim = list_entry(pages->prev, struct page, lru); put_pages_list()
367 * get_kernel_pages() - pin kernel pages in memory
371 * @pages: array that receives pointers to the pages pinned.
374 * Returns number of pages pinned. This may be fewer than the number
375 * requested. If nr_pages is 0 or negative, returns 0. If no pages
380 struct page **pages) get_kernel_pages()
388 pages[seg] = kmap_to_page(kiov[seg].iov_base); get_kernel_pages()
389 page_cache_get(pages[seg]); get_kernel_pages()
400 * @pages: array that receives pointer to the page pinned.
407 int get_kernel_page(unsigned long start, int write, struct page **pages) get_kernel_page() argument
414 return get_kernel_pages(&kiov, 1, write, pages); get_kernel_page()
428 struct page *page = pvec->pages[i]; pagevec_lru_move_fn()
443 release_pages(pvec->pages, pvec->nr, pvec->cold); pagevec_lru_move_fn()
584 struct page *pagevec_page = pvec->pages[i]; __lru_cache_activate_page()
801 * Drain pages out of the cpu's pagevecs.
900 * @pages: array of pages to release
901 * @nr: number of pages
902 * @cold: whether the pages are cache cold
904 * Decrement the reference count on all the pages in @pages. If it
907 void release_pages(struct page **pages, int nr, bool cold) release_pages() argument
917 struct page *page = pages[i]; release_pages()
930 * excessive with a continuous string of pages from the release_pages()
973 * The pages which we're about to release may be in the deferred lru-addition
975 * OK from a correctness point of view but is inefficient - those pages may be
985 release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); __pagevec_release()
1047 * Add the passed pages to the LRU, then drop the caller's refcount
1065 * to @nr_entries pages and shadow entries in the mapping. All
1067 * reference against actual pages in @pvec.
1082 pvec->pages, indices); pagevec_lookup_entries()
1090 * pagevec_lookup_entries() fills both pages and exceptional radix
1100 struct page *page = pvec->pages[i]; pagevec_remove_exceptionals()
1102 pvec->pages[j++] = page; pagevec_remove_exceptionals()
1109 * @pvec: Where the resulting pages are placed
1112 * @nr_pages: The maximum number of pages
1114 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
1115 * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
1116 * reference against the pages in @pvec.
1118 * The search returns a group of mapping-contiguous pages with ascending
1119 * indexes. There may be holes in the indices due to not-present pages.
1121 * pagevec_lookup() returns the number of pages which were found.
1126 pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); pagevec_lookup()
1135 nr_pages, pvec->pages); pagevec_lookup_tag()
379 get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, struct page **pages) get_kernel_pages() argument
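The swap.c excerpts include the pagevec_lookup() contract. A minimal sketch of the usual batched-lookup loop over a mapping, assuming the v4.1 pagevec interface (pagevec_init/pagevec_lookup/pagevec_release) and leaving page locking to the caller:

#include <linux/pagevec.h>
#include <linux/pagemap.h>

/* Walk every page currently cached in @mapping, one batch at a time. */
static void example_walk_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        int i;

        pagevec_init(&pvec, 0 /* not cache-cold */);
        while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* ... inspect the page under its own locking ... */
                        index = page->index + 1;   /* resume after this page */
                }
                pagevec_release(&pvec);   /* drop the references taken by lookup */
        }
}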
readahead.c
40 * before calling, such as the NFS fs marking pages that are cached locally
59 * release a list of pages, invalidating them first if need be
62 struct list_head *pages) read_cache_pages_invalidate_pages()
66 while (!list_empty(pages)) { read_cache_pages_invalidate_pages()
67 victim = list_to_page(pages); read_cache_pages_invalidate_pages()
74 * read_cache_pages - populate an address space with some pages & start reads against them
76 * @pages: The address of a list_head which contains the target pages. These
77 * pages have their ->index populated and are otherwise uninitialised.
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, read_cache_pages() argument
89 while (!list_empty(pages)) { read_cache_pages()
90 page = list_to_page(pages); read_cache_pages()
101 read_cache_pages_invalidate_pages(mapping, pages); read_cache_pages()
112 struct list_head *pages, unsigned nr_pages) read_pages()
121 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); read_pages()
122 /* Clean up the remaining pages */ read_pages()
123 put_pages_list(pages); read_pages()
128 struct page *page = list_to_page(pages); read_pages()
146 * the pages first, then submits them all for I/O. This avoids the very bad
150 * Returns the number of pages requested, or the maximum amount of I/O allowed.
170 * Preallocate as many pages as we will need. __do_page_cache_readahead()
237 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
296 * readahead pages and stalled on the missing page at readahead_index;
298 * only async_size pages left in the readahead window. Normally async_size
304 * indicator. The flag won't be set on already cached pages, to avoid the
323 * Count contiguously cached pages from @offset-1 to @offset-@max,
354 * not enough history pages: try_context_readahead()
444 * Query the page cache and look for the traces(cached history pages) ondemand_readahead()
482 * pagecache pages
486 * pages onto the read request if access patterns suggest it will improve
509 * page_cache_async_readahead - file readahead for marked pages
516 * pagecache pages
521 * more pages.
61 read_cache_pages_invalidate_pages(struct address_space *mapping, struct list_head *pages) read_cache_pages_invalidate_pages() argument
111 read_pages(struct address_space *mapping, struct file *filp, struct list_head *pages, unsigned nr_pages) read_pages() argument
mincore.c
173 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) do_mincore() argument
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); do_mincore()
199 * mincore() returns the memory residency status of the pages in the
207 * contain stale information. Only locked pages are guaranteed to
216 * specify one or more pages which are not currently
224 unsigned long pages; SYSCALL_DEFINE3() local
236 pages = len >> PAGE_SHIFT; SYSCALL_DEFINE3()
237 pages += (len & ~PAGE_MASK) != 0; SYSCALL_DEFINE3()
239 if (!access_ok(VERIFY_WRITE, vec, pages)) SYSCALL_DEFINE3()
247 while (pages) { SYSCALL_DEFINE3()
253 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); SYSCALL_DEFINE3()
262 pages -= retval; SYSCALL_DEFINE3()
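The mincore.c excerpts implement the mincore(2) syscall. A hedged userspace sketch that counts how many pages of a mapping are resident; addr must be page-aligned and the vector holds one status byte per page:

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Return the number of resident pages in [addr, addr + len), or -1 on error. */
static long count_resident_pages(void *addr, size_t len)
{
        long page_size = sysconf(_SC_PAGESIZE);
        size_t npages = (len + page_size - 1) / page_size;
        unsigned char *vec = malloc(npages);
        long i, resident = 0;

        if (!vec)
                return -1;
        if (mincore(addr, len, vec) != 0) {
                free(vec);
                return -1;
        }
        for (i = 0; i < (long)npages; i++)
                resident += vec[i] & 1;   /* low bit: page is resident */
        free(vec);
        return resident;
}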
swap_state.c
66 printk("%lu pages in swap cache\n", total_swapcache_pages()); show_swap_cache_info()
132 * This must be called only on pages that have
207 * This must be called only on pages that have
255 * Passed an array of pages, drop them all from swapcache and then release
258 void free_pages_and_swap_cache(struct page **pages, int nr) free_pages_and_swap_cache() argument
260 struct page **pagep = pages; free_pages_and_swap_cache()
390 unsigned int pages, max_pages, last_ra; swapin_nr_pages() local
402 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2; swapin_nr_pages()
403 if (pages == 2) { swapin_nr_pages()
410 pages = 1; swapin_nr_pages()
414 while (roundup < pages) swapin_nr_pages()
416 pages = roundup; swapin_nr_pages()
419 if (pages > max_pages) swapin_nr_pages()
420 pages = max_pages; swapin_nr_pages()
424 if (pages < last_ra) swapin_nr_pages()
425 pages = last_ra; swapin_nr_pages()
426 atomic_set(&last_readahead_pages, pages); swapin_nr_pages()
428 return pages; swapin_nr_pages()
432 * swapin_readahead - swap in pages in hope we need them soon
483 lru_add_drain(); /* Push any new pages onto the LRU now */ swapin_readahead()
vmscan.c
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
62 /* How many pages shrink_list() should reclaim */
83 /* Scan (total_size >> priority) pages at once */
88 /* Can mapped pages be reclaimed? */
91 /* Can pages be swapped as part of reclaim? */
102 /* Incremented by the number of inactive pages that were scanned */
105 /* Number of pages freed so far during a call to shrink_zones() */
144 * The total number of pages which are beyond the high watermark within all
367 * passes the number of pages scanned and the number of pages on the
369 * when it encountered mapped pages. The ratio is further biased by
527 * Some data journaling orphaned pages can have pageout()
698 * For evictable pages, we can use the cache. putback_lru_page()
707 * Put unevictable pages directly on zone's unevictable putback_lru_page()
777 * All mapped pages start out with page table page_check_references()
786 * Note: the mark is set for activated pages as well page_check_references()
787 * so that recently deactivated but used pages are page_check_references()
796 * Activate file-backed executable pages after first usage. page_check_references()
804 /* Reclaim if clean, defer dirty pages to writeback */ page_check_references()
818 * Anonymous pages are not handled by flushers and must be written page_check_dirty_writeback()
841 * shrink_page_list() returns the number of reclaimed pages
892 /* Double the slab pressure for mapped and swapcache pages */ shrink_page_list()
900 * The number of dirty pages determines if a zone is marked shrink_page_list()
902 * will stall and start writing pages if the tail of the LRU shrink_page_list()
903 * is all dirty unqueued pages. shrink_page_list()
914 * pages are cycling through the LRU so quickly that the shrink_page_list()
915 * pages marked for immediate reclaim are making it to the shrink_page_list()
928 * 1) If reclaim is encountering an excessive number of pages shrink_page_list()
930 * PageReclaim then it indicates that pages are being queued shrink_page_list()
952 * PageReclaim. memcg does not have any dirty pages shrink_page_list()
954 * pages are in writeback and there is nothing else to shrink_page_list()
1037 * Only kswapd can writeback filesystem pages to shrink_page_list()
1039 * if many dirty pages have been encountered. shrink_page_list()
1103 * Rarely, pages can have buffers and no ->mapping. These are shrink_page_list()
1104 * the pages which were not successfully invalidated in shrink_page_list()
1229 /* Only take pages on the LRU. */ __isolate_lru_page()
1233 /* Compaction should not handle unevictable pages but CMA can do so */ __isolate_lru_page()
1241 * wants to isolate pages it will be able to operate on without __isolate_lru_page()
1242 * blocking - clean pages for the most part. __isolate_lru_page()
1244 * ISOLATE_CLEAN means that only clean pages should be isolated. This __isolate_lru_page()
1247 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages __isolate_lru_page()
1258 /* ISOLATE_CLEAN means only clean pages */ __isolate_lru_page()
1263 * Only pages without mappings or that have a __isolate_lru_page()
1291 * shrink the lists perform better by taking out a batch of pages
1299 * @nr_to_scan: The number of pages to look through on the list.
1300 * @lruvec: The LRU vector to pull pages from.
1301 * @dst: The temp list to put pages on to.
1302 * @nr_scanned: The number of pages that were scanned.
1307 * returns how many pages were moved onto *@dst.
1401 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1427 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they too_many_isolated()
1445 * Put back any unfreeable pages. putback_inactive_pages()
1487 * To save our caller's stack, now use input list for pages to free. putback_inactive_pages()
1507 * of reclaimed pages
1590 * If reclaim is isolating dirty pages under writeback, it implies shrink_inactive_list()
1600 * of pages under pages flagged for immediate reclaim and stall if any shrink_inactive_list()
1612 * Tag a zone as congested if all the dirty pages scanned were shrink_inactive_list()
1619 * If dirty pages are scanned that are not queued for IO, it shrink_inactive_list()
1621 * the zone ZONE_DIRTY and kswapd will start writing pages from shrink_inactive_list()
1628 * If kswapd scans pages marked marked for immediate shrink_inactive_list()
1630 * that pages are cycling through the LRU faster than shrink_inactive_list()
1640 * unqueued dirty pages or cycling through the LRU too quickly. shrink_inactive_list()
1655 * This moves pages from the active list to the inactive list.
1660 * If the pages are mostly unmapped, the processing is fast and it is
1662 * the pages are mapped, the processing is slow (page_referenced()) so we
1664 * this, so instead we remove the pages from the LRU while processing them.
1665 * It is safe to rely on PG_active against the non-LRU pages in here because
1721 LIST_HEAD(l_hold); /* The pages which were snipped off */ shrink_active_list()
1774 * Identify referenced, file-backed active pages and shrink_active_list()
1777 * memory under moderate memory pressure. Anon pages shrink_active_list()
1779 * IO, plus JVM can create lots of anon VM_EXEC pages, shrink_active_list()
1793 * Move pages back to the lru list. shrink_active_list()
1797 * Count referenced pages from currently used mappings as rotated, shrink_active_list()
1799 * helps balance scan pressure between file and anonymous pages in shrink_active_list()
1828 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1831 * Returns true if the zone does not have enough inactive anon pages,
1832 * meaning some active anon pages need to be deactivated.
1856 * inactive_file_is_low - check if file pages need to be deactivated
1860 * ensures that active file pages get deactivated, until more
1861 * than half of the file pages are on the inactive list.
1866 * This uses a different ratio than the anonymous pages, because
1910 * by looking at the fraction of the pages scanned we did rotate back
1913 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1914 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
1952 /* If we have no swap space, do not bother scanning anon pages. */ get_scan_count()
1982 * cache pages start out inactive, every cache fault will tip get_scan_count()
1987 * anon pages. Try to detect this based on file LRU size. get_scan_count()
2023 * pages. We use the recently rotated / recently scanned get_scan_count()
2050 * The amount of pressure on anon vs file pages is inversely get_scan_count()
2051 * proportional to the fraction of recently scanned pages on get_scan_count()
2141 * when the requested number of pages are reclaimed when scanning at shrink_lruvec()
2171 * For kswapd and memcg, reclaim at least the number of pages
2225 * Even if we did not try to evict anon pages at all, we want to
2248 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2249 * true if more pages should be reclaimed such that when the page allocator
2250 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2251 * It will give up earlier than that if there is difficulty reclaiming pages.
2270 * to reclaim pages. This full LRU scan is potentially should_continue_reclaim()
2279 * any pages from the last SWAP_CLUSTER_MAX number of should_continue_reclaim()
2280 * pages that were scanned. This will return to the should_continue_reclaim()
2289 * If we have not reclaimed enough pages for compaction and the should_continue_reclaim()
2360 * nr_to_reclaim pages to be reclaimed and it will shrink_zone()
2373 * the eligible LRU pages were scanned. shrink_zone()
2409 * callers using the pages just freed. Continue reclaiming until compaction_ready()
2410 * there is a buffer of free pages available to give compaction compaction_ready()
2437 * try to reclaim pages from zones which will satisfy the caller's allocation
2442 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2448 * If a zone is deemed to be full of pinned pages then just give it a light
2466 * highmem pages could be pinning lowmem pages storing buffer_heads shrink_zones()
2515 * This steals pages from memory cgroups over softlimit for_each_zone_zonelist_nodemask()
2516 * and returns the number of reclaimed pages and for_each_zone_zonelist_nodemask()
2517 * scanned pages. This works for global memory pressure for_each_zone_zonelist_nodemask()
2555 * high - the zone may be full of dirty or under-writeback pages, which this
2557 * naps in the hope that some of these pages can be written. But if the
2561 * returns: 0, if no pages reclaimed
2562 * else, the number of pages reclaimed
2598 * Try to write back as many pages as we just scanned. This do_try_to_free_pages()
2686 * responsible for cleaning pages necessary for reclaim to make forward throttle_direct_reclaim()
2822 * here is not a good idea, since it limits the pages we can scan. mem_cgroup_shrink_node_zone()
2824 * will pick up pages from other mem cgroup's as well. We hack mem_cgroup_shrink_node_zone()
2856 * take care of from where we get pages. So the node where we start the try_to_free_mem_cgroup_pages()
2915 * total of balanced pages must be at least 25% of the zones allowed by
3000 * kswapd shrinks the zone by the number of pages required to reach
3003 * Returns true if kswapd scanned at least the requested number of pages to
3004 * reclaim or if the lack of progress was due to pages under writeback.
3020 * Kswapd reclaims only single pages with compaction enabled. Trying kswapd_shrink_zone()
3021 * too hard to reclaim until contiguous free pages have become kswapd_shrink_zone()
3032 * many pages free already. The "too many pages" is defined as the kswapd_shrink_zone()
3050 /* Account for the number of pages attempted to reclaim */ kswapd_shrink_zone()
3057 * congested. It's possible there are dirty pages backed by congested kswapd_shrink_zone()
3076 * There is special handling here for zones which are full of pinned pages.
3077 * This can happen if the pages are all mlocked, or if they are all used by
3079 * What we do is to detect the case where all pages in the zone have been
3087 * lower zones regardless of the number of free pages in the lower zones. This
3089 * of pages is balanced across the zones.
3131 * pages a chance to be referenced before reclaiming. balance_pgdat()
3171 * necessary pages are already available. balance_pgdat()
3193 * pages behind kswapd's direction of progress, which would balance_pgdat()
3219 * priority if enough pages are already being scanned balance_pgdat()
3254 * high watermark number of pages as requsted balance_pgdat()
3261 * progress in reclaiming pages balance_pgdat()
3315 * isolate pages from and skips them in the future scanning. kswapd_try_to_sleep()
3317 * that pages and compaction may succeed so reset the cache. kswapd_try_to_sleep()
3338 * This basically trickles out pages so that we have _some_
3472 * freed pages.
3587 * If non-zero call zone_reclaim when the number of free pages falls below
3594 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3595 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
3598 * Priority for ZONE_RECLAIM. This determines the fraction of pages
3605 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3611 * If the number of slab pages in a zone grows beyond this percentage then
3623 * It's possible for there to be more file mapped pages than zone_unmapped_file_pages()
3624 * accounted for by the pages on the file LRU lists because zone_unmapped_file_pages()
3625 * tmpfs pages accounted for as ANON can also be FILE_MAPPED zone_unmapped_file_pages()
3630 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ zone_pagecache_reclaimable()
3637 * If RECLAIM_SWAP is set, then all file pages are considered zone_pagecache_reclaimable()
3639 * pages like swapcache and zone_unmapped_file_pages() provides zone_pagecache_reclaimable()
3647 /* If we can't clean pages, remove dirty pages from consideration */ zone_pagecache_reclaimable()
3659 * Try to free up some pages from this zone through reclaim.
3663 /* Minimum pages needed in order to stay on node */ __zone_reclaim()
3680 * and we also need to be able to write out pages for RECLAIM_WRITE __zone_reclaim()
3710 * Zone reclaim reclaims unmapped file backed pages and zone_reclaim()
3711 * slab pages if we are over the defined limits. zone_reclaim()
3713 * A small portion of unmapped file backed pages is needed for zone_reclaim()
3714 * file I/O otherwise pages read by file I/O will be immediately zone_reclaim()
3717 * unmapped file backed pages. zone_reclaim()
3774 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3775 * @pages: array of pages to check
3776 * @nr_pages: number of pages to check
3778 * Checks pages for evictability and moves them to the appropriate lru list.
3782 void check_move_unevictable_pages(struct page **pages, int nr_pages) check_move_unevictable_pages() argument
3791 struct page *page = pages[i]; check_move_unevictable_pages()
balloon_compaction.c
4 * Common interface for making balloon pages movable by compaction.
55 * compaction isolated pages.
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { balloon_page_dequeue()
91 * list is empty and there is no isolated pages, then something balloon_page_dequeue()
92 * went out of track and some balloon pages are lost. balloon_page_dequeue()
94 * an infinite loop while attempting to release all its pages. balloon_page_dequeue()
97 if (unlikely(list_empty(&b_dev_info->pages) && balloon_page_dequeue()
128 list_add(&page->lru, &b_dev_info->pages); __putback_balloon_page()
137 * Avoid burning cycles with pages that are yet under __free_pages(), balloon_page_isolate()
147 * As balloon pages are not isolated from LRU lists, concurrent balloon_page_isolate()
153 * or to avoid attempting to isolate pages being released by balloon_page_isolate()
bootmem.c
57 static unsigned long __init bootmap_bytes(unsigned long pages) bootmap_bytes() argument
59 unsigned long bytes = DIV_ROUND_UP(pages, 8); bootmap_bytes()
65 * bootmem_bootmap_pages - calculate bitmap size in pages
66 * @pages: number of pages the bitmap has to represent
68 unsigned long __init bootmem_bootmap_pages(unsigned long pages) bootmem_bootmap_pages() argument
70 unsigned long bytes = bootmap_bytes(pages); bootmem_bootmap_pages()
107 * Initially all pages are reserved - setup_arch() has to init_bootmem_core()
137 * @pages: number of available physical pages
141 unsigned long __init init_bootmem(unsigned long start, unsigned long pages) init_bootmem() argument
143 max_low_pfn = pages; init_bootmem()
145 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); init_bootmem()
149 * free_bootmem_late - free bootmem pages directly to page allocator
175 unsigned long *map, start, end, pages, cur, count = 0; free_all_bootmem_core() local
207 * BITS_PER_LONG block of pages in front of us, free free_all_bootmem_core()
234 pages = bdata->node_low_pfn - bdata->node_min_pfn; free_all_bootmem_core()
235 pages = bootmem_bootmap_pages(pages); free_all_bootmem_core()
236 count += pages; free_all_bootmem_core()
237 while (pages--) free_all_bootmem_core()
269 * free_all_bootmem - release free pages to the buddy allocator
271 * Returns the number of pages actually released.
389 * Partial pages will be considered reserved and left as they are.
411 * Partial pages will be considered reserved and left as they are.
434 * Partial pages will be reserved.
455 * Partial pages will be reserved.
500 bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n", alloc_bootmem_bdata()
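The bootmem.c excerpts show the bitmap-sizing and init entry points. A hedged sketch of typical early arch setup on a single node, assuming start_pfn/end_pfn/bitmap_pfn come from the architecture's memory probing and that init_bootmem() returns the bitmap size in bytes, which is then reserved so the allocator never hands it out:

#include <linux/bootmem.h>

/* Hand [start_pfn, end_pfn) to the bootmem allocator; the bitmap lives
 * at bitmap_pfn. All three PFNs are assumed to come from the arch code. */
static unsigned long __init example_setup_bootmem(unsigned long bitmap_pfn,
                                                  unsigned long start_pfn,
                                                  unsigned long end_pfn)
{
        unsigned long bitmap_bytes;

        bitmap_bytes = init_bootmem(bitmap_pfn, end_pfn);
        free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn));
        reserve_bootmem(PFN_PHYS(bitmap_pfn), bitmap_bytes, BOOTMEM_DEFAULT);

        return bitmap_bytes;
}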
madvise.c
184 lru_add_drain(); /* Push any new pages onto the LRU now */ force_swapin_readahead()
211 lru_add_drain(); /* Push any new pages onto the LRU now */ force_shm_swapin_readahead()
258 * Application no longer needs these pages. If the pages are dirty,
262 * these pages later if no one else has touched them in the meantime,
263 * although we could add these pages to a global reuse list for
264 * shrink_active_list to pick up before reclaiming other pages.
269 * pages in anonymous maps after committing to backing store the data
273 * An interface that causes the system to free clean pages and flush
274 * dirty pages is already available as msync(MS_INVALIDATE).
289 * Application wants to free up the pages and associated backing store.
431 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
435 * some pages ahead.
439 * pages and associated backing store.
441 * typically, to avoid COWing pages pinned by get_user_pages().
443 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
444 * this area with pages of identical content from other such areas.
445 * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
451 * is attempting to release locked or shared pages.
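The madvise.c excerpts spell out the advice semantics. A hedged userspace sketch combining two of the hints described above on an existing mapping supplied by the caller:

#include <sys/mman.h>

/* Hint sequential access over a mapping, then discard it when finished.
 * After MADV_DONTNEED the next touch refaults the pages (zero-filled for
 * private anonymous memory, re-read from the file for file-backed maps). */
static int stream_then_discard(void *addr, size_t len)
{
        if (madvise(addr, len, MADV_SEQUENTIAL) != 0)
                return -1;

        /* ... walk through the mapping sequentially ... */

        return madvise(addr, len, MADV_DONTNEED);
}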
cma.c
79 unsigned long pages) cma_bitmap_pages_to_bits()
81 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; cma_bitmap_pages_to_bits()
164 * @order_per_bit: Order of pages represented by one bit on bitmap.
216 * @order_per_bit: Order of pages represented by one bit on bitmap.
312 * All pages in the reserved area must come from the same zone. cma_declare_contiguous()
354 * cma_alloc() - allocate pages from contiguous area
356 * @count: Requested number of pages.
357 * @align: Requested alignment of pages (in PAGE_SIZE order).
426 * cma_release() - release allocated pages
428 * @pages: Allocated pages.
429 * @count: Number of allocated pages.
432 * It returns false when provided pages do not belong to contiguous area and
435 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) cma_release() argument
439 if (!cma || !pages) cma_release()
442 pr_debug("%s(page %p)\n", __func__, (void *)pages); cma_release()
444 pfn = page_to_pfn(pages); cma_release()
453 trace_cma_release(pfn, pages, count); cma_release()
78 cma_bitmap_pages_to_bits(const struct cma *cma, unsigned long pages) cma_bitmap_pages_to_bits() argument
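The cma.c excerpts give the allocator's contract. A minimal in-kernel sketch, assuming the v4.1 prototypes and that the struct cma pointer was obtained from an earlier cma_declare_contiguous()/cma_init_reserved_mem() call at boot:

#include <linux/cma.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Allocate a physically contiguous run of nr_pages from @cma, use it,
 * then hand it back. cma_alloc() returns NULL on failure; cma_release()
 * returns false if the pages did not belong to this area. */
static int example_cma_user(struct cma *cma, unsigned int nr_pages)
{
        struct page *pages;

        pages = cma_alloc(cma, nr_pages, 0 /* PAGE_SIZE alignment */);
        if (!pages)
                return -ENOMEM;

        /* ... use or DMA-map the contiguous range ... */

        if (!cma_release(cma, pages, nr_pages))
                pr_warn("cma_release: pages outside this CMA area\n");
        return 0;
}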
msync.c
21 * Nor does it marks the relevant pages dirty (it used to up to 2.6.17).
22 * Now it doesn't do anything, since dirty pages are properly tracked.
25 * write out the dirty pages and wait on the writeout and check the result.
fadvise.c
26 * deactivate the pages and clear PG_Referenced.
123 * First and last FULL page! Partial pages are deliberately SYSCALL_DEFINE4()
135 * If fewer pages were invalidated than expected then SYSCALL_DEFINE4()
136 * it is possible that some of the pages were on SYSCALL_DEFINE4()
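The fadvise.c excerpts note that only clean, full pages are dropped. A hedged userspace sketch of the DONTNEED path, syncing first so dirty pages do not survive the invalidation; posix_fadvise() returns 0 on success or a positive error number:

#include <fcntl.h>
#include <unistd.h>

/* Ask the kernel to drop fd's cached pages once they are clean. */
static int drop_file_cache(int fd)
{
        if (fsync(fd) != 0)   /* flush dirty pages so the fadvise can drop them */
                return -1;
        return posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
}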
truncate.c
2 * mm/truncate.c - code for taking down pages from address_spaces
129 * any time, and is not supposed to throw away dirty pages. But pages can
131 * discards clean, unused pages.
162 * Used to get rid of pages on hardware memory corruption.
169 * Only punch for normal data pages for now. generic_error_remove_page()
180 * It only drops clean, unused pages. The page must be locked.
197 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
202 * Truncate the page cache, removing the pages that are between
203 * specified offsets (and zeroing out partial pages
209 * The first pass will remove most pages, so the search cost of the second pass
213 * mapping is large, it is probably the case that the final pages are the most
236 /* Offsets within partial pages */ truncate_inode_pages_range()
241 * 'start' and 'end' always covers the range of pages to be fully truncate_inode_pages_range()
242 * truncated. Partial pages are covered with 'partial_start' at the truncate_inode_pages_range()
263 struct page *page = pvec.pages[i]; truncate_inode_pages_range()
324 * If the truncation happened within a single page no pages truncate_inode_pages_range()
349 struct page *page = pvec.pages[i]; truncate_inode_pages_range()
379 * truncate_inode_pages - truncate *all* the pages from an offset
397 * truncate_inode_pages_final - truncate *all* pages before inode dies
444 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
445 * @mapping: the address_space which holds the pages to invalidate
449 * This function only removes the unlocked pages, if you want to
450 * remove all the pages of one inode, you must call truncate_inode_pages.
453 * invalidate pages which are dirty, locked, under writeback or mapped into
471 struct page *page = pvec.pages[i]; invalidate_mapping_pages()
508 * invalidation guarantees, and cannot afford to leave pages behind because
549 * invalidate_inode_pages2_range - remove range of pages from an address_space
554 * Any pages which are found to be mapped into pagetables are unmapped prior to
557 * Returns -EBUSY if any pages could not be invalidated.
577 struct page *page = pvec.pages[i]; invalidate_inode_pages2_range()
637 * invalidate_inode_pages2 - remove all pages from an address_space
640 * Any pages which are found to be mapped into pagetables are unmapped prior to
643 * Returns -EBUSY if any pages could not be invalidated.
676 * private pages to be COWed, which remain after truncate_pagecache()
790 * hole-punching should not remove private COWed pages from the hole. truncate_pagecache_range()
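The truncate.c excerpts describe the page-cache teardown helpers. A minimal sketch of how a filesystem's ->evict_inode() commonly uses them; myfs_evict_inode is a hypothetical name and fs-private cleanup is elided:

#include <linux/fs.h>

/* Drop every cached page of the dying inode, then mark it clear. */
static void myfs_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        /* ... release fs-private inode state here ... */
}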
page_alloc.c
4 * Manages the free list, the system allocates free pages here.
117 * When calculating the number of globally allowed dirty pages, there
224 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
282 * Temporary debugging check for pages not lying within a given zone.
307 /* Don't complain about poisoned pages */ bad_page()
346 * Higher-order pages are called "compound pages". They are structured thusly:
350 * The remaining PAGE_SIZE pages are called "tail pages".
352 * All pages have PG_compound set. All tail pages have their ->first_page
357 * This usage means that zero-order pages may not be compound.
463 /* Guard pages are not available for any usage */ set_page_guard()
535 * calculating zone/node ids for pages that could page_is_buddy()
554 * units of memory (here, pages), and each level above it describes
560 * At each level, we keep a list of pages, which are heads of continuous
561 * free pages of length of (1 << order) and marked with _mapcount
651 * that pages are being freed that will coalesce soon. In case, __free_one_page()
704 * Frees a number of pages from the PCP lists
705 * Assumes all pages on list are in same zone, and of same order.
706 * count is the number of pages to free.
708 * If the zone was previously in an "all pages pinned" state then look to
711 * And clear the zone's pages_scanned counter, to hold off the "all pages are
732 * Remove pages from lists in a round-robin fashion. A free_pcppages_bulk()
734 * empty list is encountered. This is so more pages are freed free_pcppages_bulk()
913 * influencing the order in which pages are delivered to the IO
937 * Mark as guard pages (or page), that will allow to expand()
940 * pages will stay not present in virtual address space expand()
1085 * Move the free pages in a range to the free lists of the requested type.
1103 * grouping pages by mobility move_freepages()
1167 * steal extra free pages from the same pageblocks to satisfy further
1171 * be more free pages in the pageblock, so try to steal them all. For
1201 * pageblock and check whether half of pages are moved or not. If half of
1202 * pages are moved, we can change migratetype of pageblock and permanently
1203 * use it's pages as requested migratetype in the future.
1209 int pages; steal_suitable_fallback() local
1217 pages = move_freepages_block(zone, page, start_type); steal_suitable_fallback()
1220 if (pages >= (1 << (pageblock_order-1)) || steal_suitable_fallback()
1229 * fragmentation due to mixed migratetype pages in one pageblock.
1272 /* Find the largest possible block of pages in the other list */ __rmqueue_fallback()
1299 * we need to make sure unallocated pages flushed from __rmqueue_fallback()
1350 * Returns the number of new pages which were placed at *list.
1365 * Split buddy pages returned by expand() are received here rmqueue_bulk()
1370 * merge IO requests if the physical pages are ordered rmqueue_bulk()
1453 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1456 * the single zone's pages.
1469 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1471 * When zone parameter is non-NULL, spill just the single zone's pages.
1581 * Free ISOLATE pages back to the allocator because they are being free_hot_cold_page()
1582 * offlined but treat RESERVE as movable pages so we can get those free_hot_cold_page()
1611 * Free a list of 0-order pages
1625 * n (1<<order) sub-pages: page[0..n]
1640 * Split shadow pages too, because free(page[0]) would split_page()
1716 /* Split into individual pages */ split_free_page()
1874 * Return true if free pages are above 'mark'. This takes into account the order
1892 /* If allocation can't use CMA areas don't use free CMA pages */ __zone_watermark_ok()
1900 /* At the next order, this order's pages become unavailable */ __zone_watermark_ok()
1903 /* Require fewer higher order pages to be free */ __zone_watermark_ok()
2143 * Distribute pages in proportion to the individual get_page_from_freelist()
2160 * proportional share of globally allowed dirty pages. get_page_from_freelist()
2164 * write pages from its LRU list. get_page_from_freelist()
2168 * before they are full. But the pages that do spill get_page_from_freelist()
2237 * 1<<order pages or else the page allocator get_page_from_freelist()
2265 * local node. However, the local node might have free pages left get_page_from_freelist()
2384 * specified, then we retry until we no longer reclaim any pages should_alloc_retry()
2385 * (above), or we've reclaimed an order of pages at least as should_alloc_retry()
2505 * is that pages exist, but not enough to satisfy watermarks. __alloc_pages_direct_compact()
2576 * pages are pinned on the per-cpu lists. Drain them and try again __alloc_pages_direct_reclaim()
2795 * so we fallback to base pages instead. __alloc_pages_slowpath()
2831 * pages, but the allocation wants us to keep going, __alloc_pages_slowpath()
2998 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
3029 * __free_kmem_pages and free_kmem_pages will free pages allocated with
3063 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3068 * minimum number of pages to satisfy the request. alloc_pages() can only
3069 * allocate memory in power-of-two pages.
3087 * pages on a node.
3126 * nr_free_zone_pages - count number of pages beyond high watermark
3129 * nr_free_zone_pages() counts the number of pages which are beyond the
3131 * zone, the number of pages is calculated as:
3155 * nr_free_buffer_pages - count number of pages beyond high watermark
3157 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3167 * nr_free_pagecache_pages - count number of pages beyond high watermark
3169 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3435 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3993 * Disable grouping by mobility if the number of pages in the build_all_zonelists()
4005 "Total pages: %ld\n", build_all_zonelists()
4018 * large so that collisions trying to wait on pages are rare.
4023 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
4024 * waitqueues, i.e. the size of the waitq table given the number of pages.
4029 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) wait_table_hash_nr_entries() argument
4033 pages /= PAGES_PER_WAITQUEUE; wait_table_hash_nr_entries()
4035 while (size < pages) wait_table_hash_nr_entries()
4058 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
4065 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) wait_table_hash_nr_entries() argument
4082 * Check if a pageblock contains reserved pages
4098 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
4151 * Blocks with reserved pages will never free, skip setup_zone_migrate_reserve()
4193 * Initially all pages are reserved - free ones are freed
4238 * can be created for invalid pages (for alignment) memmap_init_zone()
4276 * The per-cpu-pages pools are set to around 1000th of the zone_batchsize()
4294 * batches of pages, one task can end up with a lot zone_batchsize()
4295 * of pages of one half of the possible page colors zone_batchsize()
4296 * and the other with pages of the other colors. zone_batchsize()
4308 * assemble apparent contiguous memory from discontiguous pages. zone_batchsize()
4310 * Queueing large contiguous runs of pages for batching, however, zone_batchsize()
4311 * causes the pages to actually be freed in smaller chunks. As there zone_batchsize()
4478 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", zone_pcp_init()
4632 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4689 * Return the number of pages a zone spans in a node, including holes
4707 /* Check that this node has pages within the zone's required range */ zone_spanned_pages_in_node()
4715 /* Return the spanned pages */ zone_spanned_pages_in_node()
4744 * It returns the number of page frames in memory holes within a range.
4863 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ set_pageblock_order()
4901 unsigned long pages = spanned_pages; calc_memmap_size() local
4907 * memmap pages due to alignment because memmap pages for each calc_memmap_size()
4913 pages = present_pages; calc_memmap_size()
4915 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; calc_memmap_size()
4920 * - mark all pages reserved
4967 " %s zone: %lu pages used for memmap\n", free_area_init_core()
4971 " %s zone: %lu pages exceeds freesize %lu\n", free_area_init_core()
4975 /* Account for reserved pages */ free_area_init_core()
4978 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", free_area_init_core()
4984 /* Charge for highmem memmap if there are enough kernel pages */ free_area_init_core()
4993 * when the bootmem allocator frees pages into the buddy system. free_area_init_core()
4994 * And all highmem pages will be managed by the buddy system. free_area_init_core()
5163 /* convert mask to number of pages */ node_map_pfn_alignment()
5199 * Sum pages in active regions for movable zone.
5209 unsigned long pages = end_pfn - start_pfn; early_calculate_totalpages() local
5211 totalpages += pages; early_calculate_totalpages()
5212 if (pages) early_calculate_totalpages()
5345 * number of pages used as kernelcore for_each_node_state()
5415 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5547 unsigned long pages = 0; free_reserved_area() local
5551 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { free_reserved_area()
5557 if (pages && s) free_reserved_area()
5559 s, pages << (PAGE_SHIFT - 10), start, end); free_reserved_area()
5561 return pages; free_reserved_area()
5630 * set_dma_reserve - set the specified number of pages reserved in the first zone
5631 * @new_dma_reserve: The number of pages to mark reserved
5636 * function may optionally be used to account for unfreeable pages in the
5706 /* we treat the high watermark as reserved pages. */ for_each_online_pgdat()
5718 * situation where reclaim has to clean pages for_each_online_pgdat()
5731 * has a correct pages reserved value, so an adequate number of
5732 * pages are left in the zone after a successful __alloc_pages().
5774 /* Calculate total number of !ZONE_HIGHMEM pages */ for_each_zone()
5789 * need highmem pages, so cap pages_min to a small for_each_zone()
5803 * If it's a lowmem zone, reserve a number of pages for_each_zone()
5844 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5846 * the anonymous pages are kept on the inactive list.
6005 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6130 * some pages at the end of hash table which alloc_large_system_hash()
6157 /* Return a pointer to the bitmap storing bits affecting a block of pages */ get_pageblock_bitmap()
6180 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6209 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6250 * This function checks whether pageblock includes unmovable pages or not.
6251 * If @count is not zero, it is okay to include fewer than @count unmovable pages
6254 * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
6265 * If ZONE_MOVABLE, the zone never contains unmovable pages has_unmovable_pages()
6284 * We need not scan over tail pages because we don't has_unmovable_pages()
6314 * If there are RECLAIMABLE pages, we need to check has_unmovable_pages()
6322 * The problematic thing here is PG_reserved pages. PG_reserved has_unmovable_pages()
6415 * alloc_contig_range() -- tries to allocate given range of pages
6426 * pages fall in.
6431 * pages which PFN is in [start, end) are allocated for the caller and
6452 * MIGRATE_ISOLATE. Because pageblock and max order pages may alloc_contig_range()
6454 * work, we align the range to biggest of the two pages so alloc_contig_range()
6460 * migrate the pages from an unaligned range (ie. pages that alloc_contig_range()
6461 * we are interested in). This will put all the pages in alloc_contig_range()
6464 * When this is done, we take the pages in range from page alloc_contig_range()
6469 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the alloc_contig_range()
6487 * more, all pages in [start, end) are free in page allocator. alloc_contig_range()
6488 * What we are going to do is to allocate all pages from alloc_contig_range()
6491 * The only problem is that pages at the beginning and at the alloc_contig_range()
6492 * end of interesting range may be not aligned with pages that alloc_contig_range()
6494 * pages. Because of this, we reserve the bigger range and alloc_contig_range()
6495 * once this is done free the pages we are not interested in. alloc_contig_range()
6497 * We don't have to hold zone->lock here because the pages are alloc_contig_range()
6522 /* Grab isolated pages from freelists. */ alloc_contig_range()
6551 WARN(count != 0, "%d pages are still in use!\n", count); free_contig_range()
6592 * All pages in the range must be isolated before calling this.
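The alloc_pages_exact() hits above (source lines 3063-3069) describe an allocator that returns an exact page count instead of rounding up to a power of two, and frees the trailing pages. Below is a minimal sketch of how a caller might use it; the module name, buffer size, and init/exit pairing are invented for illustration and are not taken from page_alloc.c:

#include <linux/module.h>
#include <linux/gfp.h>

static void *demo_buf;
static const size_t demo_bytes = 40 * 1024;	/* 10 pages on 4K-page systems */

static int __init pages_demo_init(void)
{
	/*
	 * alloc_pages() would round 10 pages up to the next power of two (16);
	 * alloc_pages_exact() frees the tail so only the 10 needed pages stay.
	 */
	demo_buf = alloc_pages_exact(demo_bytes, GFP_KERNEL | __GFP_ZERO);
	return demo_buf ? 0 : -ENOMEM;
}

static void __exit pages_demo_exit(void)
{
	free_pages_exact(demo_buf, demo_bytes);
}

module_init(pages_demo_init);
module_exit(pages_demo_exit);
MODULE_LICENSE("GPL");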
H A Dmlock.c38 * Mlocked pages are marked with PageMlocked() flag for efficient testing
183 * might otherwise copy PageMlocked to part of the tail pages before munlock_vma_page()
223 * The fast path is available only for evictable pages with single mapping.
250 * Putback multiple evictable pages to the LRU
252 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
253 * the pages might have meanwhile become unevictable but that is OK.
267 * Munlock a batch of pages from the same zone
270 * and attempts to isolate the pages, all under a single zone lru lock.
271 * The second phase finishes the munlock only for pages where isolation
289 struct page *page = pvec->pages[i]; __munlock_pagevec()
308 pagevec_add(&pvec_putback, pvec->pages[i]); __munlock_pagevec()
309 pvec->pages[i] = NULL; __munlock_pagevec()
315 /* Now we can release pins of pages that we are not munlocking */ __munlock_pagevec()
320 struct page *page = pvec->pages[i]; __munlock_pagevec()
339 * Phase 3: page putback for pages that qualified for the fast path __munlock_pagevec()
352 * The rest of @pvec is filled by subsequent pages within the same pmd and same
354 * pages also get pinned.
405 * munlock_vma_pages_range() - munlock all pages in the vma range.
417 * We don't save and restore VM_LOCKED here because pages are
418 * still on lru. In unmap path, pages might be scanned by reclaim
420 * free them. This will result in freeing mlocked pages.
460 * Non-huge pages are handled in batches via munlock_vma_pages_range()
H A Dpage-writeback.c7 * Contains functions related to writing back dirty pages at the
50 * Try to keep balance_dirty_pages() call intervals higher than this many pages
63 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
132 * pages fastest will get the larger share, while the slower will get a smaller
136 * dirty pages. Having them written out is the primary goal.
159 * In a memory zone, there is a certain amount of pages we consider
161 * free and reclaimable pages, minus some zone reserves to protect
165 * This number of dirtyable pages is the base value of which the
166 * user-configurable dirty ratio is the effective number of pages that
168 * globally by using the sum of dirtyable pages over all zones.
177 * zone_dirtyable_memory - number of dirtyable pages in a zone
180 * Returns the zone's number of pages potentially available for dirty
209 * without swap) can bring down the dirtyable pages below
220 * Make sure that the number of highmem pages is never larger
232 * global_dirtyable_memory - number of globally dirtyable pages
234 * Returns the global number of pages potentially available for dirty
292 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
295 * Returns the maximum number of dirty pages allowed in a zone, based
320 * Returns %true when the dirty pages in @zone are within the zone's
521 * @dirty: global dirty limit in pages
523 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
524 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
527 * when sleeping max_pause per page is not enough to keep the dirty pages under
531 * more (rather than completely block them) when the bdi dirty pages go high.
535 * - piling up dirty pages (that will take long time to sync) on slow devices
597 * We want the dirty pages be balanced around the global/bdi setpoints.
598 * When the number of dirty pages is higher/lower than the setpoint, the
600 * decreased/increased to bring the dirty pages back to the setpoint.
631 * freerun^ setpoint^ limit^ dirty pages
678 unsigned long setpoint; /* dirty pages' target balance point */ bdi_position_ratio()
697 * from growing a large number of dirty pages before throttling. For bdi_position_ratio()
709 * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages. bdi_position_ratio()
710 * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is bdi_position_ratio()
711 * about ~6K pages (as the average of background and throttle bdi bdi_position_ratio()
767 * the bdi is over/under its share of dirty pages, we want to scale bdi_position_ratio()
786 * For single bdi case, the dirty pages are observed to fluctuate bdi_position_ratio()
831 * It may push the desired control point of global dirty pages higher bdi_position_ratio()
911 * global_dirty_limit which is guaranteed to lie above the dirty pages. update_dirty_limit()
973 * when dirty pages are truncated by userspace or re-dirtied by FS. bdi_update_dirty_ratelimit()
1046 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) bdi_update_dirty_ratelimit()
1174 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1179 * (the number of pages we may dirty without exceeding the dirty limits).
1198 * time, a small pool of dirty/writeback pages may go empty and disk go bdi_max_pause()
1219 int pages; /* target nr_dirtied_pause */ bdi_min_pause() local
1252 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); bdi_min_pause()
1260 * until reaches DIRTY_POLL_THRESH=32 pages. bdi_min_pause()
1262 if (pages < DIRTY_POLL_THRESH) { bdi_min_pause()
1264 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); bdi_min_pause()
1265 if (pages > DIRTY_POLL_THRESH) { bdi_min_pause()
1266 pages = DIRTY_POLL_THRESH; bdi_min_pause()
1271 pause = HZ * pages / (task_ratelimit + 1); bdi_min_pause()
1274 pages = task_ratelimit * t / roundup_pow_of_two(HZ); bdi_min_pause()
1277 *nr_dirtied_pause = pages; bdi_min_pause()
1281 return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t; bdi_min_pause()
1315 * to ensure we accurately count the 'dirty' pages when bdi_dirty_limits()
1318 * Otherwise it would be possible to get thresh+n pages bdi_dirty_limits()
1319 * reported dirty, even though there are thresh-m pages bdi_dirty_limits()
1336 * data. It looks at the number of dirty pages in the machine and will force
1511 * pages exceeds dirty_thresh, give the other good bdi's a pipe balance_dirty_pages()
1552 * dirty tsk->nr_dirtied_pause pages;
1556 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1593 * This prevents one CPU to accumulate too many dirtied pages without balance_dirty_pages_ratelimited()
1595 * 1000+ tasks, all of them start dirtying pages at exactly the same balance_dirty_pages_ratelimited()
1606 * Pick up the dirtied pages by the exited tasks. This avoids lots of balance_dirty_pages_ratelimited()
1753 * We used to scale dirty pages according to how total memory
1754 * related to pages that could be allocated for buffers (by
1762 * large amounts of dirty pages compared to a small amount of
1777 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1783 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1785 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
1787 * dirty pages in the file (thus it is important for this function to be quick
1788 * so that it can tag pages faster than a dirtying process can create them).
1791 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1813 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1815 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1827 * To avoid livelocks (when other process dirties new pages), we first tag
1828 * pages which should be written back with TOWRITE tag and only then start
1830 * not miss some pages (e.g., because some other process has cleared TOWRITE
1883 struct page *page = pvec.pages[i]; write_cache_pages()
1960 * keep going until we have written all the pages write_cache_pages()
2004 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2006 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2174 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2207 * For pages with a mapping this should be done under the page lock
2320 * page lock while dirtying the page, and pages are clear_page_dirty_for_io()
2414 * Return true if any of the pages in the mapping are marked with the
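The write_cache_pages()/tag_pages_for_writeback() hits above (source lines 1777-2006) describe the generic dirty-page walk. The sketch below shows, under the assumption of a hypothetical filesystem "myfs", how an address_space ->writepages() method typically delegates to write_cache_pages() with a per-page callback; the callback body is a placeholder, not real writeout code:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Called by write_cache_pages() with the page locked and cleared dirty-for-io. */
static int myfs_writepage_cb(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	/*
	 * Placeholder: a real filesystem would map the page and queue I/O here.
	 * We only mark writeback started/finished and unlock the page.
	 */
	set_page_writeback(page);
	unlock_page(page);
	end_page_writeback(page);
	return 0;
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	/* Walks DIRTY-tagged pages (TOWRITE-tagged first in sync mode) and
	 * invokes the callback until wbc->nr_to_write is consumed. */
	return write_cache_pages(mapping, wbc, myfs_writepage_cb, NULL);
}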
H A Dzbud.c8 * zbud is a special purpose allocator for storing compressed pages. Contrary
10 * allocator that "buddies" two compressed pages together in a single memory
17 * zbud works by storing compressed pages, or "zpages", together in pairs in a
26 * to zbud pages can not be less than 1. This ensures that zbud can never "do
27 * harm" by using more pages to store zpages than the uncompressed zpages would
30 * zbud pages are divided into "chunks". The size of the chunks is fixed at
31 * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
32 * into chunks allows organizing unbuddied zbud pages into a manageable number
79 * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
82 * @buddied: list tracking the zbud pages that contain two buddies;
83 * these zbud pages are full
84 * @lru: list tracking the zbud pages in LRU order by most recently
86 * @pages_nr: number of zbud pages in the pool.
154 static int zbud_zpool_shrink(void *pool, unsigned int pages, zbud_zpool_shrink() argument
160 while (total < pages) { zbud_zpool_shrink()
331 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
332 * as zbud pool pages.
457 * @retries: number of pages on the LRU list for which eviction will
486 * no pages to evict or an eviction handler is not registered, -EAGAIN if
586 * zbud_get_pool_size() - gets the zbud pool size in pages
589 * Returns: size in pages of the given pool. The pool lock need not be
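The zbud.c header comment above explains how two compressed objects ("zpages") share one backing page. The sketch below mirrors the call sequence a zbud user such as zswap follows; the exact prototypes (the type of the size argument, whether a NULL ops pointer is acceptable when reclaim is unused) are assumptions about this kernel version, not facts taken from the listing:

#include <linux/zbud.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>

static int zbud_demo(const void *compressed, unsigned int len)
{
	struct zbud_pool *pool;
	unsigned long handle;
	void *dst;
	int ret;

	pool = zbud_create_pool(GFP_KERNEL, NULL);	/* no eviction callbacks */
	if (!pool)
		return -ENOMEM;

	/* store one compressed object ("zpage") in the pool */
	ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	dst = zbud_map(pool, handle);	/* get a kernel address for the slot */
	memcpy(dst, compressed, len);
	zbud_unmap(pool, handle);

	zbud_free(pool, handle);
out:
	zbud_destroy_pool(pool);
	return ret;
}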
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_page_alloc.c29 * - Pool collects recently freed pages for reuse
31 * - doesn't track currently in use pages
62 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
68 * @list: Pool of free uc/wc pages for fast reuse.
70 * @npages: Number of pages in pool.
102 * @free_interval: minimum number of jiffies between freeing pages from pool.
105 * some pages to free.
106 * @small_allocation: Limit in number of pages what is small allocation.
164 /* Convert kb to number of pages */ ttm_pool_store()
220 static int set_pages_array_wb(struct page **pages, int addrinarray) set_pages_array_wb() argument
226 unmap_page_from_agp(pages[i]); set_pages_array_wb()
231 static int set_pages_array_wc(struct page **pages, int addrinarray) set_pages_array_wc() argument
237 map_page_into_agp(pages[i]); set_pages_array_wc()
242 static int set_pages_array_uc(struct page **pages, int addrinarray) set_pages_array_uc() argument
248 map_page_into_agp(pages[i]); set_pages_array_uc()
275 /* set memory back to wb and free the pages. */ ttm_pages_put()
276 static void ttm_pages_put(struct page *pages[], unsigned npages) ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) ttm_pages_put()
280 pr_err("Failed to set %d pages to wb!\n", npages); ttm_pages_put()
282 __free_page(pages[i]); ttm_pages_put()
293 * Free pages from pool.
296 * number of pages in one go.
298 * @pool: to free the pages from
299 * @free_all: If set to true will free all pages in pool
335 /* remove range of pages from the pool */ ttm_page_pool_free()
369 /* remove range of pages from the pool */ ttm_page_pool_free()
448 static int ttm_set_pages_caching(struct page **pages, ttm_set_pages_caching() argument
455 r = set_pages_array_uc(pages, cpages); ttm_set_pages_caching()
457 pr_err("Failed to set %d pages to uc!\n", cpages); ttm_set_pages_caching()
460 r = set_pages_array_wc(pages, cpages); ttm_set_pages_caching()
462 pr_err("Failed to set %d pages to wc!\n", cpages); ttm_set_pages_caching()
471 * Free the pages that failed to change the caching state. If there are
472 * any pages that have changed their caching state already put them to the
475 static void ttm_handle_caching_state_failure(struct list_head *pages, ttm_handle_caching_state_failure() argument
480 /* Failed pages have to be freed */ ttm_handle_caching_state_failure()
488 * Allocate new pages with correct caching.
491 * pages returned in pages array.
493 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, ttm_alloc_new_pages() argument
507 pr_err("Unable to allocate table for new pages\n"); ttm_alloc_new_pages()
517 /* store already allocated pages in the pool after ttm_alloc_new_pages()
523 ttm_handle_caching_state_failure(pages, ttm_alloc_new_pages()
544 ttm_handle_caching_state_failure(pages, ttm_alloc_new_pages()
553 list_add(&p->lru, pages); ttm_alloc_new_pages()
559 ttm_handle_caching_state_failure(pages, ttm_alloc_new_pages()
570 * Fill the given pool if there aren't enough pages and the requested number of
571 * pages is small.
582 * If pool doesn't have enough pages for the allocation new pages are ttm_page_pool_fill_locked()
591 * pages in a pool we fill the pool up first. */ ttm_page_pool_fill_locked()
614 /* If we have any pages left put them to the pool. */ ttm_page_pool_fill_locked()
627 * Cut 'count' number of pages from the pool and put them on the return list.
629 * @return count of pages still required to fulfill the request.
632 struct list_head *pages, ttm_page_pool_get_pages()
645 /* take all pages from the pool */ ttm_page_pool_get_pages()
646 list_splice_init(&pool->list, pages); ttm_page_pool_get_pages()
651 /* find the last pages to include for requested number of pages. Split ttm_page_pool_get_pages()
666 /* Cut 'count' number of pages from the pool */ ttm_page_pool_get_pages()
667 list_cut_position(pages, &pool->list, p); ttm_page_pool_get_pages()
675 /* Put all pages in pages list to correct pool to wait for reuse */ ttm_put_pages()
676 static void ttm_put_pages(struct page **pages, unsigned npages, int flags, ttm_put_pages() argument
684 /* No pool for this memory type so free the pages */ ttm_put_pages()
686 if (pages[i]) { ttm_put_pages()
687 if (page_count(pages[i]) != 1) ttm_put_pages()
688 pr_err("Erroneous page count. Leaking pages.\n"); ttm_put_pages()
689 __free_page(pages[i]); ttm_put_pages()
690 pages[i] = NULL; ttm_put_pages()
698 if (pages[i]) { ttm_put_pages()
699 if (page_count(pages[i]) != 1) ttm_put_pages()
700 pr_err("Erroneous page count. Leaking pages.\n"); ttm_put_pages()
701 list_add_tail(&pages[i]->lru, &pool->list); ttm_put_pages()
702 pages[i] = NULL; ttm_put_pages()
710 /* free at least NUM_PAGES_TO_ALLOC number of pages ttm_put_pages()
721 * On success pages list will hold count number of correctly
722 * cached pages.
724 static int ttm_get_pages(struct page **pages, unsigned npages, int flags, ttm_get_pages() argument
738 /* No pool for cached pages */ ttm_get_pages()
753 pages[r] = p; ttm_get_pages()
761 /* First we take pages from the pool */ ttm_get_pages()
766 pages[count++] = p; ttm_get_pages()
769 /* clear the pages coming from the pool if requested */ ttm_get_pages()
779 /* If pool didn't have enough pages allocate new one. */ ttm_get_pages()
787 pages[count++] = p; ttm_get_pages()
790 /* If there are any pages in the list put them back to ttm_get_pages()
792 pr_err("Failed to allocate extra pages for large request\n"); ttm_get_pages()
793 ttm_put_pages(pages, count, flags, cstate); ttm_get_pages()
874 ret = ttm_get_pages(&ttm->pages[i], 1, ttm_pool_populate()
882 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], ttm_pool_populate()
908 if (ttm->pages[i]) { ttm_pool_unpopulate()
910 ttm->pages[i]); ttm_pool_unpopulate()
911 ttm_put_pages(&ttm->pages[i], 1, ttm_pool_unpopulate()
924 char *h[] = {"pool", "refills", "pages freed", "size"}; ttm_page_alloc_debugfs()
631 ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct list_head *pages, int ttm_flags, enum ttm_caching_state cstate, unsigned count) ttm_page_pool_get_pages() argument
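The pool code above keeps recently freed uc/wc pages around so TTM drivers can repopulate ttm_tt objects cheaply. Drivers normally reach it through ttm_pool_populate()/ttm_pool_unpopulate() from their ttm_tt backend callbacks, roughly as sketched below; the "mydrv" names are hypothetical:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

/* ->populate() backend callback: fill ttm->pages[] from the shared pool. */
static int mydrv_ttm_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

/* ->unpopulate() backend callback: hand the pages back to the pool. */
static void mydrv_ttm_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}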
H A Dttm_tt.c49 * Allocates storage for pointers to the pages that back the ttm.
53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*)); ttm_tt_alloc_page_directory()
58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, ttm_dma_tt_alloc_page_directory()
59 sizeof(*ttm->ttm.pages) + ttm_dma_tt_alloc_page_directory()
62 ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); ttm_dma_tt_alloc_page_directory()
103 * for range of pages in a ttm.
123 drm_clflush_pages(ttm->pages, ttm->num_pages); ttm_tt_set_caching()
126 cur_page = ttm->pages[i]; ttm_tt_set_caching()
142 cur_page = ttm->pages[j]; ttm_tt_set_caching()
201 if (!ttm->pages) { ttm_tt_init()
212 drm_free_large(ttm->pages); ttm_tt_fini()
213 ttm->pages = NULL; ttm_tt_fini()
234 if (!ttm->pages) { ttm_dma_tt_init()
247 drm_free_large(ttm->pages); ttm_dma_tt_fini()
248 ttm->pages = NULL; ttm_dma_tt_fini()
309 to_page = ttm->pages[i]; ttm_tt_swapin()
353 from_page = ttm->pages[i]; ttm_tt_swapout()
384 struct page **page = ttm->pages; ttm_tt_clear_mapping()
H A Dttm_page_alloc_dma.c29 * - Pool collects recently freed pages for reuse (and hooks up to
31 * - Tracks currently in use pages
82 * for each 'struct device'. The 'cached' is for pages that are actively used.
89 * @inuse_list: Pool of pages that are in use. The order is very important and
90 * it is in the order that the TTM pages that are put back are in.
91 * @free_list: Pool of pages that are free to be used. No order requirements.
94 * @npages_free: Count of available pages for re-use.
95 * @npages_in_use: Count of pages that are in use.
218 /* Convert kb to number of pages */ ttm_pool_store()
272 static int set_pages_array_wb(struct page **pages, int addrinarray) set_pages_array_wb() argument
278 unmap_page_from_agp(pages[i]); set_pages_array_wb()
283 static int set_pages_array_wc(struct page **pages, int addrinarray) set_pages_array_wc() argument
289 map_page_into_agp(pages[i]); set_pages_array_wc()
294 static int set_pages_array_uc(struct page **pages, int addrinarray) set_pages_array_uc() argument
300 map_page_into_agp(pages[i]); set_pages_array_uc()
307 struct page **pages, unsigned cpages) ttm_set_pages_caching()
312 r = set_pages_array_uc(pages, cpages); ttm_set_pages_caching()
314 pr_err("%s: Failed to set %d pages to uc!\n", ttm_set_pages_caching()
318 r = set_pages_array_wc(pages, cpages); ttm_set_pages_caching()
320 pr_err("%s: Failed to set %d pages to wc!\n", ttm_set_pages_caching()
377 /* set memory back to wb and free the pages. */ ttm_dma_pages_put()
379 struct page *pages[], unsigned npages) ttm_dma_pages_put()
385 set_pages_array_wb(pages, npages)) ttm_dma_pages_put()
386 pr_err("%s: Failed to set %d pages to wb!\n", ttm_dma_pages_put()
399 pr_err("%s: Failed to set %d pages to wb!\n", ttm_dma_page_put()
407 * Free pages from pool.
410 * number of pages in one go.
412 * @pool: to free the pages from
413 * @nr_free: If set to true will free all pages in pool
431 pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n", ttm_dma_page_pool_free()
499 /* remove range of pages from the pool */ ttm_dma_page_pool_free()
663 * thing is at that point of time there are no pages associated with the ttm_dma_find_pool()
676 * Free the pages that failed to change the caching state. If there
677 * are pages that have changed their caching state already put them to the
708 * Allocate 'count' pages, and put 'need' number of them on the
709 * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset.
710 * The full list of pages should also be on 'd_pages'.
729 pr_err("%s: Unable to allocate table for new pages\n", ttm_dma_pool_alloc_new_pages()
735 pr_debug("%s: (%s:%d) Getting %d pages\n", ttm_dma_pool_alloc_new_pages()
745 /* store already allocated pages in the pool after ttm_dma_pool_alloc_new_pages()
795 * @return count of pages still required to fulfill the request.
840 * @return count of pages still required to fulfill the request.
857 ttm->pages[index] = d_page->p; ttm_dma_pool_get_pages()
870 * On success pages list will hold count number of correctly
871 * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
910 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], ttm_dma_populate()
931 /* Put all pages in pages list to correct pool to wait for reuse */ ttm_dma_unpopulate()
950 /* make sure pages array match list and count number of pages */ ttm_dma_unpopulate()
952 ttm->pages[count] = d_page->p; ttm_dma_unpopulate()
966 /* free at least NUM_PAGES_TO_ALLOC number of pages ttm_dma_unpopulate()
983 ttm->pages[i]); ttm_dma_unpopulate()
989 ttm->pages[i] = NULL; ttm_dma_unpopulate()
1132 char *h[] = {"pool", "refills", "pages freed", "inuse", "available", ttm_dma_page_alloc_debugfs()
306 ttm_set_pages_caching(struct dma_pool *pool, struct page **pages, unsigned cpages) ttm_set_pages_caching() argument
378 ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, struct page *pages[], unsigned npages) ttm_dma_pages_put() argument
/linux-4.1.27/include/uapi/linux/
H A Dfadvise.h7 #define POSIX_FADV_WILLNEED 3 /* Will need these pages. */
14 #define POSIX_FADV_DONTNEED 6 /* Don't need these pages. */
17 #define POSIX_FADV_DONTNEED 4 /* Don't need these pages. */
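The POSIX_FADV_* values above are consumed by the posix_fadvise(2) syscall. A tiny userspace sketch of dropping cached pages after a one-shot read; the helper name and the range starting at offset 0 are placeholders:

#include <fcntl.h>
#include <unistd.h>

int drop_cached_pages(int fd, off_t len)
{
	/* Tell the kernel the cached pages for [0, len) will not be reused,
	 * so it may reclaim them early. */
	return posix_fadvise(fd, 0, len, POSIX_FADV_DONTNEED);
}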
H A Dagpgart.h67 size_t pg_total; /* max pages (swap + system) */
68 size_t pg_system; /* max pages (system) */
69 size_t pg_used; /* current pages used */
81 __kernel_size_t pg_count; /* number of pages */
93 __kernel_size_t pg_count;/* number of pages */
/linux-4.1.27/net/9p/
H A Dtrans_common.c19 * p9_release_pages - Release pages after the transaction.
21 void p9_release_pages(struct page **pages, int nr_pages) p9_release_pages() argument
26 if (pages[i]) p9_release_pages()
27 put_page(pages[i]); p9_release_pages()
/linux-4.1.27/include/xen/
H A Dxen-ops.h34 * @vma: VMA to map the pages into
35 * @addr: Address at which to map the pages
40 * @domid: Domain owning the pages
41 * @pages: Array of pages if this domain has an auto-translated physmap
54 struct page **pages);
57 * @vma: VMA to map the pages into
58 * @addr: Address at which to map the pages
62 * @domid: Domain owning the pages
63 * @pages: Array of pages if this domain has an auto-translated physmap
72 struct page **pages);
74 int numpgs, struct page **pages);
80 struct page **pages);
82 int nr, struct page **pages);
H A Dballoon.h11 /* Number of pages in high- and low-memory balloons. */
28 int alloc_xenballooned_pages(int nr_pages, struct page **pages,
30 void free_xenballooned_pages(int nr_pages, struct page **pages);
/linux-4.1.27/arch/parisc/include/uapi/asm/
H A Dmman.h20 #define MAP_LOCKED 0x2000 /* pages are locked */
38 #define MADV_WILLNEED 3 /* will need these pages */
39 #define MADV_DONTNEED 4 /* don't need these pages */
41 #define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
45 #define MADV_REMOVE 9 /* remove these pages & resources */
50 #define MADV_4K_PAGES 12 /* Use 4K pages */
51 #define MADV_16K_PAGES 14 /* Use 16K pages */
52 #define MADV_64K_PAGES 16 /* Use 64K pages */
53 #define MADV_256K_PAGES 18 /* Use 256K pages */
54 #define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
55 #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
56 #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
57 #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
59 #define MADV_MERGEABLE 65 /* KSM may merge identical pages */
60 #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
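The MADV_* advice values above feed madvise(2); the parisc-specific MADV_*_PAGES hints additionally request larger page sizes for a mapping. A generic userspace sketch using only the portable hints; the buffer and length are placeholders:

#include <sys/mman.h>
#include <stddef.h>

int prefetch_then_discard(void *buf, size_t len)
{
	/* Ask the kernel to fault the backing pages in ahead of use ... */
	if (madvise(buf, len, MADV_WILLNEED) != 0)
		return -1;
	/* ... and later tell it the contents are disposable. */
	return madvise(buf, len, MADV_DONTNEED);
}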
/linux-4.1.27/fs/squashfs/
H A Dpage_actor.c32 if (actor->next_page == actor->pages) cache_next_page()
44 int pages, int length) squashfs_page_actor_init()
51 actor->length = length ? : pages * PAGE_CACHE_SIZE; squashfs_page_actor_init()
53 actor->pages = pages; squashfs_page_actor_init()
73 return actor->pageaddr = actor->next_page == actor->pages ? NULL : direct_next_page()
84 int pages, int length) squashfs_page_actor_init_special()
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; squashfs_page_actor_init_special()
93 actor->pages = pages; squashfs_page_actor_init_special()
43 squashfs_page_actor_init(void **buffer, int pages, int length) squashfs_page_actor_init() argument
83 squashfs_page_actor_init_special(struct page **page, int pages, int length) squashfs_page_actor_init_special() argument
H A Dpage_actor.h14 int pages; member in struct:squashfs_page_actor
20 int pages, int length) squashfs_page_actor_init()
27 actor->length = length ? : pages * PAGE_CACHE_SIZE; squashfs_page_actor_init()
29 actor->pages = pages; squashfs_page_actor_init()
42 return actor->next_page == actor->pages ? NULL : squashfs_next_page()
60 int pages; member in struct:squashfs_page_actor
19 squashfs_page_actor_init(void **page, int pages, int length) squashfs_page_actor_init() argument
H A Dfile_direct.c24 int pages, struct page **page);
37 int i, n, pages, missing_pages, bytes, res = -ENOMEM; squashfs_readpage_block() local
45 pages = end_index - start_index + 1; squashfs_readpage_block()
47 page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL); squashfs_readpage_block()
53 * page cache pages appropriately within the decompressor squashfs_readpage_block()
55 actor = squashfs_page_actor_init_special(page, pages, 0); squashfs_readpage_block()
59 /* Try to grab all the pages covered by the Squashfs block */ squashfs_readpage_block()
60 for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) { squashfs_readpage_block()
79 * Couldn't get one or more pages, this page has either squashfs_readpage_block()
85 res = squashfs_read_cache(target_page, block, bsize, pages, squashfs_readpage_block()
101 pageaddr = kmap_atomic(page[pages - 1]); squashfs_readpage_block()
106 /* Mark pages as uptodate, unlock and release */ squashfs_readpage_block()
107 for (i = 0; i < pages; i++) { squashfs_readpage_block()
121 /* Decompression failed, mark pages as errored. Target_page is squashfs_readpage_block()
124 for (i = 0; i < pages; i++) { squashfs_readpage_block()
141 int pages, struct page **page) squashfs_read_cache()
155 for (n = 0; n < pages && bytes > 0; n++, squashfs_read_cache()
140 squashfs_read_cache(struct page *target_page, u64 block, int bsize, int pages, struct page **page) squashfs_read_cache() argument
/linux-4.1.27/fs/isofs/
H A Dcompress.c40 * to one zisofs block. Store the data in the @pages array with @pcount
45 struct page **pages, unsigned poffset, zisofs_uncompress_block()
71 if (!pages[i]) zisofs_uncompress_block()
73 memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); zisofs_uncompress_block()
74 flush_dcache_page(pages[i]); zisofs_uncompress_block()
75 SetPageUptodate(pages[i]); zisofs_uncompress_block()
121 if (pages[curpage]) { zisofs_uncompress_block()
122 stream.next_out = page_address(pages[curpage]) zisofs_uncompress_block()
174 if (pages[curpage]) { zisofs_uncompress_block()
175 flush_dcache_page(pages[curpage]); zisofs_uncompress_block()
176 SetPageUptodate(pages[curpage]); zisofs_uncompress_block()
196 * Uncompress data so that pages[full_page] is fully uptodate and possibly
197 * fills in other pages if we have data for them.
200 struct page **pages) zisofs_fill_pages()
215 BUG_ON(!pages[full_page]); zisofs_fill_pages()
220 * pages with the data we have anyway... zisofs_fill_pages()
222 start_off = page_offset(pages[full_page]); zisofs_fill_pages()
261 pcount, pages, poffset, &err); zisofs_fill_pages()
263 pages += poffset >> PAGE_CACHE_SHIFT; zisofs_fill_pages()
283 if (poffset && *pages) { zisofs_fill_pages()
284 memset(page_address(*pages) + poffset, 0, zisofs_fill_pages()
286 flush_dcache_page(*pages); zisofs_fill_pages()
287 SetPageUptodate(*pages); zisofs_fill_pages()
294 * per reference. We inject the additional pages into the page
307 struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; zisofs_readpage() local
332 pages[full_page] = page; zisofs_readpage()
336 pages[i] = grab_cache_page_nowait(mapping, index); zisofs_readpage()
337 if (pages[i]) { zisofs_readpage()
338 ClearPageError(pages[i]); zisofs_readpage()
339 kmap(pages[i]); zisofs_readpage()
343 err = zisofs_fill_pages(inode, full_page, pcount, pages); zisofs_readpage()
345 /* Release any residual pages, do not SetPageUptodate */ zisofs_readpage()
347 if (pages[i]) { zisofs_readpage()
348 flush_dcache_page(pages[i]); zisofs_readpage()
350 SetPageError(pages[i]); zisofs_readpage()
351 kunmap(pages[i]); zisofs_readpage()
352 unlock_page(pages[i]); zisofs_readpage()
354 page_cache_release(pages[i]); zisofs_readpage()
43 zisofs_uncompress_block(struct inode *inode, loff_t block_start, loff_t block_end, int pcount, struct page **pages, unsigned poffset, int *errp) zisofs_uncompress_block() argument
199 zisofs_fill_pages(struct inode *inode, int full_page, int pcount, struct page **pages) zisofs_fill_pages() argument
/linux-4.1.27/arch/mips/mm/
H A Dgup.c38 int write, struct page **pages, int *nr) gup_pte_range()
54 pages[*nr] = page; gup_pte_range()
72 int write, struct page **pages, int *nr) gup_huge_pmd()
89 pages[*nr] = page; gup_huge_pmd()
102 int write, struct page **pages, int *nr) gup_pmd_range()
126 if (!gup_huge_pmd(pmd, addr, next, write, pages,nr)) gup_pmd_range()
129 if (!gup_pte_range(pmd, addr, next, write, pages,nr)) gup_pmd_range()
138 int write, struct page **pages, int *nr) gup_huge_pud()
155 pages[*nr] = page; gup_huge_pud()
168 int write, struct page **pages, int *nr) gup_pud_range()
181 if (!gup_huge_pud(pud, addr, next, write, pages,nr)) gup_pud_range()
184 if (!gup_pmd_range(pud, addr, next, write, pages,nr)) gup_pud_range()
197 struct page **pages) __get_user_pages_fast()
226 * the pagetables and pages from being freed. __get_user_pages_fast()
239 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) __get_user_pages_fast()
248 * get_user_pages_fast() - pin user pages in memory
250 * @nr_pages: number of pages from start to pin
251 * @write: whether pages will be written to
252 * @pages: array that receives pointers to the pages pinned.
255 * Attempt to pin user pages in memory without taking mm->mmap_sem.
259 * Returns number of pages pinned. This may be fewer than the number
260 * requested. If nr_pages is 0 or negative, returns 0. If no pages
264 struct page **pages) get_user_pages_fast()
289 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) get_user_pages_fast()
300 /* Try to get the remaining pages with get_user_pages */ get_user_pages_fast()
302 pages += nr; get_user_pages_fast()
306 write, 0, pages); get_user_pages_fast()
37 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
71 gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd() argument
101 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range() argument
137 gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pud() argument
167 gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pud_range() argument
196 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) __get_user_pages_fast() argument
263 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) get_user_pages_fast() argument
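The kerneldoc above (repeated for the s390 and sh implementations later in this listing) documents get_user_pages_fast(), whose 4.1-era signature is visible in the hit on source line 263. A hedged sketch of the usual pin/use/release pattern; the caller and the page-wide memset are invented for illustration:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Pin a single user page for writing, touch it, then release the pin. */
static int touch_user_page(unsigned long uaddr)
{
	struct page *page;
	void *kaddr;
	int got;

	got = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1 /* write */, &page);
	if (got != 1)
		return got < 0 ? got : -EFAULT;

	kaddr = kmap(page);
	memset(kaddr, 0, PAGE_SIZE);	/* example access through the pin */
	kunmap(page);

	set_page_dirty_lock(page);	/* we wrote to the page */
	put_page(page);			/* drop the reference taken by gup */
	return 0;
}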
/linux-4.1.27/fs/ramfs/
H A Dfile-nommu.c62 * add a contiguous set of pages into a ramfs inode when it's truncated from
69 struct page *pages; ramfs_nommu_expand_for_mapping() local
85 /* allocate enough contiguous pages to be able to satisfy the ramfs_nommu_expand_for_mapping()
87 pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); ramfs_nommu_expand_for_mapping()
88 if (!pages) ramfs_nommu_expand_for_mapping()
91 /* split the high-order page into an array of single pages */ ramfs_nommu_expand_for_mapping()
95 split_page(pages, order); ramfs_nommu_expand_for_mapping()
97 /* trim off any pages we don't actually require */ ramfs_nommu_expand_for_mapping()
99 __free_page(pages + loop); ramfs_nommu_expand_for_mapping()
103 data = page_address(pages); ramfs_nommu_expand_for_mapping()
106 /* attach all the pages to the inode's address space */ ramfs_nommu_expand_for_mapping()
108 struct page *page = pages + loop; ramfs_nommu_expand_for_mapping()
127 __free_page(pages + loop++); ramfs_nommu_expand_for_mapping()
201 * - the pages to be mapped must exist
202 * - the pages be physically contiguous in sequence
210 struct page **pages = NULL, **ptr, *page; ramfs_nommu_get_unmapped_area() local
228 /* gang-find the pages */ ramfs_nommu_get_unmapped_area()
230 pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL); ramfs_nommu_get_unmapped_area()
231 if (!pages) ramfs_nommu_get_unmapped_area()
234 nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages); ramfs_nommu_get_unmapped_area()
236 goto out_free_pages; /* leave if some pages were missing */ ramfs_nommu_get_unmapped_area()
238 /* check the pages for physical adjacency */ ramfs_nommu_get_unmapped_area()
239 ptr = pages; ramfs_nommu_get_unmapped_area()
247 ret = (unsigned long) page_address(pages[0]); ramfs_nommu_get_unmapped_area()
250 ptr = pages; ramfs_nommu_get_unmapped_area()
254 kfree(pages); ramfs_nommu_get_unmapped_area()
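ramfs_nommu_get_unmapped_area() above gang-looks-up the backing pages with find_get_pages() and then checks that they are physically adjacent. A cut-down sketch of the same lookup pattern, with the adjacency check reduced to a comment; the function name is illustrative only:

#include <linux/pagemap.h>
#include <linux/mm.h>

/* Grab up to nr page references starting at pgoff and release them again. */
static unsigned gang_lookup_demo(struct address_space *mapping,
				 pgoff_t pgoff, unsigned nr,
				 struct page **pages)
{
	unsigned got, i;

	got = find_get_pages(mapping, pgoff, nr, pages);

	/* ... a real caller would inspect the pages here, e.g. verify that
	 * page_to_pfn(pages[i]) increases by one between neighbours ... */

	for (i = 0; i < got; i++)
		page_cache_release(pages[i]);	/* drop the gang references */

	return got;
}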
/linux-4.1.27/arch/m32r/include/asm/
H A Dcachectl.h23 #define CACHEABLE 0 /* make pages cacheable */
24 #define UNCACHEABLE 1 /* make pages uncacheable */
H A Duser.h25 * that an integral number of pages is written.
29 * able to write an integer number of pages.
34 size_t u_tsize; /* text size (pages) */
35 size_t u_dsize; /* data size (pages) */
36 size_t u_ssize; /* stack size (pages) */
/linux-4.1.27/arch/sparc/include/asm/
H A Dvaddrs.h25 * 256 pages will be taken as nocache per each
28 * limits enforced: nocache minimum = 256 pages
29 * nocache maximum = 1280 pages
31 #define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */
46 /* Leave one empty page between IO pages at 0xfd000000 and
55 #define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */
/linux-4.1.27/arch/s390/mm/
H A Dgup.c21 unsigned long end, int write, struct page **pages, int *nr) gup_pte_range()
43 pages[*nr] = page; gup_pte_range()
52 unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd()
70 pages[*nr] = page; gup_huge_pmd()
103 unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range()
129 write, pages, nr)) gup_pmd_range()
132 write, pages, nr)) gup_pmd_range()
140 unsigned long end, int write, struct page **pages, int *nr) gup_pud_range()
155 if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr)) gup_pud_range()
167 struct page **pages) __get_user_pages_fast()
196 if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr)) __get_user_pages_fast()
205 * get_user_pages_fast() - pin user pages in memory
207 * @nr_pages: number of pages from start to pin
208 * @write: whether pages will be written to
209 * @pages: array that receives pointers to the pages pinned.
212 * Attempt to pin user pages in memory without taking mm->mmap_sem.
216 * Returns number of pages pinned. This may be fewer than the number
217 * requested. If nr_pages is 0 or negative, returns 0. If no pages
221 struct page **pages) get_user_pages_fast()
227 nr = __get_user_pages_fast(start, nr_pages, write, pages); get_user_pages_fast()
231 /* Try to get the remaining pages with get_user_pages */ get_user_pages_fast()
233 pages += nr; get_user_pages_fast()
235 nr_pages - nr, write, 0, pages); get_user_pages_fast()
20 gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
51 gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd() argument
102 gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range() argument
139 gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pud_range() argument
166 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) __get_user_pages_fast() argument
220 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) get_user_pages_fast() argument
/linux-4.1.27/drivers/gpu/drm/udl/
H A Dudl_gem.c113 if (!obj->pages) udl_gem_fault()
116 page = obj->pages[page_offset]; udl_gem_fault()
132 struct page **pages; udl_gem_get_pages() local
134 if (obj->pages) udl_gem_get_pages()
137 pages = drm_gem_get_pages(&obj->base); udl_gem_get_pages()
138 if (IS_ERR(pages)) udl_gem_get_pages()
139 return PTR_ERR(pages); udl_gem_get_pages()
141 obj->pages = pages; udl_gem_get_pages()
149 drm_free_large(obj->pages); udl_gem_put_pages()
150 obj->pages = NULL; udl_gem_put_pages()
154 drm_gem_put_pages(&obj->base, obj->pages, false, false); udl_gem_put_pages()
155 obj->pages = NULL; udl_gem_put_pages()
174 obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL); udl_gem_vmap()
204 if (obj->pages) udl_gem_free_object()
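udl caches the result of drm_gem_get_pages() in obj->pages and returns them with drm_gem_put_pages(); the omapdrm hits later in this listing follow the same pattern. A stripped-down sketch of that pattern, with the embedding structure and function names invented for illustration:

#include <drm/drmP.h>
#include <linux/err.h>

struct demo_gem_object {
	struct drm_gem_object base;
	struct page **pages;		/* cached shmem backing pages */
};

static int demo_gem_get_pages(struct demo_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)			/* already pinned */
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;
	return 0;
}

static void demo_gem_put_pages(struct demo_gem_object *obj)
{
	/* mark pages dirty/accessed so shmem can write them back if needed */
	drm_gem_put_pages(&obj->base, obj->pages, true, true);
	obj->pages = NULL;
}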
/linux-4.1.27/sound/core/
H A Dsgbuf.c47 for (i = 0; i < sgbuf->pages; i++) { snd_free_sgbuf_pages()
49 continue; /* continuous pages */ snd_free_sgbuf_pages()
71 unsigned int i, pages, chunk, maxpages; snd_malloc_sgbuf_pages() local
82 pages = snd_sgbuf_aligned_pages(size); snd_malloc_sgbuf_pages()
83 sgbuf->tblsize = sgbuf_align_table(pages); snd_malloc_sgbuf_pages()
93 /* allocate pages */ snd_malloc_sgbuf_pages()
95 while (pages > 0) { snd_malloc_sgbuf_pages()
96 chunk = pages; snd_malloc_sgbuf_pages()
103 if (!sgbuf->pages) snd_malloc_sgbuf_pages()
107 size = sgbuf->pages * PAGE_SIZE; snd_malloc_sgbuf_pages()
121 sgbuf->pages += chunk; snd_malloc_sgbuf_pages()
122 pages -= chunk; snd_malloc_sgbuf_pages()
128 dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL); snd_malloc_sgbuf_pages()
141 * compute the max chunk size with continuous pages on sg-buffer
161 /* ok, all on continuous pages */ snd_sgbuf_get_chunk_size()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_gart.c35 * in the GPU's address space. System pages can be mapped into
36 * the aperture and look like contiguous pages from the GPU's
37 * perspective. A page table maps the pages in the aperture
38 * to the actual backing pages in system memory.
229 * radeon_gart_unbind - unbind pages from the gart page table
233 * @pages: number of pages to unbind
235 * Unbinds the requested pages from the gart page table and
239 int pages) radeon_gart_unbind()
251 for (i = 0; i < pages; i++, p++) { radeon_gart_unbind()
252 if (rdev->gart.pages[p]) { radeon_gart_unbind()
253 rdev->gart.pages[p] = NULL; radeon_gart_unbind()
270 * radeon_gart_bind - bind pages into the gart page table
274 * @pages: number of pages to bind
275 * @pagelist: pages to bind
276 * @dma_addr: DMA addresses of pages
279 * Binds the requested pages to the gart page table
284 int pages, struct page **pagelist, dma_addr_t *dma_addr, radeon_gart_bind()
299 for (i = 0; i < pages; i++, p++) { radeon_gart_bind()
300 rdev->gart.pages[p] = pagelist[i]; radeon_gart_bind()
330 if (rdev->gart.pages) { radeon_gart_init()
344 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", radeon_gart_init()
346 /* Allocate pages table */ radeon_gart_init()
347 rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages); radeon_gart_init()
348 if (rdev->gart.pages == NULL) { radeon_gart_init()
374 /* unbind pages */ radeon_gart_fini()
378 vfree(rdev->gart.pages); radeon_gart_fini()
380 rdev->gart.pages = NULL; radeon_gart_fini()
238 radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, int pages) radeon_gart_unbind() argument
283 radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint32_t flags) radeon_gart_bind() argument
/linux-4.1.27/arch/sh/mm/
H A Dgup.c75 unsigned long end, int write, struct page **pages, int *nr) gup_pte_range()
110 pages[*nr] = page; gup_pte_range()
120 int write, struct page **pages, int *nr) gup_pmd_range()
132 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) gup_pmd_range()
140 int write, struct page **pages, int *nr) gup_pud_range()
152 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) gup_pud_range()
164 struct page **pages) __get_user_pages_fast()
183 * the pagetables and pages from being freed. __get_user_pages_fast()
193 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) __get_user_pages_fast()
202 * get_user_pages_fast() - pin user pages in memory
204 * @nr_pages: number of pages from start to pin
205 * @write: whether pages will be written to
206 * @pages: array that receives pointers to the pages pinned.
209 * Attempt to pin user pages in memory without taking mm->mmap_sem.
213 * Returns number of pages pinned. This may be fewer than the number
214 * requested. If nr_pages is 0 or negative, returns 0. If no pages
218 struct page **pages) get_user_pages_fast()
242 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) get_user_pages_fast()
256 /* Try to get the remaining pages with get_user_pages */ get_user_pages_fast()
258 pages += nr; get_user_pages_fast()
261 (end - start) >> PAGE_SHIFT, write, 0, pages); get_user_pages_fast()
74 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
119 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range() argument
139 gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pud_range() argument
163 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) __get_user_pages_fast() argument
217 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) get_user_pages_fast() argument
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_scatter.c55 for (i = 0; i < entry->pages; i++) { drm_sg_cleanup()
87 unsigned long pages, i, j; drm_legacy_sg_alloc() local
104 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; drm_legacy_sg_alloc()
105 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); drm_legacy_sg_alloc()
107 entry->pages = pages; drm_legacy_sg_alloc()
108 entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); drm_legacy_sg_alloc()
114 entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); drm_legacy_sg_alloc()
121 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); drm_legacy_sg_alloc()
129 /* This also forces the mapping of COW pages, so our page list drm_legacy_sg_alloc()
132 memset(entry->virtual, 0, pages << PAGE_SHIFT); drm_legacy_sg_alloc()
139 for (i = (unsigned long)entry->virtual, j = 0; j < pages; drm_legacy_sg_alloc()
158 for (i = 0; i < pages; i++) { drm_legacy_sg_alloc()
H A Dati_pcigart.c62 unsigned long pages; drm_ati_pcigart_cleanup() local
75 pages = (entry->pages <= max_pages) drm_ati_pcigart_cleanup()
76 ? entry->pages : max_pages; drm_ati_pcigart_cleanup()
78 for (i = 0; i < pages; i++) { drm_ati_pcigart_cleanup()
103 unsigned long pages; drm_ati_pcigart_init() local
144 pages = (entry->pages <= max_real_pages) drm_ati_pcigart_init()
145 ? entry->pages : max_real_pages; drm_ati_pcigart_init()
154 for (i = 0; i < pages; i++) { drm_ati_pcigart_init()
159 DRM_ERROR("unable to map PCIGART pages!\n"); drm_ati_pcigart_init()
H A Ddrm_cache.c58 static void drm_cache_flush_clflush(struct page *pages[], drm_cache_flush_clflush() argument
65 drm_clflush_page(*pages++); drm_cache_flush_clflush()
71 drm_clflush_pages(struct page *pages[], unsigned long num_pages) drm_clflush_pages() argument
76 drm_cache_flush_clflush(pages, num_pages); drm_clflush_pages()
86 struct page *page = pages[i]; drm_clflush_pages()
H A Ddrm_vma_manager.c57 * in number of pages, not number of bytes. That means, object sizes and offsets
118 * @pages: Size of object (page-based)
123 * whole requested area (given the size in number of pages as @pages).
132 unsigned long pages) drm_vma_offset_lookup()
137 node = drm_vma_offset_lookup_locked(mgr, start, pages); drm_vma_offset_lookup()
148 * @pages: Size of object (page-based)
159 unsigned long pages) drm_vma_offset_lookup_locked()
184 if (offset < start + pages) drm_vma_offset_lookup_locked()
220 * @pages: Allocation size visible to user-space (in number of pages)
223 * nothing and return 0. @pages is the size of the object given in number of
224 * pages.
232 * @pages is not required to be the same size as the underlying memory object
240 struct drm_vma_offset_node *node, unsigned long pages) drm_vma_offset_add()
252 pages, 0, DRM_MM_SEARCH_DEFAULT); drm_vma_offset_add()
130 drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr, unsigned long start, unsigned long pages) drm_vma_offset_lookup() argument
157 drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, unsigned long start, unsigned long pages) drm_vma_offset_lookup_locked() argument
239 drm_vma_offset_add(struct drm_vma_offset_manager *mgr, struct drm_vma_offset_node *node, unsigned long pages) drm_vma_offset_add() argument
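drm_vma_manager works in units of pages: drm_vma_offset_add() reserves an mmap offset range sized in pages, and drm_vma_offset_lookup() resolves a faulting offset back to its node. A hedged sketch of the add/remove half, assuming a hypothetical GEM-style object whose size is given in bytes:

#include <drm/drm_vma_manager.h>
#include <linux/mm.h>

/* Reserve an mmap offset covering 'size' bytes, expressed in pages. */
static int demo_setup_mmap_offset(struct drm_vma_offset_manager *mgr,
				  struct drm_vma_offset_node *node,
				  size_t size)
{
	return drm_vma_offset_add(mgr, node, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void demo_teardown_mmap_offset(struct drm_vma_offset_manager *mgr,
				      struct drm_vma_offset_node *node)
{
	drm_vma_offset_remove(mgr, node);
}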
/linux-4.1.27/arch/tile/kernel/
H A Dvdso.c52 static struct page **vdso_setup(void *vdso_kbase, unsigned int pages) vdso_setup() argument
57 pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL); vdso_setup()
59 for (i = 0; i < pages - 1; i++) { vdso_setup()
64 pagelist[pages - 1] = virt_to_page(vdso_data); vdso_setup()
65 pagelist[pages] = NULL; vdso_setup()
127 unsigned long pages; setup_vdso_pages() local
138 pages = vdso_pages; setup_vdso_pages()
142 pages = vdso32_pages; setup_vdso_pages()
150 if (pages == 0) setup_vdso_pages()
154 (pages << PAGE_SHIFT) + setup_vdso_pages()
174 * allowed to write those pages. setup_vdso_pages()
176 * those pages but it's then your responsibility to never do that on setup_vdso_pages()
180 * pages though setup_vdso_pages()
183 pages << PAGE_SHIFT, setup_vdso_pages()
H A Dmodule.c39 struct page **pages; module_alloc() local
46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); module_alloc()
47 if (pages == NULL) module_alloc()
50 pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); module_alloc()
51 if (!pages[i]) module_alloc()
59 area->pages = pages; module_alloc()
61 if (map_vm_area(area, prot_rwx, pages)) { module_alloc()
70 __free_page(pages[i]); module_alloc()
71 kfree(pages); module_alloc()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
H A Domap_gem_dmabuf.c53 /* this should be after _get_paddr() to ensure we have pages attached */ omap_gem_map_dma_buf()
85 struct page **pages; omap_gem_dmabuf_begin_cpu_access() local
92 /* make sure we have the pages: */ omap_gem_dmabuf_begin_cpu_access()
93 return omap_gem_get_pages(obj, &pages, true); omap_gem_dmabuf_begin_cpu_access()
108 struct page **pages; omap_gem_dmabuf_kmap_atomic() local
109 omap_gem_get_pages(obj, &pages, false); omap_gem_dmabuf_kmap_atomic()
111 return kmap_atomic(pages[page_num]); omap_gem_dmabuf_kmap_atomic()
124 struct page **pages; omap_gem_dmabuf_kmap() local
125 omap_gem_get_pages(obj, &pages, false); omap_gem_dmabuf_kmap()
127 return kmap(pages[page_num]); omap_gem_dmabuf_kmap()
134 struct page **pages; omap_gem_dmabuf_kunmap() local
135 omap_gem_get_pages(obj, &pages, false); omap_gem_dmabuf_kunmap()
136 kunmap(pages[page_num]); omap_gem_dmabuf_kunmap()
H A Domap_gem.c30 void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
85 * Array of backing pages, if allocated. Note that pages are never
88 struct page **pages; member in struct:omap_gem_object
90 /** addresses corresponding to pages in above array */
122 static int get_pages(struct drm_gem_object *obj, struct page ***pages);
149 int stride_pfn; /* stride in pages */
213 * page faulting to keep track of dirty pages
224 /** ensure backing pages are allocated */ omap_gem_attach_pages()
229 struct page **pages; omap_gem_attach_pages() local
234 WARN_ON(omap_obj->pages); omap_gem_attach_pages()
236 pages = drm_gem_get_pages(obj); omap_gem_attach_pages()
237 if (IS_ERR(pages)) { omap_gem_attach_pages()
238 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); omap_gem_attach_pages()
239 return PTR_ERR(pages); omap_gem_attach_pages()
242 /* for non-cached buffers, ensure the new pages are clean because omap_gem_attach_pages()
253 addrs[i] = dma_map_page(dev->dev, pages[i], omap_gem_attach_pages()
265 omap_obj->pages = pages; omap_gem_attach_pages()
270 drm_gem_put_pages(obj, pages, true, false); omap_gem_attach_pages()
275 /** release backing pages */ omap_gem_detach_pages()
280 /* for non-cached buffers, ensure the new pages are clean because omap_gem_detach_pages()
294 drm_gem_put_pages(obj, omap_obj->pages, true, false); omap_gem_detach_pages()
295 omap_obj->pages = NULL; omap_gem_detach_pages()
343 * pages, only the valid picture part.. so need to adjust for omap_gem_mmap_size()
377 if (omap_obj->pages) { fault_1d()
379 pfn = page_to_pfn(omap_obj->pages[pgoff]); fault_1d()
398 struct page *pages[64]; /* XXX is this too much to have on stack? */ fault_2d() local
405 * Note the height of the slot is also equal to the number of pages fault_2d()
407 * height is 64, then 64 pages fill a 4kb wide by 64 row region. fault_2d()
416 * in pages fault_2d()
458 * Map in pages. Beyond the valid pixel part of the buffer, we set fault_2d()
459 * pages[i] to NULL to get a dummy page mapped in.. if someone fault_2d()
464 memcpy(pages, &omap_obj->pages[base_pgoff], fault_2d()
466 memset(pages + slots, 0, fault_2d()
469 ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true); fault_2d()
510 struct page **pages; omap_gem_fault() local
518 /* if a shmem backed object, make sure we have pages attached now */ omap_gem_fault()
519 ret = get_pages(obj, &pages); omap_gem_fault()
678 struct page **pages; omap_gem_roll() local
679 ret = get_pages(obj, &pages); omap_gem_roll()
682 ret = tiler_pin(omap_obj->block, pages, npages, roll, true); omap_gem_roll()
693 /* Sync the buffer for CPU access.. note pages should already be
717 struct page **pages = omap_obj->pages; omap_gem_dma_sync() local
722 omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0, omap_gem_dma_sync()
750 struct page **pages; omap_gem_get_paddr() local
757 ret = get_pages(obj, &pages); omap_gem_get_paddr()
777 ret = tiler_pin(block, pages, npages, omap_gem_get_paddr()
823 "could not unpin pages: %d\n", ret); omap_gem_put_paddr()
870 /* acquire pages when needed (for example, for DMA where physically
873 static int get_pages(struct drm_gem_object *obj, struct page ***pages) get_pages() argument
878 if (is_shmem(obj) && !omap_obj->pages) { get_pages()
881 dev_err(obj->dev->dev, "could not attach pages\n"); get_pages()
886 /* TODO: even phys-contig.. we should have a list of pages? */ get_pages()
887 *pages = omap_obj->pages; get_pages()
892 /* if !remap, and we don't have pages backing, then fail, rather than
894 * because we don't support swapping pages back out). And 'remap'
902 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, omap_gem_get_pages() argument
908 if (!omap_obj->pages) omap_gem_get_pages()
910 *pages = omap_obj->pages; omap_gem_get_pages()
914 ret = get_pages(obj, pages); omap_gem_get_pages()
919 /* release pages when DMA no longer being performed */ omap_gem_put_pages()
922 /* do something here if we dynamically attach/detach pages.. at omap_gem_put_pages()
924 * released the pages.. omap_gem_put_pages()
938 struct page **pages; omap_gem_vaddr() local
939 int ret = get_pages(obj, &pages); omap_gem_vaddr()
942 omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, omap_gem_vaddr()
961 WARN_ON(!omap_obj->pages); /* this can't happen */ omap_gem_resume()
963 omap_obj->pages, npages, omap_gem_resume()
1296 if (omap_obj->pages) omap_gem_free_object()
1452 * # of pages in the region omap_gem_init()
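omap_gem_attach_pages() above runs every backing page through dma_map_page() so non-cached buffers start out clean for the device. A hedged sketch of that per-page mapping pattern in isolation (generic struct device assumed; example_map_pages_for_dma is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Map every backing page for DMA and record the resulting bus addresses. */
static int example_map_pages_for_dma(struct device *dev, struct page **pages,
                                     dma_addr_t *addrs, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
                                        DMA_TO_DEVICE);
                if (dma_mapping_error(dev, addrs[i]))
                        goto unmap;
        }
        return 0;

unmap:
        while (i--)
                dma_unmap_page(dev, addrs[i], PAGE_SIZE, DMA_TO_DEVICE);
        return -ENOMEM;
}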
/linux-4.1.27/fs/proc/
H A Dmeminfo.c35 unsigned long pages[NR_LRU_LISTS]; meminfo_proc_show() local
55 pages[lru] = global_page_state(NR_LRU_BASE + lru); meminfo_proc_show()
74 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; meminfo_proc_show()
155 K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]), meminfo_proc_show()
156 K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]), meminfo_proc_show()
157 K(pages[LRU_ACTIVE_ANON]), meminfo_proc_show()
158 K(pages[LRU_INACTIVE_ANON]), meminfo_proc_show()
159 K(pages[LRU_ACTIVE_FILE]), meminfo_proc_show()
160 K(pages[LRU_INACTIVE_FILE]), meminfo_proc_show()
161 K(pages[LRU_UNEVICTABLE]), meminfo_proc_show()
H A Dpage.c97 * pseudo flags for the well known (anonymous) memory mapped pages stable_page_flags()
110 * compound pages: export both head/tail info stable_page_flags()
120 * PageTransCompound can be true for non-huge compound pages (slab stable_page_flags()
121 * pages or pages allocated by drivers with __GFP_COMP) because it stable_page_flags()
139 * Caveats on high order pages: page->_count will only be set stable_page_flags()
141 * SLOB won't set PG_slab at all on compound pages. stable_page_flags()
/linux-4.1.27/arch/m68k/mm/
H A Dsun3kmap.c49 unsigned long type, int pages) do_pmeg_mapin()
55 while(pages) { do_pmeg_mapin()
59 pages--; do_pmeg_mapin()
68 int pages; sun3_ioremap() local
87 pages = size / PAGE_SIZE; sun3_ioremap()
91 while(pages) { sun3_ioremap()
95 if(seg_pages > pages) sun3_ioremap()
96 seg_pages = pages; sun3_ioremap()
100 pages -= seg_pages; sun3_ioremap()
48 do_pmeg_mapin(unsigned long phys, unsigned long virt, unsigned long type, int pages) do_pmeg_mapin() argument
/linux-4.1.27/arch/x86/xen/
H A Dgrant-table.c119 struct page **pages; xlated_setup_gnttab_pages() local
127 pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL); xlated_setup_gnttab_pages()
128 if (!pages) xlated_setup_gnttab_pages()
133 kfree(pages); xlated_setup_gnttab_pages()
136 rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */); xlated_setup_gnttab_pages()
140 kfree(pages); xlated_setup_gnttab_pages()
145 pfns[i] = page_to_pfn(pages[i]); xlated_setup_gnttab_pages()
147 vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL); xlated_setup_gnttab_pages()
151 free_xenballooned_pages(nr_grant_frames, pages); xlated_setup_gnttab_pages()
152 kfree(pages); xlated_setup_gnttab_pages()
156 kfree(pages); xlated_setup_gnttab_pages()
/linux-4.1.27/arch/cris/mm/
H A Dinit.c36 /* Free a range of init pages. Virtual addresses. */
52 /* Free the pages occupied by initialization code. */
59 /* Free the pages occupied by initrd code. */
/linux-4.1.27/include/linux/
H A Dsplice.h16 #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
21 #define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
54 struct page **pages; /* page map */ member in struct:splice_pipe_desc
55 struct partial_page *partial; /* pages[] may not be contig */
56 int nr_pages; /* number of populated pages in map */
57 unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
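The SPLICE_F_* flags above are consumed from user space through splice(2). A small user-space sketch that moves a file's page-cache pages into a pipe rather than copying them (error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int pipefd[2];
        int fd;
        ssize_t n;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0 || pipe(pipefd) < 0)
                return 1;

        /* Ask the kernel to move pages instead of copying them. */
        n = splice(fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
        if (n < 0)
                perror("splice");
        else
                printf("spliced %zd bytes into the pipe\n", n);

        close(fd);
        return 0;
}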
H A Dballoon_compaction.h4 * Common interface definitions for making balloon pages movable by compaction.
6 * Despite being perfectly possible to perform ballooned pages migration, they
7 * make a special corner case to compaction scans because balloon pages are not
8 * enlisted at any LRU list like the other pages we do compact / migrate.
24 * pages list, the page reference counter must be raised by one and the
39 * set of exposed rules are satisfied while we are dealing with balloon pages
55 * procedures to find the proper balloon device holding memory pages they'll
60 unsigned long isolated_pages; /* # of isolated pages for migration */
61 spinlock_t pages_lock; /* Protection to pages list */
62 struct list_head pages; /* Pages enqueued & handled to Host */ member in struct:balloon_dev_info
74 INIT_LIST_HEAD(&balloon->pages); balloon_devinfo_init()
93 * balloon_page_movable - test PageBalloon to identify balloon pages
121 * pages list is held before inserting a page into the balloon device.
129 list_add(&page->lru, &balloon->pages); balloon_page_insert()
138 * pages list is held before deleting a page from the balloon device.
170 list_add(&page->lru, &balloon->pages); balloon_page_insert()
H A Dmigrate_mode.h8 * MIGRATE_SYNC will block when migrating pages
H A Dmman.h23 static inline void vm_acct_memory(long pages) vm_acct_memory() argument
25 __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch); vm_acct_memory()
28 static inline void vm_unacct_memory(long pages) vm_unacct_memory() argument
30 vm_acct_memory(-pages); vm_unacct_memory()
H A Dpageblock-flags.h3 * pageblock_nr_pages number of pages.
28 /* Bit indices that affect a whole block of pages */
51 /* Huge pages are a constant size */
58 /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
H A Dpage-isolation.h46 * For isolating all pages in the range finally, the caller has to
47 * free all pages in the range. test_page_isolated() can be used for
63 * Test all pages in [start_pfn, end_pfn) are isolated or not.
H A Dpage_ext.h18 * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
19 * implement generic debug pagealloc feature. The pages are filled with
21 * pages are verified whether the patterns are not corrupted and clear
H A Dpagevec.h5 * pages. A pagevec is a multipage container which is used for that.
20 struct page *pages[PAGEVEC_SIZE]; member in struct:pagevec
62 pvec->pages[pvec->nr++] = page; pagevec_add()
H A Dquicklist.h4 * Fast allocations and disposal of pages. Pages must be in the condition
5 * as needed after allocation when they are freed. Per cpu lists of pages
6 * are kept that only contain node local pages.
H A Dagpgart.h41 size_t pg_total; /* max pages (swap + system) */
42 size_t pg_system; /* max pages (system) */
43 size_t pg_used; /* current pages used */
55 size_t pg_count; /* number of pages */
73 size_t pg_count; /* number of pages */
H A Dpage-flags.h19 * PG_reserved is set for special pages, which can never be swapped out. Some
22 * The PG_private bitflag is set on pagecache pages if they contain filesystem
48 * PG_highmem pages are not permanently mapped into the kernel virtual address
49 * space, they need to be kmapped separately for doing IO on the pages. The
118 * state. These bits are set on pages belonging to the netfs's inodes
319 * A KSM page is one of those write-protected "shared pages" or "merged pages"
393 * flags for PageHead() and PageTail() checks of compound pages so that bit
396 * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
420 * compound page flags with the flags used for page cache pages. Possible
421 * because PageCompound is always set for compound pages and not for
422 * pages on the LRU and/or pagecache.
430 * but makes it impossible to use compound pages for the page cache.
432 * if compound pages enter the page cache.
487 * PageHuge() only returns true for hugetlbfs pages, but not for
488 * normal or transparent huge pages.
491 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
492 * called only in the core VM paths where hugetlbfs pages can't exist.
501 * PageTransCompound returns true for both transparent huge pages
502 * and hugetlbfs pages, so it should only be called when it's known
503 * that hugetlbfs pages aren't involved.
511 * PageTransTail returns true for both transparent huge pages
512 * and hugetlbfs pages, so it should only be called when it's known
513 * that hugetlbfs pages aren't involved.
586 * If network-based swap is enabled, sl*b must keep track of whether pages
H A Dvmalloc.h15 #define VM_MAP 0x00000004 /* vmap()ed pages */
17 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
28 #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
36 struct page **pages; member in struct:vm_struct
57 extern void *vm_map_ram(struct page **pages, unsigned int count,
85 extern void *vmap(struct page **pages, unsigned int count,
124 struct page **pages);
127 pgprot_t prot, struct page **pages);
133 pgprot_t prot, struct page **pages) map_kernel_range_noflush()
132 map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) map_kernel_range_noflush() argument
H A Dcma.h30 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
H A Dirq_cpustat.h6 * architecture. Some arch (like s390) have per cpu hardware pages and
/linux-4.1.27/arch/mips/jazz/
H A Djazzdma.c94 int first, last, pages, frame, i; vdma_alloc() local
115 pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1; vdma_alloc()
120 if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */ vdma_alloc()
127 && last - first < pages) vdma_alloc()
130 if (last - first == pages) vdma_alloc()
136 * Mark pages as allocated vdma_alloc()
153 printk("vdma_alloc: Allocated %d pages starting from %08lx\n", vdma_alloc()
154 pages, laddr); vdma_alloc()
177 * Free previously allocated dma translation pages
179 * it just marks the free'd pages as unused!
189 ("vdma_free: trying to free other's dma pages, laddr=%8lx\n", vdma_free()
200 printk("vdma_free: freed %ld pages starting from %08lx\n", vdma_free()
214 int first, pages; vdma_remap() local
231 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; vdma_remap()
234 printk("vdma_remap: first=%x, pages=%x\n", first, pages); vdma_remap()
235 if (first + pages > VDMA_PGTBL_ENTRIES) { vdma_remap()
242 while (pages > 0 && first < VDMA_PGTBL_ENTRIES) { vdma_remap()
245 printk("Trying to remap other's pages.\n"); vdma_remap()
251 pages--; vdma_remap()
261 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; vdma_remap()
264 for (i = first; i < first + pages; i++) vdma_remap()
267 for (i = first; i < first + pages; i++) vdma_remap()
270 for (i = first; i < first + pages; i++) vdma_remap()
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dbugs_64.c24 * Make sure the first 2MB area is not mapped by huge pages check_bugs()
26 * MTRRs into large pages causes slow downs. check_bugs()
H A Dintel.c60 * not, recommend a BIOS update and disable large pages. early_init_intel()
542 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
543 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
544 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
545 { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
546 { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
547 { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
548 { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" },
549 { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
550 { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
551 { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
552 { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
553 { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
554 { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
555 { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
556 { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
557 { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
558 { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
559 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
560 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
561 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
562 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
563 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
564 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
565 { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
566 { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
567 { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
568 { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
569 { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
570 { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
571 { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
572 { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
573 { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
574 { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
/linux-4.1.27/arch/mips/include/uapi/asm/
H A Dcachectl.h23 #define CACHEABLE 0 /* make pages cacheable */
24 #define UNCACHEABLE 1 /* make pages uncacheable */
H A Dmman.h46 #define MAP_LOCKED 0x8000 /* pages are locked */
68 #define MADV_WILLNEED 3 /* will need these pages */
69 #define MADV_DONTNEED 4 /* don't need these pages */
72 #define MADV_REMOVE 9 /* remove these pages & resources */
76 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
77 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos_drm_buf.c60 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); lowlevel_buffer_allocate()
61 if (!buf->pages) { lowlevel_buffer_allocate()
62 DRM_ERROR("failed to allocate pages.\n"); lowlevel_buffer_allocate()
78 buf->pages[i] = phys_to_page(start_addr); lowlevel_buffer_allocate()
84 buf->pages = dma_alloc_attrs(dev->dev, buf->size, lowlevel_buffer_allocate()
87 if (!buf->pages) { lowlevel_buffer_allocate()
93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); lowlevel_buffer_allocate()
107 dma_free_attrs(dev->dev, buf->size, buf->pages, lowlevel_buffer_allocate()
112 drm_free_large(buf->pages); lowlevel_buffer_allocate()
137 drm_free_large(buf->pages); lowlevel_buffer_deallocate()
139 dma_free_attrs(dev->dev, buf->size, buf->pages, lowlevel_buffer_deallocate()
H A Dexynos_drm_gem.h31 * @write: whether pages will be written to by the caller.
32 * @pages: Array of backing pages.
45 struct page **pages; member in struct:exynos_drm_gem_buf
118 /* map user space allocated by malloc to pages. */
161 /* get pages from user space. */
164 struct page **pages,
167 /* drop the reference to pages. */
168 void exynos_gem_put_pages_to_userptr(struct page **pages,
/linux-4.1.27/arch/x86/mm/
H A Dgup.c72 unsigned long end, int write, struct page **pages, int *nr) gup_pte_range()
100 pages[*nr] = page; gup_pte_range()
118 unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd()
139 pages[*nr] = page; gup_huge_pmd()
152 int write, struct page **pages, int *nr) gup_pmd_range()
183 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) gup_pmd_range()
186 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) gup_pmd_range()
195 unsigned long end, int write, struct page **pages, int *nr) gup_huge_pud()
216 pages[*nr] = page; gup_huge_pud()
229 int write, struct page **pages, int *nr) gup_pud_range()
242 if (!gup_huge_pud(pud, addr, next, write, pages, nr)) gup_pud_range()
245 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) gup_pud_range()
258 struct page **pages) __get_user_pages_fast()
287 * the pagetables and pages from being freed on x86. __get_user_pages_fast()
301 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) __get_user_pages_fast()
310 * get_user_pages_fast() - pin user pages in memory
312 * @nr_pages: number of pages from start to pin
313 * @write: whether pages will be written to
314 * @pages: array that receives pointers to the pages pinned.
317 * Attempt to pin user pages in memory without taking mm->mmap_sem.
321 * Returns number of pages pinned. This may be fewer than the number
322 * requested. If nr_pages is 0 or negative, returns 0. If no pages
326 struct page **pages) get_user_pages_fast()
359 * the pagetables and pages from being freed on x86. get_user_pages_fast()
373 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) get_user_pages_fast()
387 /* Try to get the remaining pages with get_user_pages */ get_user_pages_fast()
389 pages += nr; get_user_pages_fast()
393 write, 0, pages); get_user_pages_fast()
71 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
117 gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd() argument
151 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range() argument
194 gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pud() argument
228 gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pud_range() argument
257 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) __get_user_pages_fast() argument
325 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) get_user_pages_fast() argument
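The gup.c entry above documents get_user_pages_fast(). A hedged sketch of the usual pin/use/release pattern a caller follows (four-argument 4.1-era signature as shown; example_pin_user_buffer is a hypothetical name):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Pin 'nr' user pages starting at 'uaddr' for writing, then release them. */
static int example_pin_user_buffer(unsigned long uaddr, int nr)
{
        struct page **pages;
        int got, i;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        got = get_user_pages_fast(uaddr, nr, 1 /* write */, pages);
        if (got < 0) {
                kfree(pages);
                return got;
        }

        /* ... DMA into or read from the pinned pages here ... */

        for (i = 0; i < got; i++) {
                set_page_dirty_lock(pages[i]); /* we may have written to them */
                put_page(pages[i]);
        }
        kfree(pages);
        return got == nr ? 0 : -EFAULT;
}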
H A Dpageattr.c41 struct page **pages; member in struct:cpa_data
59 void update_page_count(int level, unsigned long pages) update_page_count() argument
63 direct_pages_count[level] += pages; update_page_count()
172 * tlb invalidates for a low number of pages. Caveat: we must __cpa_flush_range()
209 int in_flags, struct page **pages) cpa_flush_array()
232 addr = (unsigned long)page_address(pages[i]); cpa_flush_array()
307 * page-table pages. Thus we can't really use different static_protections()
496 * Calculate the number of pages, which fit into this large try_preserve_large_page()
516 * req_prot is in format of 4k pages. It must be converted to large try_preserve_large_page()
547 * the pages in the range we try to preserve: try_preserve_large_page()
570 * the address is not aligned and the number of pages is try_preserve_large_page()
571 * smaller than the number of pages in the large page. Note try_preserve_large_page()
572 * that we limited the number of possible pages already to try_preserve_large_page()
573 * the number of pages in the large page. try_preserve_large_page()
577 * The address is aligned and the number of pages try_preserve_large_page()
960 * Map trailing 4K pages. populate_pmd()
986 * smaller pages. populate_pud()
1021 * Map everything starting from the Gb boundary, possibly with 1G pages populate_pud()
1131 struct page *page = cpa->pages[cpa->curpage]; __change_page_attr()
1248 struct page *page = cpa->pages[cpa->curpage]; cpa_process_alias()
1300 * Store the remaining nr of pages for the large page __change_page_attr_set_clr()
1323 * Adjust the number of pages with the result of the __change_page_attr_set_clr()
1341 struct page **pages) change_page_attr_set_clr()
1392 cpa.pages = pages; change_page_attr_set_clr()
1429 cpa.flags, pages); change_page_attr_set_clr()
1453 static inline int cpa_set_pages_array(struct page **pages, int numpages, cpa_set_pages_array() argument
1457 CPA_PAGES_ARRAY, pages); cpa_set_pages_array()
1460 static inline int cpa_clear_pages_array(struct page **pages, int numpages, cpa_clear_pages_array() argument
1464 CPA_PAGES_ARRAY, pages); cpa_clear_pages_array()
1680 static int _set_pages_array(struct page **pages, int addrinarray, _set_pages_array() argument
1690 if (PageHighMem(pages[i])) _set_pages_array()
1692 start = page_to_pfn(pages[i]) << PAGE_SHIFT; _set_pages_array()
1698 ret = cpa_set_pages_array(pages, addrinarray, _set_pages_array()
1705 0, CPA_PAGES_ARRAY, pages); _set_pages_array()
1712 if (PageHighMem(pages[i])) _set_pages_array()
1714 start = page_to_pfn(pages[i]) << PAGE_SHIFT; _set_pages_array()
1721 int set_pages_array_uc(struct page **pages, int addrinarray) set_pages_array_uc() argument
1723 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); set_pages_array_uc()
1727 int set_pages_array_wc(struct page **pages, int addrinarray) set_pages_array_wc() argument
1729 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC); set_pages_array_wc()
1741 int set_pages_array_wb(struct page **pages, int addrinarray) set_pages_array_wb() argument
1749 retval = cpa_clear_pages_array(pages, addrinarray, set_pages_array_wb()
1755 if (PageHighMem(pages[i])) set_pages_array_wb()
1757 start = page_to_pfn(pages[i]) << PAGE_SHIFT; set_pages_array_wb()
1810 * we may need to break large pages for 64-bit kernel text __set_pages_p()
1829 * we may need to break large pages for 64-bit kernel text __set_pages_np()
1847 * Large pages for identity mappings are not used at boot time __kernel_map_pages()
208 cpa_flush_array(unsigned long *start, int numpages, int cache, int in_flags, struct page **pages) cpa_flush_array() argument
1338 change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, struct page **pages) change_page_attr_set_clr() argument
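pageattr.c above provides array variants for changing the caching attributes of a whole batch of pages. A rough sketch of the usual flip-to-WC / restore-to-WB pairing a driver performs around device use (x86-only; the declarations are assumed to live in asm/cacheflush.h in this tree, and the helper names are hypothetical):

#include <asm/cacheflush.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Mark a page array write-combining before handing it to an aperture. */
static int example_make_pages_wc(struct page **pages, int npages)
{
        return set_pages_array_wc(pages, npages);
}

/* Restore write-back caching before the pages return to the allocator. */
static void example_restore_pages_wb(struct page **pages, int npages)
{
        if (set_pages_array_wb(pages, npages))
                pr_warn("failed to restore write-back caching\n");
}

The gma500 GTT code later in these results follows exactly this pairing: set_pages_array_wc() at insert time, set_pages_array_wb() at remove time.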
/linux-4.1.27/arch/sparc/mm/
H A Dgup.c21 unsigned long end, int write, struct page **pages, int *nr) gup_pte_range()
62 pages[*nr] = page; gup_pte_range()
70 unsigned long end, int write, struct page **pages, gup_huge_pmd()
88 pages[*nr] = page; gup_huge_pmd()
119 int write, struct page **pages, int *nr) gup_pmd_range()
133 write, pages, nr)) gup_pmd_range()
136 pages, nr)) gup_pmd_range()
144 int write, struct page **pages, int *nr) gup_pud_range()
156 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) gup_pud_range()
164 struct page **pages) __get_user_pages_fast()
185 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) __get_user_pages_fast()
194 struct page **pages) get_user_pages_fast()
233 if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) get_user_pages_fast()
248 /* Try to get the remaining pages with get_user_pages */ get_user_pages_fast()
250 pages += nr; get_user_pages_fast()
253 (end - start) >> PAGE_SHIFT, write, 0, pages); get_user_pages_fast()
20 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pte_range() argument
69 gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_huge_pmd() argument
118 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pmd_range() argument
143 gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) gup_pud_range() argument
163 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) __get_user_pages_fast() argument
193 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) get_user_pages_fast() argument
/linux-4.1.27/sound/firewire/
H A Dpackets-buffer.c25 unsigned int packets_per_page, pages; iso_packets_buffer_init() local
42 pages = DIV_ROUND_UP(count, packets_per_page); iso_packets_buffer_init()
45 pages, direction); iso_packets_buffer_init()
51 p = page_address(b->iso_buffer.pages[page_index]); iso_packets_buffer_init()
/linux-4.1.27/sound/pci/ctxfi/
H A Dctvmem.h21 #define CT_PTP_NUM 4 /* num of device page table pages */
28 /* The chip can handle the page table of 4k pages
29 * (emu20k1 can handle even 8k pages, but we don't use it right now)
46 struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
H A Den_resources.c86 struct page **pages; mlx4_en_map_buffer() local
92 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); mlx4_en_map_buffer()
93 if (!pages) mlx4_en_map_buffer()
97 pages[i] = virt_to_page(buf->page_list[i].buf); mlx4_en_map_buffer()
99 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); mlx4_en_map_buffer()
100 kfree(pages); mlx4_en_map_buffer()
/linux-4.1.27/arch/blackfin/kernel/
H A Ddma-mapping.c38 printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__, dma_alloc_init()
47 static unsigned long __alloc_dma_pages(unsigned int pages) __alloc_dma_pages() argument
57 start = bitmap_find_next_zero_area(dma_page, dma_pages, 0, pages, 0); __alloc_dma_pages()
60 bitmap_set(dma_page, start, pages); __alloc_dma_pages()
66 static void __free_dma_pages(unsigned long addr, unsigned int pages) __free_dma_pages() argument
71 if ((page + pages) > dma_pages) { __free_dma_pages()
77 bitmap_clear(dma_page, page, pages); __free_dma_pages()
/linux-4.1.27/fs/exofs/
H A Dore_raid.c37 * __stripe_pages_2d is a 2d array of pages, and it is also a corner turn.
58 struct page **pages; member in struct:__stripe_pages_2d::__1_page_stripe
80 struct page *pages[group_width]; _sp2d_alloc() member in struct:_alloc_all_bytes::__alloc_1p_arrays
126 /* First *pages is marked for kfree of the buffer */ _sp2d_alloc()
130 sp2d->_1p_stripes[i].pages = __a1pa->pages; _sp2d_alloc()
157 struct page *page = _1ps->pages[c]; _sp2d_reset()
167 memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages)); _sp2d_reset()
184 kfree(sp2d->_1p_stripes[i].pages); _sp2d_free()
236 _1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], _gen_xor_unit()
237 _1ps->pages, 0, sp2d->data_devs, _gen_xor_unit()
240 _1ps->tx = async_gen_syndrome(_1ps->pages, 0, _gen_xor_unit()
263 _1ps->pages[si->cur_comp] = page; _ore_add_stripe_page()
323 /* We want to only read those pages not in cache so worst case _alloc_read_4_write()
415 page = ios->sp2d->_1p_stripes[p].pages[c]; _add_to_r4w_last_page()
433 /* loop on all devices all pages */ _mark_read4write_pages_uptodate()
452 * It is assumed to be called after the to_be_written pages of the first stripe
455 * NOTE: We call ios->r4w->lock_fn for all pages needed for parity calculations
456 * These pages are held at sp2d[p].pages[c] but with
457 * sp2d[p].page_is_read[c] = true. At _sp2d_reset these pages are
463 * need_to_read_pages_count is the actual number of pages not present in cache.
465 * approximation? In this mode the read pages are put in the empty places of
466 * ios->sp2d[p][*], xor is calculated the same way. These pages are
491 struct page **pp = &_1ps->pages[c]; _read_4_write_first_stripe()
498 /* to-be-written pages start here */ _read_4_write_first_stripe()
510 /* Mark read-pages to be cache_released */ _read_4_write_first_stripe()
561 BUG_ON(_1ps->pages[c]); _read_4_write_last_stripe()
567 _1ps->pages[c] = page; _read_4_write_last_stripe()
568 /* Mark read-pages to be cache_released */ _read_4_write_last_stripe()
602 ios_read->pages = ios->pages; _read_4_write_execute()
638 struct page **pages = ios->parity_pages + ios->cur_par_page; _ore_add_parity_unit() local
649 /* If first stripe, Read in all read4write pages _ore_add_parity_unit()
656 /* If last stripe r4w pages of last stripe */ _ore_add_parity_unit()
661 pages[i] = _raid_page_alloc(); _ore_add_parity_unit()
662 if (unlikely(!pages[i])) _ore_add_parity_unit()
671 ret = _ore_add_stripe_unit(ios, &array_start, 0, pages, _ore_add_parity_unit()
711 /* If IO returned an error pages might need unlocking */ _ore_free_raid_stuff()
H A Dinode.c43 unsigned pages = min_t(unsigned, expected_pages, exofs_max_io_pages() local
46 return pages; exofs_max_io_pages()
55 struct page **pages; member in struct:page_collect
61 * And the pages should not be unlocked.
76 pcol->pages = NULL; _pcol_init()
89 pcol->pages = NULL; _pcol_reset()
107 unsigned pages; pcol_try_alloc() local
110 pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages); pcol_try_alloc()
112 for (; pages; pages >>= 1) { pcol_try_alloc()
113 pcol->pages = kmalloc(pages * sizeof(struct page *), pcol_try_alloc()
115 if (likely(pcol->pages)) { pcol_try_alloc()
116 pcol->alloc_pages = pages; pcol_try_alloc()
128 kfree(pcol->pages); pcol_free()
129 pcol->pages = NULL; pcol_free()
143 pcol->pages[pcol->nr_pages++] = page; pcol_add_page()
192 /* Called at the end of reads, to optionally unlock pages and update their
215 struct page *page = pcol->pages[i]; __readpages_done()
220 continue; /* osd might add more pages at end */ __readpages_done()
257 struct page *page = pcol->pages[i]; _unlock_pcol_pages()
285 /* Left over pages are passed to the next io */ _maybe_not_all_in_one_io()
289 src_page = pcol_src->pages + pcol_src->nr_pages; _maybe_not_all_in_one_io()
297 pcol->pages[i] = *src_page++; _maybe_not_all_in_one_io()
315 if (!pcol->pages) read_exec()
328 ios->pages = pcol->pages; read_exec()
345 /* pages ownership was passed to pcol_copy */ read_exec()
375 * collect as many contiguous pages as possible. If a discontinuity is
439 if (!pcol->pages) { readpage_strip()
453 EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p " readpage_strip()
474 struct list_head *pages, unsigned nr_pages) exofs_readpages()
481 ret = read_cache_pages(mapping, pages, readpage_strip, &pcol); exofs_readpages()
543 struct page *page = pcol->pages[i]; writepages_done()
548 continue; /* osd might add more pages to a bio */ writepages_done()
634 if (!pcol->pages) write_exec()
654 ios->pages = pcol_copy->pages; write_exec()
659 /* pages ownership was passed to pcol_copy */ write_exec()
690 * It will try to collect as many contiguous pages as possible. If a
751 if (!pcol->pages) { writepage_strip()
833 struct page *page = pcol.pages[i]; exofs_writepages()
473 exofs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) exofs_readpages() argument
/linux-4.1.27/include/uapi/asm-generic/
H A Dmman-common.h35 #define MADV_WILLNEED 3 /* will need these pages */
36 #define MADV_DONTNEED 4 /* don't need these pages */
39 #define MADV_REMOVE 9 /* remove these pages & resources */
45 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
46 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
H A Dmman.h9 #define MAP_LOCKED 0x2000 /* pages are locked */
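The MADV_* hints and MAP_LOCKED flag above reach the kernel through madvise(2) and mmap(2). A small user-space sketch that maps a file and advises the kernel about its pages:

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        struct stat st;
        void *map;
        int fd;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;
        if (fstat(fd, &st) < 0 || st.st_size == 0)
                return 1;

        map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        /* We are about to touch these pages: start reading them in now. */
        madvise(map, st.st_size, MADV_WILLNEED);

        /* ... use the mapping ... */

        /* This mapping no longer needs the pages; they may be reclaimed. */
        madvise(map, st.st_size, MADV_DONTNEED);

        munmap(map, st.st_size);
        close(fd);
        return 0;
}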
/linux-4.1.27/arch/powerpc/include/asm/
H A Duser.h26 * that an integral number of pages is written.
30 * to write an integer number of pages.
34 size_t u_tsize; /* text size (pages) */
35 size_t u_dsize; /* data size (pages) */
36 size_t u_ssize; /* stack size (pages) */
H A Dhighmem.h6 * Used in CONFIG_HIGHMEM systems for memory pages which
41 * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
43 * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
H A Dpte-hash64.h6 * Common bits between 4K and 64K pages in a linux-style PTE.
11 * have full read/write to pages above PAGE_OFFSET (pages below that
H A Dpte-hash64-64k.h5 #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
12 * test this, so a multi-bit mask will work. For combo pages, this
14 * all the sub bits. For real 64k pages, we now have the assembly set
25 * 4k pages as the same assembly will be used to insert 64K pages
44 * With 64K pages on hash table, we have a special PTE format that
46 * in order to deal with 64K made of 4K HW pages. Thus we override the
H A Dpte-40x.h22 * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
23 * support down to 1k pages), this is done in the TLBMiss exception
25 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
/linux-4.1.27/arch/cris/include/asm/
H A Duser.h25 * that an integral number of pages is written.
29 * to write an integer number of pages.
34 size_t u_tsize; /* text size (pages) */
35 size_t u_dsize; /* data size (pages) */
36 size_t u_ssize; /* stack size (pages) */
/linux-4.1.27/arch/ia64/include/asm/
H A Duser.h21 * that an integral number of pages is written.
25 * to write an integer number of pages.
40 size_t u_tsize; /* text size (pages) */
41 size_t u_dsize; /* data size (pages) */
42 size_t u_ssize; /* stack size (pages) */
H A Dtlb.h17 * (4) Release the pages that were freed up in step (2).
64 struct page **pages; member in struct:mmu_gather
137 /* lastly, release the freed pages */ ia64_tlb_flush_mmu_free()
143 free_page_and_swap_cache(tlb->pages[i]); ia64_tlb_flush_mmu_free()
148 * freed pages that where gathered up to this point.
164 tlb->pages = (void *)addr; __tlb_alloc_page()
175 tlb->pages = tlb->local; tlb_gather_mmu()
199 if (tlb->pages != tlb->local) tlb_finish_mmu()
200 free_pages((unsigned long)tlb->pages, 0); tlb_finish_mmu()
212 if (!tlb->nr && tlb->pages == tlb->local) __tlb_remove_page()
215 tlb->pages[tlb->nr++] = page; __tlb_remove_page()
/linux-4.1.27/arch/alpha/include/asm/
H A Duser.h27 * that an integral number of pages is written.
31 * to write an integer number of pages.
35 size_t u_tsize; /* text size (pages) */
36 size_t u_dsize; /* data size (pages) */
37 size_t u_ssize; /* stack size (pages) */
/linux-4.1.27/drivers/media/v4l2-core/
H A Dvideobuf-dma-sg.c95 static struct scatterlist *videobuf_pages_to_sg(struct page **pages, videobuf_pages_to_sg() argument
101 if (NULL == pages[0]) videobuf_pages_to_sg()
108 if (PageHighMem(pages[0])) videobuf_pages_to_sg()
109 /* DMA to highmem pages might not work */ videobuf_pages_to_sg()
111 sg_set_page(&sglist[0], pages[0], videobuf_pages_to_sg()
115 if (NULL == pages[i]) videobuf_pages_to_sg()
117 if (PageHighMem(pages[i])) videobuf_pages_to_sg()
119 sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0); videobuf_pages_to_sg()
177 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); videobuf_dma_init_user_locked()
178 if (NULL == dma->pages) videobuf_dma_init_user_locked()
181 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", videobuf_dma_init_user_locked()
187 dma->pages, NULL); videobuf_dma_init_user_locked()
214 dprintk(1, "init kernel [%d pages]\n", nr_pages); videobuf_dma_init_kernel()
240 dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); videobuf_dma_init_kernel()
272 dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n", videobuf_dma_init_overlay()
290 if (dma->pages) { videobuf_dma_map()
291 dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, videobuf_dma_map()
351 if (dma->pages) { videobuf_dma_free()
353 page_cache_release(dma->pages[i]); videobuf_dma_free()
354 kfree(dma->pages); videobuf_dma_free()
355 dma->pages = NULL; videobuf_dma_free()
503 int err, pages; __videobuf_iolock() local
520 pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT; __videobuf_iolock()
523 pages); __videobuf_iolock()
556 pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT; __videobuf_iolock()
558 bus, pages); __videobuf_iolock()
637 vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */ __videobuf_mmap_mapper()
H A Dvideobuf-vmalloc.c162 int pages; __videobuf_iolock() local
179 pages = PAGE_ALIGN(vb->size); __videobuf_iolock()
192 mem->vaddr = vmalloc_user(pages); __videobuf_iolock()
194 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); __videobuf_iolock()
197 dprintk(1, "vmalloc is at addr %p (%d pages)\n", __videobuf_iolock()
198 mem->vaddr, pages); __videobuf_iolock()
238 int retval, pages; __videobuf_mmap_mapper() local
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); __videobuf_mmap_mapper()
257 mem->vaddr = vmalloc_user(pages); __videobuf_mmap_mapper()
259 printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); __videobuf_mmap_mapper()
262 dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); __videobuf_mmap_mapper()
H A Dvideobuf2-dma-sg.c40 struct page **pages; member in struct:vb2_dma_sg_buf
68 struct page *pages; vb2_dma_sg_alloc_compacted() local
77 pages = NULL; vb2_dma_sg_alloc_compacted()
78 while (!pages) { vb2_dma_sg_alloc_compacted()
79 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | vb2_dma_sg_alloc_compacted()
81 if (pages) vb2_dma_sg_alloc_compacted()
86 __free_page(buf->pages[last_page]); vb2_dma_sg_alloc_compacted()
92 split_page(pages, order); vb2_dma_sg_alloc_compacted()
94 buf->pages[last_page++] = &pages[i]; vb2_dma_sg_alloc_compacted()
128 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), vb2_dma_sg_alloc()
130 if (!buf->pages) vb2_dma_sg_alloc()
137 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, vb2_dma_sg_alloc()
160 dprintk(1, "%s: Allocated buffer of %d pages\n", vb2_dma_sg_alloc()
170 __free_page(buf->pages[num_pages]); vb2_dma_sg_alloc()
172 kfree(buf->pages); vb2_dma_sg_alloc()
188 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, vb2_dma_sg_put()
196 __free_page(buf->pages[i]); vb2_dma_sg_put()
197 kfree(buf->pages); vb2_dma_sg_put()
261 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), vb2_dma_sg_get_userptr()
263 if (!buf->pages) vb2_dma_sg_get_userptr()
294 buf->pages[num_pages_from_user] = pfn_to_page(pfn); vb2_dma_sg_get_userptr()
302 buf->pages, vb2_dma_sg_get_userptr()
308 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, vb2_dma_sg_get_userptr()
330 put_page(buf->pages[num_pages_from_user]); vb2_dma_sg_get_userptr()
333 kfree(buf->pages); vb2_dma_sg_get_userptr()
352 dprintk(1, "%s: Releasing userspace buffer of %d pages\n", vb2_dma_sg_put_userptr()
360 set_page_dirty_lock(buf->pages[i]); vb2_dma_sg_put_userptr()
362 put_page(buf->pages[i]); vb2_dma_sg_put_userptr()
364 kfree(buf->pages); vb2_dma_sg_put_userptr()
379 buf->vaddr = vm_map_ram(buf->pages, vb2_dma_sg_vaddr()
409 ret = vm_insert_page(vma, uaddr, buf->pages[i++]); vb2_dma_sg_mmap()
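videobuf2-dma-sg above turns a page array into a scatter-gather table with sg_alloc_table_from_pages(). A hedged sketch of that step in isolation (six-argument signature as used in this tree; example_pages_to_sgt is a hypothetical name):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Build an sg_table describing 'npages' pages, starting 'offset' bytes in. */
static struct sg_table *example_pages_to_sgt(struct page **pages, int npages,
                                             unsigned long offset,
                                             unsigned long size)
{
        struct sg_table *sgt;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table_from_pages(sgt, pages, npages, offset, size,
                                      GFP_KERNEL)) {
                kfree(sgt);
                return ERR_PTR(-ENOMEM);
        }
        return sgt; /* caller: sg_free_table(sgt); kfree(sgt); when done */
}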
H A Dvideobuf2-vmalloc.c26 struct page **pages; member in struct:vb2_vmalloc_buf
105 buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), vb2_vmalloc_get_userptr()
107 if (!buf->pages) vb2_vmalloc_get_userptr()
115 buf->pages, NULL); vb2_vmalloc_get_userptr()
119 buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1, vb2_vmalloc_get_userptr()
132 put_page(buf->pages[n_pages]); vb2_vmalloc_get_userptr()
133 kfree(buf->pages); vb2_vmalloc_get_userptr()
147 if (buf->pages) { vb2_vmalloc_put_userptr()
152 set_page_dirty_lock(buf->pages[i]); vb2_vmalloc_put_userptr()
153 put_page(buf->pages[i]); vb2_vmalloc_put_userptr()
155 kfree(buf->pages); vb2_vmalloc_put_userptr()
H A Dvideobuf2-dma-contig.c470 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, vb2_dc_get_user_pages() argument
488 pages[i] = pfn_to_page(pfn); vb2_dc_get_user_pages()
494 n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL); vb2_dc_get_user_pages()
498 pr_err("got only %d of %d user pages\n", n, n_pages); vb2_dc_get_user_pages()
500 put_page(pages[--n]); vb2_dc_get_user_pages()
541 * so all that can be done to support such 'pages' is to try to convert
578 struct page **pages; vb2_dc_get_userptr() local
612 pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL); vb2_dc_get_userptr()
613 if (!pages) { vb2_dc_get_userptr()
615 pr_err("failed to allocate pages table\n"); vb2_dc_get_userptr()
641 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir); vb2_dc_get_userptr()
647 kfree(pages); vb2_dc_get_userptr()
651 pr_err("failed to get user pages\n"); vb2_dc_get_userptr()
662 ret = sg_alloc_table_from_pages(sgt, pages, n_pages, vb2_dc_get_userptr()
669 /* pages are no longer needed */ vb2_dc_get_userptr()
670 kfree(pages); vb2_dc_get_userptr()
671 pages = NULL; vb2_dc_get_userptr()
712 if (pages && !vma_is_io(buf->vma)) vb2_dc_get_userptr()
714 put_page(pages[--n_pages]); vb2_dc_get_userptr()
720 kfree(pages); /* kfree is NULL-proof */ vb2_dc_get_userptr()
/linux-4.1.27/arch/ia64/kernel/
H A Duncached.c9 * allocator first utilizes the spare (spill) pages found in the EFI
10 * memmap and will then start converting cached pages to uncached ones
12 * pool of pages per node.
70 * Add a new chunk of uncached memory pages to the specified pool.
75 * This is accomplished by first allocating a granule of cached memory pages
76 * and then converting them to uncached memory pages.
98 /* attempt to allocate a granule's worth of cached memory pages */ uncached_add_chunk()
108 /* convert the memory pages from cached to uncached */ uncached_add_chunk()
153 * The chunk of memory pages has been converted to uncached so now we uncached_add_chunk()
179 * @n_pages: number of contiguous pages to allocate
181 * Allocate the specified number of contiguous uncached pages on the
182 * the requested node. If not enough contiguous uncached pages are available
222 * @n_pages: number of contiguous pages to free
224 * Free the specified number of uncached pages.
249 * Called at boot time to build a map of pages that can be used for
/linux-4.1.27/net/rds/
H A Dinfo.c48 * buffer is big enough. The destination pages that make up the buffer
65 struct page **pages; member in struct:rds_info_iterator
113 * get_user_pages() called flush_dcache_page() on the pages for us.
122 iter->addr = kmap_atomic(*iter->pages); rds_info_copy()
127 "bytes %lu\n", *iter->pages, iter->addr, rds_info_copy()
140 iter->pages++; rds_info_copy()
167 struct page **pages = NULL; rds_info_getsockopt() local
191 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); rds_info_getsockopt()
192 if (!pages) { rds_info_getsockopt()
196 ret = get_user_pages_fast(start, nr_pages, 1, pages); rds_info_getsockopt()
215 iter.pages = pages; rds_info_getsockopt()
238 for (i = 0; pages && i < nr_pages; i++) rds_info_getsockopt()
239 put_page(pages[i]); rds_info_getsockopt()
240 kfree(pages); rds_info_getsockopt()
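rds_info_copy() above uses kmap_atomic() to fill user pages that were pinned with get_user_pages_fast() and may live in highmem. A minimal sketch of that map/copy/unmap pattern (example_copy_to_page is a hypothetical name):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy 'len' bytes into 'page' at 'offset'; the page may be in highmem. */
static void example_copy_to_page(struct page *page, unsigned int offset,
                                 const void *src, size_t len)
{
        void *addr = kmap_atomic(page);

        memcpy(addr + offset, src, len);
        kunmap_atomic(addr);
        flush_dcache_page(page); /* keep user-visible caches coherent */
}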
/linux-4.1.27/drivers/misc/
H A Dvmw_balloon.c26 * acts like a "balloon" that can be inflated to reclaim physical pages by
28 * freeing up the underlying machine pages so they can be allocated to
56 * measured in pages.
74 * Rates for releasing pages while deflating balloon.
87 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
97 * while memory is reclaimed, and won't take pages from emergency
105 /* Maximum number of refused pages we accumulate during inflation cycle */
184 /* list of reserved physical pages */
185 struct list_head pages; member in struct:vmballoon
187 /* transient list of non-balloonable pages */
191 /* balloon size in pages */
198 /* adjustment rates (pages per second) */
315 * fear that guest will need it. Host may reject some pages, we need to
341 * the pool of available (to the guest) pages.
364 * Quickly release all pages allocated for the balloon. This function is
366 * Unlike normal "deflate" we do not (shall not) notify host of the pages
374 list_for_each_entry_safe(page, next, &b->pages, lru) { vmballoon_pop()
394 /* free all pages, skipping monitor unlock */ vmballoon_reset()
407 * is satisfied. "Refused" pages are released at the end of inflation cycle
408 * (when we allocate b->rate_alloc pages).
445 * Place page on the list of non-balloonable pages vmballoon_reserve_page()
456 list_add(&page->lru, &b->pages); vmballoon_reserve_page()
487 * Release pages that were allocated while attempting to inflate the
523 * free pages in the guest quickly (if the balloon target is high). vmballoon_inflate()
524 * As a side-effect, draining free pages helps to inform (force) vmballoon_inflate()
527 * all available CPU cycles if too many pages are allocated in a vmballoon_inflate()
574 * allocated b->rate_alloc pages, let's pause, vmballoon_inflate()
593 /* We allocated enough pages, let's take a break. */ vmballoon_inflate()
630 /* free pages to reach target */ vmballoon_deflate()
631 list_for_each_entry_safe(page, next, &b->pages, lru) { vmballoon_deflate()
697 "target: %8d pages\n" vmballoon_debug_show()
698 "current: %8d pages\n", vmballoon_debug_show()
703 "rateNoSleepAlloc: %8d pages/sec\n" vmballoon_debug_show()
704 "rateSleepAlloc: %8d pages/sec\n" vmballoon_debug_show()
705 "rateFree: %8d pages/sec\n", vmballoon_debug_show()
793 INIT_LIST_HEAD(&balloon.pages); vmballoon_init()
834 * additional spurious resets from guest touching deallocated pages. vmballoon_exit()
/linux-4.1.27/drivers/hwmon/pmbus/
H A Dpmbus.c74 /* Sensors detected on all pages */ pmbus_find_sensor_groups()
75 for (page = 0; page < info->pages; page++) { pmbus_find_sensor_groups()
101 if (!info->pages) { pmbus_identify()
105 * maximum number of pages has been reached. Assume that pmbus_identify()
106 * this is the number of pages supported by the chip. pmbus_identify()
116 info->pages = page; pmbus_identify()
118 info->pages = 1; pmbus_identify()
175 info->pages = id->driver_data; pmbus_probe()
182 * Use driver_data to set the number of pages supported by the chip.
H A Ducd9200.c97 * Calculate number of configured pages (rails) from PHASE_INFO ucd9200_probe()
102 info->pages = 0; ucd9200_probe()
106 info->pages++; ucd9200_probe()
108 if (!info->pages) { ucd9200_probe()
112 dev_info(&client->dev, "%d rails configured\n", info->pages); ucd9200_probe()
115 * Set PHASE registers on all pages to 0xff to ensure that phase ucd9200_probe()
122 for (i = 0; i < info->pages; i++) { ucd9200_probe()
143 if (info->pages > 1) ucd9200_probe()
153 for (i = 1; i < info->pages; i++) ucd9200_probe()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Drw26.c189 size_t size, struct page ***pages, ll_get_user_pages()
196 *pages = NULL; ll_get_user_pages()
203 OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages)); ll_get_user_pages()
204 if (*pages) { ll_get_user_pages()
206 (rw == READ), *pages); ll_get_user_pages()
208 OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages)); ll_get_user_pages()
215 * @pages: array of page struct pointers underlying target buffer */ ll_free_user_pages()
216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) ll_free_user_pages() argument
222 set_page_dirty_lock(pages[i]); ll_free_user_pages()
223 page_cache_release(pages[i]); ll_free_user_pages()
225 kvfree(pages); ll_free_user_pages()
240 struct page **pages = pv->ldp_pages; ll_direct_rw_pages() local
277 src_page = (rw == WRITE) ? pages[i] : vmpage; ll_direct_rw_pages()
278 dst_page = (rw == WRITE) ? vmpage : pages[i]; ll_direct_rw_pages()
337 struct page **pages, int page_count) ll_direct_IO_26_seg()
339 struct ll_dio_pages pvec = { .ldp_pages = pages, ll_direct_IO_26_seg()
384 "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", ll_direct_IO_26()
400 * 1. Need inode mutex to operate transient pages. ll_direct_IO_26()
407 struct page **pages; ll_direct_IO_26() local
418 result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); ll_direct_IO_26()
423 result, file_offset, pages, ll_direct_IO_26()
425 ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); ll_direct_IO_26()
434 size > (PAGE_CACHE_SIZE / sizeof(*pages)) * ll_direct_IO_26()
188 ll_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages, int *max_pages) ll_get_user_pages() argument
333 ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, int rw, struct inode *inode, struct address_space *mapping, size_t size, loff_t file_offset, struct page **pages, int page_count) ll_direct_IO_26_seg() argument
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_dmabuf.c60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); i915_gem_map_dma_buf()
64 src = obj->pages->sgl; i915_gem_map_dma_buf()
66 for (i = 0; i < obj->pages->nents; i++) { i915_gem_map_dma_buf()
114 struct page **pages; i915_gem_dmabuf_vmap() local
134 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); i915_gem_dmabuf_vmap()
135 if (pages == NULL) i915_gem_dmabuf_vmap()
139 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) i915_gem_dmabuf_vmap()
140 pages[i++] = sg_page_iter_page(&sg_iter); i915_gem_dmabuf_vmap()
142 obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); i915_gem_dmabuf_vmap()
143 drm_free_large(pages); i915_gem_dmabuf_vmap()
258 obj->pages = sg; i915_gem_object_get_pages_dmabuf()
266 obj->pages, DMA_BIDIRECTIONAL); i915_gem_object_put_pages_dmabuf()
/linux-4.1.27/drivers/block/zram/
H A Dzram_drv.h66 /* Flags for zram pages (table[page_no].value) */
84 atomic64_t compr_data_size; /* compressed size of pages stored */
92 atomic64_t zero_pages; /* no. of zero filled pages */
93 atomic64_t pages_stored; /* no. of pages currently stored */
94 atomic_long_t max_used_pages; /* no. of maximum pages stored */
109 * the number of pages zram can consume for storing compressed data
/linux-4.1.27/arch/powerpc/kvm/
H A Dbook3s_64_vio.c56 __free_page(stt->pages[i]); release_spapr_tce_table()
71 page = stt->pages[vmf->pgoff]; kvm_spapr_tce_fault()
126 stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); kvm_vm_ioctl_create_spapr_tce()
127 if (!stt->pages[i]) kvm_vm_ioctl_create_spapr_tce()
144 if (stt->pages[i]) kvm_vm_ioctl_create_spapr_tce()
145 __free_page(stt->pages[i]); kvm_vm_ioctl_create_spapr_tce()
/linux-4.1.27/arch/alpha/include/uapi/asm/
H A Dmman.h38 #define MCL_CURRENT 8192 /* lock all currently mapped pages */
44 #define MADV_WILLNEED 3 /* will need these pages */
46 #define MADV_DONTNEED 6 /* don't need these pages */
49 #define MADV_REMOVE 9 /* remove these pages & resources */
53 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
54 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
/linux-4.1.27/drivers/xen/
H A Dballoon.c97 /* List of ballooned pages, threaded through the mem_map array. */
119 /* Lowmem is re-populated first, so highmem pages go at list tail. */ __balloon_append()
217 * pages with PG_reserved bit not set; online_pages_range() does not allow page
433 * Ensure that ballooned highmem pages don't have kmaps. decrease_reservation()
436 * reads PTEs to obtain pages (and hence needs the original decrease_reservation()
525 * alloc_xenballooned_pages - get pages that have been ballooned out
526 * @nr_pages: Number of pages to get
527 * @pages: pages returned
528 * @highmem: allow highmem pages
531 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) alloc_xenballooned_pages() argument
539 pages[pgno++] = page; alloc_xenballooned_pages()
554 balloon_append(pages[--pgno]); alloc_xenballooned_pages()
563 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
564 * @nr_pages: Number of pages
565 * @pages: pages to return
567 void free_xenballooned_pages(int nr_pages, struct page **pages) free_xenballooned_pages() argument
574 if (pages[i]) free_xenballooned_pages()
575 balloon_append(pages[i]); free_xenballooned_pages()
587 unsigned long pages) balloon_add_region()
594 * the 'mem' command line parameter), don't add pages beyond balloon_add_region()
597 extra_pfn_end = min(max_pfn, start_pfn + pages); balloon_add_region()
638 * Initialize the balloon with pages from the extra memory balloon_init()
586 balloon_add_region(unsigned long start_pfn, unsigned long pages) balloon_add_region() argument
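balloon.c above exports alloc_xenballooned_pages()/free_xenballooned_pages() so other Xen code can borrow page slots that are currently ballooned out (the grant-table setup earlier in these results does exactly that). A hedged sketch of the borrow/return pattern (three-argument form as shown; example_borrow_ballooned_pages is a hypothetical name):

#include <linux/errno.h>
#include <linux/slab.h>
#include <xen/balloon.h>

/* Borrow 'nr' ballooned-out page slots, use them, then hand them back. */
static int example_borrow_ballooned_pages(int nr)
{
        struct page **pages;
        int rc;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(nr, pages, false /* lowmem only */);
        if (rc) {
                kfree(pages);
                return rc;
        }

        /* ... map grant references or foreign frames into these pages ... */

        free_xenballooned_pages(nr, pages);
        kfree(pages);
        return 0;
}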
H A Dgntdev.c4 * Device for accessing (in user-space) pages that have been granted by other
95 struct page **pages; member in struct:grant_map
99 static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
122 if (map->pages) gntdev_free_map()
123 gnttab_free_pages(map->count, map->pages); gntdev_free_map()
124 kfree(map->pages); gntdev_free_map()
147 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); gntdev_alloc_map()
153 NULL == add->pages) gntdev_alloc_map()
156 if (gnttab_alloc_pages(count, add->pages)) gntdev_alloc_map()
230 if (map->pages && !use_ptemod) gntdev_put_map()
283 pfn_to_kaddr(page_to_pfn(map->pages[i])); map_grant_pages()
293 * to the kernel linear addresses of the struct pages. map_grant_pages()
299 pfn_to_kaddr(page_to_pfn(map->pages[i])); map_grant_pages()
300 BUG_ON(PageHighMem(map->pages[i])); map_grant_pages()
313 map->pages, map->count); map_grant_pages()
330 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) __unmap_grant_pages() argument
337 if (pgno >= offset && pgno < offset + pages) { __unmap_grant_pages()
338 /* No need for kmap, pages are in lowmem */ __unmap_grant_pages()
339 uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); __unmap_grant_pages()
347 unmap_data.pages = map->pages + offset; __unmap_grant_pages()
348 unmap_data.count = pages; __unmap_grant_pages()
354 for (i = 0; i < pages; i++) { __unmap_grant_pages()
365 static int unmap_grant_pages(struct grant_map *map, int offset, int pages) unmap_grant_pages() argument
369 pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); unmap_grant_pages()
374 while (pages && !err) { unmap_grant_pages()
375 while (pages && map->unmap_ops[offset].handle == -1) { unmap_grant_pages()
377 pages--; unmap_grant_pages()
380 while (range < pages) { unmap_grant_pages()
389 pages -= range; unmap_grant_pages()
433 return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT]; gntdev_vma_find_special_page()
846 map->pages[i]); gntdev_mmap()
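unmap_grant_pages() above walks a window of map entries, skips slots whose handle is -1, and unmaps each contiguous mapped run in one batched call. A user-space toy of that skip-then-batch loop, with a small handle table and hypothetical is_mapped()/unmap_batch() helpers standing in for the handle check and the real grant unmap:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy handle table: -1 means the slot was never mapped. */
    static int handles[8] = { -1, 3, 4, -1, -1, 7, 8, 9 };

    static bool is_mapped(int pgno)
    {
            return handles[pgno] != -1;
    }

    /* Hypothetical stand-in for the batched grant-unmap call. */
    static int unmap_batch(int offset, int count)
    {
            int i;

            printf("unmap [%d..%d)\n", offset, offset + count);
            for (i = 0; i < count; i++)
                    handles[offset + i] = -1;
            return 0;
    }

    static int unmap_window(int offset, int pages)
    {
            int err = 0;

            while (pages && !err) {
                    /* Skip entries that were never mapped. */
                    while (pages && !is_mapped(offset)) {
                            offset++;
                            pages--;
                    }
                    if (!pages)
                            break;
                    /* Extend the run over the contiguous mapped entries. */
                    int range = 1;
                    while (range < pages && is_mapped(offset + range))
                            range++;
                    err = unmap_batch(offset, range);
                    offset += range;
                    pages -= range;
            }
            return err;
    }

    int main(void)
    {
            return unmap_window(0, 8);
    }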
H A Dprivcmd.c69 static void free_page_list(struct list_head *pages) free_page_list() argument
73 list_for_each_entry_safe(p, n, pages, lru) free_page_list()
76 INIT_LIST_HEAD(pages); free_page_list()
80 * Given an array of items in userspace, return a list of pages
128 * over a list of pages.
316 struct page **pages = vma->vm_private_data; mmap_batch_fn() local
321 cur_pages = &pages[st->index]; mmap_batch_fn()
398 struct page **pages; alloc_empty_pages() local
400 pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); alloc_empty_pages()
401 if (pages == NULL) alloc_empty_pages()
404 rc = alloc_xenballooned_pages(numpgs, pages, 0); alloc_empty_pages()
408 kfree(pages); alloc_empty_pages()
412 vma->vm_private_data = pages; alloc_empty_pages()
483 * pages required for the auto_translated_physmap case. privcmd_ioctl_mmap_batch()
583 struct page **pages = vma->vm_private_data; privcmd_close() local
587 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) privcmd_close()
590 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); privcmd_close()
592 free_xenballooned_pages(numpgs, pages); privcmd_close()
594 pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", privcmd_close()
596 kfree(pages); privcmd_close()
H A Dxlate_mmu.c70 struct page **pages; member in struct:remap_data
80 struct page *page = info->pages[info->index++]; remap_pte_fn()
101 struct page **pages) xen_xlate_remap_gfn_array()
115 data.pages = pages; xen_xlate_remap_gfn_array()
127 int nr, struct page **pages) xen_xlate_unmap_gfn_range()
135 pfn = page_to_pfn(pages[i]); xen_xlate_unmap_gfn_range()
96 xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *mfn, int nr, int *err_ptr, pgprot_t prot, unsigned domid, struct page **pages) xen_xlate_remap_gfn_array() argument
126 xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, int nr, struct page **pages) xen_xlate_unmap_gfn_range() argument
/linux-4.1.27/drivers/gpu/drm/gma500/
H A Dgtt.c89 struct page **pages; psb_gtt_insert() local
92 if (r->pages == NULL) { psb_gtt_insert()
100 pages = r->pages; psb_gtt_insert()
104 set_pages_array_wc(pages, r->npage); psb_gtt_insert()
109 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), psb_gtt_insert()
114 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), psb_gtt_insert()
149 set_pages_array_wb(r->pages, r->npage); psb_gtt_remove()
158 * Roll an existing pinned mapping by moving the pages through the GTT.
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), psb_gtt_roll()
188 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), psb_gtt_roll()
196 * psb_gtt_attach_pages - attach and pin GEM pages
199 * Pin and build an in kernel list of the pages that back our GEM object.
200 * While we hold this the pages cannot be swapped out. This is protected
205 struct page **pages; psb_gtt_attach_pages() local
207 WARN_ON(gt->pages); psb_gtt_attach_pages()
209 pages = drm_gem_get_pages(&gt->gem); psb_gtt_attach_pages()
210 if (IS_ERR(pages)) psb_gtt_attach_pages()
211 return PTR_ERR(pages); psb_gtt_attach_pages()
214 gt->pages = pages; psb_gtt_attach_pages()
220 * psb_gtt_detach_pages - detach and unpin GEM pages
223 * Undo the effect of psb_gtt_attach_pages. At this point the pages
230 drm_gem_put_pages(&gt->gem, gt->pages, true, false); psb_gtt_detach_pages()
231 gt->pages = NULL; psb_gtt_detach_pages()
235 * psb_gtt_pin - pin pages into the GTT
238 * Pin a set of pages into the GTT. The pins are refcounted so that
263 gt->pages, (gpu_base + gt->offset), psb_gtt_pin()
323 * @backed: resource should be backed by stolen pages
342 /* The start of the GTT is the stolen pages */ psb_gtt_alloc_range()
458 /* CDV doesn't report this. In which case the system has 64 gtt pages */ psb_gtt_init()
532 * Insert vram stolen pages into the GTT psb_gtt_init()
537 dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", psb_gtt_init()
574 if (range->pages) { psb_gtt_restore()
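The attach/detach pair documented above amounts to pinning the GEM object's backing pages and remembering them in the range. A hedged sketch of that pairing, using a trimmed-down stand-in for gtt_range that carries only the fields used here (the real layout is in gtt.h below):

    #include <drm/drmP.h>

    /* Trimmed-down stand-in for the driver's gtt_range (sketch only). */
    struct gtt_range_sketch {
            struct drm_gem_object gem;
            struct page **pages;    /* backing pages while attached */
            int npage;
    };

    static int attach_backing_pages(struct gtt_range_sketch *gt)
    {
            struct page **pages = drm_gem_get_pages(&gt->gem); /* pins them */

            if (IS_ERR(pages))
                    return PTR_ERR(pages);
            gt->npage = gt->gem.size / PAGE_SIZE;
            gt->pages = pages;
            return 0;
    }

    static void detach_backing_pages(struct gtt_range_sketch *gt)
    {
            /* dirty = true, accessed = false, as in the call quoted above */
            drm_gem_put_pages(&gt->gem, gt->pages, true, false);
            gt->pages = NULL;
    }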
H A Dgtt.h51 struct page **pages; /* Backing pages if present */ member in struct:gtt_range
52 int npage; /* Number of backing pages */
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dsvc_rdma_recvfrom.c54 * Replace the pages in the rq_argpages array with the pages from the SGE in
55 * the RDMA_RECV completion. The SGL should contain full pages up until the
68 page = ctxt->pages[0]; rdma_build_arg_xdr()
89 rqstp->rq_arg.pages = &rqstp->rq_pages[0]; rdma_build_arg_xdr()
91 rqstp->rq_arg.pages = &rqstp->rq_pages[1]; rdma_build_arg_xdr()
95 page = ctxt->pages[sge_no]; rdma_build_arg_xdr()
105 /* If not all pages were used from the SGL, free the remaining ones */ rdma_build_arg_xdr()
108 page = ctxt->pages[sge_no++]; rdma_build_arg_xdr()
155 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; rdma_read_chunk_lcl()
160 rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1]; rdma_read_chunk_lcl()
164 head->arg.pages[pg_no], pg_off, rdma_read_chunk_lcl()
252 frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); rdma_read_chunk_frmr()
261 head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no]; rdma_read_chunk_frmr()
266 rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1]; rdma_read_chunk_frmr()
270 head->arg.pages[pg_no], 0, rdma_read_chunk_frmr()
369 /* If there was additional inline content, append it to the end of arg.pages.
371 * pages are needed for RDMA READ.
390 destp = page_address(rqstp->rq_arg.pages[page_no]); rdma_copy_tail()
404 destp = page_address(rqstp->rq_arg.pages[page_no]); rdma_copy_tail()
408 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1]; rdma_copy_tail()
440 * head context keeps all the pages that comprise the rdma_read_chunks()
456 head->arg.pages = &head->pages[0]; rdma_read_chunks()
459 head->arg.pages = &head->pages[head->count]; rdma_read_chunks()
506 /* Detach arg pages. svc_recv will replenish them */ rdma_read_chunks()
520 /* Copy RPC pages */ rdma_read_complete()
523 rqstp->rq_pages[page_no] = head->pages[page_no]; rdma_read_complete()
540 /* Point rq_arg.pages past header */ rdma_read_complete()
541 rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; rdma_read_complete()
546 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; rdma_read_complete()
/linux-4.1.27/fs/nfs/
H A Dnfs3acl.c17 struct page *pages[NFSACL_MAXPAGES] = { }; nfs3_get_acl() local
20 /* The xdr layer may allocate pages here. */ nfs3_get_acl()
21 .pages = pages, nfs3_get_acl()
61 /* pages may have been allocated at the xdr layer. */ nfs3_get_acl()
62 for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++) nfs3_get_acl()
63 __free_page(args.pages[count]); nfs3_get_acl()
122 struct page *pages[NFSACL_MAXPAGES]; __nfs3_proc_setacls() local
127 .pages = pages, __nfs3_proc_setacls()
161 args.pages[args.npages] = alloc_page(GFP_KERNEL); __nfs3_proc_setacls()
162 if (args.pages[args.npages] == NULL) __nfs3_proc_setacls()
199 __free_page(args.pages[args.npages]); __nfs3_proc_setacls()
H A Dpnfs_dev.c102 struct page **pages = NULL; nfs4_get_device_info() local
123 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); nfs4_get_device_info()
124 if (!pages) nfs4_get_device_info()
128 pages[i] = alloc_page(gfp_flags); nfs4_get_device_info()
129 if (!pages[i]) nfs4_get_device_info()
135 pdev->pages = pages; nfs4_get_device_info()
157 __free_page(pages[i]); nfs4_get_device_info()
158 kfree(pages); nfs4_get_device_info()
/linux-4.1.27/arch/sparc/include/uapi/asm/
H A Dmman.h9 #define MAP_NORESERVE 0x40 /* don't reserve swap pages */
18 #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
/linux-4.1.27/arch/powerpc/include/uapi/asm/
H A Dmman.h16 #define MAP_NORESERVE 0x40 /* don't reserve swap pages */
23 #define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
/linux-4.1.27/arch/avr32/include/asm/
H A Dtlbflush.h20 * - flush_tlb_range(vma, start, end) flushes a range of pages
21 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
H A Duser.h34 * that an integral number of pages is written.
38 * to write an integer number of pages.
47 size_t u_tsize; /* text size (pages) */
48 size_t u_dsize; /* data size (pages) */
49 size_t u_ssize; /* stack size (pages) */
/linux-4.1.27/tools/vm/
H A Dpage-types.c166 static int opt_list; /* list pages (in ranges) */
224 static unsigned long pages2mb(unsigned long pages) pages2mb() argument
226 return (pages * page_size) >> 20; pages2mb()
278 unsigned long pages) kpageflags_read()
280 return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); kpageflags_read()
285 unsigned long pages) pagemap_read()
287 return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); pagemap_read()
464 /* hide non-hugeTLB compound pages */ well_known_flags()
589 #define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ walk_pfn()
597 unsigned long pages; walk_pfn() local
602 pages = kpageflags_read(buf, index, batch); walk_pfn()
603 if (pages == 0) walk_pfn()
606 for (i = 0; i < pages; i++) walk_pfn()
609 index += pages; walk_pfn()
610 count -= pages; walk_pfn()
619 unsigned long pages; walk_vma() local
625 pages = pagemap_read(buf, index, batch); walk_vma()
626 if (pages == 0) walk_vma()
629 for (i = 0; i < pages; i++) { walk_vma()
635 index += pages; walk_vma()
636 count -= pages; walk_vma()
712 " -a|--addr addr-spec Walk a range of pages\n" usage()
713 " -b|--bits bits-spec Walk pages with specified bits\n" usage()
719 " -X|--hwpoison hwpoison pages\n" usage()
720 " -x|--unpoison unpoison pages\n" usage()
727 " N one page at offset N (unit: pages)\n" usage()
728 " N+M pages range from N to N+M-1\n" usage()
729 " N,M pages range from N to M-1\n" usage()
730 " N, pages range from N to end\n" usage()
731 " ,M pages range from 0 to M-1\n" usage()
819 printf("%s\tInode: %u\tSize: %llu (%llu pages)\n", show_file()
871 /* determine cached pages */ walk_file()
276 kpageflags_read(uint64_t *buf, unsigned long index, unsigned long pages) kpageflags_read() argument
283 pagemap_read(uint64_t *buf, unsigned long index, unsigned long pages) pagemap_read() argument
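The tool above reads /proc/kpageflags and /proc/<pid>/pagemap in large batches; each virtual page corresponds to one 64-bit pagemap entry (bit 63 = present, bit 62 = swapped, low 55 bits = PFN). A stand-alone sketch of the same lookup for a single address of the current process (seeing non-zero PFN bits may require root on later kernels):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            uintptr_t vaddr = (uintptr_t)&page_size;        /* any mapped address */
            uint64_t entry;
            off_t offset = (vaddr / page_size) * sizeof(entry);
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* One 64-bit entry per virtual page, indexed by vaddr / page_size. */
            if (pread(fd, &entry, sizeof(entry), offset) != (ssize_t)sizeof(entry)) {
                    close(fd);
                    return 1;
            }
            printf("present=%d swapped=%d pfn=0x%llx\n",
                   (int)(entry >> 63) & 1, (int)(entry >> 62) & 1,
                   (unsigned long long)(entry & ((1ULL << 55) - 1)));
            close(fd);
            return 0;
    }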
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_sgdma.c34 node->pages = NULL; nv04_sgdma_bind()
37 node->pages = nvbe->ttm.dma_address; nv04_sgdma_bind()
69 node->pages = NULL; nv50_sgdma_bind()
72 node->pages = nvbe->ttm.dma_address; nv50_sgdma_bind()
/linux-4.1.27/arch/microblaze/include/asm/
H A Dhighmem.h4 * Used in CONFIG_HIGHMEM systems for memory pages which
37 * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
39 * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
H A Dpgalloc.h28 * are all 0's and I want to be able to use these zero'd pages elsewhere
43 extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
44 extern atomic_t zero_sz; /* # currently pre-zero'd pages */
45 extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
46 extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
47 extern atomic_t zerototal; /* # pages zero'd over time */
H A Dtlbflush.h51 * pages. We don't need to do anything here, there's nothing special
52 * about our page-table pages. -- paulus
/linux-4.1.27/arch/m68k/include/asm/
H A Dkexec.h6 /* Maximum physical address we can use pages from */
H A Duser.h23 number of pages is written.
27 to write an integer number of pages.
28 The minimum core file size is 3 pages, or 12288 bytes.
65 unsigned long int u_tsize; /* Text segment size (pages). */
66 unsigned long int u_dsize; /* Data segment size (pages). */
67 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/tools/testing/selftests/vm/
H A Dhugepage-shm.c6 * memory that is backed by huge pages. The application uses the flag
8 * requesting huge pages.
11 * huge pages. That means that if one requires a fixed address, a huge page
24 * total amount of shared memory in pages. To set it to 16GB on a system
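The selftest above exercises the SHM_HUGETLB path. A trimmed sketch of that request; the segment size must be a multiple of the huge page size and enough huge pages must be reserved via /proc/sys/vm/nr_hugepages:

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_HUGETLB
    #define SHM_HUGETLB 04000               /* from <linux/shm.h> */
    #endif

    #define LENGTH (256UL * 1024 * 1024)    /* multiple of the huge page size */

    int main(void)
    {
            void *addr;
            int shmid = shmget(IPC_PRIVATE, LENGTH,
                               SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);

            if (shmid < 0) {
                    perror("shmget");
                    return 1;
            }
            addr = shmat(shmid, NULL, 0);   /* kernel picks a suitable address */
            if (addr == (void *)-1) {
                    perror("shmat");
                    shmctl(shmid, IPC_RMID, NULL);
                    return 1;
            }
            ((char *)addr)[0] = 1;          /* touch the first huge page */
            shmdt(addr);
            shmctl(shmid, IPC_RMID, NULL);
            return 0;
    }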
/linux-4.1.27/drivers/staging/android/ion/
H A Dion_heap.c37 struct page **pages = vmalloc(sizeof(struct page *) * npages); ion_heap_map_kernel() local
38 struct page **tmp = pages; ion_heap_map_kernel()
40 if (!pages) ion_heap_map_kernel()
56 vaddr = vmap(pages, npages, VM_MAP, pgprot); ion_heap_map_kernel()
57 vfree(pages); ion_heap_map_kernel()
106 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) ion_heap_clear_pages() argument
108 void *addr = vm_map_ram(pages, num, -1, pgprot); ion_heap_clear_pages()
124 struct page *pages[32]; ion_heap_sglist_zero() local
127 pages[p++] = sg_page_iter_page(&piter); ion_heap_sglist_zero()
128 if (p == ARRAY_SIZE(pages)) { ion_heap_sglist_zero()
129 ret = ion_heap_clear_pages(pages, p, pgprot); ion_heap_sglist_zero()
136 ret = ion_heap_clear_pages(pages, p, pgprot); ion_heap_sglist_zero()
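ion_heap_map_kernel() above gathers the buffer's backing pages into a temporary array and vmap()s them into one contiguous kernel range; the array can be freed as soon as the mapping is installed. A condensed sketch of that step, assuming npages matches the pages covered by the sg_table:

    #include <linux/scatterlist.h>
    #include <linux/vmalloc.h>

    /* Map the pages behind an sg_table into one contiguous kernel range;
     * undo later with vunmap(vaddr). */
    static void *map_sgt_to_kernel(struct sg_table *table, int npages,
                                   pgprot_t prot)
    {
            struct page **pages = vmalloc(sizeof(struct page *) * npages);
            struct page **tmp = pages;
            struct sg_page_iter piter;
            void *vaddr;

            if (!pages)
                    return NULL;
            for_each_sg_page(table->sgl, &piter, table->nents, 0)
                    *tmp++ = sg_page_iter_page(&piter);
            vaddr = vmap(pages, npages, VM_MAP, prot);
            vfree(pages);   /* vmap() no longer needs the temporary array */
            return vaddr;
    }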
H A Dion_system_heap.c130 struct list_head pages; ion_system_heap_allocate() local
142 INIT_LIST_HEAD(&pages); ion_system_heap_allocate()
148 list_add_tail(&page->lru, &pages); ion_system_heap_allocate()
161 list_for_each_entry_safe(page, tmp_page, &pages, lru) { ion_system_heap_allocate()
173 list_for_each_entry_safe(page, tmp_page, &pages, lru) ion_system_heap_allocate()
188 /* uncached pages come from the page pools, zero them before returning ion_system_heap_free()
251 seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", ion_system_heap_debug_show()
254 seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", ion_system_heap_debug_show()
365 unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; ion_system_contig_heap_free() local
368 for (i = 0; i < pages; i++) ion_system_contig_heap_free()
/linux-4.1.27/arch/mips/ar7/
H A Dmemory.c61 unsigned long pages; prom_meminit() local
63 pages = memsize() >> PAGE_SHIFT; prom_meminit()
64 add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM); prom_meminit()
/linux-4.1.27/drivers/gpu/drm/vgem/
H A Dvgem_drv.c47 drm_gem_put_pages(&obj->base, obj->pages, false, false); vgem_gem_put_pages()
48 obj->pages = NULL; vgem_gem_put_pages()
64 if (vgem_obj->pages) vgem_gem_free_object()
67 vgem_obj->pages = NULL; vgem_gem_free_object()
74 struct page **pages; vgem_gem_get_pages() local
76 if (obj->pages || obj->use_dma_buf) vgem_gem_get_pages()
79 pages = drm_gem_get_pages(&obj->base); vgem_gem_get_pages()
80 if (IS_ERR(pages)) { vgem_gem_get_pages()
81 return PTR_ERR(pages); vgem_gem_get_pages()
84 obj->pages = pages; vgem_gem_get_pages()
109 obj->pages[page_offset]); vgem_gem_fault()
/linux-4.1.27/fs/ntfs/
H A Dcompress.c133 * ntfs_decompress - decompress a compression block into an array of pages
134 * @dest_pages: destination array of pages
150 * destination pages @dest_pages starting at index @dest_index into @dest_pages
225 /* Second stage: finalize completed pages. */ ntfs_decompress()
308 * completed pages. ntfs_decompress()
455 * 2. Get hold of all pages corresponding to this/these compression block(s).
457 * 4. Decompress it into the corresponding pages.
461 * Warning: We have to be careful what we do about existing pages. They might
469 * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
513 * Number of pages required to store the uncompressed data from all ntfs_read_compressed_block()
522 struct page **pages; ntfs_read_compressed_block() local
534 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS); ntfs_read_compressed_block()
540 if (unlikely(!pages || !bhs)) { ntfs_read_compressed_block()
542 kfree(pages); ntfs_read_compressed_block()
554 pages[xpage] = page; ntfs_read_compressed_block()
556 * The remaining pages need to be allocated and inserted into the page ntfs_read_compressed_block()
568 kfree(pages); ntfs_read_compressed_block()
579 pages[i] = grab_cache_page_nowait(mapping, offset); ntfs_read_compressed_block()
580 page = pages[i]; ntfs_read_compressed_block()
595 pages[i] = NULL; ntfs_read_compressed_block()
600 * We have the runlist, and all the destination pages we need to fill. ntfs_read_compressed_block()
754 page = pages[cur_page]; ntfs_read_compressed_block()
775 pages[cur_page] = NULL; ntfs_read_compressed_block()
784 page = pages[cur_page]; ntfs_read_compressed_block()
801 /* Uncompressed cb, copy it to the destination pages. */ ntfs_read_compressed_block()
804 * before we read all the pages and use block_read_full_page() ntfs_read_compressed_block()
805 * on all full pages instead (we still have to treat partial ntfs_read_compressed_block()
806 * pages especially but at least we are getting rid of the ntfs_read_compressed_block()
807 * synchronous io for the majority of pages. ntfs_read_compressed_block()
809 * could just return block_read_full_page(pages[xpage]) as long ntfs_read_compressed_block()
814 /* First stage: copy data into destination pages. */ ntfs_read_compressed_block()
816 page = pages[cur_page]; ntfs_read_compressed_block()
827 page = pages[cur_page]; ntfs_read_compressed_block()
836 /* Second stage: finalize pages. */ ntfs_read_compressed_block()
838 page = pages[cur2_page]; ntfs_read_compressed_block()
854 pages[cur2_page] = NULL; ntfs_read_compressed_block()
866 err = ntfs_decompress(pages, &cur_page, &cur_ofs, ntfs_read_compressed_block()
879 /* Release the unfinished pages. */ ntfs_read_compressed_block()
881 page = pages[prev_cur_page]; ntfs_read_compressed_block()
888 pages[prev_cur_page] = NULL; ntfs_read_compressed_block()
905 /* Clean up if we have any pages left. Should never happen. */ ntfs_read_compressed_block()
907 page = pages[cur_page]; ntfs_read_compressed_block()
909 ntfs_error(vol->sb, "Still have pages left! " ntfs_read_compressed_block()
918 pages[cur_page] = NULL; ntfs_read_compressed_block()
922 /* We no longer need the list of pages. */ ntfs_read_compressed_block()
923 kfree(pages); ntfs_read_compressed_block()
958 page = pages[i]; ntfs_read_compressed_block()
967 kfree(pages); ntfs_read_compressed_block()
H A Dfile.c81 * disk (if relevant complete pages are already uptodate in the page cache then
262 * thousands of pages or as in the above example more than ntfs_attr_extend_initialized()
263 * two and a half million pages! ntfs_attr_extend_initialized()
265 * TODO: For sparse pages could optimize this workload by using ntfs_attr_extend_initialized()
267 * would be set in readpage for sparse pages and here we would ntfs_attr_extend_initialized()
268 * not need to mark dirty any pages which have this bit set. ntfs_attr_extend_initialized()
274 * call readpage() on pages which are not in sparse regions as ntfs_attr_extend_initialized()
276 * number of pages we read and make dirty in the case of sparse ntfs_attr_extend_initialized()
494 * __ntfs_grab_cache_pages - obtain a number of locked pages
495 * @mapping: address space mapping from which to obtain page cache pages
496 * @index: starting index in @mapping at which to begin obtaining pages
497 * @nr_pages: number of page cache pages to obtain
498 * @pages: array of pages in which to return the obtained page cache pages
501 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
509 pgoff_t index, const unsigned nr_pages, struct page **pages, __ntfs_grab_cache_pages()
517 pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | __ntfs_grab_cache_pages()
519 if (!pages[nr]) { __ntfs_grab_cache_pages()
534 pages[nr] = *cached_page; __ntfs_grab_cache_pages()
544 unlock_page(pages[--nr]); __ntfs_grab_cache_pages()
545 page_cache_release(pages[nr]); __ntfs_grab_cache_pages()
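A hedged sketch of the grab-and-lock step described above, using the generic find_or_create_page() helper rather than the driver's cached-page fast path; each page comes back locked and referenced, and anything already taken is rolled back on failure:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/pagemap.h>

    static int grab_locked_pages(struct address_space *mapping, pgoff_t index,
                                 unsigned int nr, struct page **pages)
    {
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    /* Returns the page locked and with a reference held. */
                    pages[i] = find_or_create_page(mapping, index + i, GFP_NOFS);
                    if (!pages[i])
                            goto err_out;
            }
            return 0;

    err_out:
            while (i--) {
                    unlock_page(pages[i]);
                    page_cache_release(pages[i]);
            }
            return -ENOMEM;
    }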
559 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
560 * @pages: array of destination pages
561 * @nr_pages: number of pages in @pages
566 * with i_mutex held on the inode (@pages[0]->mapping->host). There are
567 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
568 * data has not yet been copied into the @pages.
575 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
583 static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ntfs_prepare_pages_for_non_resident_write() argument
613 BUG_ON(!pages); ntfs_prepare_pages_for_non_resident_write()
614 BUG_ON(!*pages); ntfs_prepare_pages_for_non_resident_write()
615 vi = pages[0]->mapping->host; ntfs_prepare_pages_for_non_resident_write()
620 vi->i_ino, ni->type, pages[0]->index, nr_pages, ntfs_prepare_pages_for_non_resident_write()
626 page = pages[u]; ntfs_prepare_pages_for_non_resident_write()
654 page = pages[u]; ntfs_prepare_pages_for_non_resident_write()
1178 * If the number of remaining clusters in the @pages is smaller ntfs_prepare_pages_for_non_resident_write()
1231 bh = head = page_buffers(pages[u]); ntfs_prepare_pages_for_non_resident_write()
1348 page = pages[u]; ntfs_prepare_pages_for_non_resident_write()
1374 static inline void ntfs_flush_dcache_pages(struct page **pages, ntfs_flush_dcache_pages() argument
1385 flush_dcache_page(pages[nr_pages]); ntfs_flush_dcache_pages()
1391 * @pages: array of destination pages
1392 * @nr_pages: number of pages in @pages
1399 struct page **pages, const unsigned nr_pages, ntfs_commit_pages_after_non_resident_write()
1413 vi = pages[0]->mapping->host; ntfs_commit_pages_after_non_resident_write()
1423 page = pages[u]; ntfs_commit_pages_after_non_resident_write()
1519 * @pages: array of destination pages
1520 * @nr_pages: number of pages in @pages
1525 * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
1553 static int ntfs_commit_pages_after_write(struct page **pages, ntfs_commit_pages_after_write() argument
1570 BUG_ON(!pages); ntfs_commit_pages_after_write()
1571 page = pages[0]; ntfs_commit_pages_after_write()
1580 return ntfs_commit_pages_after_non_resident_write(pages, ntfs_commit_pages_after_write()
1695 * Copy as much as we can into the pages and return the number of bytes which
1696 * were successfully copied. If a fault is encountered then clear the pages
1699 static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, ntfs_copy_from_user_iter() argument
1702 struct page **last_page = pages + nr_pages; ntfs_copy_from_user_iter()
1711 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, ntfs_copy_from_user_iter()
1721 } while (++pages < last_page); ntfs_copy_from_user_iter()
1730 zero_user(*pages, copied, len); ntfs_copy_from_user_iter()
1734 } while (++pages < last_page); ntfs_copy_from_user_iter()
1751 struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER]; ntfs_perform_write() local
1785 * Determine the number of pages per cluster for non-resident ntfs_perform_write()
1808 * it is a hole, need to lock down all pages in ntfs_perform_write()
1849 * pages being swapped out between us bringing them into memory ntfs_perform_write()
1858 pages, &cached_page); ntfs_perform_write()
1869 pages, do_pages, pos, bytes); ntfs_perform_write()
1872 unlock_page(pages[--do_pages]); ntfs_perform_write()
1873 page_cache_release(pages[do_pages]); ntfs_perform_write()
1878 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; ntfs_perform_write()
1879 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, ntfs_perform_write()
1881 ntfs_flush_dcache_pages(pages + u, do_pages - u); ntfs_perform_write()
1884 status = ntfs_commit_pages_after_write(pages, do_pages, ntfs_perform_write()
1890 unlock_page(pages[--do_pages]); ntfs_perform_write()
1891 page_cache_release(pages[do_pages]); ntfs_perform_write()
1979 * but we always wait on the page cache pages to be written out.
508 __ntfs_grab_cache_pages(struct address_space *mapping, pgoff_t index, const unsigned nr_pages, struct page **pages, struct page **cached_page) __ntfs_grab_cache_pages() argument
1398 ntfs_commit_pages_after_non_resident_write( struct page **pages, const unsigned nr_pages, s64 pos, size_t bytes) ntfs_commit_pages_after_non_resident_write() argument
/linux-4.1.27/arch/s390/include/asm/
H A Dkexec.h19 /* Maximum physical address we can use pages from */
25 /* Maximum address we can use for the control pages */
32 /* Maximum address we can use for the crash control pages */
H A Duser.h31 number of pages is written.
35 to write an integer number of pages.
36 The minimum core file size is 3 pages, or 12288 bytes.
55 unsigned long int u_tsize; /* Text segment size (pages). */
56 unsigned long int u_dsize; /* Data segment size (pages). */
57 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/fs/btrfs/tests/
H A Dextent-io-tests.c32 struct page *pages[16]; process_page_range() local
43 ARRAY_SIZE(pages)), pages); process_page_range()
46 !PageLocked(pages[i])) process_page_range()
48 if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) process_page_range()
49 unlock_page(pages[i]); process_page_range()
50 page_cache_release(pages[i]); process_page_range()
52 page_cache_release(pages[i]); process_page_range()
88 * First go through and create and mark all of our pages dirty, we pin test_find_delalloc()
89 * everything to make sure our pages don't get evicted and screw up our test_find_delalloc()
159 test_msg("There were unlocked pages in the range\n"); test_find_delalloc()
237 * Currently if we fail to find dirty pages in the delalloc range we test_find_delalloc()
/linux-4.1.27/kernel/power/
H A Dsnapshot.c70 /* List of PBEs needed for restoring the pages that were allocated before
82 * we can only use memory pages that do not conflict with the pages
83 * used before suspend. The unsafe pages have PageNosaveFree set
153 /* struct linked_page is used to build chains of pages */
175 * a linked list of pages called 'the chain'.
191 gfp_t gfp_mask; /* mask for allocating pages */
192 int safe_needed; /* if set, only "safe" pages are allocated */
235 * and a pointer to the list of pages used for allocating all of the
307 struct linked_page *p_list; /* list of pages used to store zone
434 unsigned long pages; create_zone_bm_rtree() local
436 pages = end - start; create_zone_bm_rtree()
445 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); create_zone_bm_rtree()
460 * Free all node pages of the radix tree. The mem_zone_bm_rtree
804 unsigned long bits, pfn, pages; memory_bm_next_pfn() local
808 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; memory_bm_next_pfn()
809 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); memory_bm_next_pfn()
940 pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n", mark_nosave_pages()
1036 * snapshot_additional_pages - estimate the number of additional pages
1059 * pages, system-wide.
1079 * and it isn't a part of a free chunk of pages.
1106 * pages.
1140 * of pages statically defined as 'unsaveable', and it isn't a part of
1141 * a free chunk of pages.
1171 * pages.
1293 /* Total number of image pages */
1295 /* Number of pages needed for saving the original pfns of the image pages */
1303 * Memory bitmap used for marking saveable pages (during hibernation) or
1304 * hibernation image pages (during restore)
1309 * will contain copies of saveable pages. During restore it is initially used
1310 * for marking hibernation image pages, but then the set bits from it are
1312 * used for marking "safe" highmem pages, but it has to be reinitialized for
1318 * swsusp_free - free pages allocated for the suspend.
1320 * Suspend pages are allocated before the atomic copy is made, so we
1372 * preallocate_image_pages - Allocate a number of pages for hibernation image
1454 * free_unnecessary_pages - Release preallocated pages not needed for the image
1509 * @saveable: Number of saveable pages in the system.
1517 * [number of saveable pages] - [number of pages that can be freed in theory]
1519 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1520 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1521 * minus mapped file pages.
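A sketch of how that difference could be computed from the global vmstat counters; the exact counter set is illustrative, taken from the usual zone_stat_item names:

    #include <linux/vmstat.h>

    static unsigned long minimum_image_size(unsigned long saveable)
    {
            unsigned long size;

            size  = global_page_state(NR_SLAB_RECLAIMABLE);
            size += global_page_state(NR_ACTIVE_ANON);
            size += global_page_state(NR_INACTIVE_ANON);
            size += global_page_state(NR_ACTIVE_FILE);
            size += global_page_state(NR_INACTIVE_FILE);
            size -= global_page_state(NR_FILE_MAPPED);

            /* Nothing to enforce if the freeable estimate already covers it. */
            return saveable <= size ? 0 : saveable - size;
    }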
1549 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1556 * pages in the system is below the requested image size or the minimum
1562 unsigned long saveable, size, max_size, count, highmem, pages = 0; hibernate_preallocate_memory() local
1581 /* Count the number of saveable data pages. */ hibernate_preallocate_memory()
1587 * number of pages needed for image metadata (size). hibernate_preallocate_memory()
1604 /* Add number of pages required for page keys (s390 only). */
1607 /* Compute the maximum number of saveable pages to leave in memory. */
1610 /* Compute the desired number of image pages specified by image_size. */
1615 * If the desired number of image pages is at least as large as the
1616 * current number of saveable pages in memory, allocate page frames for
1620 pages = preallocate_image_highmem(save_highmem);
1621 pages += preallocate_image_memory(saveable - pages, avail_normal);
1626 pages = minimum_image_size(saveable);
1630 * small, in which case don't preallocate pages from it at all).
1632 if (avail_normal > pages)
1633 avail_normal -= pages;
1636 if (size < pages)
1637 size = min_t(unsigned long, pages, max_size);
1648 * The number of saveable pages in memory was too high, so apply some
1660 pages = preallocate_image_memory(alloc, avail_normal);
1661 if (pages < alloc) {
1662 /* We have exhausted non-highmem pages, try highmem. */
1663 alloc -= pages;
1664 pages += pages_highmem;
1668 pages += pages_highmem;
1670 * size is the desired number of saveable pages to leave in
1671 * memory, so try to preallocate (all memory - size) pages.
1673 alloc = (count - pages) - size;
1674 pages += preallocate_image_highmem(alloc);
1677 * There are approximately max_size saveable pages at this point
1686 pages += pages_highmem + size;
1691 * pages in memory, but we have allocated more. Release the excessive
1694 pages -= free_unnecessary_pages();
1698 printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1699 swsusp_show_speed(start, stop, pages, "Allocated");
1711 * count_pages_for_highmem - compute the number of non-highmem pages
1712 * that will be necessary for creating copies of highmem pages.
1746 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n", enough_free_mem()
1754 * get_highmem_buffer - if there are some highmem pages in the suspend
1765 * alloc_highmem_image_pages - allocate some highmem pages for the image.
1766 * Try to allocate as many pages as needed, but if the number of free
1767 * highmem pages is less than that, allocate them all.
1797 * We first try to allocate as many highmem pages as there are alloc_highmem_pages()
1798 * saveable highmem pages in the system. If that fails, we allocate alloc_highmem_pages()
1799 * non-highmem pages for the copies of the remaining highmem ones. alloc_highmem_pages()
1801 * In this approach it is likely that the copies of highmem pages will alloc_highmem_pages()
1846 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem); swsusp_save()
1858 /* During allocation of the suspend pagedir, new cold pages may appear. swsusp_save()
1874 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n", swsusp_save()
1914 info->pages = snapshot_get_image_size(); init_header()
1915 info->size = info->pages; init_header()
1984 /* Highmem pages are copied to the buffer, snapshot_read_next()
2003 * mark_unsafe_pages - mark the pages that cannot be used for storing
2004 * the image during resume, because they conflict with the pages that
2021 /* Mark pages that correspond to the "original" pfns as "unsafe" */
2078 nr_meta_pages = info->pages - info->image_pages - 1; load_header()
2107 /* List of "safe" pages that may be used to store data loaded from the suspend
2113 /* struct highmem_pbe is used for creating the list of highmem pages that
2123 /* List of highmem PBEs needed for restoring the highmem pages that were
2131 * count_highmem_image_pages - compute the number of highmem pages in the
2133 * image pages are assumed to be set.
2153 * prepare_highmem_image - try to allocate as many highmem pages as
2154 * there are highmem image pages (@nr_highmem_p points to the variable
2155 * containing the number of highmem image pages). The pages that are
2161 * image pages.
2318 * prepare_image - use the memory bitmap @bm to mark the pages that will free_highmem_data()
2320 * from the suspend image ("unsafe" pages) and allocate memory for the free_highmem_data()
2324 * as many pages as needed for the image data, but not to assign these free_highmem_data()
2325 * pages to specific tasks initially. Instead, we just mark them as free_highmem_data()
2326 * allocated and create a list of "safe" pages that will be used free_highmem_data()
2327 * later. On systems with high memory a list of "safe" highmem pages is free_highmem_data()
2360 /* Reserve some safe pages for potential later use. prepare_image()
2362 * NOTE: This way we make sure there will be enough safe pages for the prepare_image()
2399 /* Free the reserved safe pages so that chain_alloc() can use them */ prepare_image()
H A Dswap.c46 * The swap map is created during suspend. The swap map pages are
56 * Number of free pages that are not high.
64 * Number of pages required to be kept free while writing the image. Always
65 * half of all available low pages before the writing starts.
111 * swap pages, so that they can be freed in case of an error.
183 * free_all_swap_pages - free swap pages allocated for saving image data.
291 ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ write_page()
380 * Recalculate the number of required free pages, to swap_write_page()
419 /* Number of pages/bytes we'll compress at one time. */
423 /* Number of pages/bytes we need for compressed data (worst case). */
431 /* Minimum/maximum number of pages for read buffering. */
452 printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n", save_image()
573 * @nr_to_write: Number of pages to save.
664 * Adjust the number of required free pages after all allocations have save_image_lzo()
665 * been done. We don't want to run out of pages when writing. save_image_lzo()
671 "PM: Compressing and saving image data (%u pages)...\n", save_image_lzo()
798 pr_debug("PM: Free swap pages: %u\n", free_swap); enough_swap()
819 unsigned long pages; swsusp_write() local
822 pages = snapshot_get_image_size(); swsusp_write()
829 if (!enough_swap(pages, flags)) { swsusp_write()
847 save_image(&handle, &snapshot, pages - 1) : swsusp_write()
848 save_image_lzo(&handle, &snapshot, pages - 1); swsusp_write()
960 * (assume there are @nr_pages pages to load)
975 printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", load_image()
1061 * @nr_to_read: Number of pages to load.
1155 * Set the number of pages for read buffering. load_image_lzo()
1159 * say that none of the image pages are from high memory. load_image_lzo()
1175 "PM: Failed to allocate LZO pages\n"); load_image_lzo()
1187 "PM: Loading and decompressing image data (%u pages)...\n", load_image_lzo()
1408 load_image(&handle, &snapshot, header->pages - 1) : swsusp_read()
1409 load_image_lzo(&handle, &snapshot, header->pages - 1); swsusp_read()
/linux-4.1.27/drivers/md/
H A Ddm-kcopyd.c38 * pages for kcopyd io.
41 struct page_list *pages; member in struct:dm_kcopyd_client
60 * i) jobs waiting for pages
61 * ii) jobs that have pages, and are waiting for the io to be issued.
217 * Add the provided pages to a client's free page list, releasing
230 pl->next = kc->pages; kcopyd_put_pages()
231 kc->pages = pl; kcopyd_put_pages()
240 unsigned int nr, struct page_list **pages) kcopyd_get_pages()
244 *pages = NULL; kcopyd_get_pages()
249 /* Use reserved pages */ kcopyd_get_pages()
250 pl = kc->pages; kcopyd_get_pages()
253 kc->pages = pl->next; kcopyd_get_pages()
256 pl->next = *pages; kcopyd_get_pages()
257 *pages = pl; kcopyd_get_pages()
263 if (*pages) kcopyd_get_pages()
264 kcopyd_put_pages(kc, *pages); kcopyd_get_pages()
310 drop_pages(kc->pages); client_free_pages()
311 kc->pages = NULL; client_free_pages()
343 struct page_list *pages; member in struct:kcopyd_job
444 if (job->pages && job->pages != &zero_page_list) run_complete_job()
445 kcopyd_put_pages(kc, job->pages); run_complete_job()
501 .mem.ptr.pl = job->pages, run_io_job()
523 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); run_pages_job()
587 * complete jobs can free some pages for pages jobs. do_work()
610 else if (job->pages == &zero_page_list) dispatch_job()
727 job->pages = NULL; dm_kcopyd_copy()
732 job->pages = &zero_page_list; dm_kcopyd_copy()
840 kc->pages = NULL; dm_kcopyd_client_create()
239 kcopyd_get_pages(struct dm_kcopyd_client *kc, unsigned int nr, struct page_list **pages) kcopyd_get_pages() argument
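The pool above strings free pages together through page_list nodes: puts push onto the head, gets pop from it, so copy jobs rarely have to touch the page allocator. A stripped-down toy of that push/pop pair, with no locking, reserve accounting, or allocation fallback:

    #include <linux/mm_types.h>

    /* Same shape as the dm-io page_list chained above (sketch only). */
    struct page_list_sketch {
            struct page_list_sketch *next;
            struct page *page;
    };

    static struct page_list_sketch *pool;   /* plays the role of kc->pages */

    static void pool_put(struct page_list_sketch *pl)
    {
            pl->next = pool;
            pool = pl;
    }

    static struct page_list_sketch *pool_get(void)
    {
            struct page_list_sketch *pl = pool;

            if (pl)
                    pool = pl->next;
            return pl;
    }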
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_gem.c41 /* allocate pages from VRAM carveout, used when no IOMMU: */ get_pages_vram()
76 if (!msm_obj->pages) { get_pages()
87 dev_err(dev->dev, "could not get pages: %ld\n", get_pages()
98 msm_obj->pages = p; get_pages()
100 /* For non-cached buffers, ensure the new pages are clean get_pages()
108 return msm_obj->pages; get_pages()
115 if (msm_obj->pages) { put_pages()
116 /* For non-cached buffers, ensure the new pages are clean put_pages()
126 drm_gem_put_pages(obj, msm_obj->pages, true, false); put_pages()
129 drm_free_large(msm_obj->pages); put_pages()
132 msm_obj->pages = NULL; put_pages()
197 struct page **pages; msm_gem_fault() local
209 /* make sure we have pages attached now */ msm_gem_fault()
210 pages = get_pages(obj); msm_gem_fault()
211 if (IS_ERR(pages)) { msm_gem_fault()
212 ret = PTR_ERR(pages); msm_gem_fault()
220 pfn = page_to_pfn(pages[pgoff]); msm_gem_fault()
291 struct page **pages = get_pages(obj); msm_gem_get_iova_locked() local
293 if (IS_ERR(pages)) msm_gem_get_iova_locked()
294 return PTR_ERR(pages); msm_gem_get_iova_locked()
393 struct page **pages = get_pages(obj); msm_gem_vaddr_locked() local
394 if (IS_ERR(pages)) msm_gem_vaddr_locked()
395 return ERR_CAST(pages); msm_gem_vaddr_locked()
396 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, msm_gem_vaddr_locked()
537 /* Don't drop the pages for imported dmabuf, as they are not msm_gem_free_object()
540 if (msm_obj->pages) msm_gem_free_object()
541 drm_free_large(msm_obj->pages); msm_gem_free_object()
690 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); msm_gem_import()
691 if (!msm_obj->pages) { msm_gem_import()
696 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); msm_gem_import()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dsec_bulk.c74 unsigned long epp_max_pages; /* maximum pages can hold, const */
78 * wait queue in case of not enough free pages.
82 unsigned long epp_pages_short; /* # of pages wanted of in-q users */
83 unsigned int epp_growing:1; /* during adding pages */
87 * this is counted based on each time when getting pages from
99 * in-pool pages bookkeeping
102 unsigned long epp_total_pages; /* total pages in pools */
103 unsigned long epp_free_pages; /* current pages available */
108 unsigned long epp_st_max_pages; /* # of pages ever reached */
110 unsigned int epp_st_grow_fails; /* # of add pages failures */
114 unsigned long epp_st_lowfree; /* lowest free pages reached */
131 "physical pages: %lu\n" sptlrpc_proc_enc_pool_seq_show()
132 "pages per pool: %lu\n" sptlrpc_proc_enc_pool_seq_show()
133 "max pages: %lu\n" sptlrpc_proc_enc_pool_seq_show()
135 "total pages: %lu\n" sptlrpc_proc_enc_pool_seq_show()
140 "max pages reached: %lu\n" sptlrpc_proc_enc_pool_seq_show()
220 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
242 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
252 CDEBUG(D_SEC, "released %ld pages, %ld left\n", enc_pools_shrink_scan()
282 * return how many pages cleaned up.
306 * merge @npools pointed by @pools which contains @npages new pages
328 /* free slots are those left by rent pages, and the extra ones with enc_pools_insert()
387 CDEBUG(D_SEC, "add %d pages to total %lu\n", npages, enc_pools_insert()
433 CDEBUG(D_SEC, "added %d pages into pools\n", npages); enc_pools_add_pages()
442 CERROR("Failed to allocate %d enc pages\n", npages); enc_pools_add_pages()
469 /* if total pages is not enough, we need to grow */ enc_pools_should_grow()
493 * we allocate the requested pages atomically.
657 * initial pages in add_user() if current pools are empty, rest would be
773 "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait " sptlrpc_enc_pool_fini()
/linux-4.1.27/arch/sh/include/asm/
H A Duser.h23 * that an integral number of pages is written.
27 * to write an integer number of pages.
48 size_t u_tsize; /* text size (pages) */
49 size_t u_dsize; /* data size (pages) */
50 size_t u_ssize; /* stack size (pages) */
/linux-4.1.27/arch/m32r/mm/
H A Ddiscontig.c29 unsigned long pages; member in struct:__anon1760
48 mp->pages = PFN_DOWN(memory_end - memory_start); mem_prof_init()
66 mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes; mem_prof_init()
88 max_pfn = mp->start_pfn + mp->pages; for_each_online_node()
93 PFN_PHYS(mp->pages)); for_each_online_node()
/linux-4.1.27/drivers/base/
H A Ddma-contiguous.c180 * dma_alloc_from_contiguous() - allocate pages from contiguous area
182 * @count: Requested number of pages.
183 * @align: Requested alignment of pages (in PAGE_SIZE order).
200 * dma_release_from_contiguous() - release allocated pages
201 * @dev: Pointer to device for which the pages were allocated.
202 * @pages: Allocated pages.
203 * @count: Number of allocated pages.
206 * It returns false when provided pages do not belong to contiguous area and
209 bool dma_release_from_contiguous(struct device *dev, struct page *pages, dma_release_from_contiguous() argument
212 return cma_release(dev_get_cma_area(dev), pages, count); dma_release_from_contiguous()
H A Ddma-mapping.c275 * remaps an array of PAGE_SIZE pages into another vm_area
278 void *dma_common_pages_remap(struct page **pages, size_t size, dma_common_pages_remap() argument
288 area->pages = pages; dma_common_pages_remap()
290 if (map_vm_area(area, prot, pages)) { dma_common_pages_remap()
308 struct page **pages; dma_common_contiguous_remap() local
312 pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); dma_common_contiguous_remap()
313 if (!pages) dma_common_contiguous_remap()
317 pages[i] = pfn_to_page(pfn + i); dma_common_contiguous_remap()
319 ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller); dma_common_contiguous_remap()
321 kfree(pages); dma_common_contiguous_remap()
/linux-4.1.27/fs/btrfs/
H A Dcompression.c49 /* the pages with the compressed data on them */
55 /* starting offset in the inode for our pages */
67 /* number of compressed pages in the array */
145 /* when we finish reading compressed pages from the disk, we
147 * decompressed pages (in the inode address space).
152 * The compressed pages are freed here, and it must be run
191 /* release the compressed pages */ end_compressed_bio_read()
225 * pages for a compressed write
232 struct page *pages[16]; end_compressed_writeback() local
243 nr_pages, ARRAY_SIZE(pages)), pages); end_compressed_writeback()
251 SetPageError(pages[i]); end_compressed_writeback()
252 end_page_writeback(pages[i]); end_compressed_writeback()
253 page_cache_release(pages[i]); end_compressed_writeback()
262 * do the cleanup once all the compressed pages hit the disk.
263 * This will clear writeback on the file pages and free the compressed
264 * pages.
266 * This also calls the writeback end hooks for the file pages so that
303 * release the compressed pages, these came from alloc_page and end_compressed_bio_write()
321 * worker function to build and submit bios for previously compressed pages.
322 * The corresponding pages in the inode should be marked for writeback
323 * and the compressed pages should have a reference on them for dropping
373 /* create and submit bios for the compressed pages */ btrfs_submit_compressed_write()
556 * for a compressed read, the bio we get passed has all the inode pages
557 * in it. We don't actually do IO on those pages but allocate new ones
558 * to hold the compressed pages on disk.
561 * bio->bi_io_vec points to all of the inode pages
562 * bio->bi_vcnt is a count of pages
564 * After the compressed pages are read, we copy the bytes into the
650 /* include any pages we added in add_ra-bio_pages */ btrfs_submit_compressed_read()
867 * pages are allocated to hold the compressed result and stored
868 * in 'pages'
870 * out_pages is used to return the number of pages allocated. There
871 * may be pages allocated even if we return an error
875 * ran out of room in the pages array or because we cross the
881 * stuff into pages
885 struct page **pages, btrfs_compress_pages()
900 start, len, pages, btrfs_compress_pages()
909 * pages_in is an array of pages with compressed data.
913 * bvec is a bio_vec of pages from the file that we want to decompress into
915 * vcnt is the count of pages in the biovec
920 * The pages in the bio are for the uncompressed data, and they may not
971 * Copy uncompressed data from working buffer to pages.
1013 /* copy bytes from the working buffer into the pages */ btrfs_decompress_buf2page()
883 btrfs_compress_pages(int type, struct address_space *mapping, u64 start, unsigned long len, struct page **pages, unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, unsigned long *total_out, unsigned long max_out) btrfs_compress_pages() argument
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/
H A Dxgbe-desc.c141 if (ring->rx_hdr_pa.pages) { xgbe_free_ring()
144 put_page(ring->rx_hdr_pa.pages); xgbe_free_ring()
146 ring->rx_hdr_pa.pages = NULL; xgbe_free_ring()
152 if (ring->rx_buf_pa.pages) { xgbe_free_ring()
155 put_page(ring->rx_buf_pa.pages); xgbe_free_ring()
157 ring->rx_buf_pa.pages = NULL; xgbe_free_ring()
261 struct page *pages = NULL; xgbe_alloc_pages() local
265 /* Try to obtain pages, decreasing order if necessary */ xgbe_alloc_pages()
268 pages = alloc_pages(gfp, order); xgbe_alloc_pages()
269 if (pages) xgbe_alloc_pages()
274 if (!pages) xgbe_alloc_pages()
277 /* Map the pages */ xgbe_alloc_pages()
278 pages_dma = dma_map_page(pdata->dev, pages, 0, xgbe_alloc_pages()
282 put_page(pages); xgbe_alloc_pages()
286 pa->pages = pages; xgbe_alloc_pages()
298 get_page(pa->pages); xgbe_set_buffer_data()
310 pa->pages = NULL; xgbe_set_buffer_data()
323 if (!ring->rx_hdr_pa.pages) { xgbe_map_rx_buffer()
329 if (!ring->rx_buf_pa.pages) { xgbe_map_rx_buffer()
452 if (rdata->rx.hdr.pa.pages) xgbe_unmap_rdata()
453 put_page(rdata->rx.hdr.pa.pages); xgbe_unmap_rdata()
455 if (rdata->rx.hdr.pa_unmap.pages) { xgbe_unmap_rdata()
459 put_page(rdata->rx.hdr.pa_unmap.pages); xgbe_unmap_rdata()
462 if (rdata->rx.buf.pa.pages) xgbe_unmap_rdata()
463 put_page(rdata->rx.buf.pa.pages); xgbe_unmap_rdata()
465 if (rdata->rx.buf.pa_unmap.pages) { xgbe_unmap_rdata()
469 put_page(rdata->rx.buf.pa_unmap.pages); xgbe_unmap_rdata()
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
H A Dlscsa_alloc.c44 /* Set LS pages reserved to allow for user-space mapping. */ spu_alloc_lscsa_std()
77 /* Check availability of 64K pages */ spu_alloc_lscsa()
83 pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n", spu_alloc_lscsa()
86 /* First try to allocate our 64K pages. We need 5 of them spu_alloc_lscsa()
89 * allowing us to require only 4 64K pages per context spu_alloc_lscsa()
126 /* Set LS pages reserved to allow for user-space mapping. spu_alloc_lscsa()
139 pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n"); spu_alloc_lscsa()
/linux-4.1.27/fs/ceph/
H A Daddr.c28 * count dirty pages on the inode. In the absence of snapshots,
33 * with dirty pages (dirty pages implies there is a cap) gets a new
43 * we look for the first capsnap in i_cap_snaps and write out pages in
45 * eventually reaching the "live" or "head" context (i.e., pages that
47 * pages.
154 * We can get non-dirty pages here due to races between ceph_invalidatepage()
267 /* unlock all pages, zeroing any data we didn't read */ finish_read()
273 struct page *page = osd_data->pages[i]; finish_read()
292 kfree(osd_data->pages); finish_read()
295 static void ceph_unlock_page_vector(struct page **pages, int num_pages) ceph_unlock_page_vector() argument
300 unlock_page(pages[i]); ceph_unlock_page_vector()
318 struct page **pages; start_read() local
325 /* count pages */ start_read()
349 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
351 if (!pages)
369 pages[i] = page;
371 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
385 ceph_unlock_page_vector(pages, nr_pages);
386 ceph_release_page_vector(pages, nr_pages);
394 * Read multiple pages. Leave pages we don't read + unlock in page_list;
447 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, get_oldest_context()
458 dout(" head snapc %p has %d dirty pages\n", get_oldest_context()
579 static void ceph_release_pages(struct page **pages, int num) ceph_release_pages() argument
586 if (pagevec_add(&pvec, pages[i]) == 0) ceph_release_pages()
622 * Assume we wrote the pages we originally sent. The writepages_finish()
623 * osd might reply with fewer pages if our writeback writepages_finish()
632 dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", writepages_finish()
635 /* clean all pages */ writepages_finish()
637 page = osd_data->pages[i]; writepages_finish()
665 dout("%p wrote+cleaned %d pages\n", inode, wrote); writepages_finish()
668 ceph_release_pages(osd_data->pages, num_pages); writepages_finish()
670 mempool_free(osd_data->pages, writepages_finish()
673 kfree(osd_data->pages); writepages_finish()
778 struct page **pages = NULL; ceph_writepages_start() local
802 page = pvec.pages[i]; ceph_writepages_start()
809 /* only dirty pages, or our accounting breaks */ ceph_writepages_start()
867 BUG_ON(pages); ceph_writepages_start()
893 pages = kmalloc(max_pages * sizeof (*pages), ceph_writepages_start()
895 if (!pages) { ceph_writepages_start()
897 pages = mempool_alloc(pool, GFP_NOFS); ceph_writepages_start()
898 BUG_ON(!pages); ceph_writepages_start()
917 pages[locked_pages] = page; ceph_writepages_start()
936 /* shift unused pages over in the pvec... we ceph_writepages_start()
940 pvec.pages[j]); ceph_writepages_start()
941 pvec.pages[j-i+first] = pvec.pages[j]; ceph_writepages_start()
948 offset = page_offset(pages[0]); ceph_writepages_start()
951 dout("writepages got %d pages at %llu~%llu\n", ceph_writepages_start()
954 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, ceph_writepages_start()
957 pages = NULL; /* request message now owns the pages array */ ceph_writepages_start()
979 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, ceph_writepages_start()
980 pvec.nr ? pvec.pages[0] : NULL); ceph_writepages_start()
H A Dcache.h45 struct list_head *pages,
85 struct list_head *pages) ceph_fscache_readpages_cancel()
88 return fscache_readpages_cancel(ci->fscache, pages); ceph_fscache_readpages_cancel()
121 struct page *pages) ceph_fscache_uncache_page()
133 struct list_head *pages, ceph_readpages_from_fscache()
172 struct list_head *pages) ceph_fscache_readpages_cancel()
84 ceph_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) ceph_fscache_readpages_cancel() argument
120 ceph_fscache_uncache_page(struct inode *inode, struct page *pages) ceph_fscache_uncache_page() argument
131 ceph_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) ceph_readpages_from_fscache() argument
171 ceph_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) ceph_fscache_readpages_cancel() argument
/linux-4.1.27/net/sunrpc/
H A Dxdr.c125 kaddr = kmap_atomic(buf->pages[0]); xdr_terminate_string()
133 struct page **pages, unsigned int base, unsigned int len) xdr_inline_pages()
142 xdr->pages = pages; xdr_inline_pages()
159 * @pages: vector of pages containing both the source and dest memory area.
166 * if a memory area starts at byte 'base' in page 'pages[i]',
172 _shift_data_right_pages(struct page **pages, size_t pgto_base, _shift_data_right_pages() argument
184 pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); _shift_data_right_pages()
185 pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); _shift_data_right_pages()
224 * @pages: array of pages
229 * Copies data from an arbitrary memory location into an array of pages
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) _copy_to_pages() argument
239 pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); _copy_to_pages()
269 * @pages: array of pages
273 * Copies data into an arbitrary memory location from an array of pages
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) _copy_from_pages() argument
283 pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); _copy_from_pages()
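Both copy helpers above rely on the same addressing rule for an XDR page vector: byte offset base lives in page pages[base >> PAGE_CACHE_SHIFT] at offset (base & ~PAGE_CACHE_MASK). A hedged helper making the split explicit, assuming lowmem pages so page_address() is valid without kmap():

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Resolve a byte offset into a page vector to a kernel pointer. */
    static void *page_vec_ptr(struct page **pages, size_t base)
    {
            struct page *p = pages[base >> PAGE_CACHE_SHIFT];

            return (char *)page_address(p) + (base & ~PAGE_CACHE_MASK);
    }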
313 * moved into the inlined pages and/or the tail.
336 /* Copy from the inlined pages into the tail */ xdr_shrink_bufhead()
347 buf->pages, xdr_shrink_bufhead()
361 /* Now handle pages */ xdr_shrink_bufhead()
364 _shift_data_right_pages(buf->pages, xdr_shrink_bufhead()
371 _copy_to_pages(buf->pages, buf->page_base, xdr_shrink_bufhead()
385 * @len: bytes to remove from buf->pages
387 * Shrinks XDR buffer's page array buf->pages by
418 /* Copy from the inlined pages into the tail */ xdr_shrink_pagelen()
420 buf->pages, buf->page_base + pglen - len, xdr_shrink_pagelen()
599 * cache pages (as in a zero-copy server read reply), except for the
633 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); xdr_truncate_encode()
684 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
686 * @pages: list of pages
691 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, xdr_write_pages() argument
696 buf->pages = pages; xdr_write_pages()
748 xdr->page_ptr = &xdr->buf->pages[pgnr]; xdr_set_page_base()
766 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; xdr_set_next_page()
811 * @pages: list of pages to decode into
812 * @len: length in bytes of buffer in pages
815 struct page **pages, unsigned int len) xdr_init_decode_pages()
818 buf->pages = pages; xdr_init_decode_pages()
845 * The scratch buffer is used when decoding from an array of pages.
911 /* Realign pages to current pointer position */ xdr_align_pages()
941 * Returns the number of XDR encoded bytes now contained in the pages
1042 subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; xdr_buf_subsegment()
1118 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); __read_bytes_from_xdr_buf()
1149 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); __write_bytes_to_xdr_buf()
1288 base = buf->head->iov_len; /* align to start of pages */ xdr_xcode_array2()
1291 /* process pages array */ xdr_xcode_array2()
1300 ppages = buf->pages + (base >> PAGE_CACHE_SHIFT); xdr_xcode_array2()
1488 sg_set_page(sg, buf->pages[i], thislen, page_offset); xdr_process_buf()
132 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, struct page **pages, unsigned int base, unsigned int len) xdr_inline_pages() argument
814 xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len) xdr_init_decode_pages() argument
/linux-4.1.27/drivers/block/xen-blkback/
H A Dblkback.c56 * Maximum number of unused free pages to keep in the internal buffer.
68 "Maximum number of free pages to keep in each block backend buffer");
106 /* Number of free pages to remove on each call to gnttab_free_pages */
143 /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */ shrink_free_pagepool()
272 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; free_persistent_gnts() local
278 unmap_data.pages = pages; free_persistent_gnts()
291 pages[segs_to_unmap] = persistent_gnt->page; foreach_grant_safe()
299 put_free_pages(blkif, pages, segs_to_unmap); foreach_grant_safe()
313 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; xen_blkbk_unmap_purged_grants() local
319 unmap_data.pages = pages; xen_blkbk_unmap_purged_grants()
334 pages[segs_to_unmap] = persistent_gnt->page; xen_blkbk_unmap_purged_grants()
339 put_free_pages(blkif, pages, segs_to_unmap); xen_blkbk_unmap_purged_grants()
347 put_free_pages(blkif, pages, segs_to_unmap); xen_blkbk_unmap_purged_grants()
652 * Remove persistent grants and empty the pool of free pages
656 /* Free all persistent grant pages */ xen_blkbk_free_caches()
664 /* Since we are shutting down remove all pages from the buffer */ xen_blkbk_free_caches()
670 struct grant_page **pages, xen_blkbk_unmap_prepare()
678 if (pages[i]->persistent_gnt != NULL) { xen_blkbk_unmap_prepare()
679 put_persistent_gnt(blkif, pages[i]->persistent_gnt); xen_blkbk_unmap_prepare()
682 if (pages[i]->handle == BLKBACK_INVALID_HANDLE) xen_blkbk_unmap_prepare()
684 unmap_pages[invcount] = pages[i]->page; xen_blkbk_unmap_prepare()
685 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page), xen_blkbk_unmap_prepare()
686 GNTMAP_host_map, pages[i]->handle); xen_blkbk_unmap_prepare()
687 pages[i]->handle = BLKBACK_INVALID_HANDLE; xen_blkbk_unmap_prepare()
703 put_free_pages(blkif, data->pages, data->count); xen_blkbk_unmap_and_respond_callback()
729 struct grant_page **pages = req->segments; xen_blkbk_unmap_and_respond() local
732 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages, xen_blkbk_unmap_and_respond()
739 work->pages = req->unmap_pages; xen_blkbk_unmap_and_respond()
754 struct grant_page *pages[], xen_blkbk_unmap()
765 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch, xen_blkbk_unmap()
772 pages += batch; xen_blkbk_unmap()
778 struct grant_page *pages[], xen_blkbk_map()
805 pages[i]->gref); xen_blkbk_map()
812 pages[i]->page = persistent_gnt->page; xen_blkbk_map()
813 pages[i]->persistent_gnt = persistent_gnt; xen_blkbk_map()
815 if (get_free_page(blkif, &pages[i]->page)) xen_blkbk_map()
817 addr = vaddr(pages[i]->page); xen_blkbk_map()
818 pages_to_gnt[segs_to_map] = pages[i]->page; xen_blkbk_map()
819 pages[i]->persistent_gnt = NULL; xen_blkbk_map()
824 flags, pages[i]->gref, xen_blkbk_map()
843 if (!pages[seg_idx]->persistent_gnt) { xen_blkbk_map()
848 put_free_pages(blkif, &pages[seg_idx]->page, 1); xen_blkbk_map()
849 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; xen_blkbk_map()
853 pages[seg_idx]->handle = map[new_map_idx].handle; xen_blkbk_map()
875 persistent_gnt->page = pages[seg_idx]->page; xen_blkbk_map()
882 pages[seg_idx]->persistent_gnt = persistent_gnt; xen_blkbk_map()
929 struct grant_page **pages = pending_req->indirect_pages; xen_blkbk_parse_indirect() local
939 pages[i]->gref = req->u.indirect.indirect_grefs[i]; xen_blkbk_parse_indirect()
941 rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); xen_blkbk_parse_indirect()
950 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); xen_blkbk_parse_indirect()
968 xen_blkbk_unmap(blkif, pages, indirect_grefs); xen_blkbk_parse_indirect()
1201 struct grant_page **pages = pending_req->segments; dispatch_rw_block_io() local
1260 pages[i]->gref = req->u.rw.seg[i].gref; dispatch_rw_block_io()
1324 pages[i]->page, dispatch_rw_block_io()
668 xen_blkbk_unmap_prepare( struct xen_blkif *blkif, struct grant_page **pages, unsigned int num, struct gnttab_unmap_grant_ref *unmap_ops, struct page **unmap_pages) xen_blkbk_unmap_prepare() argument
753 xen_blkbk_unmap(struct xen_blkif *blkif, struct grant_page *pages[], int num) xen_blkbk_unmap() argument
777 xen_blkbk_map(struct xen_blkif *blkif, struct grant_page *pages[], int num, bool ro) xen_blkbk_map() argument
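The unmap path above walks the segment list in fixed-size batches rather than issuing one grant-table operation per page. A simplified, self-contained sketch of that batching loop (plain C; unmap_batch() is a stand-in for the real grant-table call, and BATCH_SIZE stands in for BLKIF_MAX_SEGMENTS_PER_REQUEST):

    #include <stdio.h>

    #define BATCH_SIZE 11   /* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

    /* Placeholder for the batched grant-table unmap operation. */
    static void unmap_batch(int first, int count)
    {
            printf("unmapping segments %d..%d\n", first, first + count - 1);
    }

    int main(void)
    {
            int num = 25, done = 0;         /* e.g. an indirect request */

            while (num) {
                    int batch = num < BATCH_SIZE ? num : BATCH_SIZE;

                    unmap_batch(done, batch);
                    done += batch;
                    num -= batch;
            }
            return 0;
    }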
/linux-4.1.27/arch/parisc/mm/
H A Dinit.c152 tmp = pmem_ranges[j-1].pages; setup_bootmem()
153 pmem_ranges[j-1].pages = pmem_ranges[j].pages; setup_bootmem()
154 pmem_ranges[j].pages = tmp; setup_bootmem()
167 pmem_ranges[i-1].pages) > MAX_GAP) { setup_bootmem()
169 printk("Large gap in memory detected (%ld pages). " setup_bootmem()
173 pmem_ranges[i-1].pages)); setup_bootmem()
189 size = (pmem_ranges[i].pages << PAGE_SHIFT); setup_bootmem()
201 res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; setup_bootmem()
221 rsize = pmem_ranges[i].pages << PAGE_SHIFT; setup_bootmem()
227 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT) setup_bootmem()
247 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages; setup_bootmem()
253 pmem_holes[npmem_holes++].pages = hole_pages; setup_bootmem()
256 end_pfn += pmem_ranges[i].pages; setup_bootmem()
259 pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn; setup_bootmem()
266 bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages); setup_bootmem()
297 npages = pmem_ranges[i].pages; setup_bootmem()
342 (pmem_holes[i].pages << PAGE_SHIFT), setup_bootmem()
521 /* The init text pages are marked R-X. We have to free_initmem()
542 * pages are no longer executable */ free_initmem()
670 printk(KERN_INFO "%d pages of RAM\n", total);
671 printk(KERN_INFO "%d reserved pages\n", reserved);
699 * Since gateway pages cannot be dereferenced this has the desirable
715 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT); pagetable_init()
716 size = pmem_ranges[range].pages << PAGE_SHIFT; pagetable_init()
766 zones_size[ZONE_NORMAL] = pmem_ranges[i].pages; paging_init()
774 j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT); paging_init()
/linux-4.1.27/fs/nilfs2/
H A Dpage.c207 * This function is for both data pages and btnode pages. The dirty flag
270 struct page *page = pvec.pages[i], *dpage; nilfs_copy_dirty_pages()
303 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
307 * No pages must no be added to the cache during this process.
323 index = pvec.pages[n - 1]->index + 1; nilfs_copy_back_pages()
326 struct page *page = pvec.pages[i], *dpage; nilfs_copy_back_pages()
373 * nilfs_clear_dirty_pages - discard dirty pages in address space
374 * @mapping: address space with dirty pages for discarding
388 struct page *page = pvec.pages[i]; nilfs_clear_dirty_pages()
472 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
473 * page dirty flags when it copies back pages from the shadow cache
478 * in dirty state, and this needs to cancel the dirty state of their pages.
533 pvec.pages); nilfs_find_uncommitted_extent()
537 if (length > 0 && pvec.pages[0]->index > index) nilfs_find_uncommitted_extent()
540 b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); nilfs_find_uncommitted_extent()
543 page = pvec.pages[i]; nilfs_find_uncommitted_extent()
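The lookups above all follow the same pagevec idiom: fetch up to PAGEVEC_SIZE pages tagged dirty, process them, release, repeat. A minimal sketch of that loop against the 4.1-era API (kernel context assumed; handle_page() is a hypothetical per-page handler):

    #include <linux/pagevec.h>
    #include <linux/pagemap.h>

    static void handle_page(struct page *page);     /* hypothetical */

    static void demo_walk_dirty_pages(struct address_space *mapping)
    {
            struct pagevec pvec;
            pgoff_t index = 0;
            unsigned int i, n;

            pagevec_init(&pvec, 0);
            while ((n = pagevec_lookup_tag(&pvec, mapping, &index,
                                           PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
                    for (i = 0; i < n; i++)
                            handle_page(pvec.pages[i]);
                    pagevec_release(&pvec);
            }
    }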
/linux-4.1.27/arch/x86/um/
H A Dldt.c77 if (copy_to_user(ptr, ldt->u.pages[i], size)) { read_ldt()
156 ldt->u.pages[i] = (struct ldt_entry *) write_ldt()
158 if (!ldt->u.pages[i]) { write_ldt()
166 memcpy(ldt->u.pages[0], &entry0, write_ldt()
168 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, write_ldt()
180 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + write_ldt()
342 new_mm->arch.ldt.u.pages[i] = init_new_ldt()
344 memcpy(new_mm->arch.ldt.u.pages[i], init_new_ldt()
345 from_mm->arch.ldt.u.pages[i], PAGE_SIZE); init_new_ldt()
363 free_page((long) mm->arch.ldt.u.pages[i]); free_ldt()
/linux-4.1.27/arch/arm/include/asm/
H A Dtlb.h62 * TLB handling. This allows us to remove pages from the page
78 struct page **pages; member in struct:mmu_gather
93 * 3. Unmapping argument pages. See shift_arg_pages().
123 tlb->pages = (void *)addr; __tlb_alloc_page()
138 free_pages_and_swap_cache(tlb->pages, tlb->nr); tlb_flush_mmu_free()
140 if (tlb->pages == tlb->local) tlb_flush_mmu_free()
159 tlb->pages = tlb->local; tlb_gather_mmu()
176 if (tlb->pages != tlb->local) tlb_finish_mmu()
177 free_pages((unsigned long)tlb->pages, 0); tlb_finish_mmu()
214 tlb->pages[tlb->nr++] = page; __tlb_remove_page()
H A Duser.h25 number of pages is written.
29 to write an integer number of pages.
30 The minimum core file size is 3 pages, or 12288 bytes.
60 unsigned long int u_tsize; /* Text segment size (pages). */
61 unsigned long int u_dsize; /* Data segment size (pages). */
62 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/fs/afs/
H A Dfile.c28 struct list_head *pages, unsigned nr_pages);
238 * read a set of pages
241 struct list_head *pages, unsigned nr_pages) afs_readpages()
258 /* attempt to read as many of the pages as possible */ afs_readpages()
262 pages, afs_readpages()
272 /* all pages are being read from the cache */ afs_readpages()
274 BUG_ON(!list_empty(pages)); afs_readpages()
279 /* there were pages that couldn't be read from the cache */ afs_readpages()
290 /* load the missing pages from the network */ afs_readpages()
291 ret = read_cache_pages(mapping, pages, afs_page_filler, key); afs_readpages()
240 afs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) afs_readpages() argument
/linux-4.1.27/drivers/firewire/
H A Dcore-iso.c48 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), fw_iso_buffer_alloc()
50 if (buffer->pages == NULL) fw_iso_buffer_alloc()
54 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); fw_iso_buffer_alloc()
55 if (buffer->pages[i] == NULL) fw_iso_buffer_alloc()
76 address = dma_map_page(card->device, buffer->pages[i], fw_iso_buffer_map_dma()
81 set_page_private(buffer->pages[i], address); fw_iso_buffer_map_dma()
115 err = vm_insert_page(vma, uaddr, buffer->pages[i]); fw_iso_buffer_map_vma()
132 address = page_private(buffer->pages[i]); fw_iso_buffer_destroy()
137 __free_page(buffer->pages[i]); fw_iso_buffer_destroy()
139 kfree(buffer->pages); fw_iso_buffer_destroy()
140 buffer->pages = NULL; fw_iso_buffer_destroy()
154 address = page_private(buffer->pages[i]); fw_iso_buffer_lookup()
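The allocate-then-map pattern above — one page per slot, with the bus address stashed in page_private() so it can be looked up later — reduces to roughly the following sketch (kernel context assumed; demo_map_pages() is hypothetical, and unwinding of already-mapped pages on failure is omitted):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static int demo_map_pages(struct device *dev, struct page **pages, int count)
    {
            int i;

            for (i = 0; i < count; i++) {
                    dma_addr_t addr;

                    pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                    if (!pages[i])
                            return -ENOMEM;

                    addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
                                        DMA_BIDIRECTIONAL);
                    if (dma_mapping_error(dev, addr)) {
                            __free_page(pages[i]);
                            return -ENOMEM;
                    }
                    /* Remember the bus address alongside the page itself. */
                    set_page_private(pages[i], (unsigned long)addr);
            }
            return 0;
    }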
/linux-4.1.27/arch/xtensa/include/uapi/asm/
H A Dmman.h53 #define MAP_LOCKED 0x8000 /* pages are locked */
81 #define MADV_WILLNEED 3 /* will need these pages */
82 #define MADV_DONTNEED 4 /* don't need these pages */
85 #define MADV_REMOVE 9 /* remove these pages & resources */
89 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
90 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
/linux-4.1.27/arch/metag/mm/
H A Dmmu-meta2.c139 unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22); mmu_init() local
167 * At this point we can also map the kernel with 4MB pages to mmu_init()
170 second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages); mmu_init()
176 while (pages > 0) { mmu_init()
196 /* Second level pages must be 64-byte aligned. */ mmu_init()
199 pages--; mmu_init()
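The DIV_ROUND_UP(mem_size, 1 << 22) above is simply the number of 4MB mappings needed to cover memory, which also bounds how many second-level tables get allocated. A standalone arithmetic check (ordinary user-space C, not kernel code; the 96MB figure is made up):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long mem_size = 96UL << 20;                 /* 96 MB */
            unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);

            /* 96 MB needs 24 mappings of 4 MB each. */
            printf("%u four-megabyte mappings\n", pages);
            return 0;
    }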
/linux-4.1.27/arch/frv/include/asm/
H A Duser.h40 * rounded in such a way that an integral number of pages is
47 * be able to write an integer number of pages. The minimum core
48 * file size is 3 pages, or 12288 bytes.
61 unsigned long u_tsize; /* Text segment size (pages). */
62 unsigned long u_dsize; /* Data segment size (pages). */
63 unsigned long u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/arch/frv/mm/
H A Ddma-alloc.c74 * This function will allocate the requested contiguous pages and
76 * get unique mapping for these pages, outside of the kernel's 1:1
79 * still get unique uncached pages for consistent DMA.
101 /* allocate some common virtual space to map the new pages */ consistent_alloc()
113 /* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free consistent_alloc()
114 * all pages that were allocated. consistent_alloc()
/linux-4.1.27/fs/cifs/
H A Dfscache.h79 struct list_head *pages, cifs_readpages_from_fscache()
83 return __cifs_readpages_from_fscache(inode, mapping, pages, cifs_readpages_from_fscache()
96 struct list_head *pages) cifs_fscache_readpages_cancel()
99 return __cifs_fscache_readpages_cancel(inode, pages); cifs_fscache_readpages_cancel()
133 struct list_head *pages, cifs_readpages_from_fscache()
143 struct list_head *pages) cifs_fscache_readpages_cancel()
77 cifs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) cifs_readpages_from_fscache() argument
95 cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) cifs_fscache_readpages_cancel() argument
131 cifs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) cifs_readpages_from_fscache() argument
142 cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages) cifs_fscache_readpages_cancel() argument
/linux-4.1.27/arch/x86/include/asm/
H A Duser32.h51 __u32 u_tsize; /* Text segment size (pages). */
52 __u32 u_dsize; /* Data segment size (pages). */
53 __u32 u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/arch/s390/kernel/
H A Dcompat_ptrace.h41 u32 u_tsize; /* Text segment size (pages). */
42 u32 u_dsize; /* Data segment size (pages). */
43 u32 u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/arch/mn10300/include/asm/
H A Duser.h30 unsigned long int u_tsize; /* Text segment size (pages). */
31 unsigned long int u_dsize; /* Data segment size (pages). */
32 unsigned long int u_ssize; /* Stack segment size (pages). */
/linux-4.1.27/fs/9p/
H A Dcache.c172 ClearPageFsCache(pvec.pages[loop]); v9fs_cache_inode_now_uncached()
174 first = pvec.pages[nr_pages - 1]->index + 1; v9fs_cache_inode_now_uncached()
316 * Returns 0 if the pages are in cache and a BIO is submitted,
317 * 1 if the pages are not in cache, and a negative error code otherwise.
349 * __v9fs_readpages_from_fscache - read multiple pages from cache
351 * Returns 0 if the pages are in cache and a BIO is submitted,
352 * 1 if the pages are not in cache, and a negative error code otherwise.
357 struct list_head *pages, __v9fs_readpages_from_fscache()
363 p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages); __v9fs_readpages_from_fscache()
368 mapping, pages, nr_pages, __v9fs_readpages_from_fscache()
375 p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret); __v9fs_readpages_from_fscache()
378 BUG_ON(!list_empty(pages)); __v9fs_readpages_from_fscache()
355 __v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) __v9fs_readpages_from_fscache() argument
H A Dcache.h50 struct list_head *pages,
75 struct list_head *pages, v9fs_readpages_from_fscache()
78 return __v9fs_readpages_from_fscache(inode, mapping, pages, v9fs_readpages_from_fscache()
131 struct list_head *pages, v9fs_readpages_from_fscache()
73 v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) v9fs_readpages_from_fscache() argument
129 v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) v9fs_readpages_from_fscache() argument
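Callers of these helpers are expected to honour the three-way return value documented above: 0 means fscache took over the reads, 1 means fall back to the server, and a negative value is a hard error. A hedged sketch of such a call site (the surrounding function and the fill_from_server() filler are hypothetical):

    static int demo_readpages(struct file *filp, struct inode *inode,
                              struct address_space *mapping,
                              struct list_head *pages, unsigned nr_pages)
    {
            int ret = v9fs_readpages_from_fscache(inode, mapping, pages,
                                                  &nr_pages);
            if (ret == 0)
                    return 0;       /* the cache is reading the pages for us */
            if (ret < 0)
                    return ret;     /* hard error */

            /* ret == 1: not cached, read the remaining pages from the server. */
            return read_cache_pages(mapping, pages, fill_from_server, filp);
    }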
/linux-4.1.27/arch/um/include/asm/
H A Dtlbflush.h19 * - flush_tlb_range(vma, start, end) flushes a range of pages
/linux-4.1.27/arch/mips/include/asm/mach-ar7/
H A Dspaces.h15 * We handle pages at KSEG0 for kernels with 32 bit address space.
/linux-4.1.27/drivers/usb/storage/
H A Dalauda.c97 unsigned int blocksize; /* number of pages per block */
180 unsigned char blockshift; /* 1<<bs pages per block */
728 * redundancy data. Returns (pagesize+64)*pages bytes in data.
731 unsigned int page, unsigned int pages, unsigned char *data) alauda_read_block_raw()
736 PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us) alauda_read_block_raw()
739 usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages); alauda_read_block_raw()
747 data, (MEDIA_INFO(us).pagesize + 64) * pages, NULL); alauda_read_block_raw()
752 * data. Returns pagesize*pages bytes in data. Note that data must be big enough
753 * to hold (pagesize+64)*pages bytes of data, but you can ignore those 'extra'
757 unsigned int page, unsigned int pages, unsigned char *data) alauda_read_block()
762 rc = alauda_read_block_raw(us, pba, page, pages, data); alauda_read_block()
767 for (i = 0; i < pages; i++) { alauda_read_block()
810 unsigned int page, unsigned int pages, alauda_write_lba()
879 for (i = page; i < page+pages; i++) { alauda_write_lba()
954 unsigned int pages; alauda_read_data() local
966 /* Find number of pages we can read in this block */ alauda_read_data()
967 pages = min(sectors, blocksize - page); alauda_read_data()
968 len = pages << pageshift; alauda_read_data()
974 usb_stor_dbg(us, "Read %d zero pages (LBA %d) page %d\n", alauda_read_data()
975 pages, lba, page); alauda_read_data()
984 usb_stor_dbg(us, "Read %d pages, from PBA %d (LBA %d) page %d\n", alauda_read_data()
985 pages, pba, lba, page); alauda_read_data()
987 result = alauda_read_block(us, pba, page, pages, buffer); alauda_read_data()
998 sectors -= pages; alauda_read_data()
1056 unsigned int pages = min(sectors, blocksize - page); alauda_write_data() local
1057 len = pages << pageshift; alauda_write_data()
1071 result = alauda_write_lba(us, lba, page, pages, buffer, alauda_write_data()
1078 sectors -= pages; alauda_write_data()
1174 unsigned int page, pages; alauda_transport() local
1183 pages = short_pack(srb->cmnd[8], srb->cmnd[7]); alauda_transport()
1185 usb_stor_dbg(us, "READ_10: page %d pagect %d\n", page, pages); alauda_transport()
1187 return alauda_read_data(us, page, pages); alauda_transport()
1191 unsigned int page, pages; alauda_transport() local
1200 pages = short_pack(srb->cmnd[8], srb->cmnd[7]); alauda_transport()
1202 usb_stor_dbg(us, "WRITE_10: page %d pagect %d\n", page, pages); alauda_transport()
1204 return alauda_write_data(us, page, pages); alauda_transport()
730 alauda_read_block_raw(struct us_data *us, u16 pba, unsigned int page, unsigned int pages, unsigned char *data) alauda_read_block_raw() argument
756 alauda_read_block(struct us_data *us, u16 pba, unsigned int page, unsigned int pages, unsigned char *data) alauda_read_block() argument
809 alauda_write_lba(struct us_data *us, u16 lba, unsigned int page, unsigned int pages, unsigned char *ptr, unsigned char *blockbuffer) alauda_write_lba() argument
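The read and write loops above advance through the medium one block at a time and never cross a block boundary, so each chunk is pages = min(sectors, blocksize - page) pages of len = pages << pageshift bytes. A standalone sketch of that chunking with made-up geometry:

    #include <stdio.h>

    int main(void)
    {
            unsigned int blocksize = 16;    /* pages per block (assumed) */
            unsigned int pageshift = 9;     /* 512-byte pages (assumed) */
            unsigned int page = 5, sectors = 40;

            while (sectors > 0) {
                    unsigned int pages = sectors < blocksize - page ?
                                         sectors : blocksize - page;
                    unsigned int len = pages << pageshift;

                    printf("transfer %u pages (%u bytes) from page %u\n",
                           pages, len, page);
                    page = 0;               /* next chunk starts a new block */
                    sectors -= pages;
            }
            return 0;
    }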
/linux-4.1.27/fs/jfs/
H A Dresize.c154 * an even number of pages. jfs_extendfs()
169 * Need enough 4k pages to cover: jfs_extendfs()
171 * - 1 extra page to handle control page and intermediate level pages jfs_extendfs()
172 * - 50 extra pages for the chkdsk service log jfs_extendfs()
207 * all wip transactions and flush modified pages s.t. jfs_extendfs()
297 /* number of data pages of new bmap file: jfs_extendfs()
329 * update map pages for new extension: jfs_extendfs()
351 * allocate new map pages and its backing blocks, and jfs_extendfs()
354 /* compute number of data pages of current bmap file */ jfs_extendfs()
362 * grow bmap file for the new map pages required: jfs_extendfs()
365 * bmap file only grows sequentially, i.e., both data pages jfs_extendfs()
366 * and possibly xtree index pages may grow in append mode, jfs_extendfs()
368 * by washing away bmap file of pages outside s_size boundary; jfs_extendfs()
378 /* synchronous write of data pages: bmap data pages are jfs_extendfs()
416 * (it could have been used up for new map pages), jfs_extendfs()
/linux-4.1.27/sound/pci/emu10k1/
H A Dmemory.c34 * aligned pages in others
93 blk->pages = blk->last_page - blk->first_page + 1; emu10k1_memblk_init()
126 page = blk->mapped_page + blk->pages; search_empty_map_area()
147 page = search_empty_map_area(emu, blk->pages, &next); map_memblk()
165 * return the size of resultant empty pages
178 start_page = q->mapped_page + q->pages; unmap_memblk()
201 * search empty pages with the given size, and create a memory block
235 * check if the given pointer is valid for pages
255 * if no empty pages are found, tries to release unused memory blocks
284 if (size >= blk->pages) { snd_emu10k1_memblk_map()
371 * memory allocation using multiple pages (for synth)
372 * Unlike the DMA allocation above, non-contiguous pages are assigned.
449 /* release allocated pages */ __synth_free_pages()
463 * allocate kernel pages
471 /* allocate kernel pages */ synth_alloc_pages()
495 * free pages
/linux-4.1.27/include/drm/ttm/
H A Dttm_page_alloc.h46 * @ttm: The struct ttm_tt to contain the backing pages.
48 * Add backing pages to all of @ttm
55 * @ttm: The struct ttm_tt which to free backing pages.
57 * Free all pages of @ttm
/linux-4.1.27/arch/tile/mm/
H A Dinit.c72 * page table, and also means that in cases where we use huge pages,
73 * we are guaranteed to later be able to shatter those huge pages and
83 static void init_prealloc_ptes(int node, int pages) init_prealloc_ptes() argument
85 BUG_ON(pages & (PTRS_PER_PTE - 1)); init_prealloc_ptes()
86 if (pages) { init_prealloc_ptes()
87 num_l2_ptes[node] = pages; init_prealloc_ptes()
88 l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t), init_prealloc_ptes()
103 * What caching do we expect pages from the heap to have when
191 static int __initdata ktext_hash = 1; /* .text pages */
192 static int __initdata kdata_hash = 1; /* .data and .bss pages */
193 int __write_once hash_default = 1; /* kernel allocator pages */
198 * CPUs to use for striping the pages of kernel data. If hash-for-home
206 int __write_once kdata_huge; /* if no homecaching, small pages */
237 * We map the aliased pages of permanent text so we can init_pgprot()
263 * All the LOWMEM pages that we mark this way will get their init_pgprot()
265 * The HIGHMEM pages we leave with a default zero for their init_pgprot()
309 * always be used to disable local caching of text pages, if desired.
342 /* Pay TLB cost but get no cache benefit: cache small pages locally */ setup_ktext()
346 pr_info("ktext: using small pages with local caching\n"); setup_ktext()
349 /* Neighborhood cache ktext pages on all cpus. */ setup_ktext()
357 /* Neighborhood ktext pages on specified mask */ setup_ktext()
361 pr_info("ktext: using caching neighborhood %*pbl with small pages\n", setup_ktext()
393 * of max_low_pfn pages, by creating page tables starting from address
397 * pages to using some more precise caching, including removing access
398 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START)
401 * to pages above the top of RAM (thus ensuring a page fault from a bad
449 /* Pre-shatter the last huge page to allow per-cpu pages. */ kernel_physical_mapping_init()
664 /* Optimize by freeing pages in large batches */ init_free_pfn_range()
905 /* Select whether to free (1) or mark unusable (0) the __init pages. */ set_initfree()
911 pr_info("initfree: %s free init pages\n", set_initfree()
933 * and they won't be touching any of these pages. free_init_pages()
965 * We are guaranteed that no one will touch the init pages any more. free_initmem()
969 /* Free the data pages that we won't use again after init. */ free_initmem()
975 * Free the pages mapped from 0xc0000000 that correspond to code free_initmem()
976 * pages from MEM_SV_START that we won't use again after init. free_initmem()
/linux-4.1.27/arch/arm/mm/
H A Ddma-mapping.c227 * Ensure that the allocated pages are zeroed, and that any data __dma_clear_buffer()
264 * Now split the huge page and free the excess pages __dma_alloc_buffer()
636 * Following is a work-around (a.k.a. hack) to prevent pages __dma_alloc()
794 * pages. But we still need to process highmem pages individually. dma_cache_maint_page()
865 * Mark the D-cache clean for these pages to avoid extra flushing. __dma_page_dev_to_cpu()
1118 struct page **pages; __iommu_alloc_buffer() local
1124 pages = kzalloc(array_size, GFP_KERNEL); __iommu_alloc_buffer()
1126 pages = vzalloc(array_size); __iommu_alloc_buffer()
1127 if (!pages) __iommu_alloc_buffer()
1142 pages[i] = page + i; __iommu_alloc_buffer()
1144 return pages; __iommu_alloc_buffer()
1148 * IOMMU can map any pages, so himem can also be used here __iommu_alloc_buffer()
1158 * as we can fall back to single pages, so we force __iommu_alloc_buffer()
1161 pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); __iommu_alloc_buffer()
1162 if (pages[i]) __iommu_alloc_buffer()
1166 if (!pages[i]) { __iommu_alloc_buffer()
1171 pages[i] = alloc_pages(gfp, 0); __iommu_alloc_buffer()
1172 if (!pages[i]) __iommu_alloc_buffer()
1177 split_page(pages[i], order); __iommu_alloc_buffer()
1180 pages[i + j] = pages[i] + j; __iommu_alloc_buffer()
1183 __dma_clear_buffer(pages[i], PAGE_SIZE << order); __iommu_alloc_buffer()
1188 return pages; __iommu_alloc_buffer()
1191 if (pages[i]) __iommu_alloc_buffer()
1192 __free_pages(pages[i], 0); __iommu_alloc_buffer()
1194 kfree(pages); __iommu_alloc_buffer()
1196 vfree(pages); __iommu_alloc_buffer()
1200 static int __iommu_free_buffer(struct device *dev, struct page **pages, __iommu_free_buffer() argument
1208 dma_release_from_contiguous(dev, pages[0], count); __iommu_free_buffer()
1211 if (pages[i]) __iommu_free_buffer()
1212 __free_pages(pages[i], 0); __iommu_free_buffer()
1216 kfree(pages); __iommu_free_buffer()
1218 vfree(pages); __iommu_free_buffer()
1223 * Create a CPU mapping for a specified pages
1226 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, __iommu_alloc_remap() argument
1229 return dma_common_pages_remap(pages, size, __iommu_alloc_remap()
1234 * Create a mapping in device IO address space for specified pages
1237 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) __iommu_create_mapping() argument
1250 unsigned int next_pfn = page_to_pfn(pages[i]) + 1; __iommu_create_mapping()
1251 phys_addr_t phys = page_to_phys(pages[i]); __iommu_create_mapping()
1255 if (page_to_pfn(pages[j]) != next_pfn) __iommu_create_mapping()
1312 return area->pages; __iommu_get_pages()
1348 struct page **pages; arm_iommu_alloc_attrs() local
1358 * Following is a work-around (a.k.a. hack) to prevent pages arm_iommu_alloc_attrs()
1366 pages = __iommu_alloc_buffer(dev, size, gfp, attrs); arm_iommu_alloc_attrs()
1367 if (!pages) arm_iommu_alloc_attrs()
1370 *handle = __iommu_create_mapping(dev, pages, size); arm_iommu_alloc_attrs()
1375 return pages; arm_iommu_alloc_attrs()
1377 addr = __iommu_alloc_remap(pages, size, gfp, prot, arm_iommu_alloc_attrs()
1387 __iommu_free_buffer(dev, pages, size, attrs); arm_iommu_alloc_attrs()
1397 struct page **pages = __iommu_get_pages(cpu_addr, attrs); arm_iommu_mmap_attrs() local
1403 if (!pages) arm_iommu_mmap_attrs()
1409 pages += off; arm_iommu_mmap_attrs()
1412 int ret = vm_insert_page(vma, uaddr, *pages++); arm_iommu_mmap_attrs()
1431 struct page **pages; arm_iommu_free_attrs() local
1439 pages = __iommu_get_pages(cpu_addr, attrs); arm_iommu_free_attrs()
1440 if (!pages) { arm_iommu_free_attrs()
1451 __iommu_free_buffer(dev, pages, size, attrs); arm_iommu_free_attrs()
1459 struct page **pages = __iommu_get_pages(cpu_addr, attrs); arm_iommu_get_sgtable() local
1461 if (!pages) arm_iommu_get_sgtable()
1464 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, arm_iommu_get_sgtable()
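__iommu_alloc_buffer() above tries progressively smaller allocation orders, passing __GFP_NORETRY so a failed high-order attempt gives up quickly, then calls split_page() so the result can be treated as independent order-0 pages. A condensed sketch of that fallback strategy (kernel context assumed; demo_fill_page_array() is hypothetical and the MAX_ORDER cap is omitted):

    #include <linux/bitops.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static int demo_fill_page_array(struct page **pages, int count, gfp_t gfp)
    {
            int i = 0;

            while (count) {
                    int j, order = fls(count) - 1;  /* largest order that fits */
                    struct page *page = NULL;

                    /* Try big chunks first, but don't make the VM struggle. */
                    while (order &&
                           !(page = alloc_pages(gfp | __GFP_NORETRY, order)))
                            order--;
                    if (!order)
                            page = alloc_pages(gfp, 0);
                    if (!page)
                            return -ENOMEM; /* caller frees pages[0..i-1] */

                    split_page(page, order);        /* make order-0 pages */
                    for (j = 0; j < (1 << order); j++)
                            pages[i + j] = page + j;

                    i += 1 << order;
                    count -= 1 << order;
            }
            return 0;
    }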
/linux-4.1.27/fs/
H A Dmpage.c7 * multiple pagecache pages.
36 * The mpage code never puts partial pages into a BIO (except for end-of-file).
93 * The idea is to avoid adding buffers to pages that don't already have
313 * mpage_readpages - populate an address space with some pages & start reads against them
315 * @pages: The address of a list_head which contains the target pages. These
316 * pages have their ->index populated and are otherwise uninitialised.
317 * The page at @pages->prev has the lowest file offset, and reads should be
318 * issued in @pages->prev to @pages->next order.
319 * @nr_pages: The number of pages at *@pages
322 * This function walks the pages and the blocks within each page, building and
337 * There is a problem. The mpage read code assembles several pages, gets all
356 mpage_readpages(struct address_space *mapping, struct list_head *pages, mpage_readpages() argument
368 struct page *page = list_entry(pages->prev, struct page, lru); mpage_readpages()
382 BUG_ON(!list_empty(pages)); mpage_readpages()
413 * mapping. We only support pages which are fully mapped-and-dirty, with a
414 * special case for pages which are unmapped at the end: end-of-file.
421 * FIXME: This code wants an estimate of how many pages are still to be
657 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
659 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
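In practice a block-based filesystem's ->readpages() is usually a thin wrapper that just points mpage_readpages() at its block-mapping callback; a sketch with a hypothetical myfs_get_block():

    #include <linux/fs.h>
    #include <linux/mpage.h>
    #include <linux/buffer_head.h>

    /* Hypothetical block-mapping callback; a real fs resolves iblock here. */
    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_readpages(struct file *file, struct address_space *mapping,
                              struct list_head *pages, unsigned nr_pages)
    {
            /* mpage_readpages() consumes the page list and submits large BIOs. */
            return mpage_readpages(mapping, pages, nr_pages, myfs_get_block);
    }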
/linux-4.1.27/arch/s390/hypfs/
H A Dhypfs_diag.c50 static int diag204_buf_pages; /* number of pages for diag204 data */
358 * up to 93 pages!
379 static void *diag204_alloc_vbuf(int pages) diag204_alloc_vbuf() argument
382 diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1)); diag204_alloc_vbuf()
386 diag204_buf_pages = pages; diag204_alloc_vbuf()
399 static void *diag204_get_buffer(enum diag204_format fmt, int *pages) diag204_get_buffer() argument
402 *pages = diag204_buf_pages; diag204_get_buffer()
406 *pages = 1; diag204_get_buffer()
409 *pages = diag204((unsigned long)SUBC_RSI | diag204_get_buffer()
411 if (*pages <= 0) diag204_get_buffer()
414 return diag204_alloc_vbuf(*pages); diag204_get_buffer()
435 int pages, rc; diag204_probe() local
437 buf = diag204_get_buffer(INFO_EXT, &pages); diag204_probe()
440 (unsigned long)INFO_EXT, pages, buf) >= 0) { diag204_probe()
446 (unsigned long)INFO_EXT, pages, buf) >= 0) { diag204_probe()
456 buf = diag204_get_buffer(INFO_SIMPLE, &pages); diag204_probe()
462 (unsigned long)INFO_SIMPLE, pages, buf) >= 0) { diag204_probe()
478 static int diag204_do_store(void *buf, int pages) diag204_do_store() argument
483 (unsigned long) diag204_info_type, pages, buf); diag204_do_store()
490 int pages, rc; diag204_store() local
492 buf = diag204_get_buffer(diag204_info_type, &pages); diag204_store()
495 rc = diag204_do_store(buf, pages); diag204_store()
/linux-4.1.27/tools/perf/util/
H A Devlist.c894 static size_t perf_evlist__mmap_size(unsigned long pages) perf_evlist__mmap_size() argument
896 if (pages == UINT_MAX) { perf_evlist__mmap_size()
910 pages = (max * 1024) / page_size; perf_evlist__mmap_size()
911 if (!is_power_of_2(pages)) perf_evlist__mmap_size()
912 pages = rounddown_pow_of_two(pages); perf_evlist__mmap_size()
913 } else if (!is_power_of_2(pages)) perf_evlist__mmap_size()
916 return (pages + 1) * page_size; perf_evlist__mmap_size()
922 unsigned long pages, val; parse_pages_arg() local
937 pages = PERF_ALIGN(val, page_size) / page_size; parse_pages_arg()
939 /* we got a page count value */ parse_pages_arg()
941 pages = strtoul(str, &eptr, 10); parse_pages_arg()
946 if (pages == 0 && min == 0) { parse_pages_arg()
947 /* leave number of pages at 0 */ parse_pages_arg()
948 } else if (!is_power_of_2(pages)) { parse_pages_arg()
949 /* round pages up to next power of 2 */ parse_pages_arg()
950 pages = roundup_pow_of_two(pages); parse_pages_arg()
951 if (!pages) parse_pages_arg()
953 pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", parse_pages_arg()
954 pages * page_size, pages); parse_pages_arg()
957 if (pages > max) parse_pages_arg()
960 return pages; parse_pages_arg()
968 long pages; perf_evlist__parse_mmap_pages() local
973 pages = parse_pages_arg(str, 1, max); perf_evlist__parse_mmap_pages()
974 if (pages < 0) { perf_evlist__parse_mmap_pages()
979 *mmap_pages = pages; perf_evlist__parse_mmap_pages()
986 * @pages: map length in pages
995 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, perf_evlist__mmap() argument
1012 evlist->mmap_len = perf_evlist__mmap_size(pages); perf_evlist__mmap()
1529 "Hint:\tTry using a smaller -m/--mmap-pages value."); perf_evlist__strerror_mmap()
/linux-4.1.27/drivers/mtd/nand/
H A Dsm_common.h24 /* one sector is always 512 bytes, but it can consist of two nand pages */
27 /* oob area is also 16 bytes, but might be from two pages */
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
H A Dalloc.c47 * multiple pages, so we don't require too much contiguous memory.
94 struct page **pages; mlx5_buf_alloc() local
95 pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); mlx5_buf_alloc()
96 if (!pages) mlx5_buf_alloc()
99 pages[i] = virt_to_page(buf->page_list[i].buf); mlx5_buf_alloc()
100 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); mlx5_buf_alloc()
101 kfree(pages); mlx5_buf_alloc()
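When the buffer has to be built from scattered page-sized chunks, the driver above stitches them into a single kernel-virtual range with vmap(). A minimal sketch of that step (kernel context assumed; demo_vmap_chunks() is hypothetical and each chunk is assumed to be exactly one page):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *demo_vmap_chunks(void **chunks, int nchunks)
    {
            struct page **pages;
            void *vaddr;
            int i;

            pages = kmalloc(sizeof(*pages) * nchunks, GFP_KERNEL);
            if (!pages)
                    return NULL;

            for (i = 0; i < nchunks; i++)
                    pages[i] = virt_to_page(chunks[i]);

            /* One contiguous kernel mapping over non-contiguous pages. */
            vaddr = vmap(pages, nchunks, VM_MAP, PAGE_KERNEL);
            kfree(pages);   /* the page tables now hold the mapping */
            return vaddr;   /* undo later with vunmap(vaddr) */
    }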
/linux-4.1.27/arch/metag/include/asm/
H A Dtlbflush.h16 * - flush_tlb_range(mm, start, end) flushes a range of pages
17 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
