#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};
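/*
 * Illustrative userspace sketch, not part of this file: since each entry
 * is a u64, the map count of page frame number N lives at byte offset
 * N * sizeof(u64), so a single pread() suffices. The pfn value below is a
 * made-up example, and the file is created 0400, so this needs root.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pcount;
 *		unsigned long pfn = 0x1000;	// hypothetical pfn
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// seek to the entry for this pfn and read one u64
 *		if (pread(fd, &pcount, sizeof(pcount),
 *			  pfn * sizeof(pcount)) == sizeof(pcount))
 *			printf("pfn %lu mapped %llu time(s)\n", pfn,
 *			       (unsigned long long)pcount);
 *		close(fd);
 *		return 0;
 *	}
 */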
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_count will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

	if (PageBalloon(page))
		u |= 1 << KPF_BALLOON;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
fs_initcall(proc_page_init);
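/*
 * Illustrative userspace sketch, not part of this file: decoding an entry
 * read from /proc/kpageflags. The KPF_* bit numbers are the exported ones
 * from <linux/kernel-page-flags.h>, matching the bits set by
 * stable_page_flags() above; the pfn value below is a made-up example and
 * the checked flags are an arbitrary subset.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	int main(void)
 *	{
 *		uint64_t flags;
 *		unsigned long pfn = 0x1000;	// hypothetical pfn
 *		int fd = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// one u64 of flag bits per pfn, at offset pfn * sizeof(u64)
 *		if (pread(fd, &flags, sizeof(flags),
 *			  pfn * sizeof(flags)) == sizeof(flags))
 *			printf("pfn %lu:%s%s%s\n", pfn,
 *			       flags & (1ULL << KPF_NOPAGE) ? " nopage" : "",
 *			       flags & (1ULL << KPF_LRU)    ? " lru"    : "",
 *			       flags & (1ULL << KPF_THP)    ? " thp"    : "");
 *		close(fd);
 *		return 0;
 *	}
 */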