#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
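
/*
 * Usage sketch (illustrative, not part of this header): a fault path
 * can bump the global fault counter with
 *
 *	count_vm_event(PGFAULT);
 *
 * while code that already runs with preemption disabled can use the
 * cheaper raw variant:
 *
 *	__count_vm_event(PGFAULT);
 *
 * PGFAULT is one of the vm_event_item values. Because these counters
 * are statistics only, an occasionally lost increment on the racy
 * raw_cpu_ops path is acceptable.
 */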

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
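
/*
 * Worked example: the per-zone vm_event_item values are laid out in
 * zone order (PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, ...),
 * mirroring enum zone_type, so ZONE_NORMAL serves as the common anchor
 * between the two enums.  For a zone whose zone_idx() is ZONE_DMA,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * pastes PGALLOC_NORMAL, and the offset arithmetic
 * (PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA) lands on PGALLOC_DMA.
 */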

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
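
/*
 * Note: zone_page_state_add() updates the per-zone and the global
 * counter together, which keeps global_page_state() consistent with
 * the sum of the per-zone values (modulo unfolded per-cpu deltas).
 */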

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * A more accurate version that also considers the currently pending
 * per-cpu deltas. For that we need to loop over all cpus to find the
 * current deltas. There is no synchronization, so the result is still
 * not exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
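
/*
 * Example (illustrative): under CONFIG_SMP, per-cpu deltas that have
 * not been folded back yet can make the atomic counter transiently
 * negative, which is why the readers above clamp to zero.  A check
 * that must not overestimate, e.g. a free-page watermark test, would
 * pay for the snapshot:
 *
 *	unsigned long nr_free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * while plain statistics output can live with the cheaper, staler
 * zone_page_state(zone, NR_FREE_PAGES).
 */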

#ifdef CONFIG_NUMA

extern unsigned long node_page_state(int node, enum zone_stat_item item);
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
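
/*
 * Usage sketch (based on how reclaim drives these thresholds): while
 * kswapd is awake, the per-cpu stat thresholds can be tightened so
 * that watermark checks see less counter drift, e.g.
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim runs ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 *
 * Smaller thresholds mean the per-cpu differentials are folded into
 * the global counters more often, at the cost of more cacheline
 * bouncing.
 */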
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
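
/*
 * Example (illustrative): the buddy allocator adjusts NR_FREE_PAGES as
 * pages enter or leave the free lists, and CMA pageblocks additionally
 * track NR_FREE_CMA_PAGES.  Freeing a buddy of order @order might do
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *
 * The __ variant is not irq safe on SMP, so callers are expected to
 * serialize updates themselves (typically under zone->lock).
 */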

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */