/*
 * Floating proportions
 *
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 *   period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
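
/*
 * Worked example of the layout described above: with shift = 10 the
 * period is 2^10 = 1024 events; the low shift - 1 = 9 bits of the
 * counter are the event count, and the bits from 9 upward hold the
 * period counter.
 */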

/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
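
/*
 * Example (an illustrative sketch, not part of this API; my_prop and
 * my_prop_init are hypothetical names): set up a descriptor with a
 * period of 2^10 events, then widen the period to 2^12.
 *
 *	static struct prop_descriptor my_prop;
 *
 *	static int my_prop_init(void)
 *	{
 *		int err = prop_descriptor_init(&my_prop, 10, GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *		prop_change_shift(&my_prop, 12);
 *		return 0;
 *	}
 */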

/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
/* this local's share of recent events, returned as *numerator / *denominator */
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}
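
/*
 * Example (hypothetical usage sketch, reusing my_prop from above; the
 * name my_local is made up, error handling omitted): each contender
 * owns a prop_local_percpu, bumps it per event and can read back its
 * share of the recent event stream.
 *
 *	static struct prop_local_percpu my_local;
 *	long num, den;
 *
 *	prop_local_init_percpu(&my_local, GFP_KERNEL);
 *	...
 *	prop_inc_percpu(&my_prop, &my_local);
 *	prop_fraction_percpu(&my_prop, &my_local, &num, &den);
 *	// this local contributed roughly num/den of recent events
 */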

/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
#endif

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
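
/*
 * Worked example: on 64-bit, PROP_MAX_SHIFT = 64/2 = 32, so
 * PROP_FRAC_SHIFT = 64 - 32 - 1 = 31 and PROP_FRAC_BASE = 1UL << 31.
 * On 32-bit, PROP_MAX_SHIFT = 3*32/4 = 24, PROP_FRAC_SHIFT = 7 and
 * PROP_FRAC_BASE = 128.
 */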

void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);
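
/*
 * Example (an assumption about the intended use of the frac argument,
 * based on its name; the fraction chosen is arbitrary): count an event
 * only while this local's share stays below frac / PROP_FRAC_BASE,
 * here 1/2 of the total.
 *
 *	__prop_inc_percpu_max(&my_prop, &my_local, PROP_FRAC_BASE / 2);
 */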


/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)			\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}
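
/*
 * Example (illustrative; the name my_single is hypothetical): define a
 * statically initialized single local counter.
 *
 *	static struct prop_local_single my_single =
 *		INIT_PROP_LOCAL_SINGLE(my_single);
 */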

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
/* this local's share of recent events, returned as *numerator / *denominator */
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}
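
/*
 * Example (hypothetical sketch, reusing my_prop and my_single from
 * above): count an event and read back the current share.
 *
 *	long num, den;
 *
 *	prop_inc_single(&my_prop, &my_single);
 *	prop_fraction_single(&my_prop, &my_single, &num, &den);
 *	// this local accounts for about num/den of recent events
 */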

#endif /* _LINUX_PROPORTIONS_H */