/*
 * This source file includes the following definitions:
 * - dummy_alloc
 * - dummy_free
 * - dummy_check
 * - alloc_work_func
 * - cleanup_work_func
 * - livepatch_shadow_mod_init
 * - livepatch_shadow_mod_exit
 */
/*
 * NOTE(extraction): lines 1-64 of the original file (SPDX tag, copyright,
 * and the introductory header comment) were lost during extraction; only
 * their bare line numbers survived and have been removed here.
 */
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/sched.h>
68 #include <linux/slab.h>
69 #include <linux/stat.h>
70 #include <linux/workqueue.h>
71
72 MODULE_LICENSE("GPL");
73 MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
74 MODULE_DESCRIPTION("Buggy module for shadow variable demo");
75
76
77 #define ALLOC_PERIOD 1
78
79 #define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
80
81 #define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
82
83
84
85
86
87 static LIST_HEAD(dummy_list);
88 static DEFINE_MUTEX(dummy_list_mutex);
89
90 struct dummy {
91 struct list_head list;
92 unsigned long jiffies_expire;
93 };
94
95 static __used noinline struct dummy *dummy_alloc(void)
96 {
97 struct dummy *d;
98 void *leak;
99
100 d = kzalloc(sizeof(*d), GFP_KERNEL);
101 if (!d)
102 return NULL;
103
104 d->jiffies_expire = jiffies +
105 msecs_to_jiffies(1000 * EXPIRE_PERIOD);
106
107
108 leak = kzalloc(sizeof(int), GFP_KERNEL);
109 if (!leak) {
110 kfree(d);
111 return NULL;
112 }
113
114 pr_info("%s: dummy @ %p, expires @ %lx\n",
115 __func__, d, d->jiffies_expire);
116
117 return d;
118 }
119
120 static __used noinline void dummy_free(struct dummy *d)
121 {
122 pr_info("%s: dummy @ %p, expired = %lx\n",
123 __func__, d, d->jiffies_expire);
124
125 kfree(d);
126 }
127
128 static __used noinline bool dummy_check(struct dummy *d,
129 unsigned long jiffies)
130 {
131 return time_after(jiffies, d->jiffies_expire);
132 }
133
134
135
136
137
138
139
140 static void alloc_work_func(struct work_struct *work);
141 static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);
142
143 static void alloc_work_func(struct work_struct *work)
144 {
145 struct dummy *d;
146
147 d = dummy_alloc();
148 if (!d)
149 return;
150
151 mutex_lock(&dummy_list_mutex);
152 list_add(&d->list, &dummy_list);
153 mutex_unlock(&dummy_list_mutex);
154
155 schedule_delayed_work(&alloc_dwork,
156 msecs_to_jiffies(1000 * ALLOC_PERIOD));
157 }
158
159
160
161
162
163
164
165 static void cleanup_work_func(struct work_struct *work);
166 static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);
167
168 static void cleanup_work_func(struct work_struct *work)
169 {
170 struct dummy *d, *tmp;
171 unsigned long j;
172
173 j = jiffies;
174 pr_info("%s: jiffies = %lx\n", __func__, j);
175
176 mutex_lock(&dummy_list_mutex);
177 list_for_each_entry_safe(d, tmp, &dummy_list, list) {
178
179
180 if (dummy_check(d, j)) {
181 list_del(&d->list);
182 dummy_free(d);
183 }
184 }
185 mutex_unlock(&dummy_list_mutex);
186
187 schedule_delayed_work(&cleanup_dwork,
188 msecs_to_jiffies(1000 * CLEANUP_PERIOD));
189 }
190
191 static int livepatch_shadow_mod_init(void)
192 {
193 schedule_delayed_work(&alloc_dwork,
194 msecs_to_jiffies(1000 * ALLOC_PERIOD));
195 schedule_delayed_work(&cleanup_dwork,
196 msecs_to_jiffies(1000 * CLEANUP_PERIOD));
197
198 return 0;
199 }
200
201 static void livepatch_shadow_mod_exit(void)
202 {
203 struct dummy *d, *tmp;
204
205
206 cancel_delayed_work_sync(&alloc_dwork);
207 cancel_delayed_work_sync(&cleanup_dwork);
208
209
210 list_for_each_entry_safe(d, tmp, &dummy_list, list) {
211 list_del(&d->list);
212 dummy_free(d);
213 }
214 }
215
216 module_init(livepatch_shadow_mod_init);
217 module_exit(livepatch_shadow_mod_exit);