This source file includes the following definitions (an illustrative usage sketch follows the list):
- klp_transition_work_fn
- klp_sync
- klp_synchronize_transition
- klp_complete_transition
- klp_cancel_transition
- klp_update_patch_state
- klp_check_stack_func
- klp_check_stack
- klp_try_switch_task
- klp_send_signals
- klp_try_complete_transition
- klp_start_transition
- klp_init_transition
- klp_reverse_transition
- klp_copy_process
- klp_force_transition
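
For orientation, here is a minimal sketch of how this transition API is typically driven when enabling a patch, in the spirit of the enable path in core.c. The function name demo_enable_patch, the simplified error handling, and the omitted pre/post-patch callbacks are assumptions for illustration only; this is not code from the file below.

/*
 * Illustrative sketch -- not part of transition.c.  Assumes the caller holds
 * klp_mutex; callbacks and much of the real error handling are omitted.
 */
static int demo_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	/* Record the target state and give every task its initial patch_state. */
	klp_init_transition(patch, KLP_PATCHED);

	/* Register the ftrace handlers for each loaded object (see patch.c). */
	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		ret = klp_patch_object(obj);
		if (ret) {
			/* Nothing has started migrating yet, so just cancel. */
			klp_cancel_transition();
			return ret;
		}
	}

	/* Flag every task that still needs to migrate with TIF_PATCH_PENDING. */
	klp_start_transition();
	patch->enabled = true;

	/* First completion attempt; klp_transition_work retries any stragglers. */
	klp_try_complete_transition();
	return 0;
}

If some tasks never reach the target state, the direction can be flipped with klp_reverse_transition() rather than leaving the system stuck mid-transition.
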
/*
 * transition.c - Kernel Live Patching transition functions
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

/* The state (KLP_PATCHED or KLP_UNPATCHED) all tasks are migrating toward. */
static int klp_target_state = KLP_UNDEFINED;

/* Counts failed completion attempts; stragglers are signaled periodically. */
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching
 * any "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force of
 * synchronize_rcu().  This requires synchronizing tasks even in userspace
 * and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow to patch also functions where RCU is not watching, e.g. the idle
 * loop and NMI handlers.  To achieve this, klp_synchronize_transition() must
 * use a hard force of synchronize_rcu(), implemented here by scheduling
 * klp_sync() on every CPU.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_discard_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() has not.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise it cannot be switched reliably if it's running.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the patch in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled) {
		klp_free_patch_start(patch);
		schedule_work(&patch->free_work);
	}
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can trigger a forced
 * transition, has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_for_each_patch(patch)
		patch->forced = true;
}