/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};
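
/*
 * For illustration (all names here are hypothetical): if two enabled
 * patches both replace a function do_foo(), a single klp_ops is
 * registered with ftrace for do_foo()'s old_addr, and its func_stack
 * holds both replacements:
 *
 *	klp_ops for do_foo()
 *		func_stack: patch2's klp_func	(top of stack, active)
 *		            patch1's klp_func
 *
 * Disabling patch2 just pops its klp_func off the stack; calls are then
 * redirected to patch1's replacement with no further ftrace churn.
 */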

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * a going module handler instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of the module coming and going
	 * notifiers.  Note that the patch might still be needed before the
	 * going handler is called.  Module functions can be called even in
	 * the GOING state until mod->exit() finishes.  This is especially
	 * important for patches that modify the semantics of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set.  If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the same
	 * name in the same object.
	 */
	unsigned long count;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}

static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};
	int ret;

	mutex_lock(&module_mutex);
	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
	mutex_unlock(&module_mutex);

	if (!ret) {
		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
			name, addr);
		return -EINVAL;
	}

	return 0;
}

static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* KASLR is enabled, disregard old_addr from user */
	func->old_addr = 0;
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}

/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}
		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}
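
/*
 * An illustrative (hypothetical) relocation table as consumed by the loop
 * above -- a patch module referencing an unexported symbol would attach
 * something like this to its klp_object, terminated by a NULL name:
 *
 *	static struct klp_reloc relocs[] = {
 *		{
 *			.loc = (unsigned long)&klp_unexported_func,
 *			.type = R_X86_64_PC32,
 *			.name = "unexported_func",
 *			.addend = -4,
 *			.external = 0,
 *		},
 *		{ }
 *	};
 *
 * For module objects, .val is left zero and discovered at patch time via
 * klp_find_object_symbol() or klp_find_external_symbol().
 */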
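
/*
 * The ftrace handler that actually redirects execution: it is called by
 * the ftrace trampoline on entry to a patched function, looks up the
 * currently active (top-of-stack) klp_func under RCU, and points the
 * saved instruction pointer at its replacement, so the original function
 * body is skipped entirely.
 */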
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	WARN_ON(func->state != KLP_ENABLED);
	WARN_ON(!func->old_addr);

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name; func++)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<func>
 */
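
/*
 * For example, assuming a loaded patch module named "livepatch_sample"
 * (the name is illustrative), the patch can be toggled from userspace:
 *
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 * enabled_store() below accepts only 0 (KLP_DISABLED) and 1 (KLP_ENABLED).
 */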

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	for (func = obj->funcs; func->old_name; func++)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    obj->kobj, "%s", func->old_name);
}

/* Parts of the initialization that are done only when the object is loaded. */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	obj->kobj = kobject_create_and_add(name, &patch->kobj);
	if (!obj->kobj)
		return -ENOMEM;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	for (obj = patch->objs; obj->funcs; obj++) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
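
/*
 * A minimal sketch of a patch module using this API, modeled on
 * samples/livepatch/livepatch-sample.c (function and symbol names are
 * illustrative; a NULL klp_object .name means the object is vmlinux):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret)
 *			WARN_ON(klp_unregister_patch(&patch));
 *		return ret;
 *	}
 *	module_init(livepatch_init);
 */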

static void klp_module_notify_coming(struct klp_patch *patch,
				     struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret)
		goto err;

	if (patch->state == KLP_DISABLED)
		return;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (!ret)
		return;

err:
	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
		pmod->name, mod->name, ret);
}

static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	klp_disable_object(obj);

disabled:
	klp_free_object_loaded(obj);
}

static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	/*
	 * Each module has to know that the notifier has been called.
	 * We never know which module will get patched by a new patch.
	 */
	if (action == MODULE_STATE_COMING)
		mod->klp_alive = true;
	else /* MODULE_STATE_GOING */
		mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		for (obj = patch->objs; obj->funcs; obj++) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				klp_module_notify_coming(patch, obj);
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}

static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

static int klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);