#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * encoding that NOP. The per-vendor arrays below simply concatenate them,
 * so the NOP of length X starts at the offset equal to the sum of the
 * lengths of all shorter NOPs (1 + 2 + ... + (X - 1)); for example,
 * intel_nops[5] points at the 5-byte GENERIC_NOP5.
 *
 * Note: GENERIC_NOP5_ATOMIC comes last because its 5-byte size breaks the
 * otherwise strictly increasing sequence of NOP lengths.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 * (0xeb is JMP rel8, 0xe9 is JMP rel32.)
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

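/*
 * The replacement ends in a relative JMP whose displacement was computed
 * for its location inside the replacement section. Once the replacement is
 * copied over the original instruction, that displacement is stale, so
 * recompute it relative to the original site; if the new displacement fits
 * in 8 bits, use the short 2-byte JMP and pad the rest with NOPs.
 */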
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

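/*
 * Called for sites whose alternative is not applied (feature bit clear).
 * If the site starts with a NOP, rewrite the padding bytes at its end with
 * this CPU's ideal_nops so the unpatched path decodes as few instructions
 * as possible.
 */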
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;

	if (instr[0] != 0x90)
		return;

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->instrlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. An alternative scanned
	 * later may overwrite one scanned earlier; some kernel functions
	 * (e.g. memcpy, memset) rely on this ordering when patching.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

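		/*
		 * instr_offset and repl_offset are stored relative to the
		 * struct alt_instr entry itself, so the table needs no
		 * absolute relocations; convert them back to pointers here.
		 */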
		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;
		/*
		 * 0xe8 is a relative CALL; fix its offset.
		 *
		 * Check the length before the opcode so we never read
		 * insnbuf[0] for a zero-length replacement.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

#ifdef CONFIG_SMP
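/*
 * UP-vs-SMP lock prefix patching: the __smp_locks section records a
 * relative pointer to every LOCK prefix (0xf0) emitted by LOCK_PREFIX.
 * While only one CPU can ever run, each prefix is overwritten with a
 * harmless DS segment-override prefix (0x3e); when a second CPU is
 * brought up, alternatives_enable_smp() restores the LOCK prefixes.
 */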
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

struct smp_alt_module {
	/* the module that owns these lock prefixes; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/*
 * Return 1 if the address range is reserved for SMP alternatives, i.e. it
 * covers a recorded lock-prefix byte that may still be rewritten; callers
 * such as kprobes use this to avoid patching over those bytes.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
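/*
 * Each paravirt_patch_site describes an indirect paravirt call emitted by
 * the PARAVIRT macros. pv_init_ops.patch() rewrites the site in insnbuf,
 * either inlining a native instruction sequence or re-emitting the call,
 * and returns how many bytes it produced; the remainder is padded with NOPs.
 */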
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code that is about to be
	 * patched. Other CPUs are not running yet.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
					      size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
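	/*
	 * Kernel text may be mapped read-only, so map the affected page(s)
	 * at a writable alias through the FIX_TEXT_POKE fixmap slots and
	 * patch through that alias. Two slots are needed in case the
	 * patched range straddles a page boundary.
	 */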
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

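/*
 * IPI callback for on_each_cpu(): execute a serializing instruction on
 * every CPU so that all of them resume fetching from the freshly patched
 * instruction stream.
 */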
static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

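/*
 * Called from the int3 exception handler for every #BP trap. When a
 * text_poke_bp() is in flight and the trap came from the byte being
 * patched, divert execution to the caller-supplied handler instead of
 * treating it as a normal breakpoint. Note that the trapping regs->ip
 * points just past the int3 byte, which is why it is compared against
 * bp_int3_addr = addr + 1.
 */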
int poke_int3_handler(struct pt_regs *regs)
{
	/*
	 * Pairs with the smp_wmb() in text_poke_bp(), which publishes
	 * bp_int3_handler/bp_int3_addr/bp_patching_in_progress before
	 * the int3 byte is written.
	 */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using the int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) with the first byte of the
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in the int3 notifier, making sure the
	 * in_progress flag is correctly ordered wrt. the patching below.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}