#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
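 *
 * For example, intel_nops[4] below points at the 4-byte GENERIC_NOP4
 * encoding, which starts 1 + 2 + 3 bytes into the flat intelnops[]
 * array, i.e. just past the 1-, 2- and 3-byte NOPs.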
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
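		 * The family/model checks below apply k8_nops only on
		 * that narrow range of Family 6 parts; anything else
		 * with NOPL keeps the p6_nops.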
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;

	if (instr[0] != 0x90)
		return;

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	sync_core();
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix the offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

struct smp_alt_module {
	/* the module that owns these lock fixups; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code while it is being
	 * patched. Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other CPUs are not imminent. */
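	/*
	 * That is: only one CPU is physically present, or the boot was
	 * capped to a single CPU (e.g. via "maxcpus=" or "nosmp").
	 */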
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	/*
	 * Keep the smp-lock fixup table only if we patched to UP and more
	 * CPUs may still show up; otherwise it will never be needed again.
	 */
	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
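	/* verify that the patched bytes actually landed before returning */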
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* pairs with the smp_wmb() in text_poke_bp() ordering bp_patching_in_progress */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoints.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress flag is correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}
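
/*
 * Illustrative sketch of a typical caller, modeled on the jump label code:
 * turning a 5-byte NOP at a hypothetical "addr" into a relative JMP to
 * "target", with text_mutex held by the caller:
 *
 *	u8 jmp[5] = { 0xe9, };
 *
 *	*(s32 *)&jmp[1] = (s32)(target - (addr + 5));
 *	text_poke_bp(addr, jmp, sizeof(jmp), addr + sizeof(jmp));
 *
 * The @handler argument (addr + 5 here) sends any CPU that hits the
 * transient int3 to the instruction following the patch site, i.e. it
 * keeps behaving as if the old NOP were still in place while the new
 * JMP is being written.
 */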