/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}


static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(this_cpu_ptr(&cpu_msrs));
	return NMI_HANDLED;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

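/*
 * Translate between physical counters and the virtual counters of the
 * current multiplexing round: physical counter 'phys' backs virtual
 * counter 'switch_index + phys', and a virtual counter always uses
 * physical counter 'virt % num_counters'.
 */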
inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

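/*
 * CPU-local multiplexing round: stop the counters, save the currently
 * active group of virtual counters, advance switch_index to the next
 * group, reprogram the controls and restore that group, then restart.
 */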
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}


/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}

static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}

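/*
 * Per-CPU setup, run via IPI on every online CPU: save the current MSR
 * contents, program the counters, and redirect the local APIC
 * performance counter interrupt to NMI delivery.
 */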
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on the apic lvt contains a zero vector nr, which is legal only
	 * for NMI delivery mode. So inhibit apic error interrupts before
	 * restoring lvtpc.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}

static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}

static int nmi_create_files(struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace apps.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(root, buf);
		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
	}

	return 0;
}

static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};

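/*
 * Global setup: fill in the MSR addresses once on CPU 0 and copy them to
 * all other CPUs, register the NMI handler and the CPU hotplug notifier,
 * and program every online CPU.
 */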
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}

static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
	get_online_cpus();
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	__unregister_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume = nmi_resume,
	.suspend = nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

static int force_cpu_type;

static int set_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "timer")) {
		force_cpu_type = timer;
		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
	} else if (!strcmp(str, "arch_perfmon")) {
		force_cpu_type = arch_perfmon;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	} else {
		force_cpu_type = 0;
	}

	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);

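/*
 * Pick the P6-family cpu_type string and model spec from the CPU model
 * number.  Returns 1 on success; returns 0 (for unknown models, or when
 * arch perfmon is forced) so the caller can fall back to the
 * architectural perfmon code.
 */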
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}

int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	exit_suspend_resume();
}