/*
 * Intel Running Average Power Limit (RAPL) Driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
#include <asm/iosf_mbi.h>

#include <asm/processor.h>
#include <asm/cpu_device_id.h>

/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK	0xffffffff

#define POWER_LIMIT1_MASK	0x7FFF
#define POWER_LIMIT1_ENABLE	BIT(15)
#define POWER_LIMIT1_CLAMP	BIT(16)

#define POWER_LIMIT2_MASK	(0x7FFFULL<<32)
#define POWER_LIMIT2_ENABLE	BIT_ULL(47)
#define POWER_LIMIT2_CLAMP	BIT_ULL(48)
#define POWER_PACKAGE_LOCK	BIT_ULL(63)
#define POWER_PP_LOCK		BIT(31)

#define TIME_WINDOW1_MASK	(0x7FULL<<17)
#define TIME_WINDOW2_MASK	(0x7FULL<<49)

#define POWER_UNIT_OFFSET	0
#define POWER_UNIT_MASK		0x0F

#define ENERGY_UNIT_OFFSET	0x08
#define ENERGY_UNIT_MASK	0x1F00

#define TIME_UNIT_OFFSET	0x10
#define TIME_UNIT_MASK		0xF0000

#define POWER_INFO_MAX_MASK		(0x7fffULL<<32)
#define POWER_INFO_MIN_MASK		(0x7fffULL<<16)
#define POWER_INFO_MAX_TIME_WIN_MASK	(0x3fULL<<48)
#define POWER_INFO_THERMAL_SPEC_MASK	0x7fff

#define PERF_STATUS_THROTTLE_TIME_MASK	0xffffffff
#define PP_POLICY_MASK			0x1F

/* Non HW constants */
#define RAPL_PRIMITIVE_DERIVED	BIT(1) /* not from raw data */
#define RAPL_PRIMITIVE_DUMMY	BIT(2)

#define TIME_WINDOW_MAX_MSEC	40000
#define TIME_WINDOW_MIN_MSEC	250
#define ENERGY_UNIT_SCALE	1000 /* scale from driver unit to powercap unit */
enum unit_type {
	ARBITRARY_UNIT, /* no translation */
	POWER_UNIT,
	ENERGY_UNIT,
	TIME_UNIT,
};

enum rapl_domain_type {
	RAPL_DOMAIN_PACKAGE,	/* entire package/socket */
	RAPL_DOMAIN_PP0,	/* core power plane */
	RAPL_DOMAIN_PP1,	/* graphics uncore */
	RAPL_DOMAIN_DRAM,	/* DRAM control_type */
	RAPL_DOMAIN_MAX,
};

enum rapl_domain_msr_id {
	RAPL_DOMAIN_MSR_LIMIT,
	RAPL_DOMAIN_MSR_STATUS,
	RAPL_DOMAIN_MSR_PERF,
	RAPL_DOMAIN_MSR_POLICY,
	RAPL_DOMAIN_MSR_INFO,
	RAPL_DOMAIN_MSR_MAX,
};

/* per domain data, some are optional */
enum rapl_primitives {
	ENERGY_COUNTER,
	POWER_LIMIT1,
	POWER_LIMIT2,
	FW_LOCK,

	PL1_ENABLE,	/* power limit 1, aka long term */
	PL1_CLAMP,	/* allow frequency to go below OS request */
	PL2_ENABLE,	/* power limit 2, aka short term, instantaneous */
	PL2_CLAMP,

	TIME_WINDOW1,	/* long term */
	TIME_WINDOW2,	/* short term */
	THERMAL_SPEC_POWER,
	MAX_POWER,

	MIN_POWER,
	MAX_TIME_WINDOW,
	THROTTLED_TIME,
	PRIORITY_LEVEL,

	/* below are not raw primitive data */
	AVERAGE_POWER,
	NR_RAPL_PRIMITIVES,
};

#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)

/* Can be expanded to include events, etc. */
struct rapl_domain_data {
	u64 primitives[NR_RAPL_PRIMITIVES];
	unsigned long timestamp;
};

#define DOMAIN_STATE_INACTIVE		BIT(0)
#define DOMAIN_STATE_POWER_LIMIT_SET	BIT(1)
#define DOMAIN_STATE_BIOS_LOCKED	BIT(2)

#define NR_POWER_LIMITS (2)
struct rapl_power_limit {
	struct powercap_zone_constraint *constraint;
	int prim_id; /* primitive ID used to enable */
	struct rapl_domain *domain;
	const char *name;
};

static const char pl1_name[] = "long_term";
static const char pl2_name[] = "short_term";

struct rapl_domain {
	const char *name;
	enum rapl_domain_type id;
	int msrs[RAPL_DOMAIN_MSR_MAX];
	struct powercap_zone power_zone;
	struct rapl_domain_data rdd;
	struct rapl_power_limit rpl[NR_POWER_LIMITS];
	u64 attr_map; /* track capabilities */
	unsigned int state;
	unsigned int domain_energy_unit;
	int package_id;
};
#define power_zone_to_rapl_domain(_zone) \
	container_of(_zone, struct rapl_domain, power_zone)

/* Each physical package contains multiple domains, these are the common
 * data across RAPL domains within a package.
 */
struct rapl_package {
	unsigned int id; /* physical package/socket id */
	unsigned int nr_domains;
	unsigned long domain_map; /* bit map of active domains */
	unsigned int power_unit;
	unsigned int energy_unit;
	unsigned int time_unit;
	struct rapl_domain *domains; /* array of domains, sized at runtime */
	struct powercap_zone *power_zone; /* keep track of parent zone */
	int nr_cpus; /* active CPUs on the package; topology info is lost during
		      * CPU hotplug, so we have to track it ourselves.
		      */
	unsigned long power_limit_irq; /* keep track of package power limit
					* notify interrupt enable status.
					*/
	struct list_head plist;
};

struct rapl_defaults {
	u8 floor_freq_reg_addr;
	int (*check_unit)(struct rapl_package *rp, int cpu);
	void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
	u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
				   bool to_raw);
	unsigned int dram_domain_energy_unit;
};
static struct rapl_defaults *rapl_defaults;

/* Sideband MBI registers */
#define IOSF_CPU_POWER_BUDGET_CTL_BYT	(0x2)
#define IOSF_CPU_POWER_BUDGET_CTL_TNG	(0xdf)

#define PACKAGE_PLN_INT_SAVED	BIT(0)
#define MAX_PRIM_NAME		(32)

/* per domain data. used to describe individual knobs such that access function
 * can be consolidated into one instead of many inline functions.
 */
struct rapl_primitive_info {
	const char *name;
	u64 mask;
	int shift;
	enum rapl_domain_msr_id id;
	enum unit_type unit;
	u32 flag;
};

#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) {	\
		.name = #p,			\
		.mask = m,			\
		.shift = s,			\
		.id = i,			\
		.unit = u,			\
		.flag = f			\
	}

static void rapl_init_domains(struct rapl_package *rp);
static int rapl_read_data_raw(struct rapl_domain *rd,
			      enum rapl_primitives prim,
			      bool xlate, u64 *data);
static int rapl_write_data_raw(struct rapl_domain *rd,
			       enum rapl_primitives prim,
			       unsigned long long value);
static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
			   enum unit_type type, u64 value,
			   int to_raw);
static void package_power_limit_irq_save(int package_id);

static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */

static const char * const rapl_domain_names[] = {
	"package",
	"core",
	"uncore",
	"dram",
};

static struct powercap_control_type *control_type; /* PowerCap Controller */

/* caller to ensure CPU hotplug lock is held */
static struct rapl_package *find_package_by_id(int id)
{
	struct rapl_package *rp;

	list_for_each_entry(rp, &rapl_packages, plist) {
		if (rp->id == id)
			return rp;
	}

	return NULL;
}

/* caller to ensure CPU hotplug lock is held */
static int find_active_cpu_on_package(int package_id)
{
	int i;

	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) == package_id)
			return i;
	}
	/* all CPUs on this package are offline */

	return -ENODEV;
}

/* caller must hold cpu hotplug lock */
static void rapl_cleanup_data(void)
{
	struct rapl_package *p, *tmp;

	list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
		kfree(p->domains);
		list_del(&p->plist);
		kfree(p);
	}
}

static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
{
	struct rapl_domain *rd;
	u64 energy_now;

	/* prevent CPU hotplug, make sure the RAPL domain does not go
	 * away while reading the counter.
	 */
	get_online_cpus();
	rd = power_zone_to_rapl_domain(power_zone);

	if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
		*energy_raw = energy_now;
		put_online_cpus();

		return 0;
	}
	put_online_cpus();

	return -EIO;
}

static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
{
	struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);

	*energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
	return 0;
}

static int release_zone(struct powercap_zone *power_zone)
{
	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
	struct rapl_package *rp;

	/* package zone is the last zone of a package, we can free
	 * memory here since all children have been unregistered.
	 */
	if (rd->id == RAPL_DOMAIN_PACKAGE) {
		rp = find_package_by_id(rd->package_id);
		if (!rp) {
			dev_warn(&power_zone->dev, "no package id %s\n",
				 rd->name);
			return -ENODEV;
		}
		kfree(rd);
		rp->domains = NULL;
	}

	return 0;
}

static int find_nr_power_limit(struct rapl_domain *rd)
{
	int i;

	for (i = 0; i < NR_POWER_LIMITS; i++) {
		if (rd->rpl[i].name == NULL)
			break;
	}

	return i;
}

static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
{
	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);

	if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
		return -EACCES;

	get_online_cpus();
	rapl_write_data_raw(rd, PL1_ENABLE, mode);
	if (rapl_defaults->set_floor_freq)
		rapl_defaults->set_floor_freq(rd, mode);
	put_online_cpus();

	return 0;
}

static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
{
	struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
	u64 val;

	if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
		*mode = false;
		return 0;
	}
	get_online_cpus();
	if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
		put_online_cpus();
		return -EIO;
	}
	*mode = val;
	put_online_cpus();

	return 0;
}

/* per RAPL domain ops, in the order of rapl_domain_type */
static struct powercap_zone_ops zone_ops[] = {
	/* RAPL_DOMAIN_PACKAGE */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_PP0 */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_PP1 */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
	/* RAPL_DOMAIN_DRAM */
	{
		.get_energy_uj = get_energy_counter,
		.get_max_energy_range_uj = get_max_energy_counter,
		.release = release_zone,
		.set_enable = set_domain_enable,
		.get_enable = get_domain_enable,
	},
};

static int set_power_limit(struct powercap_zone *power_zone, int id,
			   u64 power_limit)
{
	struct rapl_domain *rd;
	struct rapl_package *rp;
	int ret = 0;

	get_online_cpus();
	rd = power_zone_to_rapl_domain(power_zone);
	rp = find_package_by_id(rd->package_id);
	if (!rp) {
		ret = -ENODEV;
		goto set_exit;
	}

	if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
		dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
			 rd->name);
		ret = -EACCES;
		goto set_exit;
	}

	switch (rd->rpl[id].prim_id) {
	case PL1_ENABLE:
		rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
		break;
	case PL2_ENABLE:
		rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
		break;
	default:
		ret = -EINVAL;
	}
	if (!ret)
		package_power_limit_irq_save(rd->package_id);
set_exit:
	put_online_cpus();
	return ret;
}

static int get_current_power_limit(struct powercap_zone *power_zone, int id,
				   u64 *data)
{
	struct rapl_domain *rd;
	u64 val;
	int prim;
	int ret = 0;

	get_online_cpus();
	rd = power_zone_to_rapl_domain(power_zone);
	switch (rd->rpl[id].prim_id) {
	case PL1_ENABLE:
		prim = POWER_LIMIT1;
		break;
	case PL2_ENABLE:
		prim = POWER_LIMIT2;
		break;
	default:
		put_online_cpus();
		return -EINVAL;
	}
	if (rapl_read_data_raw(rd, prim, true, &val))
		ret = -EIO;
	else
		*data = val;

	put_online_cpus();

	return ret;
}

static int set_time_window(struct powercap_zone *power_zone, int id,
			   u64 window)
{
	struct rapl_domain *rd;
	int ret = 0;

	get_online_cpus();
	rd = power_zone_to_rapl_domain(power_zone);
	switch (rd->rpl[id].prim_id) {
	case PL1_ENABLE:
		rapl_write_data_raw(rd, TIME_WINDOW1, window);
		break;
	case PL2_ENABLE:
		rapl_write_data_raw(rd, TIME_WINDOW2, window);
		break;
	default:
		ret = -EINVAL;
	}
	put_online_cpus();
	return ret;
}

static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
{
	struct rapl_domain *rd;
	u64 val;
	int ret = 0;

	get_online_cpus();
	rd = power_zone_to_rapl_domain(power_zone);
	switch (rd->rpl[id].prim_id) {
	case PL1_ENABLE:
		ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
		break;
	case PL2_ENABLE:
		ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
		break;
	default:
		put_online_cpus();
		return -EINVAL;
	}
	if (!ret)
		*data = val;
	put_online_cpus();

	return ret;
}

static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
{
	struct rapl_power_limit *rpl;
	struct rapl_domain *rd;

	rd = power_zone_to_rapl_domain(power_zone);
	rpl = (struct rapl_power_limit *) &rd->rpl[id];

	return rpl->name;
}

static int get_max_power(struct powercap_zone *power_zone, int id,
			 u64 *data)
{
	struct rapl_domain *rd;
	u64 val;
	int prim;
	int ret = 0;

	get_online_cpus();
	rd = power_zone_to_rapl_domain(power_zone);
	switch (rd->rpl[id].prim_id) {
	case PL1_ENABLE:
		prim = THERMAL_SPEC_POWER;
		break;
	case PL2_ENABLE:
		prim = MAX_POWER;
		break;
	default:
		put_online_cpus();
		return -EINVAL;
	}
	if (rapl_read_data_raw(rd, prim, true, &val))
		ret = -EIO;
	else
		*data = val;

	put_online_cpus();

	return ret;
}

static struct powercap_zone_constraint_ops constraint_ops = {
	.set_power_limit_uw = set_power_limit,
	.get_power_limit_uw = get_current_power_limit,
	.set_time_window_us = set_time_window,
	.get_time_window_us = get_time_window,
	.get_max_power_uw = get_max_power,
	.get_name = get_constraint_name,
};

/* called after domain detection and package level data are set */
static void rapl_init_domains(struct rapl_package *rp)
{
	int i;
	struct rapl_domain *rd = rp->domains;

	for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
		unsigned int mask = rp->domain_map & (1 << i);
		switch (mask) {
		case BIT(RAPL_DOMAIN_PACKAGE):
			rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
			rd->id = RAPL_DOMAIN_PACKAGE;
			rd->msrs[0] = MSR_PKG_POWER_LIMIT;
			rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
			rd->msrs[2] = MSR_PKG_PERF_STATUS;
			rd->msrs[3] = 0;
			rd->msrs[4] = MSR_PKG_POWER_INFO;
			rd->rpl[0].prim_id = PL1_ENABLE;
			rd->rpl[0].name = pl1_name;
			rd->rpl[1].prim_id = PL2_ENABLE;
			rd->rpl[1].name = pl2_name;
			break;
		case BIT(RAPL_DOMAIN_PP0):
			rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
			rd->id = RAPL_DOMAIN_PP0;
			rd->msrs[0] = MSR_PP0_POWER_LIMIT;
			rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
			rd->msrs[2] = 0;
			rd->msrs[3] = MSR_PP0_POLICY;
			rd->msrs[4] = 0;
			rd->rpl[0].prim_id = PL1_ENABLE;
			rd->rpl[0].name = pl1_name;
			break;
		case BIT(RAPL_DOMAIN_PP1):
			rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
			rd->id = RAPL_DOMAIN_PP1;
			rd->msrs[0] = MSR_PP1_POWER_LIMIT;
			rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
			rd->msrs[2] = 0;
			rd->msrs[3] = MSR_PP1_POLICY;
			rd->msrs[4] = 0;
			rd->rpl[0].prim_id = PL1_ENABLE;
			rd->rpl[0].name = pl1_name;
			break;
		case BIT(RAPL_DOMAIN_DRAM):
			rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
			rd->id = RAPL_DOMAIN_DRAM;
			rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
			rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
			rd->msrs[2] = MSR_DRAM_PERF_STATUS;
			rd->msrs[3] = 0;
			rd->msrs[4] = MSR_DRAM_POWER_INFO;
			rd->rpl[0].prim_id = PL1_ENABLE;
			rd->rpl[0].name = pl1_name;
			rd->domain_energy_unit =
				rapl_defaults->dram_domain_energy_unit;
			if (rd->domain_energy_unit)
				pr_info("DRAM domain energy unit %dpj\n",
					rd->domain_energy_unit);
			break;
		}
		if (mask) {
			rd->package_id = rp->id;
			rd++;
		}
	}
}

static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
			   enum unit_type type, u64 value,
			   int to_raw)
{
	u64 units = 1;
	struct rapl_package *rp;
	u64 scale = 1;

	rp = find_package_by_id(package);
	if (!rp)
		return value;

	switch (type) {
	case POWER_UNIT:
		units = rp->power_unit;
		break;
	case ENERGY_UNIT:
		scale = ENERGY_UNIT_SCALE;
		/* per domain unit takes precedence */
		if (rd && rd->domain_energy_unit)
			units = rd->domain_energy_unit;
		else
			units = rp->energy_unit;
		break;
	case TIME_UNIT:
		return rapl_defaults->compute_time_window(rp, value, to_raw);
	case ARBITRARY_UNIT:
	default:
		return value;
	}

	if (to_raw)
		return div64_u64(value, units) * scale;

	value *= units;

	return div64_u64(value, scale);
}

/* in the order of enum rapl_primitives */
static struct rapl_primitive_info rpi[] = {
	/* name, mask, shift, msr index, unit divisor */
	PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
			    RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
	PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
			    RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
	PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
			    RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
	PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
			    RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
	PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
			    RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
	PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
			    RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
	PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
			    RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
	PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
			    RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
	PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
			    RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
	PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
			    RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
	PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
			    0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
	PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
			    RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
	PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
			    RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
	PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
			    RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
	PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
			    RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
	PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
			    RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
	/* non-hardware */
	PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
			    RAPL_PRIMITIVE_DERIVED),
	{NULL, 0, 0, 0},
};

/* Read primitive data based on its related struct rapl_primitive_info.
 * If the xlate flag is set, return translated data based on data units, i.e.
 * time, energy, and power.
 * RAPL MSRs are non-architectural and are laid out inconsistently across
 * domains. Here we use primitive info to allow writing consolidated access
 * functions.
 * For a given primitive, it is processed by MSR mask and shift. Unit conversion
 * is pre-assigned based on RAPL unit MSRs read at init time.
 * 63-------------------------- 31--------------------------- 0
 * |                xxxxx (mask)                              |
 * |                     |<- shift ------------------------->|
 * 63-------------------------- 31--------------------------- 0
 */
static int rapl_read_data_raw(struct rapl_domain *rd,
			      enum rapl_primitives prim,
			      bool xlate, u64 *data)
{
	u64 value, final;
	u32 msr;
	struct rapl_primitive_info *rp = &rpi[prim];
	int cpu;

	if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
		return -EINVAL;

	msr = rd->msrs[rp->id];
	if (!msr)
		return -EINVAL;
	/* use physical package id to look up active cpus */
	cpu = find_active_cpu_on_package(rd->package_id);
	if (cpu < 0)
		return cpu;

	/* special-case package domain, which uses a different bit */
	if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
		rp->mask = POWER_PACKAGE_LOCK;
		rp->shift = 63;
	}
	/* non-hardware data are collected by the polling thread */
	if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
		*data = rd->rdd.primitives[prim];
		return 0;
	}

	if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
		pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
		return -EIO;
	}

	final = value & rp->mask;
	final = final >> rp->shift;
	if (xlate)
		*data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
	else
		*data = final;

	return 0;
}

/* Similar use of primitive info in the read counterpart */
static int rapl_write_data_raw(struct rapl_domain *rd,
			       enum rapl_primitives prim,
			       unsigned long long value)
{
	u64 msr_val;
	u32 msr;
	struct rapl_primitive_info *rp = &rpi[prim];
	int cpu;

	cpu = find_active_cpu_on_package(rd->package_id);
	if (cpu < 0)
		return cpu;
	msr = rd->msrs[rp->id];
	if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
		dev_dbg(&rd->power_zone.dev,
			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
		return -EIO;
	}
	value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
	msr_val &= ~rp->mask;
	msr_val |= value << rp->shift;
	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
		dev_dbg(&rd->power_zone.dev,
			"failed to write msr 0x%x on cpu %d\n", msr, cpu);
		return -EIO;
	}

	return 0;
}

/*
 * Raw RAPL data stored in MSRs are in certain scales. We need to
 * convert them into standard units based on the units reported in
 * the RAPL unit MSRs. This is specific to CPUs as the method to
 * calculate units differs on different CPUs.
 * We convert the units to the format below based on the CPU, i.e.:
 * energy unit: picoJoules  : Represented in picoJoules by default
 * power unit : microWatts  : Represented in milliWatts by default
 * time unit  : microseconds: Represented in seconds by default
 */
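/* Worked example, added for illustration only (typical values, not read from
 * any particular part; the real exponents come from MSR_RAPL_POWER_UNIT at
 * init time): msr_val = 0x000a0e03 decodes as power exponent 3, energy
 * exponent 14 and time exponent 10, so rapl_check_unit_core() below computes
 * power_unit = 1000000 / 2^3 = 125000 uW per count,
 * energy_unit = 1000 * 1000000 / 2^14 = 61035 (~61 uJ per count, kept scaled
 * by ENERGY_UNIT_SCALE for the powercap uJ interface), and
 * time_unit = 1000000 / 2^10 = 976 us per count.
 */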
static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
{
	u64 msr_val;
	u32 value;

	if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
		pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
			MSR_RAPL_POWER_UNIT, cpu);
		return -ENODEV;
	}

	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
	rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);

	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
	rp->power_unit = 1000000 / (1 << value);

	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
	rp->time_unit = 1000000 / (1 << value);

	pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);

	return 0;
}

static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
{
	u64 msr_val;
	u32 value;

	if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
		pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
			MSR_RAPL_POWER_UNIT, cpu);
		return -ENODEV;
	}
	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
	rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;

	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
	rp->power_unit = (1 << value) * 1000;

	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
	rp->time_unit = 1000000 / (1 << value);

	pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);

	return 0;
}

/* REVISIT:
 * When package power limit is set artificially low by RAPL, LVT
 * thermal interrupt for package power limit should be ignored
 * since we are not really exceeding the real limit. The intention
 * is to avoid excessive interrupts while we are trying to save power.
 * A useful feature might be routing the package_power_limit interrupt
 * to userspace via eventfd. Once we have a use case, this is simple
 * to do by adding an atomic notifier.
 */
static void package_power_limit_irq_save(int package_id)
{
	u32 l, h = 0;
	int cpu;
	struct rapl_package *rp;

	rp = find_package_by_id(package_id);
	if (!rp)
		return;

	if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
		return;

	cpu = find_active_cpu_on_package(package_id);
	if (cpu < 0)
		return;
	/* save the state of PLN irq mask bit before disabling it */
	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
		rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
		rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
	}
	l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

/* restore per package power limit interrupt enable state */
static void package_power_limit_irq_restore(int package_id)
{
	u32 l, h;
	int cpu;
	struct rapl_package *rp;

	rp = find_package_by_id(package_id);
	if (!rp)
		return;

	if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
		return;

	cpu = find_active_cpu_on_package(package_id);
	if (cpu < 0)
		return;

	/* irq enable state not saved, nothing to restore */
	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
		return;
	rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);

	if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
		l |= PACKAGE_THERM_INT_PLN_ENABLE;
	else
		l &= ~PACKAGE_THERM_INT_PLN_ENABLE;

	wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
{
	int nr_powerlimit = find_nr_power_limit(rd);

	/* always enable clamp such that p-state can go below OS requested
	 * range. power capping takes priority over guaranteed frequency.
	 */
	rapl_write_data_raw(rd, PL1_CLAMP, mode);

	/* some domains have pl2 */
	if (nr_powerlimit > 1) {
		rapl_write_data_raw(rd, PL2_ENABLE, mode);
		rapl_write_data_raw(rd, PL2_CLAMP, mode);
	}
}

static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
{
	static u32 power_ctrl_orig_val;
	u32 mdata;

	if (!rapl_defaults->floor_freq_reg_addr) {
		pr_err("Invalid floor frequency config register\n");
		return;
	}

	if (!power_ctrl_orig_val)
		iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ,
			      rapl_defaults->floor_freq_reg_addr,
			      &power_ctrl_orig_val);
	mdata = power_ctrl_orig_val;
	if (enable) {
		mdata &= ~(0x7f << 8);
		mdata |= 1 << 8;
	}
	iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE,
		       rapl_defaults->floor_freq_reg_addr, mdata);
}

static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
					 bool to_raw)
{
	u64 f, y; /* fraction and exponent used for the time unit */

	/*
	 * Special processing based on 2^Y*(1+F/4), refer
	 * to Intel Software Developer's manual Vol.3B: CH 14.9.3.
	 */
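	/*
	 * Illustrative walk-through of the decode path below, assuming the
	 * typical core time_unit of 1000000 / 2^10 = 976 usec:
	 *   raw value 0x24 -> F = (0x24 & 0x60) >> 5 = 1, Y = 0x24 & 0x1f = 4,
	 *   so the window is (1 << 4) * (4 + 1) * 976 / 4 = 19520 usec.
	 */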
	if (!to_raw) {
		f = (value & 0x60) >> 5;
		y = value & 0x1f;
		value = (1 << y) * (4 + f) * rp->time_unit / 4;
	} else {
		do_div(value, rp->time_unit);
		y = ilog2(value);
		f = div64_u64(4 * (value - (1 << y)), 1 << y);
		value = (y & 0x1f) | ((f & 0x3) << 5);
	}
	return value;
}

static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value,
					 bool to_raw)
{
	/*
	 * Atom time unit encoding is straightforward: val * time_unit,
	 * where time_unit defaults to 1 sec. Never 0.
	 */
	if (!to_raw)
		return (value) ? value *= rp->time_unit : rp->time_unit;
	else
		value = div64_u64(value, rp->time_unit);

	return value;
}

static const struct rapl_defaults rapl_defaults_core = {
	.floor_freq_reg_addr = 0,
	.check_unit = rapl_check_unit_core,
	.set_floor_freq = set_floor_freq_default,
	.compute_time_window = rapl_compute_time_window_core,
};

static const struct rapl_defaults rapl_defaults_hsw_server = {
	.check_unit = rapl_check_unit_core,
	.set_floor_freq = set_floor_freq_default,
	.compute_time_window = rapl_compute_time_window_core,
	.dram_domain_energy_unit = 15300,
};

static const struct rapl_defaults rapl_defaults_byt = {
	.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = set_floor_freq_atom,
	.compute_time_window = rapl_compute_time_window_atom,
};

static const struct rapl_defaults rapl_defaults_tng = {
	.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_TNG,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = set_floor_freq_atom,
	.compute_time_window = rapl_compute_time_window_atom,
};

static const struct rapl_defaults rapl_defaults_ann = {
	.floor_freq_reg_addr = 0,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = NULL,
	.compute_time_window = rapl_compute_time_window_atom,
};

static const struct rapl_defaults rapl_defaults_cht = {
	.floor_freq_reg_addr = 0,
	.check_unit = rapl_check_unit_atom,
	.set_floor_freq = NULL,
	.compute_time_window = rapl_compute_time_window_atom,
};

#define RAPL_CPU(_model, _ops) {			\
		.vendor = X86_VENDOR_INTEL,		\
		.family = 6,				\
		.model = _model,			\
		.driver_data = (kernel_ulong_t)&_ops,	\
		}

static const struct x86_cpu_id rapl_ids[] __initconst = {
	RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
	RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
	RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
	RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
	RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
	RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
	RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
	RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
	RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
	RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
	RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
	RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
	RAPL_CPU(0x5A, rapl_defaults_ann),/* Anniedale */
	RAPL_CPU(0x5C, rapl_defaults_core),/* Broxton */
	RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
	RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);

/* read once for all raw primitive data for all packages, domains */
static void rapl_update_domain_data(void)
{
	int dmn, prim;
	u64 val;
	struct rapl_package *rp;

	list_for_each_entry(rp, &rapl_packages, plist) {
		for (dmn = 0; dmn < rp->nr_domains; dmn++) {
			pr_debug("update package %d domain %s data\n", rp->id,
				 rp->domains[dmn].name);
			/* exclude non-raw primitives */
			for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
				if (!rapl_read_data_raw(&rp->domains[dmn], prim,
							rpi[prim].unit,
							&val))
					rp->domains[dmn].rdd.primitives[prim] =
									val;
		}
	}
}

static int rapl_unregister_powercap(void)
{
	struct rapl_package *rp;
	struct rapl_domain *rd, *rd_package = NULL;

	/* unregister all active rapl packages from the powercap layer,
	 * hotplug lock held
	 */
	list_for_each_entry(rp, &rapl_packages, plist) {
		package_power_limit_irq_restore(rp->id);

		for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
		     rd++) {
			pr_debug("remove package, undo power limit on %d: %s\n",
				 rp->id, rd->name);
			rapl_write_data_raw(rd, PL1_ENABLE, 0);
			rapl_write_data_raw(rd, PL1_CLAMP, 0);
			if (find_nr_power_limit(rd) > 1) {
				rapl_write_data_raw(rd, PL2_ENABLE, 0);
				rapl_write_data_raw(rd, PL2_CLAMP, 0);
			}
			if (rd->id == RAPL_DOMAIN_PACKAGE) {
				rd_package = rd;
				continue;
			}
			powercap_unregister_zone(control_type, &rd->power_zone);
		}
		/* do the package zone last */
		if (rd_package)
			powercap_unregister_zone(control_type,
						 &rd_package->power_zone);
	}
	powercap_unregister_control_type(control_type);

	return 0;
}

static int rapl_package_register_powercap(struct rapl_package *rp)
{
	struct rapl_domain *rd;
	int ret = 0;
	char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
	struct powercap_zone *power_zone = NULL;
	int nr_pl;

	/* first we register the package domain as the parent zone */
	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		if (rd->id == RAPL_DOMAIN_PACKAGE) {
			nr_pl = find_nr_power_limit(rd);
			pr_debug("register socket %d package domain %s\n",
				 rp->id, rd->name);
			memset(dev_name, 0, sizeof(dev_name));
			snprintf(dev_name, sizeof(dev_name), "%s-%d",
				 rd->name, rp->id);
			power_zone = powercap_register_zone(&rd->power_zone,
							    control_type,
							    dev_name, NULL,
							    &zone_ops[rd->id],
							    nr_pl,
							    &constraint_ops);
			if (IS_ERR(power_zone)) {
				pr_debug("failed to register package, %d\n",
					 rp->id);
				ret = PTR_ERR(power_zone);
				goto exit_package;
			}
			/* track parent zone in per package/socket data */
			rp->power_zone = power_zone;
			/* done, only one package domain per socket */
			break;
		}
	}
	if (!power_zone) {
		pr_err("no package domain found, unknown topology!\n");
		ret = -ENODEV;
		goto exit_package;
	}
	/* now register domains as children of the socket/package */
	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		if (rd->id == RAPL_DOMAIN_PACKAGE)
			continue;
		/* number of power limits per domain varies */
		nr_pl = find_nr_power_limit(rd);
		power_zone = powercap_register_zone(&rd->power_zone,
						    control_type, rd->name,
						    rp->power_zone,
						    &zone_ops[rd->id], nr_pl,
						    &constraint_ops);
		if (IS_ERR(power_zone)) {
			pr_debug("failed to register power_zone, %d:%s:%s\n",
				 rp->id, rd->name, dev_name);
			ret = PTR_ERR(power_zone);
			goto err_cleanup;
		}
	}

exit_package:
	return ret;
err_cleanup:
	/* clean up previously initialized domains within the package if we
	 * failed after the first domain setup.
	 */
	while (--rd >= rp->domains) {
		pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
		powercap_unregister_zone(control_type, &rd->power_zone);
	}

	return ret;
}

static int rapl_register_powercap(void)
{
	struct rapl_domain *rd;
	struct rapl_package *rp;
	int ret = 0;

	control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
	if (IS_ERR(control_type)) {
		pr_debug("failed to register powercap control_type.\n");
		return PTR_ERR(control_type);
	}
	/* read the initial data */
	rapl_update_domain_data();
	list_for_each_entry(rp, &rapl_packages, plist)
		if (rapl_package_register_powercap(rp))
			goto err_cleanup_package;
	return ret;

err_cleanup_package:
	/* clean up previously initialized packages */
	list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
		for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
		     rd++) {
			pr_debug("unregister zone/package %d, %s domain\n",
				 rp->id, rd->name);
			powercap_unregister_zone(control_type, &rd->power_zone);
		}
	}

	return ret;
}

static int rapl_check_domain(int cpu, int domain)
{
	unsigned int msr;
	u64 val = 0;

	switch (domain) {
	case RAPL_DOMAIN_PACKAGE:
		msr = MSR_PKG_ENERGY_STATUS;
		break;
	case RAPL_DOMAIN_PP0:
		msr = MSR_PP0_ENERGY_STATUS;
		break;
	case RAPL_DOMAIN_PP1:
		msr = MSR_PP1_ENERGY_STATUS;
		break;
	case RAPL_DOMAIN_DRAM:
		msr = MSR_DRAM_ENERGY_STATUS;
		break;
	default:
		pr_err("invalid domain id %d\n", domain);
		return -EINVAL;
	}
	/* make sure the domain counters are available and contain non-zero
	 * values, otherwise skip it.
	 */
	if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val)
		return -ENODEV;

	return 0;
}

/* Detect active and valid domains for the given CPU; the caller must
 * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
 */
static int rapl_detect_domains(struct rapl_package *rp, int cpu)
{
	int i;
	int ret = 0;
	struct rapl_domain *rd;
	u64 locked;

	for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
		/* use physical package id to read counters */
		if (!rapl_check_domain(cpu, i)) {
			rp->domain_map |= 1 << i;
			pr_info("Found RAPL domain %s\n", rapl_domain_names[i]);
		}
	}
	rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
	if (!rp->nr_domains) {
		pr_err("no valid rapl domains found in package %d\n", rp->id);
		ret = -ENODEV;
		goto done;
	}
	pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);

	rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
			      GFP_KERNEL);
	if (!rp->domains) {
		ret = -ENOMEM;
		goto done;
	}
	rapl_init_domains(rp);

	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		/* check if the domain is locked by BIOS */
		ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
		if (ret)
			return ret;
		if (locked) {
			pr_info("RAPL package %d domain %s locked by BIOS\n",
				rp->id, rd->name);
			rd->state |= DOMAIN_STATE_BIOS_LOCKED;
		}
	}

done:
	return ret;
}

static bool is_package_new(int package)
{
	struct rapl_package *rp;

	/* caller prevents cpu hotplug, there will be no new packages added
	 * or deleted while traversing the package list, no need for locking.
	 */
	list_for_each_entry(rp, &rapl_packages, plist)
		if (package == rp->id)
			return false;

	return true;
}

/* The RAPL interface is a two-level hierarchy: package level and domain
 * level. We first detect the number of packages, then the domains of each
 * package. We have to consider the possibility of CPUs going online or
 * offline due to hotplug and other scenarios.
 */
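/* For example, on a typical system this two-level hierarchy shows up in
 * powercap sysfs (illustrative paths; the exact zone names are generated by
 * the powercap framework) as
 *   /sys/class/powercap/intel-rapl/intel-rapl:0                 (package 0)
 *   /sys/class/powercap/intel-rapl/intel-rapl:0/intel-rapl:0:0  (child domain)
 */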
static int rapl_detect_topology(void)
{
	int i;
	int phy_package_id;
	struct rapl_package *new_package, *rp;

	for_each_online_cpu(i) {
		phy_package_id = topology_physical_package_id(i);
		if (is_package_new(phy_package_id)) {
			new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
			if (!new_package) {
				rapl_cleanup_data();
				return -ENOMEM;
			}
			/* add the new package to the list */
			new_package->id = phy_package_id;
			new_package->nr_cpus = 1;

			/* check if the package contains valid domains */
			if (rapl_detect_domains(new_package, i) ||
			    rapl_defaults->check_unit(new_package, i)) {
				kfree(new_package->domains);
				kfree(new_package);
				/* free up the packages already initialized */
				rapl_cleanup_data();
				return -ENODEV;
			}
			INIT_LIST_HEAD(&new_package->plist);
			list_add(&new_package->plist, &rapl_packages);
		} else {
			rp = find_package_by_id(phy_package_id);
			if (rp)
				++rp->nr_cpus;
		}
	}

	return 0;
}

/* called from CPU hotplug notifier, hotplug lock held */
static void rapl_remove_package(struct rapl_package *rp)
{
	struct rapl_domain *rd, *rd_package = NULL;

	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
		if (rd->id == RAPL_DOMAIN_PACKAGE) {
			rd_package = rd;
			continue;
		}
		pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
		powercap_unregister_zone(control_type, &rd->power_zone);
	}
	/* do parent zone last */
	powercap_unregister_zone(control_type, &rd_package->power_zone);
	list_del(&rp->plist);
	kfree(rp);
}

/* called from CPU hotplug notifier, hotplug lock held */
static int rapl_add_package(int cpu)
{
	int ret = 0;
	int phy_package_id;
	struct rapl_package *rp;

	phy_package_id = topology_physical_package_id(cpu);
	rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* add the new package to the list */
	rp->id = phy_package_id;
	rp->nr_cpus = 1;
	/* check if the package contains valid domains */
	if (rapl_detect_domains(rp, cpu) ||
	    rapl_defaults->check_unit(rp, cpu)) {
		ret = -ENODEV;
		goto err_free_package;
	}
	ret = rapl_package_register_powercap(rp);
	if (!ret) {
		INIT_LIST_HEAD(&rp->plist);
		list_add(&rp->plist, &rapl_packages);
		return ret;
	}

err_free_package:
	kfree(rp->domains);
	kfree(rp);

	return ret;
}

/* Handles CPU hotplug on multi-socket systems.
 * If a CPU goes online as the first CPU of the physical package
 * we add the RAPL package to the system. Similarly, when the last
 * CPU of the package is removed, we remove the RAPL package and its
 * associated domains. Cooling devices are handled accordingly at
 * the per-domain level.
 */
static int rapl_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	int phy_package_id;
	struct rapl_package *rp;

	phy_package_id = topology_physical_package_id(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		rp = find_package_by_id(phy_package_id);
		if (rp)
			++rp->nr_cpus;
		else
			rapl_add_package(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		rp = find_package_by_id(phy_package_id);
		if (!rp)
			break;
		if (--rp->nr_cpus == 0)
			rapl_remove_package(rp);
	}

	return NOTIFY_OK;
}

static struct notifier_block rapl_cpu_notifier = {
	.notifier_call = rapl_cpu_callback,
};

static int __init rapl_init(void)
{
	int ret = 0;
	const struct x86_cpu_id *id;

	id = x86_match_cpu(rapl_ids);
	if (!id) {
		pr_err("driver does not support CPU family %d model %d\n",
			boot_cpu_data.x86, boot_cpu_data.x86_model);

		return -ENODEV;
	}

	rapl_defaults = (struct rapl_defaults *)id->driver_data;

	cpu_notifier_register_begin();

	/* prevent CPU hotplug during detection */
	get_online_cpus();
	ret = rapl_detect_topology();
	if (ret)
		goto done;

	if (rapl_register_powercap()) {
		rapl_cleanup_data();
		ret = -ENODEV;
		goto done;
	}
	__register_hotcpu_notifier(&rapl_cpu_notifier);
done:
	put_online_cpus();
	cpu_notifier_register_done();

	return ret;
}

static void __exit rapl_exit(void)
{
	cpu_notifier_register_begin();
	get_online_cpus();
	__unregister_hotcpu_notifier(&rapl_cpu_notifier);
	rapl_unregister_powercap();
	rapl_cleanup_data();
	put_online_cpus();
	cpu_notifier_register_done();
}

module_init(rapl_init);
module_exit(rapl_exit);

MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
MODULE_LICENSE("GPL v2");