arch/x86/kernel/cpu/umwait.c


DEFINITIONS

This source file includes the following definitions:
  1. get_umwait_control_msr
  2. umwait_update_control_msr
  3. umwait_cpu_online
  4. umwait_cpu_offline
  5. umwait_syscore_resume
  6. umwait_ctrl_c02_enabled
  7. umwait_ctrl_max_time
  8. umwait_update_control
  9. enable_c02_show
  10. enable_c02_store
  11. max_time_show
  12. max_time_store
  13. umwait_init

// SPDX-License-Identifier: GPL-2.0
#include <linux/syscore_ops.h>
#include <linux/suspend.h>
#include <linux/cpu.h>

#include <asm/msr.h>

#define UMWAIT_C02_ENABLE       0

#define UMWAIT_CTRL_VAL(max_time, c02_disable)                          \
        (((max_time) & MSR_IA32_UMWAIT_CONTROL_TIME_MASK) |             \
        ((c02_disable) & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE))

/*
 * Cache IA32_UMWAIT_CONTROL MSR. This is a systemwide control. By default,
 * umwait max time is 100000 in TSC-quanta and C0.2 is enabled
 */
static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);

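/*
 * Worked example of the layout packed by UMWAIT_CTRL_VAL(): bit 0 of
 * IA32_UMWAIT_CONTROL is the C0.2 disable flag, bit 1 is reserved and
 * bits 31:2 hold the maximum wait time in TSC-quanta. The default above,
 * UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE), therefore evaluates to
 * 100000 (0x186a0): the low two bits of 100000 are already zero, so the
 * time field is unchanged, and the C0.2 disable bit stays clear.
 */
#if 0	/* illustrative compile-time check only, not part of the original file */
/* Assumes static_assert() from <linux/build_bug.h> is available here. */
static_assert(UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE) == 0x186a0);
static_assert(UMWAIT_CTRL_VAL(100000, MSR_IA32_UMWAIT_CONTROL_C02_DISABLE) == 0x186a1);
#endif
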
u32 get_umwait_control_msr(void)
{
        return umwait_control_cached;
}
EXPORT_SYMBOL_GPL(get_umwait_control_msr);
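
/*
 * Illustrative sketch, not taken from the original file: the export above
 * lets other kernel code (for example a hypervisor backend that context
 * switches IA32_UMWAIT_CONTROL for guests) reuse the cached system-wide
 * value instead of issuing an extra RDMSR. The helper name below is
 * hypothetical.
 */
#if 0	/* hypothetical caller, for illustration only */
static void example_restore_host_umwait_control(void)
{
        /* Write back the host's system-wide value cached by this file. */
        wrmsr(MSR_IA32_UMWAIT_CONTROL, get_umwait_control_msr(), 0);
}
#endif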

/*
 * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
 * hardware or BIOS before kernel boot.
 */
static u32 orig_umwait_control_cached __ro_after_init;

/*
 * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
 * the sysfs write functions.
 */
static DEFINE_MUTEX(umwait_lock);

static void umwait_update_control_msr(void *unused)
{
        lockdep_assert_irqs_disabled();
        wrmsr(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached), 0);
}

/*
 * The CPU hotplug callback sets the control MSR to the global control
 * value.
 *
 * Disable interrupts so the read of umwait_control_cached and the WRMSR
 * are protected against a concurrent sysfs write. Otherwise the sysfs
 * write could update the cached value after it had been read on this CPU
 * and issue the IPI before the old value had been written. The IPI would
 * interrupt, write the new value and after return from IPI the previous
 * value would be written by this CPU.
 *
 * With interrupts disabled the upcoming CPU either sees the new control
 * value or the IPI is updating this CPU to the new control value after
 * interrupts have been reenabled.
 */
static int umwait_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        umwait_update_control_msr(NULL);
        local_irq_enable();
        return 0;
}

/*
 * The CPU hotplug callback sets the control MSR to the original control
 * value.
 */
static int umwait_cpu_offline(unsigned int cpu)
{
        /*
         * This code is protected by the CPU hotplug already and
         * orig_umwait_control_cached is never changed after it caches
         * the original control MSR value in umwait_init(). So there
         * is no race condition here.
         */
        wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);

        return 0;
}

/*
 * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
 * is the only active CPU at this time. The MSR is set up on the APs via the
 * CPU hotplug callback.
 *
 * This function is invoked on resume from suspend and hibernation. On
 * resume from suspend the restore should not be required, but we neither
 * trust the firmware nor does it matter if the same value is written
 * again.
 */
static void umwait_syscore_resume(void)
{
        umwait_update_control_msr(NULL);
}

static struct syscore_ops umwait_syscore_ops = {
        .resume = umwait_syscore_resume,
};

/* sysfs interface */

/*
 * When bit 0 in IA32_UMWAIT_CONTROL MSR is 1, C0.2 is disabled.
 * Otherwise, C0.2 is enabled.
 */
static inline bool umwait_ctrl_c02_enabled(u32 ctrl)
{
        return !(ctrl & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE);
}

static inline u32 umwait_ctrl_max_time(u32 ctrl)
{
        return ctrl & MSR_IA32_UMWAIT_CONTROL_TIME_MASK;
}

static inline void umwait_update_control(u32 maxtime, bool c02_enable)
{
        u32 ctrl = maxtime & MSR_IA32_UMWAIT_CONTROL_TIME_MASK;

        if (!c02_enable)
                ctrl |= MSR_IA32_UMWAIT_CONTROL_C02_DISABLE;

        WRITE_ONCE(umwait_control_cached, ctrl);
        /* Propagate to all CPUs */
        on_each_cpu(umwait_update_control_msr, NULL, 1);
}
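
/*
 * Why the lockdep assertion in umwait_update_control_msr() holds for every
 * caller: on_each_cpu() above runs the function on remote CPUs from IPI
 * context and on the calling CPU with interrupts disabled around the local
 * invocation, umwait_cpu_online() disables interrupts explicitly, and
 * syscore resume callbacks run with interrupts disabled.
 */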

static ssize_t
enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        u32 ctrl = READ_ONCE(umwait_control_cached);

        return sprintf(buf, "%d\n", umwait_ctrl_c02_enabled(ctrl));
}

static ssize_t enable_c02_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        bool c02_enable;
        u32 ctrl;
        int ret;

        ret = kstrtobool(buf, &c02_enable);
        if (ret)
                return ret;

        mutex_lock(&umwait_lock);

        ctrl = READ_ONCE(umwait_control_cached);
        if (c02_enable != umwait_ctrl_c02_enabled(ctrl))
                umwait_update_control(ctrl, c02_enable);

        mutex_unlock(&umwait_lock);

        return count;
}
static DEVICE_ATTR_RW(enable_c02);

static ssize_t
max_time_show(struct device *kobj, struct device_attribute *attr, char *buf)
{
        u32 ctrl = READ_ONCE(umwait_control_cached);

        return sprintf(buf, "%u\n", umwait_ctrl_max_time(ctrl));
}

static ssize_t max_time_store(struct device *kobj,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
{
        u32 max_time, ctrl;
        int ret;

        ret = kstrtou32(buf, 0, &max_time);
        if (ret)
                return ret;

        /* bits[1:0] must be zero */
        if (max_time & ~MSR_IA32_UMWAIT_CONTROL_TIME_MASK)
                return -EINVAL;

        mutex_lock(&umwait_lock);

        ctrl = READ_ONCE(umwait_control_cached);
        if (max_time != umwait_ctrl_max_time(ctrl))
                umwait_update_control(max_time, umwait_ctrl_c02_enabled(ctrl));

        mutex_unlock(&umwait_lock);

        return count;
}
static DEVICE_ATTR_RW(max_time);

static struct attribute *umwait_attrs[] = {
        &dev_attr_enable_c02.attr,
        &dev_attr_max_time.attr,
        NULL
};

static struct attribute_group umwait_attr_group = {
        .attrs = umwait_attrs,
        .name = "umwait_control",
};

static int __init umwait_init(void)
{
        struct device *dev;
        int ret;

        if (!boot_cpu_has(X86_FEATURE_WAITPKG))
                return -ENODEV;

        /*
         * Cache the original control MSR value before the control MSR is
         * changed. This is the only place where orig_umwait_control_cached
         * is modified.
         */
        rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
                                umwait_cpu_online, umwait_cpu_offline);
        if (ret < 0) {
                /*
                 * On failure, the control MSR on all CPUs has the
                 * original control value.
                 */
                return ret;
        }

        register_syscore_ops(&umwait_syscore_ops);

        /*
         * Add umwait control interface. Ignore failure, so at least the
         * default values are set up in case the machine manages to boot.
         */
        dev = cpu_subsys.dev_root;
        return sysfs_create_group(&dev->kobj, &umwait_attr_group);
}
device_initcall(umwait_init);
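
/*
 * Usage sketch, not part of the original file: with the attribute group
 * registered above, the controls normally appear as
 * /sys/devices/system/cpu/umwait_control/enable_c02 and
 * /sys/devices/system/cpu/umwait_control/max_time. The user-space program
 * below is illustrative only; it reads both files and prints them. Writes
 * to max_time with either of the two low bits set are rejected with
 * -EINVAL by max_time_store() above.
 */
#if 0	/* build separately as a plain user-space C program */
#include <stdio.h>

static int read_uint(const char *path, unsigned int *val)
{
        FILE *f = fopen(path, "r");
        int ret = -1;

        if (!f)
                return ret;
        if (fscanf(f, "%u", val) == 1)
                ret = 0;
        fclose(f);
        return ret;
}

int main(void)
{
        unsigned int max_time, c02;

        if (read_uint("/sys/devices/system/cpu/umwait_control/max_time", &max_time) ||
            read_uint("/sys/devices/system/cpu/umwait_control/enable_c02", &c02))
                return 1;

        printf("max_time=%u TSC-quanta, C0.2 %s\n",
               max_time, c02 ? "enabled" : "disabled");
        return 0;
}
#endif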
