root/drivers/cpufreq/longrun.c


DEFINITIONS

This source file includes the following definitions:
  1. longrun_get_policy
  2. longrun_set_policy
  3. longrun_verify_policy
  4. longrun_get
  5. longrun_determine_freqs
  6. longrun_cpu_init
  7. longrun_init
  8. longrun_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2002 - 2003  Dominik Brodowski <linux@brodo.de>
 *
 *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>

static struct cpufreq_driver    longrun_driver;

/**
 * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
 * values into per cent values. In TMTA microcode, the following is valid:
 * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
 */
static unsigned int longrun_low_freq, longrun_high_freq;

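/*
 * Worked example (illustrative numbers only, not taken from real hardware):
 * with longrun_low_freq = 300000 kHz and longrun_high_freq = 600000 kHz,
 * a LongRun setting of 50 per cent corresponds to
 *   300000 + 50 * ((600000 - 300000) / 100) = 450000 kHz,
 * and conversely 450000 kHz maps back to
 *   (450000 - 300000) / ((600000 - 300000) / 100) = 50 per cent.
 * Judging from the masks used below, bits [6:0] of each half of
 * MSR_TMTA_LONGRUN_CTRL hold these percentage values (0-100).
 */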

/**
 * longrun_get_policy - get the current LongRun policy
 * @policy: struct cpufreq_policy where current policy is written into
 *
 * Reads the current LongRun policy by accessing MSR_TMTA_LONGRUN_FLAGS
 * and MSR_TMTA_LONGRUN_CTRL
 */
static void longrun_get_policy(struct cpufreq_policy *policy)
{
        u32 msr_lo, msr_hi;

        rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
        pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
        if (msr_lo & 0x01)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
        msr_lo &= 0x0000007F;
        msr_hi &= 0x0000007F;

        if (longrun_high_freq <= longrun_low_freq) {
                /* Assume degenerate Longrun table */
                policy->min = policy->max = longrun_high_freq;
        } else {
                policy->min = longrun_low_freq + msr_lo *
                        ((longrun_high_freq - longrun_low_freq) / 100);
                policy->max = longrun_low_freq + msr_hi *
                        ((longrun_high_freq - longrun_low_freq) / 100);
        }
        policy->cpu = 0;
}


/**
 * longrun_set_policy - sets a new CPUFreq policy
 * @policy: new policy
 *
 * Sets a new CPUFreq policy on LongRun-capable processors. This function
 * has to be called with cpufreq_driver locked.
 */
static int longrun_set_policy(struct cpufreq_policy *policy)
{
        u32 msr_lo, msr_hi;
        u32 pctg_lo, pctg_hi;

        if (!policy)
                return -EINVAL;

        if (longrun_high_freq <= longrun_low_freq) {
                /* Assume degenerate Longrun table */
                pctg_lo = pctg_hi = 100;
        } else {
                pctg_lo = (policy->min - longrun_low_freq) /
                        ((longrun_high_freq - longrun_low_freq) / 100);
                pctg_hi = (policy->max - longrun_low_freq) /
                        ((longrun_high_freq - longrun_low_freq) / 100);
        }

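        /*
         * Clamp to the valid 0..100 window: the integer division above
         * rounds the divisor down, so the computed percentages may
         * overshoot 100 for frequencies close to longrun_high_freq.
         */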
        if (pctg_hi > 100)
                pctg_hi = 100;
        if (pctg_lo > pctg_hi)
                pctg_lo = pctg_hi;

        /* performance or economy mode */
        rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
        msr_lo &= 0xFFFFFFFE;
        switch (policy->policy) {
        case CPUFREQ_POLICY_PERFORMANCE:
                msr_lo |= 0x00000001;
                break;
        case CPUFREQ_POLICY_POWERSAVE:
                break;
        }
        wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);

        /* lower and upper boundary */
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        msr_lo &= 0xFFFFFF80;
        msr_hi &= 0xFFFFFF80;
        msr_lo |= pctg_lo;
        msr_hi |= pctg_hi;
        wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

        return 0;
}


/**
 * longrun_verify_policy - verifies a new CPUFreq policy
 * @policy: the policy to verify
 *
 * Validates a new CPUFreq policy. This function has to be called with
 * cpufreq_driver locked.
 */
static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
        if (!policy)
                return -EINVAL;

        policy->cpu = 0;
        cpufreq_verify_within_cpu_limits(policy);

        return 0;
}

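/**
 * longrun_get - returns the current frequency of CPU 0 in kHz
 * @cpu: CPU number; this driver only supports CPU 0
 *
 * CPUID leaf 0x80860007 reports the current core clock in MHz in EAX
 * (and the current performance percentage in ECX, which is used further
 * below in longrun_determine_freqs), so the value only needs scaling to kHz.
 */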
static unsigned int longrun_get(unsigned int cpu)
{
        u32 eax, ebx, ecx, edx;

        if (cpu)
                return 0;

        cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
        pr_debug("cpuid eax is %u\n", eax);

        return eax * 1000;
}

/**
 * longrun_determine_freqs - determines the lowest and highest possible core frequency
 * @low_freq: an int to put the lowest frequency into
 * @high_freq: an int to put the highest frequency into
 *
 * Determines the lowest and highest possible core frequencies on this CPU.
 * This is necessary to calculate the performance percentage according to
 * TMTA rules:
 * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
 */
static int longrun_determine_freqs(unsigned int *low_freq,
                                   unsigned int *high_freq)
{
        u32 msr_lo, msr_hi;
        u32 save_lo, save_hi;
        u32 eax, ebx, ecx, edx;
        u32 try_hi;
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (!low_freq || !high_freq)
                return -EINVAL;

        if (cpu_has(c, X86_FEATURE_LRTI)) {
                /* if the LongRun Table Interface is present, the
                 * detection is a bit easier:
                 * For minimum frequency, read out the maximum
                 * level (msr_hi), write that into "currently
                 * selected level", and read out the frequency.
                 * For maximum frequency, read out level zero.
                 */
                /* minimum */
                rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
                wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
                rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
                *low_freq = msr_lo * 1000; /* to kHz */

                /* maximum */
                wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
                rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
                *high_freq = msr_lo * 1000; /* to kHz */

                pr_debug("longrun table interface told %u - %u kHz\n",
                                *low_freq, *high_freq);

                if (*low_freq > *high_freq)
                        *low_freq = *high_freq;
                return 0;
        }

        /* set the upper border to the value determined during TSC init */
        *high_freq = (cpu_khz / 1000);
        *high_freq = *high_freq * 1000;
        pr_debug("high frequency is %u kHz\n", *high_freq);

        /* get current borders */
        rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
        save_lo = msr_lo & 0x0000007F;
        save_hi = msr_hi & 0x0000007F;

        /* if current perf_pctg is larger than 90%, we need to decrease the
         * upper limit to make the calculation more accurate.
         */
        cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
        /* try decreasing in 10% steps; some processors react only
         * to certain barrier values */
        for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
                /* set perf_pctg window to 0 .. try_hi */
                msr_lo &= 0xFFFFFF80;
                msr_hi &= 0xFFFFFF80;
                msr_hi |= try_hi;
                wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);

                /* read out current core MHz and current perf_pctg */
                cpuid(0x80860007, &eax, &ebx, &ecx, &edx);

                /* restore values */
                wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
        }
        pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);

        /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
         * equals
         * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
         *
         * high_freq * perf_pctg is stored temporarily into "ebx".
         */
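        /*
         * Solving the relation above for low_freq (cur_freq and high_freq
         * in MHz, perf_pctg given by ecx as a percentage):
         *
         *   cur_freq - high_freq * perf_pctg = low_freq * (1 - perf_pctg)
         *   low_freq = (cur_freq - high_freq * perf_pctg) / (1 - perf_pctg)
         *
         * which, in integer arithmetic, is ((eax - ebx) * 100) / (100 - ecx)
         * as computed below.
         */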
        ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */

        if ((ecx > 95) || (ecx == 0) || (eax < ebx))
                return -EIO;

        edx = ((eax - ebx) * 100) / (100 - ecx);
        *low_freq = edx * 1000; /* back to kHz */

        pr_debug("low frequency is %u kHz\n", *low_freq);

        if (*low_freq > *high_freq)
                *low_freq = *high_freq;

        return 0;
}


static int longrun_cpu_init(struct cpufreq_policy *policy)
{
        int result = 0;

        /* capability check */
        if (policy->cpu != 0)
                return -ENODEV;

        /* detect low and high frequency */
        result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
        if (result)
                return result;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = longrun_low_freq;
        policy->cpuinfo.max_freq = longrun_high_freq;
        longrun_get_policy(policy);

        return 0;
}


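/*
 * LongRun is a setpolicy-style cpufreq driver: the CPU picks its operating
 * frequency on its own within the min/max window programmed above, so no
 * ->target() callback is provided. CPUFREQ_CONST_LOOPS tells the cpufreq
 * core that loops_per_jiffy need not be rescaled on frequency changes.
 */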
static struct cpufreq_driver longrun_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = longrun_verify_policy,
        .setpolicy      = longrun_set_policy,
        .get            = longrun_get,
        .init           = longrun_cpu_init,
        .name           = "longrun",
};

static const struct x86_cpu_id longrun_ids[] = {
        { X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY,
          X86_FEATURE_LONGRUN },
        {}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);

/**
 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
 *
 * Initializes the LongRun support.
 */
static int __init longrun_init(void)
{
        if (!x86_match_cpu(longrun_ids))
                return -ENODEV;
        return cpufreq_register_driver(&longrun_driver);
}


/**
 * longrun_exit - unregisters LongRun support
 */
static void __exit longrun_exit(void)
{
        cpufreq_unregister_driver(&longrun_driver);
}


MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
                "Efficeon processors.");
MODULE_LICENSE("GPL");

module_init(longrun_init);
module_exit(longrun_exit);
