root/arch/powerpc/platforms/cell/cpufreq_spudemand.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. calc_freq
  2. spu_gov_work
  3. spu_gov_init_work
  4. spu_gov_cancel_work
  5. spu_gov_start
  6. spu_gov_stop
  7. spu_gov_init
  8. spu_gov_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * spu aware cpufreq governor for the cell processor
   4  *
   5  * © Copyright IBM Corporation 2006-2008
   6  *
   7  * Author: Christian Krafft <krafft@de.ibm.com>
   8  */
   9 
  10 #include <linux/cpufreq.h>
  11 #include <linux/sched.h>
  12 #include <linux/sched/loadavg.h>
  13 #include <linux/module.h>
  14 #include <linux/timer.h>
  15 #include <linux/workqueue.h>
  16 #include <linux/atomic.h>
  17 #include <asm/machdep.h>
  18 #include <asm/spu.h>
  19 
#define POLL_TIME	100000		/* in µs */
#define EXP		753		/* exp(-1) in fixed-point */

/* Per-CPU governor state; one instance per possible CPU. */
struct spu_gov_info_struct {
	unsigned long busy_spus;	/* fixed-point decaying average of busy SPUs */
	struct cpufreq_policy *policy;	/* owning policy; NULL once governor stopped */
	struct delayed_work work;	/* periodic polling work, see spu_gov_work() */
	unsigned int poll_int;		/* polling interval, µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
  30 
  31 static int calc_freq(struct spu_gov_info_struct *info)
  32 {
  33         int cpu;
  34         int busy_spus;
  35 
  36         cpu = info->policy->cpu;
  37         busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
  38 
  39         info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
  40         pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
  41                         cpu, busy_spus, info->busy_spus);
  42 
  43         return info->policy->max * info->busy_spus / FIXED_1;
  44 }
  45 
  46 static void spu_gov_work(struct work_struct *work)
  47 {
  48         struct spu_gov_info_struct *info;
  49         int delay;
  50         unsigned long target_freq;
  51 
  52         info = container_of(work, struct spu_gov_info_struct, work.work);
  53 
  54         /* after cancel_delayed_work_sync we unset info->policy */
  55         BUG_ON(info->policy == NULL);
  56 
  57         target_freq = calc_freq(info);
  58         __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
  59 
  60         delay = usecs_to_jiffies(info->poll_int);
  61         schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
  62 }
  63 
  64 static void spu_gov_init_work(struct spu_gov_info_struct *info)
  65 {
  66         int delay = usecs_to_jiffies(info->poll_int);
  67         INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
  68         schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
  69 }
  70 
/* Stop polling; waits for an in-flight spu_gov_work() to finish. */
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
	cancel_delayed_work_sync(&info->work);
}
  75 
  76 static int spu_gov_start(struct cpufreq_policy *policy)
  77 {
  78         unsigned int cpu = policy->cpu;
  79         struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
  80         struct spu_gov_info_struct *affected_info;
  81         int i;
  82 
  83         if (!cpu_online(cpu)) {
  84                 printk(KERN_ERR "cpu %d is not online\n", cpu);
  85                 return -EINVAL;
  86         }
  87 
  88         if (!policy->cur) {
  89                 printk(KERN_ERR "no cpu specified in policy\n");
  90                 return -EINVAL;
  91         }
  92 
  93         /* initialize spu_gov_info for all affected cpus */
  94         for_each_cpu(i, policy->cpus) {
  95                 affected_info = &per_cpu(spu_gov_info, i);
  96                 affected_info->policy = policy;
  97         }
  98 
  99         info->poll_int = POLL_TIME;
 100 
 101         /* setup timer */
 102         spu_gov_init_work(info);
 103 
 104         return 0;
 105 }
 106 
 107 static void spu_gov_stop(struct cpufreq_policy *policy)
 108 {
 109         unsigned int cpu = policy->cpu;
 110         struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
 111         int i;
 112 
 113         /* cancel timer */
 114         spu_gov_cancel_work(info);
 115 
 116         /* clean spu_gov_info for all affected cpus */
 117         for_each_cpu (i, policy->cpus) {
 118                 info = &per_cpu(spu_gov_info, i);
 119                 info->policy = NULL;
 120         }
 121 }
 122 
/* cpufreq governor registration record for "spudemand". */
static struct cpufreq_governor spu_governor = {
	.name = "spudemand",
	.start = spu_gov_start,
	.stop = spu_gov_stop,
	.owner = THIS_MODULE,
};
 129 
/*
 * module init and destroy
 */
 133 
 134 static int __init spu_gov_init(void)
 135 {
 136         int ret;
 137 
 138         ret = cpufreq_register_governor(&spu_governor);
 139         if (ret)
 140                 printk(KERN_ERR "registration of governor failed\n");
 141         return ret;
 142 }
 143 
/* Module exit point: unregister the spudemand governor. */
static void __exit spu_gov_exit(void)
{
	cpufreq_unregister_governor(&spu_governor);
}
 148 
 149 
 150 module_init(spu_gov_init);
 151 module_exit(spu_gov_exit);
 152 
 153 MODULE_LICENSE("GPL");
 154 MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
 155 

/* [<][>][^][v][top][bottom][index][help] */