root/drivers/clk/mvebu/clk-cpu.c

DEFINITIONS

This source file includes the following definitions:
  1. clk_cpu_recalc_rate
  2. clk_cpu_round_rate
  3. clk_cpu_off_set_rate
  4. clk_cpu_on_set_rate
  5. clk_cpu_set_rate
  6. of_cpu_clk_setup
  7. of_mv98dx3236_cpu_clk_setup

// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET               0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL          0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT        8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET              0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET              0xC
#define SYS_CTRL_CLK_DIVIDER_MASK                      0x3F

#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK  0x3F

#define MAX_CPU     4
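/*
 * Per-CPU clock description: @reg_base points at the clock-complex
 * divider registers shared by all CPUs, while @pmu_dfs (optional, may
 * be NULL) is this CPU's PMU DFS register used for dynamic frequency
 * scaling while the clock is running.
 */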
struct cpu_clk {
        struct clk_hw hw;
        int cpu;
        const char *clk_name;
        const char *parent_name;
        void __iomem *reg_base;
        void __iomem *pmu_dfs;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

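/*
 * The divider value register holds one 8-bit field per CPU; only the
 * low 6 bits of each field (SYS_CTRL_CLK_DIVIDER_MASK) carry the
 * divider applied to the parent clock.
 */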
static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
                                         unsigned long parent_rate)
{
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
        u32 reg, div;

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
        div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
        return parent_rate / div;
}

static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
                               unsigned long *parent_rate)
{
        /* Valid ratios are 1:1, 1:2 and 1:3 */
        u32 div;

        div = *parent_rate / rate;
        if (div == 0)
                div = 1;
        else if (div > 3)
                div = 3;

        return *parent_rate / div;
}

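/*
 * Used while the clock is not enabled: program the new divider for this
 * CPU directly, then toggle the "reload smooth" bit and the reload
 * trigger bit so the clock complex applies the change.
 */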
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
                                unsigned long parent_rate)
{
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
        u32 reg, div;
        u32 reload_mask;

        div = parent_rate / rate;
        reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
                & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
                | (div << (cpuclk->cpu * 8));
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
        /* Set clock divider reload smooth bit mask */
        reload_mask = 1 << (20 + cpuclk->cpu);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
            | reload_mask;
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        /* Now trigger the clock update */
        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
            | 1 << 24;
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        /* Wait for clocks to settle down then clear reload request */
        udelay(1000);
        reg &= ~(reload_mask | 1 << 24);
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
        udelay(1000);

        return 0;
}

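/*
 * Used while the clock is enabled: program the target ratio in this
 * CPU's PMU DFS register, assert the divider reset bits, then let the
 * PMSU perform the actual frequency change.
 */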
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
                               unsigned long parent_rate)
{
        u32 reg;
        unsigned long fabric_div, target_div, cur_rate;
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

        /*
         * The PMU DFS registers are not mapped because the Device Tree
         * does not describe them: the frequency cannot be changed
         * dynamically.
         */
        if (!cpuclk->pmu_dfs)
                return -ENODEV;

        cur_rate = clk_hw_get_rate(hwclk);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
        fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
                SYS_CTRL_CLK_DIVIDER_MASK;

        /* Frequency is going up */
        if (rate == 2 * cur_rate)
                target_div = fabric_div / 2;
        /* Frequency is going down */
        else
                target_div = fabric_div;

        if (target_div == 0)
                target_div = 1;

        reg = readl(cpuclk->pmu_dfs);
        reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
        reg |= (target_div << PMU_DFS_RATIO_SHIFT);
        writel(reg, cpuclk->pmu_dfs);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
        reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
                SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

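/*
 * Dispatch between the two programming sequences: a running (enabled)
 * clock must go through the PMU DFS path, otherwise the divider can be
 * written directly.
 */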
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
                            unsigned long parent_rate)
{
        if (__clk_is_enabled(hwclk->clk))
                return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
        else
                return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
        .recalc_rate = clk_cpu_recalc_rate,
        .round_rate = clk_cpu_round_rate,
        .set_rate = clk_cpu_set_rate,
};

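/*
 * Map the clock-complex (and, when present, PMU DFS) registers, register
 * one clock per CPU node found in the Device Tree and expose them through
 * a onecell clock provider.
 */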
static void __init of_cpu_clk_setup(struct device_node *node)
{
        struct cpu_clk *cpuclk;
        void __iomem *clock_complex_base = of_iomap(node, 0);
        void __iomem *pmu_dfs_base = of_iomap(node, 1);
        int ncpus = 0;
        struct device_node *dn;

        if (clock_complex_base == NULL) {
                pr_err("%s: clock-complex base register not set\n",
                        __func__);
                return;
        }

        if (pmu_dfs_base == NULL)
                pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
                        __func__);

        for_each_of_cpu_node(dn)
                ncpus++;

        cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
        if (WARN_ON(!cpuclk))
                goto cpuclk_out;

        clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
        if (WARN_ON(!clks))
                goto clks_out;

        for_each_of_cpu_node(dn) {
                struct clk_init_data init;
                struct clk *clk;
                char *clk_name = kzalloc(5, GFP_KERNEL);
                int cpu, err;

                if (WARN_ON(!clk_name))
                        goto bail_out;

                err = of_property_read_u32(dn, "reg", &cpu);
                if (WARN_ON(err))
                        goto bail_out;

                sprintf(clk_name, "cpu%d", cpu);

                cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
                cpuclk[cpu].clk_name = clk_name;
                cpuclk[cpu].cpu = cpu;
                cpuclk[cpu].reg_base = clock_complex_base;
                if (pmu_dfs_base)
                        cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
                cpuclk[cpu].hw.init = &init;

                init.name = cpuclk[cpu].clk_name;
                init.ops = &cpu_ops;
                init.flags = 0;
                init.parent_names = &cpuclk[cpu].parent_name;
                init.num_parents = 1;

                clk = clk_register(NULL, &cpuclk[cpu].hw);
                if (WARN_ON(IS_ERR(clk)))
                        goto bail_out;
                clks[cpu] = clk;
        }
        clk_data.clk_num = MAX_CPU;
        clk_data.clks = clks;
        of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

        return;
bail_out:
        kfree(clks);
        while (ncpus--)
                kfree(cpuclk[ncpus].clk_name);
clks_out:
        kfree(cpuclk);
cpuclk_out:
        iounmap(clock_complex_base);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
               of_cpu_clk_setup);

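/*
 * The 98DX3236 CPU clock apparently cannot be reprogrammed at run time,
 * so only a trivial provider is registered here: of_clk_src_simple_get()
 * returns the data pointer passed at registration, which is NULL.
 */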
static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
{
        of_clk_add_provider(node, of_clk_src_simple_get, NULL);
}

CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
               of_mv98dx3236_cpu_clk_setup);
