drivers/clk/sunxi-ng/ccu_nkmp.c


DEFINITIONS

This source file includes the following definitions.
  1. ccu_nkmp_calc_rate
  2. ccu_nkmp_find_best
  3. ccu_nkmp_disable
  4. ccu_nkmp_enable
  5. ccu_nkmp_is_enabled
  6. ccu_nkmp_recalc_rate
  7. ccu_nkmp_round_rate
  8. ccu_nkmp_set_rate

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_nkmp.h"

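/*
 * Scratch data for the factor search: the currently selected value and
 * the allowed [min, max] range for each of the N, K, M and P factors.
 */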
struct _ccu_nkmp {
        unsigned long   n, min_n, max_n;
        unsigned long   k, min_k, max_k;
        unsigned long   m, min_m, max_m;
        unsigned long   p, min_p, max_p;
};

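/*
 * Rate of an NKMP clock: parent * N * K / (M * P), computed with a
 * 64-bit intermediate to keep the product from overflowing.
 */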
static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
                                        unsigned long n, unsigned long k,
                                        unsigned long m, unsigned long p)
{
        u64 rate = parent;

        rate *= n * k;
        do_div(rate, m * p);

        return rate;
}

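/*
 * Brute-force search over the allowed N, K, M and P ranges for the
 * combination whose rate is closest to, without exceeding, the requested
 * rate. P is a power-of-two divider, so it is stepped by doubling. The
 * winning factors are written back into @nkmp.
 */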
static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
                               struct _ccu_nkmp *nkmp)
{
        unsigned long best_rate = 0;
        unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
        unsigned long _n, _k, _m, _p;

        for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
                for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
                        for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
                                for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
                                        unsigned long tmp_rate;

                                        tmp_rate = ccu_nkmp_calc_rate(parent,
                                                                      _n, _k,
                                                                      _m, _p);

                                        if (tmp_rate > rate)
                                                continue;

                                        if ((rate - tmp_rate) < (rate - best_rate)) {
                                                best_rate = tmp_rate;
                                                best_n = _n;
                                                best_k = _k;
                                                best_m = _m;
                                                best_p = _p;
                                        }
                                }
                        }
                }
        }

        nkmp->n = best_n;
        nkmp->k = best_k;
        nkmp->m = best_m;
        nkmp->p = best_p;
}

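/* The gate operations are thin wrappers around the common gate helpers. */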
static void ccu_nkmp_disable(struct clk_hw *hw)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

        return ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
}

static int ccu_nkmp_enable(struct clk_hw *hw)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

        return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
}

static int ccu_nkmp_is_enabled(struct clk_hw *hw)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

        return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
}

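/*
 * Decode the current N, K, M and P factors from the register, apply the
 * per-factor offsets (clamping N, K and M to at least 1), and recompute
 * the output rate. P is stored as a power-of-two exponent. The fixed
 * post-divider is applied when CCU_FEATURE_FIXED_POSTDIV is set.
 */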
static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
        unsigned long n, m, k, p, rate;
        u32 reg;

        reg = readl(nkmp->common.base + nkmp->common.reg);

        n = reg >> nkmp->n.shift;
        n &= (1 << nkmp->n.width) - 1;
        n += nkmp->n.offset;
        if (!n)
                n++;

        k = reg >> nkmp->k.shift;
        k &= (1 << nkmp->k.width) - 1;
        k += nkmp->k.offset;
        if (!k)
                k++;

        m = reg >> nkmp->m.shift;
        m &= (1 << nkmp->m.width) - 1;
        m += nkmp->m.offset;
        if (!m)
                m++;

        p = reg >> nkmp->p.shift;
        p &= (1 << nkmp->p.width) - 1;

        rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= nkmp->fixed_post_div;

        return rate;
}

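/*
 * Report the closest achievable rate at or below the requested one. The
 * request is clamped to max_rate when one is set, and each factor range
 * falls back to the full register width when no explicit limit is given.
 * The fixed post-divider, when present, is folded into the search and
 * into the returned rate.
 */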
static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *parent_rate)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
        struct _ccu_nkmp _nkmp;

        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate *= nkmp->fixed_post_div;

        if (nkmp->max_rate && rate > nkmp->max_rate) {
                rate = nkmp->max_rate;
                if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        rate /= nkmp->fixed_post_div;
                return rate;
        }

        _nkmp.min_n = nkmp->n.min ?: 1;
        _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
        _nkmp.min_k = nkmp->k.min ?: 1;
        _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
        _nkmp.min_m = 1;
        _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
        _nkmp.min_p = 1;
        _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

        ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);

        rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
                                  _nkmp.m, _nkmp.p);
        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate / nkmp->fixed_post_div;

        return rate;
}

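/*
 * Find the best factors for the requested rate, program them into the
 * register under the common lock, and wait for the PLL to lock. P is
 * written back as its base-2 exponent via ilog2().
 */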
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
        u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
        struct _ccu_nkmp _nkmp;
        unsigned long flags;
        u32 reg;

        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * nkmp->fixed_post_div;

        _nkmp.min_n = nkmp->n.min ?: 1;
        _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
        _nkmp.min_k = nkmp->k.min ?: 1;
        _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
        _nkmp.min_m = 1;
        _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
        _nkmp.min_p = 1;
        _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

        ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

        /*
         * If the width is 0, the GENMASK() macro may not generate the
         * expected mask (0): a shift count equal to or greater than the
         * width of the left operand is undefined behaviour in the C
         * standard. This is easily avoided by explicitly checking whether
         * the width is 0.
         */
        if (nkmp->n.width)
                n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
                                 nkmp->n.shift);
        if (nkmp->k.width)
                k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
                                 nkmp->k.shift);
        if (nkmp->m.width)
                m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
                                 nkmp->m.shift);
        if (nkmp->p.width)
                p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
                                 nkmp->p.shift);

        spin_lock_irqsave(nkmp->common.lock, flags);

        reg = readl(nkmp->common.base + nkmp->common.reg);
        reg &= ~(n_mask | k_mask | m_mask | p_mask);

        reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
        reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
        reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
        reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

        writel(reg, nkmp->common.base + nkmp->common.reg);

        spin_unlock_irqrestore(nkmp->common.lock, flags);

        ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

        return 0;
}

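/* Operations exposed to the common clock framework for NKMP-style clocks. */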
const struct clk_ops ccu_nkmp_ops = {
        .disable        = ccu_nkmp_disable,
        .enable         = ccu_nkmp_enable,
        .is_enabled     = ccu_nkmp_is_enabled,

        .recalc_rate    = ccu_nkmp_recalc_rate,
        .round_rate     = ccu_nkmp_round_rate,
        .set_rate       = ccu_nkmp_set_rate,
};
