root/arch/arm/mach-vexpress/spc.c


DEFINITIONS

This source file includes the following definitions:
  1. cluster_is_a15
  2. ve_spc_global_wakeup_irq
  3. ve_spc_cpu_wakeup_irq
  4. ve_spc_set_resume_addr
  5. ve_spc_powerdown
  6. standbywfi_cpu_mask
  7. ve_spc_cpu_in_wfi
  8. ve_spc_get_performance
  9. ve_spc_round_performance
  10. ve_spc_find_performance_index
  11. ve_spc_waitforcompletion
  12. ve_spc_set_performance
  13. ve_spc_read_sys_cfg
  14. ve_spc_irq_handler
  15. ve_spc_populate_opps
  16. ve_init_opp_table
  17. ve_spc_init
  18. spc_recalc_rate
  19. spc_round_rate
  20. spc_set_rate
  21. ve_spc_clk_register
  22. ve_spc_clk_init

/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>

#include <asm/cacheflush.h>

#include "spc.h"

#define SPCLOG "vexpress-spc: "

#define PERF_LVL_A15            0x00
#define PERF_REQ_A15            0x04
#define PERF_LVL_A7             0x08
#define PERF_REQ_A7             0x0c
#define COMMS                   0x10
#define COMMS_REQ               0x14
#define PWC_STATUS              0x18
#define PWC_FLAG                0x1c

/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK           0x24
#define WAKE_INT_RAW            0x28
#define WAKE_INT_STAT           0x2c
/* SPC power down registers */
#define A15_PWRDN_EN            0x30
#define A7_PWRDN_EN             0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0            0x68
#define A7_BX_ADDR0             0x78

/* SPC CPU/cluster reset status */
#define STANDBYWFI_STAT         0x3c
#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)       (1 << (cpu))
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)        (1 << (3 + (cpu)))

/* SPC system config interface registers */
#define SYSCFG_WDATA            0x70
#define SYSCFG_RDATA            0x74

/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE        0xC10
#define A7_PERFVAL_BASE         0xC30

/* Config interface control bits */
#define SYSCFG_START            BIT(31)
#define SYSCFG_SCC              (6 << 20)
#define SYSCFG_STAT             (14 << 20)

/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK      (0x3 << 10)

/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS            2

/*
 * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before the jiffies counter
 * is incremented, so set the timeout to 20 ms (two jiffies at 100 Hz).
 */
#define TIMEOUT_US      20000

#define MAX_OPPS        8
#define CA15_DVFS       0
#define CA7_DVFS        1
#define SPC_SYS_CFG     2
#define STAT_COMPLETE(type)     ((1 << 0) << (type << 2))
#define STAT_ERR(type)          ((1 << 1) << (type << 2))
#define RESPONSE_MASK(type)     (STAT_COMPLETE(type) | STAT_ERR(type))
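/*
 * Each request type owns a 4-bit field in PWC_STATUS: bit 0 of the
 * field flags completion, bit 1 an error, so e.g. SPC_SYS_CFG (type 2)
 * completes via bit 8 and reports an error via bit 9.
 */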

struct ve_spc_opp {
        unsigned long freq;
        unsigned long u_volt;
};

struct ve_spc_drvdata {
        void __iomem *baseaddr;
        /*
         * A15 cluster identifier: corresponds to the A15 processors'
         * MPIDR[15:8] bitfield
         */
        u32 a15_clusid;
        uint32_t cur_rsp_mask;
        uint32_t cur_rsp_stat;
        struct semaphore sem;
        struct completion done;
        struct ve_spc_opp *opps[MAX_CLUSTERS];
        int num_opps[MAX_CLUSTERS];
};

static struct ve_spc_drvdata *info;

static inline bool cluster_is_a15(u32 cluster)
{
        return cluster == info->a15_clusid;
}

/**
 * ve_spc_global_wakeup_irq()
 *
 * Function to set/clear global wakeup IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @set: if true, global wake-up IRQs are set, if false they are cleared
 */
void ve_spc_global_wakeup_irq(bool set)
{
        u32 reg;

        reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

        if (set)
                reg |= GBL_WAKEUP_INT_MSK;
        else
                reg &= ~GBL_WAKEUP_INT_MSK;

        writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_cpu_wakeup_irq()
 *
 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @set: if true, wake-up IRQs are set, if false they are cleared
 */
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
        u32 mask, reg;

        if (cluster >= MAX_CLUSTERS)
                return;

        mask = BIT(cpu);

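        /*
         * A15 wake-up IRQ enable bits occupy the low nibble of
         * WAKE_INT_MASK; the A7 bits sit in the next nibble, hence the
         * shift by 4 for non-A15 clusters.
         */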
        if (!cluster_is_a15(cluster))
                mask <<= 4;

        reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

        if (set)
                reg |= mask;
        else
                reg &= ~mask;

        writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_set_resume_addr() - set the jump address used for warm boot
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @addr: physical resume address
 */
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
{
        void __iomem *baseaddr;

        if (cluster >= MAX_CLUSTERS)
                return;

        if (cluster_is_a15(cluster))
                baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
        else
                baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);

        writel_relaxed(addr, baseaddr);
}

/**
 * ve_spc_powerdown()
 *
 * Function to enable/disable cluster powerdown. Not protected by locking
 * since it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @enable: if true enables powerdown, if false disables it
 */
void ve_spc_powerdown(u32 cluster, bool enable)
{
        u32 pwdrn_reg;

        if (cluster >= MAX_CLUSTERS)
                return;

        pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
        writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}

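/*
 * standbywfi_cpu_mask() - STANDBYWFI_STAT bit corresponding to a CPU,
 * selecting the A15 or A7 bitfield according to its cluster.
 */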
static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
{
        return cluster_is_a15(cluster) ?
                  STANDBYWFI_STAT_A15_CPU_MASK(cpu)
                : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
}

/**
 * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
 *
 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 *
 * @return: non-zero if and only if the specified CPU is in WFI
 *
 * Take care when interpreting the result of this function: a CPU might
 * be in WFI temporarily due to idle, and is not necessarily safely
 * parked.
 */
int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
{
        int ret;
        u32 mask = standbywfi_cpu_mask(cpu, cluster);

        if (cluster >= MAX_CLUSTERS)
                return 1;

        ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);

        pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
                 __func__, STANDBYWFI_STAT, ret, mask);

        return ret & mask;
}

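/*
 * ve_spc_get_performance() - read the current performance level index
 * from the cluster's PERF_LVL register and translate it into a
 * frequency (kHz) via the OPP table built at init time.
 */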
static int ve_spc_get_performance(int cluster, u32 *freq)
{
        struct ve_spc_opp *opps = info->opps[cluster];
        u32 perf_cfg_reg = 0;
        u32 perf;

        perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;

        perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
        if (perf >= info->num_opps[cluster])
                return -EINVAL;

        opps += perf;
        *freq = opps->freq;

        return 0;
}

/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
        int idx, max_opp = info->num_opps[cluster];
        struct ve_spc_opp *opps = info->opps[cluster];
        u32 fmin = 0, fmax = ~0, ftmp;

        freq /= 1000; /* OPP entries in kHz */
        for (idx = 0; idx < max_opp; idx++, opps++) {
                ftmp = opps->freq;
                if (ftmp >= freq) {
                        if (ftmp <= fmax)
                                fmax = ftmp;
                } else {
                        if (ftmp >= fmin)
                                fmin = ftmp;
                }
        }
        if (fmax != ~0)
                return fmax * 1000;
        else
                return fmin * 1000;
}

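/*
 * ve_spc_find_performance_index() - map an exact OPP frequency (kHz)
 * to its index in the cluster's OPP table, or -EINVAL if not found.
 */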
static int ve_spc_find_performance_index(int cluster, u32 freq)
{
        int idx, max_opp = info->num_opps[cluster];
        struct ve_spc_opp *opps = info->opps[cluster];

        for (idx = 0; idx < max_opp; idx++, opps++)
                if (opps->freq == freq)
                        break;
        return (idx == max_opp) ? -EINVAL : idx;
}

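/*
 * ve_spc_waitforcompletion() - sleep until the interrupt handler
 * signals completion or the 20 ms timeout expires, then check the
 * latched status for the request's completion bit.
 */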
static int ve_spc_waitforcompletion(int req_type)
{
        int ret = wait_for_completion_interruptible_timeout(
                        &info->done, usecs_to_jiffies(TIMEOUT_US));
        if (ret == 0)
                ret = -ETIMEDOUT;
        else if (ret > 0)
                ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
        return ret;
}

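/*
 * ve_spc_set_performance() - request a DVFS transition by writing an
 * OPP index to the cluster's PERF_LVL register. The semaphore
 * serialises requests, since the driver can track only one
 * outstanding transaction.
 */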
static int ve_spc_set_performance(int cluster, u32 freq)
{
        u32 perf_cfg_reg;
        int ret, perf, req_type;

        if (cluster_is_a15(cluster)) {
                req_type = CA15_DVFS;
                perf_cfg_reg = PERF_LVL_A15;
        } else {
                req_type = CA7_DVFS;
                perf_cfg_reg = PERF_LVL_A7;
        }

        perf = ve_spc_find_performance_index(cluster, freq);

        if (perf < 0)
                return perf;

        if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
                return -ETIME;

        init_completion(&info->done);
        info->cur_rsp_mask = RESPONSE_MASK(req_type);

        writel(perf, info->baseaddr + perf_cfg_reg);
        ret = ve_spc_waitforcompletion(req_type);

        info->cur_rsp_mask = 0;
        up(&info->sem);

        return ret;
}

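/*
 * ve_spc_read_sys_cfg() - perform a read transaction on the SPC system
 * config interface: the function code and word offset are written to
 * the COMMS register, and the result is fetched from SYSCFG_RDATA once
 * the interrupt reports completion.
 */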
static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
        int ret;

        if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
                return -ETIME;

        init_completion(&info->done);
        info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);

        /* Set the control value */
        writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
        ret = ve_spc_waitforcompletion(SPC_SYS_CFG);

        if (ret == 0)
                *data = readl(info->baseaddr + SYSCFG_RDATA);

        info->cur_rsp_mask = 0;
        up(&info->sem);

        return ret;
}

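/*
 * ve_spc_irq_handler() - latch PWC_STATUS for the waiter, but only if
 * it matches the response mask of the transaction currently in flight;
 * spurious status bits are ignored.
 */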
static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
        struct ve_spc_drvdata *drv_data = data;
        uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);

        if (info->cur_rsp_mask & status) {
                info->cur_rsp_stat = status;
                complete(&drv_data->done);
        }

        return IRQ_HANDLED;
}

/*
 *  +--------------------------+
 *  | 31      20 | 19        0 |
 *  +--------------------------+
 *  |   m_volt   |  freq(kHz)  |
 *  +--------------------------+
 */
#define MULT_FACTOR     20
#define VOLT_SHIFT      20
#define FREQ_MASK       (0xFFFFF)
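/*
 * Worked example with a hypothetical raw value: 0x32D0C350 decodes to
 * a voltage field of 0x32D (813 mV, stored as 813000 uV) and a
 * frequency field of 0xC350 (50000, scaled by MULT_FACTOR to
 * 1000000 kHz, i.e. 1 GHz).
 */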
static int ve_spc_populate_opps(uint32_t cluster)
{
        uint32_t data = 0, off, ret, idx;
        struct ve_spc_opp *opps;

        opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL);
        if (!opps)
                return -ENOMEM;

        info->opps[cluster] = opps;

        off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
        for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
                ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
                if (!ret) {
                        opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
                        opps->u_volt = (data >> VOLT_SHIFT) * 1000;
                } else {
                        break;
                }
        }
        info->num_opps[cluster] = idx;

        return ret;
}

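/*
 * ve_init_opp_table() - register the OPPs discovered from the SPC with
 * the OPP library on behalf of cpu_dev, converting frequencies from
 * kHz to the Hz expected by dev_pm_opp_add().
 */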
static int ve_init_opp_table(struct device *cpu_dev)
{
        int cluster;
        int idx, ret = 0, max_opp;
        struct ve_spc_opp *opps;

        cluster = topology_physical_package_id(cpu_dev->id);
        cluster = cluster < 0 ? 0 : cluster;

        max_opp = info->num_opps[cluster];
        opps = info->opps[cluster];

        for (idx = 0; idx < max_opp; idx++, opps++) {
                ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
                if (ret) {
                        dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
                                 opps->freq, opps->u_volt);
                        return ret;
                }
        }
        return ret;
}

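/*
 * ve_spc_init() - one-time setup: record the register base and A15
 * cluster id, install the SPC interrupt handler, and write the driver
 * data back to main memory, since it may be accessed while coherency
 * is disabled during cluster power transitions.
 */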
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
        int ret;
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->baseaddr = baseaddr;
        info->a15_clusid = a15_clusid;

        if (irq <= 0) {
                pr_err(SPCLOG "Invalid IRQ %d\n", irq);
                kfree(info);
                return -EINVAL;
        }

        init_completion(&info->done);

        readl_relaxed(info->baseaddr + PWC_STATUS);

        ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
                                | IRQF_ONESHOT, "vexpress-spc", info);
        if (ret) {
                pr_err(SPCLOG "IRQ %d request failed\n", irq);
                kfree(info);
                return -ENODEV;
        }

        sema_init(&info->sem, 1);
        /*
         * Multi-cluster systems may need this data when non-coherent, during
         * cluster power-up/power-down. Make sure driver info reaches main
         * memory.
         */
        sync_cache_w(info);
        sync_cache_w(&info);

        return 0;
}

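/*
 * A minimal clock provider wrapped around the SPC DVFS interface, so
 * that performance levels can be driven through the common clk API.
 * ve_spc_clk_init() registers one such clock per CPU, named after the
 * CPU device.
 */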
struct clk_spc {
        struct clk_hw hw;
        int cluster;
};

#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
static unsigned long spc_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
{
        struct clk_spc *spc = to_clk_spc(hw);
        u32 freq;

        if (ve_spc_get_performance(spc->cluster, &freq))
                return -EIO;

        return freq * 1000;
}

static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
                unsigned long *parent_rate)
{
        struct clk_spc *spc = to_clk_spc(hw);

        return ve_spc_round_performance(spc->cluster, drate);
}

static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate)
{
        struct clk_spc *spc = to_clk_spc(hw);

        return ve_spc_set_performance(spc->cluster, rate / 1000);
}

static struct clk_ops clk_spc_ops = {
        .recalc_rate = spc_recalc_rate,
        .round_rate = spc_round_rate,
        .set_rate = spc_set_rate,
};

static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
        struct clk_init_data init;
        struct clk_spc *spc;

        spc = kzalloc(sizeof(*spc), GFP_KERNEL);
        if (!spc)
                return ERR_PTR(-ENOMEM);

        spc->hw.init = &init;
        spc->cluster = topology_physical_package_id(cpu_dev->id);

        spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;

        init.name = dev_name(cpu_dev);
        init.ops = &clk_spc_ops;
        init.flags = CLK_GET_RATE_NOCACHE;
        init.num_parents = 0;

        return devm_clk_register(cpu_dev, &spc->hw);
}

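/*
 * ve_spc_clk_init() - device initcall that builds both clusters' OPP
 * tables from the SPC, registers a clock and clkdev lookup for every
 * possible CPU, seeds the OPP library once per cluster, and finally
 * registers the vexpress-spc-cpufreq platform device that consumes
 * them.
 */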
static int __init ve_spc_clk_init(void)
{
        int cpu, cluster;
        struct clk *clk;
        bool init_opp_table[MAX_CLUSTERS] = { false };

        if (!info)
                return 0; /* Continue only if SPC is initialised */

        if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
                pr_err("failed to build OPP table\n");
                return -ENODEV;
        }

        for_each_possible_cpu(cpu) {
                struct device *cpu_dev = get_cpu_device(cpu);
                if (!cpu_dev) {
                        pr_warn("failed to get cpu%d device\n", cpu);
                        continue;
                }
                clk = ve_spc_clk_register(cpu_dev);
                if (IS_ERR(clk)) {
                        pr_warn("failed to register cpu%d clock\n", cpu);
                        continue;
                }
                if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
                        pr_warn("failed to register cpu%d clock lookup\n", cpu);
                        continue;
                }

                cluster = topology_physical_package_id(cpu_dev->id);
                if (init_opp_table[cluster])
                        continue;

                if (ve_init_opp_table(cpu_dev))
                        pr_warn("failed to initialise cpu%d opp table\n", cpu);
                else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
                         topology_core_cpumask(cpu_dev->id)))
                        pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
                else
                        init_opp_table[cluster] = true;
        }

        platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
        return 0;
}
device_initcall(ve_spc_clk_init);
