/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"

/* All registered clocks, linked through clk->node */
static LIST_HEAD(clocks);
/* Protects the clock list and parent/child links (sleepable contexts) */
static DEFINE_MUTEX(clocks_mutex);
/* Protects usecounts, cached rates and hardware accesses (IRQ-safe) */
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Enable @clk, recursively enabling every ancestor first so a module
 * clock is never ungated while its source is still off.  Hardware is
 * only touched on the 0 -> 1 usecount transition: PSC clocks go through
 * davinci_psc_config(), others through their optional clk_enable hook.
 * Caller must hold clockfw_lock.
 */
static void __clk_enable(struct clk *clk)
{
	if (clk->parent)
		__clk_enable(clk->parent);
	if (clk->usecount++ == 0) {
		if (clk->flags & CLK_PSC)
			davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
					   true, clk->flags);
		else if (clk->clk_enable)
			clk->clk_enable(clk);
	}
}

/*
 * Mirror of __clk_enable(): gate the clock on the 1 -> 0 transition,
 * then release the parents (child before ancestors — reverse of enable
 * order).  Note the deliberate asymmetry with __clk_enable(): clocks
 * flagged both CLK_PLL and CLK_PSC are not disabled through the PSC
 * path here.  Caller must hold clockfw_lock.
 */
static void __clk_disable(struct clk *clk)
{
	if (WARN_ON(clk->usecount == 0))
		return;
	if (--clk->usecount == 0) {
		if (!(clk->flags & CLK_PLL) && (clk->flags & CLK_PSC))
			davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
					   false, clk->flags);
		else if (clk->clk_disable)
			clk->clk_disable(clk);
	}
	if (clk->parent)
		__clk_disable(clk->parent);
}

/*
 * Assert (@reset == true) or de-assert the PSC local reset for @clk.
 * Non-PSC clocks are silently ignored (still returns 0).
 * Returns 0, or -EINVAL for a NULL/error clock pointer.
 */
int davinci_clk_reset(struct clk *clk, bool reset)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->flags & CLK_PSC)
		davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_clk_reset);

/* Put the IP block behind @clk into reset via its reset hook */
int davinci_clk_reset_assert(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk) || !clk->reset)
		return -EINVAL;

	return clk->reset(clk, true);
}
EXPORT_SYMBOL(davinci_clk_reset_assert);

/* Release the IP block behind @clk from reset via its reset hook */
int davinci_clk_reset_deassert(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk) || !clk->reset)
		return -EINVAL;

	return clk->reset(clk, false);
}
EXPORT_SYMBOL(davinci_clk_reset_deassert);

/* clk API: take one enable reference on @clk (and its ancestors) */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);

/* clk API: drop one enable reference; gates the hardware at zero */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

/*
 * clk API: return the cached rate in Hz.  A NULL/error clock yields
 * -EINVAL converted to unsigned long (long-standing clk convention).
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * clk API: closest achievable rate to @rate.  Falls back to the current
 * cached rate when the clock has no round_rate hook.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk == NULL || IS_ERR(clk))
		return 0;

	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children (depth-first, via each child's recalc) */
static void propagate_rate(struct clk *root)
{
	struct clk *clk;

	list_for_each_entry(clk, &root->children, childnode) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
}

/*
 * clk API: change @clk's rate through its set_rate hook, then refresh
 * the cached rate of @clk and every descendant.  The set_rate hook is
 * invoked outside clockfw_lock — NOTE(review): presumably because some
 * implementations may sleep; confirm against the set_rate providers.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

/*
 * clk API: reparent @clk.  Only legal while the clock is disabled
 * (usecount == 0); the child is moved onto the new parent's list and
 * the subtree's rates are re-derived.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* Cannot change parent on enabled clock */
	if (WARN_ON(clk->usecount))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	clk->parent = parent;
	list_del_init(&clk->childnode);
	list_add(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->recalc)
		clk->rate = clk->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);

/*
 * Add @clk to the framework.  A parent, if any, must already have a
 * rate.  The clock's own rate is chosen in this order: value preset in
 * the table -> ->recalc() -> inherited from the parent.
 */
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (WARN(clk->parent && !clk->parent->rate,
		 "CLK: %s parent %s has no rate!\n",
		 clk->name, clk->parent->name))
		return -EINVAL;

	INIT_LIST_HEAD(&clk->children);

	mutex_lock(&clocks_mutex);
	list_add_tail(&clk->node, &clocks);
	if (clk->parent)
		list_add_tail(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	/* If rate is already set, use it */
	if (clk->rate)
		return 0;

	/* Else, see if there is a way to calculate it */
	if (clk->recalc)
		clk->rate = clk->recalc(clk);

	/* Otherwise, default to parent rate */
	else if (clk->parent)
		clk->rate = clk->parent->rate;

	return 0;
}
EXPORT_SYMBOL(clk_register);

/* Remove @clk from the global list and from its parent's child list */
void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	list_del(&clk->childnode);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
int __init davinci_clk_disable_unused(void)
{
	struct clk *ck;

	spin_lock_irq(&clockfw_lock);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0)
			continue;
		if (!(ck->flags & CLK_PSC))
			continue;

		/* ignore if in Disabled or SwRstDisable states */
		if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
			continue;

		pr_debug("Clocks: disable unused %s\n", ck->name);

		davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
				   false, ck->flags);
	}
	spin_unlock_irq(&clockfw_lock);

	return 0;
}
#endif

/*
 * Recalculate a PLL-derived sysclk rate: the parent's rate (or, for
 * PRE_PLL clocks, the raw PLL input rate) divided by the PLLDIVn ratio
 * when that divider register exists and is enabled.
 */
static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	/* If this is the PLL base clock, no more calculations needed */
	if (clk->pll_data)
		return rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* Otherwise, the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	if (!clk->div_reg)
		return rate;

	v = __raw_readl(pll->base + clk->div_reg);
	if (v & PLLDIV_EN) {
		plldiv = (v & pll->div_ratio_mask) + 1;
		if (plldiv)
			rate /= plldiv;
	}

	return rate;
}

/*
 * Program a sysclk divider so the output is as close to @rate as the
 * hardware allows without exceeding clk->maxrate.  Uses the PLL GO-bit
 * handshake: wait for any in-progress transition, write the divider,
 * set GOSET, then wait for GOSTAT to clear again.
 * Returns 0, or -EINVAL when the clock/ratio is unsuitable.
 */
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
	unsigned v;
	struct pll_data *pll;
	unsigned long input;
	unsigned ratio = 0;

	/* If this is the PLL base clock, wrong function to call */
	if (clk->pll_data)
		return -EINVAL;

	/* There must be a parent... */
	if (WARN_ON(!clk->parent))
		return -EINVAL;

	/* ... the parent must be a PLL... */
	if (WARN_ON(!clk->parent->pll_data))
		return -EINVAL;

	/* ... and this clock must have a divider. */
	if (WARN_ON(!clk->div_reg))
		return -EINVAL;

	pll = clk->parent->pll_data;

	input = clk->parent->rate;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		input = pll->input_rate;

	if (input > rate) {
		/*
		 * Can afford to provide an output little higher than requested
		 * only if maximum rate supported by hardware on this sysclk
		 * is known.
		 */
		if (clk->maxrate) {
			ratio = DIV_ROUND_CLOSEST(input, rate);
			if (input / ratio > clk->maxrate)
				ratio = 0;
		}

		if (ratio == 0)
			ratio = DIV_ROUND_UP(input, rate);

		/* the RATIO register field holds divider-minus-one */
		ratio--;
	}

	if (ratio > pll->div_ratio_mask)
		return -EINVAL;

	/* wait for any previous GO operation to finish */
	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	v = __raw_readl(pll->base + clk->div_reg);
	v &= ~pll->div_ratio_mask;
	v |= ratio | PLLDIV_EN;
	__raw_writel(v, pll->base + clk->div_reg);

	/* kick off the divider transition ... */
	v = __raw_readl(pll->base + PLLCMD);
	v |= PLLCMD_GOSET;
	__raw_writel(v, pll->base + PLLCMD);

	/* ... and wait for it to complete */
	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);

/* Leaf (PSC) clocks simply track their parent's rate */
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
	if (WARN_ON(!clk->parent))
		return clk->rate;

	return clk->parent->rate;
}

/* set_rate hook for clocks whose rate is cached but not programmed */
int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}

/*
 * Recalculate a PLL base clock's output from the hardware registers:
 * input * multiplier / pre-divider / post-divider, or the raw input
 * rate when the PLL is bypassed.  Also caches pll->input_rate for the
 * sysclk/PRE_PLL paths.
 */
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		/* dm365 derives its multiplier as 2*PLLM, not PLLM+1 */
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;

	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & pll->div_ratio_mask) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & pll->div_ratio_mask) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}

/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
			unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;
	unsigned long flags;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 * PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = PLL_LOCK_TIME;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	/* Protect against simultaneous calls to PLL setting sequence */
	spin_lock_irqsave(&clockfw_lock, flags);

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(PLL_BYPASS_TIME);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	udelay(PLL_RESET_TIME);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);

/**
 * davinci_set_refclk_rate() - Set the reference clock rate
 * @rate: The new rate.
 *
 * Sets the reference clock rate to a given value. This will most likely
 * result in the entire clock tree getting updated.
 *
 * This is used to support boards which use a reference clock different
 * than that used by default in <soc>.c file. The reference clock rate
 * should be updated early in the boot process; ideally soon after the
 * clock tree has been initialized once with the default reference clock
 * rate (davinci_common_init()).
558 * 559 * Returns 0 on success, error otherwise. 560 */ 561int davinci_set_refclk_rate(unsigned long rate) 562{ 563 struct clk *refclk; 564 565 refclk = clk_get(NULL, "ref"); 566 if (IS_ERR(refclk)) { 567 pr_err("%s: failed to get reference clock\n", __func__); 568 return PTR_ERR(refclk); 569 } 570 571 clk_set_rate(refclk, rate); 572 573 clk_put(refclk); 574 575 return 0; 576} 577 578int __init davinci_clk_init(struct clk_lookup *clocks) 579{ 580 struct clk_lookup *c; 581 struct clk *clk; 582 size_t num_clocks = 0; 583 584 for (c = clocks; c->clk; c++) { 585 clk = c->clk; 586 587 if (!clk->recalc) { 588 589 /* Check if clock is a PLL */ 590 if (clk->pll_data) 591 clk->recalc = clk_pllclk_recalc; 592 593 /* Else, if it is a PLL-derived clock */ 594 else if (clk->flags & CLK_PLL) 595 clk->recalc = clk_sysclk_recalc; 596 597 /* Otherwise, it is a leaf clock (PSC clock) */ 598 else if (clk->parent) 599 clk->recalc = clk_leafclk_recalc; 600 } 601 602 if (clk->pll_data) { 603 struct pll_data *pll = clk->pll_data; 604 605 if (!pll->div_ratio_mask) 606 pll->div_ratio_mask = PLLDIV_RATIO_MASK; 607 608 if (pll->phys_base && !pll->base) { 609 pll->base = ioremap(pll->phys_base, SZ_4K); 610 WARN_ON(!pll->base); 611 } 612 } 613 614 if (clk->recalc) 615 clk->rate = clk->recalc(clk); 616 617 if (clk->lpsc) 618 clk->flags |= CLK_PSC; 619 620 if (clk->flags & PSC_LRST) 621 clk->reset = davinci_clk_reset; 622 623 clk_register(clk); 624 num_clocks++; 625 626 /* Turn on clocks that Linux doesn't otherwise manage */ 627 if (clk->flags & ALWAYS_ENABLED) 628 clk_enable(clk); 629 } 630 631 clkdev_add_table(clocks, num_clocks); 632 633 return 0; 634} 635 636#ifdef CONFIG_DEBUG_FS 637 638#include <linux/debugfs.h> 639#include <linux/seq_file.h> 640 641#define CLKNAME_MAX 10 /* longest clock name */ 642#define NEST_DELTA 2 643#define NEST_MAX 4 644 645static void 646dump_clock(struct seq_file *s, unsigned nest, struct clk *parent) 647{ 648 char *state; 649 char buf[CLKNAME_MAX + NEST_DELTA 
* NEST_MAX]; 650 struct clk *clk; 651 unsigned i; 652 653 if (parent->flags & CLK_PLL) 654 state = "pll"; 655 else if (parent->flags & CLK_PSC) 656 state = "psc"; 657 else 658 state = ""; 659 660 /* <nest spaces> name <pad to end> */ 661 memset(buf, ' ', sizeof(buf) - 1); 662 buf[sizeof(buf) - 1] = 0; 663 i = strlen(parent->name); 664 memcpy(buf + nest, parent->name, 665 min(i, (unsigned)(sizeof(buf) - 1 - nest))); 666 667 seq_printf(s, "%s users=%2d %-3s %9ld Hz\n", 668 buf, parent->usecount, state, clk_get_rate(parent)); 669 /* REVISIT show device associations too */ 670 671 /* cost is now small, but not linear... */ 672 list_for_each_entry(clk, &parent->children, childnode) { 673 dump_clock(s, nest + NEST_DELTA, clk); 674 } 675} 676 677static int davinci_ck_show(struct seq_file *m, void *v) 678{ 679 struct clk *clk; 680 681 /* 682 * Show clock tree; We trust nonzero usecounts equate to PSC enables... 683 */ 684 mutex_lock(&clocks_mutex); 685 list_for_each_entry(clk, &clocks, node) 686 if (!clk->parent) 687 dump_clock(m, 0, clk); 688 mutex_unlock(&clocks_mutex); 689 690 return 0; 691} 692 693static int davinci_ck_open(struct inode *inode, struct file *file) 694{ 695 return single_open(file, davinci_ck_show, NULL); 696} 697 698static const struct file_operations davinci_ck_operations = { 699 .open = davinci_ck_open, 700 .read = seq_read, 701 .llseek = seq_lseek, 702 .release = single_release, 703}; 704 705static int __init davinci_clk_debugfs_init(void) 706{ 707 debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL, 708 &davinci_ck_operations); 709 return 0; 710 711} 712device_initcall(davinci_clk_debugfs_init); 713#endif /* CONFIG_DEBUG_FS */ 714