/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "cgu.h"

#define MHZ (1000 * 1000)

/**
 * ingenic_cgu_gate_get() - get the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the gate bit is set, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return readl(cgu->base + info->reg) & BIT(info->bit);
}

/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: non-zero to gate a clock, otherwise zero
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}

/*
 * PLL operations
 */

static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass, enable;
	unsigned long flags;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	bypass = !!(ctl & BIT(pll_info->bypass_bit));
	enable = !!(ctl & BIT(pll_info->enable_bit));

	if (bypass)
		return parent_rate;

	if (!enable)
		return 0;

	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m, n * od);
}

static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od;

	pll_info = &clk_info->pll;
	od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m, n * od);
}

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}

static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	const unsigned timeout = 100;
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned long rate, flags;
	unsigned m, n, od, i;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
				&m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait for the PLL to stabilise */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	if (i == timeout)
		return -EBUSY;

	return 0;
}

static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,
};

/*
 * Operations for all non-PLL clocks
 */

static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long rate = parent_rate;
	u32 div_reg, div;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div_reg = readl(cgu->base + clk_info->div.reg);
		div = (div_reg >> clk_info->div.shift) &
		      GENMASK(clk_info->div.bits - 1, 0);
		div += 1;

		rate /= div;
	}

	return rate;
}

static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned div;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	/* and impose hardware constraints */
	div = min_t(unsigned, div, 1 << clk_info->div.bits);
	div = max_t(unsigned, div, 1);

	return div;
}

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	long rate = *parent_rate;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV)
		rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		rate /= clk_info->fixdiv.div;

	return rate;
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = parent_rate / div;

		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= (div - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}

static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	int enabled = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		spin_lock_irqsave(&cgu->lock, flags);
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */

static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}

struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}

int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
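
/*
 * Usage sketch (illustrative only, not part of this driver): a SoC-specific
 * CGU driver would typically describe its clocks in an array of struct
 * ingenic_cgu_clk_info and hand that table to ingenic_cgu_new() followed by
 * ingenic_cgu_register_clocks() from an early clock init callback. All names
 * below (the "example_cgu_clocks" table, the compatible string, etc.) are
 * hypothetical.
 *
 *	static const struct ingenic_cgu_clk_info example_cgu_clocks[] = {
 *		[0] = { "ext", CGU_CLK_EXT },
 *		// ... PLLs, muxes, dividers and gates for the SoC ...
 *	};
 *
 *	static void __init example_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *		int retval;
 *
 *		cgu = ingenic_cgu_new(example_cgu_clocks,
 *				      ARRAY_SIZE(example_cgu_clocks), np);
 *		if (!cgu) {
 *			pr_err("%s: failed to initialise CGU\n", __func__);
 *			return;
 *		}
 *
 *		retval = ingenic_cgu_register_clocks(cgu);
 *		if (retval)
 *			pr_err("%s: failed to register CGU clocks\n", __func__);
 *	}
 *	CLK_OF_DECLARE(example_cgu, "example,example-cgu", example_cgu_init);
 */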