Lines matching references to 'pmu' (file line number, source text, enclosing function, reference kind):
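The entries below are isolated fragments, so here is a minimal sketch of the context they assume. The field and register names are taken from the listing itself; the include list, the register offsets, the interrupt count and the exact layout of struct pmu_data are assumptions, not the driver's actual definitions. The later sketches in this listing rely on these declarations.

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Register names appear in the references below; the offsets are placeholders. */
#define PMC_SW_RST      0x30    /* placeholder offset */
#define PMC_IRQ_CAUSE   0x50    /* placeholder offset */
#define PMC_IRQ_MASK    0x54    /* placeholder offset */
#define PMU_PWR         0x00    /* placeholder offset */
#define NR_PMU_IRQS     7       /* placeholder count */

struct pmu_data {
        spinlock_t                   lock;
        struct device_node          *of_node;
        void __iomem                *pmc_base;   /* power management control block */
        void __iomem                *pmu_base;   /* power management unit block */
        struct irq_chip_generic     *irq_gc;
        struct irq_domain           *irq_domain;
        struct reset_controller_dev  reset;      /* used when CONFIG_RESET_CONTROLLER=y */
};

#define rcdev_to_pmu(rcdev)     container_of(rcdev, struct pmu_data, reset)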
49 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_reset() local
53 spin_lock_irqsave(&pmu->lock, flags); in pmu_reset_reset()
54 val = readl_relaxed(pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
55 writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
56 writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST); in pmu_reset_reset()
57 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_reset_reset()
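Lines 49 and 53-57 above belong to the reset controller's .reset callback. A hedged reconstruction, assuming the standard struct reset_control_ops prototype; only the quoted lines are verbatim, the signature, locals and return value are filled in from that convention:

static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
        struct pmu_data *pmu = rcdev_to_pmu(rc);        /* line 49 */
        unsigned long flags;
        u32 val;

        /* Pulse the per-device bit in PMC_SW_RST low, then back high */
        spin_lock_irqsave(&pmu->lock, flags);
        val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}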
64 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_assert() local
68 spin_lock_irqsave(&pmu->lock, flags); in pmu_reset_assert()
69 val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST); in pmu_reset_assert()
70 writel_relaxed(val, pmu->pmc_base + PMC_SW_RST); in pmu_reset_assert()
71 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_reset_assert()
78 struct pmu_data *pmu = rcdev_to_pmu(rc); in pmu_reset_deassert() local
82 spin_lock_irqsave(&pmu->lock, flags); in pmu_reset_deassert()
83 val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST); in pmu_reset_deassert()
84 writel_relaxed(val, pmu->pmc_base + PMC_SW_RST); in pmu_reset_deassert()
85 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_reset_deassert()
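The assert (64-71) and deassert (78-85) callbacks are mirror images: assert clears the device's bit in PMC_SW_RST to hold it in reset, deassert sets the bit to release it. Sketches on the same assumptions; the initial values of val are inferred from the &= and |= lines above:

static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
        struct pmu_data *pmu = rcdev_to_pmu(rc);        /* line 64 */
        unsigned long flags;
        u32 val = ~BIT(id);

        /* Clear the bit: hold the device in reset */
        spin_lock_irqsave(&pmu->lock, flags);
        val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
        struct pmu_data *pmu = rcdev_to_pmu(rc);        /* line 78 */
        unsigned long flags;
        u32 val = BIT(id);

        /* Set the bit: release the device from reset */
        spin_lock_irqsave(&pmu->lock, flags);
        val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}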
102 static void __init pmu_reset_init(struct pmu_data *pmu) in pmu_reset_init() argument
106 pmu->reset = pmu_reset; in pmu_reset_init()
107 pmu->reset.of_node = pmu->of_node; in pmu_reset_init()
109 ret = reset_controller_register(&pmu->reset); in pmu_reset_init()
114 static void __init pmu_reset_init(struct pmu_data *pmu) in pmu_reset_init() argument
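Lines 102-109 register the reset controller when CONFIG_RESET_CONTROLLER is enabled; the second pmu_reset_init() at line 114 is the empty stub used otherwise. A sketch of how the two variants likely fit together; the pmu_reset template referenced at line 106 is assumed to carry the ops pointer and reset count shown here:

#ifdef CONFIG_RESET_CONTROLLER
static const struct reset_control_ops pmu_reset_ops = {
        .reset    = pmu_reset_reset,
        .assert   = pmu_reset_assert,
        .deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
        .ops       = &pmu_reset_ops,
        .owner     = THIS_MODULE,
        .nr_resets = 32,        /* assumed: one bit per device in PMC_SW_RST */
};

static void __init pmu_reset_init(struct pmu_data *pmu)
{
        int ret;

        pmu->reset = pmu_reset;                         /* line 106 */
        pmu->reset.of_node = pmu->of_node;              /* line 107 */

        ret = reset_controller_register(&pmu->reset);   /* line 109 */
        if (ret)
                pr_err("pmu: reset_controller_register failed: %d\n", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu) /* line 114: stub */
{
}
#endif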
120 struct pmu_data *pmu; member
145 struct pmu_data *pmu = pmu_dom->pmu; in pmu_domain_power_off() local
148 void __iomem *pmu_base = pmu->pmu_base; in pmu_domain_power_off()
149 void __iomem *pmc_base = pmu->pmc_base; in pmu_domain_power_off()
151 spin_lock_irqsave(&pmu->lock, flags); in pmu_domain_power_off()
171 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_domain_power_off()
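Lines 120 and 145-171 belong to the per-domain power-off path. Beyond the pmu back-pointer confirmed at line 120, the struct pmu_domain layout, the PMU_ISO register and the off sequence (assert reset, enable isolation, gate power) are assumptions used only to show how the quoted lines could fit together:

#define PMU_ISO         0x58    /* assumed register, placeholder offset */

struct pmu_domain {
        struct pmu_data          *pmu;          /* line 120 */
        u32                       pwr_mask;     /* assumed */
        u32                       rst_mask;     /* assumed */
        u32                       iso_mask;     /* assumed */
        struct generic_pm_domain  base;         /* assumed */
};

#define to_pmu_domain(dom)      container_of(dom, struct pmu_domain, base)

static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
        struct pmu_domain *pmu_dom = to_pmu_domain(domain);
        struct pmu_data *pmu = pmu_dom->pmu;            /* line 145 */
        unsigned long flags;
        unsigned int val;
        void __iomem *pmu_base = pmu->pmu_base;         /* line 148 */
        void __iomem *pmc_base = pmu->pmc_base;         /* line 149 */

        spin_lock_irqsave(&pmu->lock, flags);           /* line 151 */

        /* Assumed order: hold the island in reset ... */
        if (pmu_dom->rst_mask) {
                val = ~pmu_dom->rst_mask & readl_relaxed(pmc_base + PMC_SW_RST);
                writel_relaxed(val, pmc_base + PMC_SW_RST);
        }

        /* ... assert the isolators (assumed polarity: bit cleared == isolated) ... */
        if (pmu_dom->iso_mask) {
                val = ~pmu_dom->iso_mask & readl_relaxed(pmu_base + PMU_ISO);
                writel_relaxed(val, pmu_base + PMU_ISO);
        }

        /* ... then gate the power (a set PMU_PWR bit assumed to mean "off") */
        if (pmu_dom->pwr_mask) {
                val = pmu_dom->pwr_mask | readl_relaxed(pmu_base + PMU_PWR);
                writel_relaxed(val, pmu_base + PMU_PWR);
        }

        spin_unlock_irqrestore(&pmu->lock, flags);      /* line 171 */

        return 0;
}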
179 struct pmu_data *pmu = pmu_dom->pmu; in pmu_domain_power_on() local
182 void __iomem *pmu_base = pmu->pmu_base; in pmu_domain_power_on()
183 void __iomem *pmc_base = pmu->pmc_base; in pmu_domain_power_on()
185 spin_lock_irqsave(&pmu->lock, flags); in pmu_domain_power_on()
205 spin_unlock_irqrestore(&pmu->lock, flags); in pmu_domain_power_on()
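Power-on (lines 179-205) presumably walks the same steps in reverse under the same lock; the register writes between the quoted lines are again assumptions:

static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
        struct pmu_domain *pmu_dom = to_pmu_domain(domain);
        struct pmu_data *pmu = pmu_dom->pmu;            /* line 179 */
        unsigned long flags;
        unsigned int val;
        void __iomem *pmu_base = pmu->pmu_base;         /* line 182 */
        void __iomem *pmc_base = pmu->pmc_base;         /* line 183 */

        spin_lock_irqsave(&pmu->lock, flags);           /* line 185 */

        /* Assumed order: restore power ... */
        if (pmu_dom->pwr_mask) {
                val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
                writel_relaxed(val, pmu_base + PMU_PWR);
        }

        /* ... release the isolators ... */
        if (pmu_dom->iso_mask) {
                val = pmu_dom->iso_mask | readl_relaxed(pmu_base + PMU_ISO);
                writel_relaxed(val, pmu_base + PMU_ISO);
        }

        /* ... then take the island out of reset */
        if (pmu_dom->rst_mask) {
                val = pmu_dom->rst_mask | readl_relaxed(pmc_base + PMC_SW_RST);
                writel_relaxed(val, pmc_base + PMC_SW_RST);
        }

        spin_unlock_irqrestore(&pmu->lock, flags);      /* line 205 */

        return 0;
}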
213 unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR); in __pmu_domain_register()
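Line 213 samples PMU_PWR while registering a domain, presumably so genpd starts out knowing whether the island is already off. A sketch; the second parameter, the bit polarity and the pm_genpd_init() call are assumptions:

static int __init __pmu_domain_register(struct pmu_domain *domain,
                                        struct device_node *np)
{
        unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR); /* line 213 */

        domain->base.power_off = pmu_domain_power_off;
        domain->base.power_on = pmu_domain_power_on;

        /* Assumed polarity: a set PMU_PWR bit means the island is powered down */
        pm_genpd_init(&domain->base, NULL, !!(val & domain->pwr_mask));

        /* The full function presumably also exposes the domain to DT consumers. */
        return 0;
}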
227 struct pmu_data *pmu = irq_desc_get_handler_data(desc); in pmu_irq_handler() local
228 struct irq_chip_generic *gc = pmu->irq_gc; in pmu_irq_handler()
229 struct irq_domain *domain = pmu->irq_domain; in pmu_irq_handler()
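Lines 227-229 are the head of the chained handler for the PMU's summary interrupt. A simplified sketch of the dispatch loop; reading PMC_IRQ_CAUSE, masking with the generic chip's mask cache and skipping the acknowledge step are assumptions made for brevity:

static void pmu_irq_handler(struct irq_desc *desc)
{
        struct pmu_data *pmu = irq_desc_get_handler_data(desc);  /* line 227 */
        struct irq_chip_generic *gc = pmu->irq_gc;               /* line 228 */
        struct irq_domain *domain = pmu->irq_domain;             /* line 229 */
        void __iomem *base = gc->reg_base;
        u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;

        /* Hand each pending, unmasked source to its mapped Linux interrupt */
        while (stat) {
                u32 hwirq = fls(stat) - 1;

                stat &= ~BIT(hwirq);
                generic_handle_irq(irq_find_mapping(domain, hwirq));
        }

        /* Clearing the handled bits in PMC_IRQ_CAUSE is omitted here;
         * the real handler has to take care when acknowledging them. */
}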
265 static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq) in dove_init_pmu_irq() argument
273 writel(0, pmu->pmc_base + PMC_IRQ_MASK); in dove_init_pmu_irq()
274 writel(0, pmu->pmc_base + PMC_IRQ_CAUSE); in dove_init_pmu_irq()
276 domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS, in dove_init_pmu_irq()
294 gc->reg_base = pmu->pmc_base; in dove_init_pmu_irq()
299 pmu->irq_domain = domain; in dove_init_pmu_irq()
300 pmu->irq_gc = gc; in dove_init_pmu_irq()
302 irq_set_handler_data(irq, pmu); in dove_init_pmu_irq()
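Lines 265-302 set up the PMU interrupt controller: mask and clear everything, create a linear irq_domain, attach a generic irq chip whose mask register is PMC_IRQ_MASK, and install the chained handler. A sketch built on the stock generic-chip helpers; the chip name, flag values and error handling are assumptions:

static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
        int ret;

        /* Mask and clear all PMU interrupts (lines 273-274) */
        writel(0, pmu->pmc_base + PMC_IRQ_MASK);
        writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

        domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,   /* line 276 */
                                       &irq_generic_chip_ops, NULL);
        if (!domain)
                return -ENOMEM;

        ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, "pmu_irq",
                                             handle_level_irq, 0, 0,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret) {
                irq_domain_remove(domain);
                return ret;
        }

        gc = irq_get_domain_generic_chip(domain, 0);
        gc->reg_base = pmu->pmc_base;                   /* line 294 */
        gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
        gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
        gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

        pmu->irq_domain = domain;                       /* line 299 */
        pmu->irq_gc = gc;                               /* line 300 */

        irq_set_handler_data(irq, pmu);                 /* line 302 */
        irq_set_chained_handler(irq, pmu_irq_handler);

        return 0;
}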
332 struct pmu_data *pmu; in dove_init_pmu() local
346 pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); in dove_init_pmu()
347 if (!pmu) in dove_init_pmu()
350 spin_lock_init(&pmu->lock); in dove_init_pmu()
351 pmu->of_node = np_pmu; in dove_init_pmu()
352 pmu->pmc_base = of_iomap(pmu->of_node, 0); in dove_init_pmu()
353 pmu->pmu_base = of_iomap(pmu->of_node, 1); in dove_init_pmu()
354 if (!pmu->pmc_base || !pmu->pmu_base) { in dove_init_pmu()
356 iounmap(pmu->pmu_base); in dove_init_pmu()
357 iounmap(pmu->pmc_base); in dove_init_pmu()
358 kfree(pmu); in dove_init_pmu()
362 pmu_reset_init(pmu); in dove_init_pmu()
372 domain->pmu = pmu; in dove_init_pmu()
392 if (args.np == pmu->of_node) in dove_init_pmu()
401 parent_irq = irq_of_parse_and_map(pmu->of_node, 0); in dove_init_pmu()
405 ret = dove_init_pmu_irq(pmu, parent_irq); in dove_init_pmu()
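Lines 332-405 form the init path: find the PMU node, allocate and map struct pmu_data, register the reset controller, create one pmu_domain per child of the domains sub-node, then wire up the interrupt. A condensed sketch; the signature, the compatible string, the sub-node name and the per-domain property parsing (reduced to a comment) are assumptions, and the consumer walk around line 392 is omitted:

int __init dove_init_pmu(void)
{
        struct device_node *np_pmu, *domains_node, *np;
        struct pmu_data *pmu;
        int ret, parent_irq;

        /* Assumed compatible string for the Dove PMU node */
        np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
        if (!np_pmu)
                return 0;

        domains_node = of_get_child_by_name(np_pmu, "domains");
        if (!domains_node)
                return 0;

        pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);        /* line 346 */
        if (!pmu)                                       /* line 347 */
                return -ENOMEM;

        spin_lock_init(&pmu->lock);                     /* line 350 */
        pmu->of_node = np_pmu;                          /* line 351 */
        pmu->pmc_base = of_iomap(pmu->of_node, 0);      /* line 352 */
        pmu->pmu_base = of_iomap(pmu->of_node, 1);      /* line 353 */
        if (!pmu->pmc_base || !pmu->pmu_base) {         /* line 354 */
                pr_err("pmu: failed to map registers\n");
                iounmap(pmu->pmu_base);                 /* line 356 */
                iounmap(pmu->pmc_base);                 /* line 357 */
                kfree(pmu);                             /* line 358 */
                return -ENOMEM;
        }

        pmu_reset_init(pmu);                            /* line 362 */

        for_each_available_child_of_node(domains_node, np) {
                struct pmu_domain *domain;

                domain = kzalloc(sizeof(*domain), GFP_KERNEL);
                if (!domain)
                        break;

                domain->pmu = pmu;                      /* line 372 */
                domain->base.name = np->name;
                /* The real loop reads this domain's power/isolation/reset
                 * masks from DT properties here (names not shown in the
                 * listing) before registering it. */
                __pmu_domain_register(domain, np);
        }

        /* Line 392 compares args.np against pmu->of_node while walking
         * consumers of the PMU's power domains; that walk is omitted here. */

        parent_irq = irq_of_parse_and_map(pmu->of_node, 0);     /* line 401 */
        if (!parent_irq)
                pr_err("pmu: no interrupt supplied\n");

        ret = dove_init_pmu_irq(pmu, parent_irq);               /* line 405 */
        if (ret)
                pr_err("dove_init_pmu_irq() failed: %d\n", ret);

        return 0;
}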