Lines matching refs:drvdata — every hit for drvdata in the ETB driver, shown with its source line number, the matching code, and the enclosing function ("argument" and "local" mark how drvdata enters that function).

93 static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)  in etb_get_buffer_depth()  argument
98 ret = clk_prepare_enable(drvdata->clk); in etb_get_buffer_depth()
103 depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); in etb_get_buffer_depth()
105 clk_disable_unprepare(drvdata->clk); in etb_get_buffer_depth()
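
Pieced together from the hits above, the depth query is a short clock-bracketed register read. A minimal sketch, assuming the driver's usual headers and register definitions; only the drvdata accesses come from the matches, the error handling and local names are filled in.

    static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
    {
            u32 depth = 0;
            int ret;

            /* the register is only readable while the APB clock is running */
            ret = clk_prepare_enable(drvdata->clk);
            if (ret)
                    return ret;

            /* ETB_RAM_DEPTH_REG reports the trace RAM size in 32-bit words */
            depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);

            clk_disable_unprepare(drvdata->clk);
            return depth;
    }
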
109 static void etb_enable_hw(struct etb_drvdata *drvdata) in etb_enable_hw() argument
114 CS_UNLOCK(drvdata->base); in etb_enable_hw()
116 depth = drvdata->buffer_depth; in etb_enable_hw()
118 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); in etb_enable_hw()
121 writel_relaxed(0x0, drvdata->base + ETB_RWD_REG); in etb_enable_hw()
124 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); in etb_enable_hw()
126 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); in etb_enable_hw()
128 writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG); in etb_enable_hw()
130 drvdata->base + ETB_FFCR); in etb_enable_hw()
132 writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG); in etb_enable_hw()
134 CS_LOCK(drvdata->base); in etb_enable_hw()
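
The etb_enable_hw() hits trace the usual CoreSight programming pattern: CS_UNLOCK the device, zero the trace RAM and both pointers, program the trigger counter and the formatter, set the capture-enable bit, then CS_LOCK again. A sketch of the full sequence follows; the RAM-clearing loop and the ETB_FFCR flag names (ETB_FFCR_EN_FTC, ETB_FFCR_STOP_TRIGGER) are assumptions filled in around the matched drvdata lines.

    static void etb_enable_hw(struct etb_drvdata *drvdata)
    {
            u32 depth = drvdata->buffer_depth;
            int i;

            CS_UNLOCK(drvdata->base);

            /* zero the write pointer, then clear the whole trace RAM */
            writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
            for (i = 0; i < depth; i++)
                    writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

            /* reset both pointers before capture starts */
            writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
            writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

            /* trigger counter and formatter setup, then enable capture */
            writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
            writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
                           drvdata->base + ETB_FFCR);
            writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

            CS_LOCK(drvdata->base);
    }
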
139 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in etb_enable() local
143 ret = clk_prepare_enable(drvdata->clk); in etb_enable()
147 spin_lock_irqsave(&drvdata->spinlock, flags); in etb_enable()
148 etb_enable_hw(drvdata); in etb_enable()
149 drvdata->enable = true; in etb_enable()
150 spin_unlock_irqrestore(&drvdata->spinlock, flags); in etb_enable()
152 dev_info(drvdata->dev, "ETB enabled\n"); in etb_enable()
156 static void etb_disable_hw(struct etb_drvdata *drvdata) in etb_disable_hw() argument
160 CS_UNLOCK(drvdata->base); in etb_disable_hw()
162 ffcr = readl_relaxed(drvdata->base + ETB_FFCR); in etb_disable_hw()
165 writel_relaxed(ffcr, drvdata->base + ETB_FFCR); in etb_disable_hw()
168 writel_relaxed(ffcr, drvdata->base + ETB_FFCR); in etb_disable_hw()
170 if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) { in etb_disable_hw()
171 dev_err(drvdata->dev, in etb_disable_hw()
177 writel_relaxed(0x0, drvdata->base + ETB_CTL_REG); in etb_disable_hw()
179 if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) { in etb_disable_hw()
180 dev_err(drvdata->dev, in etb_disable_hw()
185 CS_LOCK(drvdata->base); in etb_disable_hw()
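
Disabling is the mirror image: the formatter is told to stop after flushing, a manual flush is generated, the driver polls for the flush to complete, capture is turned off, and it then waits for the formatter to stop. The sketch below assumes the ETB_FFCR_STOP_FI / ETB_FFCR_FON_MAN bit names and the error-message text; the register accesses and the two coresight_timeout() calls come from the matches.

    static void etb_disable_hw(struct etb_drvdata *drvdata)
    {
            u32 ffcr;

            CS_UNLOCK(drvdata->base);

            /* stop the formatter once it has flushed, then force a manual flush */
            ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
            ffcr |= ETB_FFCR_STOP_FI;
            writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
            ffcr |= ETB_FFCR_FON_MAN;
            writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

            /* wait for the manual-flush bit to clear */
            if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0))
                    dev_err(drvdata->dev,
                            "timeout while waiting for manual flush to complete\n");

            /* disable trace capture */
            writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

            /* wait for the formatter to report it has stopped */
            if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1))
                    dev_err(drvdata->dev,
                            "timeout while waiting for formatter to stop\n");

            CS_LOCK(drvdata->base);
    }
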
188 static void etb_dump_hw(struct etb_drvdata *drvdata) in etb_dump_hw() argument
196 CS_UNLOCK(drvdata->base); in etb_dump_hw()
198 read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); in etb_dump_hw()
199 write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); in etb_dump_hw()
204 dev_err(drvdata->dev, in etb_dump_hw()
207 dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n", in etb_dump_hw()
212 if ((readl_relaxed(drvdata->base + ETB_STATUS_REG) in etb_dump_hw()
214 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); in etb_dump_hw()
216 writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER); in etb_dump_hw()
218 depth = drvdata->buffer_depth; in etb_dump_hw()
219 buf_ptr = drvdata->buf; in etb_dump_hw()
221 read_data = readl_relaxed(drvdata->base + in etb_dump_hw()
239 writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER); in etb_dump_hw()
241 CS_LOCK(drvdata->base); in etb_dump_hw()
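
etb_dump_hw() copies the trace RAM into drvdata->buf. Reduced to its core, the drain loop looks like the sketch below: position the read pointer (at zero if the RAM never wrapped, at the write pointer if it did), read one word at a time from the read-data register and split it into bytes, then restore the read pointer. The ETB_STATUS_RAM_FULL and ETB_RAM_READ_DATA_REG names and the byte-splitting are assumptions; the frame-alignment warning visible in the dev_err() hits is omitted here.

    static void etb_dump_hw(struct etb_drvdata *drvdata)
    {
            u8 *buf_ptr = drvdata->buf;
            u32 depth = drvdata->buffer_depth;
            u32 read_ptr, write_ptr, read_data;
            int i;

            CS_UNLOCK(drvdata->base);

            read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
            write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

            /* if the RAM wrapped, the oldest data sits at the write pointer */
            if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
                 & ETB_STATUS_RAM_FULL) == 0)
                    writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
            else
                    writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);

            /* drain the trace RAM one 32-bit word at a time */
            for (i = 0; i < depth; i++) {
                    read_data = readl_relaxed(drvdata->base +
                                              ETB_RAM_READ_DATA_REG);
                    *buf_ptr++ = read_data >> 0;
                    *buf_ptr++ = read_data >> 8;
                    *buf_ptr++ = read_data >> 16;
                    *buf_ptr++ = read_data >> 24;
            }

            /* put the read pointer back where we found it */
            writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

            CS_LOCK(drvdata->base);
    }
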
246 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in etb_disable() local
249 spin_lock_irqsave(&drvdata->spinlock, flags); in etb_disable()
250 etb_disable_hw(drvdata); in etb_disable()
251 etb_dump_hw(drvdata); in etb_disable()
252 drvdata->enable = false; in etb_disable()
253 spin_unlock_irqrestore(&drvdata->spinlock, flags); in etb_disable()
255 clk_disable_unprepare(drvdata->clk); in etb_disable()
257 dev_info(drvdata->dev, "ETB disabled\n"); in etb_disable()
269 static void etb_dump(struct etb_drvdata *drvdata) in etb_dump() argument
273 spin_lock_irqsave(&drvdata->spinlock, flags); in etb_dump()
274 if (drvdata->enable) { in etb_dump()
275 etb_disable_hw(drvdata); in etb_dump()
276 etb_dump_hw(drvdata); in etb_dump()
277 etb_enable_hw(drvdata); in etb_dump()
279 spin_unlock_irqrestore(&drvdata->spinlock, flags); in etb_dump()
281 dev_info(drvdata->dev, "ETB dumped\n"); in etb_dump()
286 struct etb_drvdata *drvdata = container_of(file->private_data, in etb_open() local
289 if (atomic_cmpxchg(&drvdata->in_use, 0, 1)) in etb_open()
292 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); in etb_open()
300 struct etb_drvdata *drvdata = container_of(file->private_data, in etb_read() local
303 etb_dump(drvdata); in etb_read()
305 depth = drvdata->buffer_depth; in etb_read()
309 if (copy_to_user(data, drvdata->buf + *ppos, len)) { in etb_read()
310 dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__); in etb_read()
316 dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n", in etb_read()
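
The read() path on the misc device first snapshots the hardware via etb_dump() and then hands the cached buffer to user space. A sketch, assuming the clamp of len against the buffer size (buffer_depth words of 4 bytes each) that the surrounding code implies; the container_of() over miscdev and the copy_to_user() call come from the matches.

    static ssize_t etb_read(struct file *file, char __user *data,
                            size_t len, loff_t *ppos)
    {
            struct etb_drvdata *drvdata = container_of(file->private_data,
                                                       struct etb_drvdata, miscdev);
            u32 depth;

            /* refresh drvdata->buf from the trace RAM before copying out */
            etb_dump(drvdata);

            depth = drvdata->buffer_depth;
            if (*ppos + len > depth * 4)
                    len = depth * 4 - *ppos;

            if (copy_to_user(data, drvdata->buf + *ppos, len)) {
                    dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
                    return -EFAULT;
            }

            *ppos += len;

            dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
                    __func__, len, (int)(depth * 4 - *ppos));
            return len;
    }
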
323 struct etb_drvdata *drvdata = container_of(file->private_data, in etb_release() local
325 atomic_set(&drvdata->in_use, 0); in etb_release()
327 dev_dbg(drvdata->dev, "%s: released\n", __func__); in etb_release()
346 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); in status_show() local
348 ret = clk_prepare_enable(drvdata->clk); in status_show()
352 spin_lock_irqsave(&drvdata->spinlock, flags); in status_show()
353 CS_UNLOCK(drvdata->base); in status_show()
355 etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); in status_show()
356 etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG); in status_show()
357 etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); in status_show()
358 etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); in status_show()
359 etb_trg = readl_relaxed(drvdata->base + ETB_TRG); in status_show()
360 etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG); in status_show()
361 etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR); in status_show()
362 etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR); in status_show()
364 CS_LOCK(drvdata->base); in status_show()
365 spin_unlock_irqrestore(&drvdata->spinlock, flags); in status_show()
367 clk_disable_unprepare(drvdata->clk); in status_show()
388 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); in trigger_cntr_show() local
389 unsigned long val = drvdata->trigger_cntr; in trigger_cntr_show()
400 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); in trigger_cntr_store() local
406 drvdata->trigger_cntr = val; in trigger_cntr_store()
424 struct etb_drvdata *drvdata; in etb_probe() local
436 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); in etb_probe()
437 if (!drvdata) in etb_probe()
440 drvdata->dev = &adev->dev; in etb_probe()
441 dev_set_drvdata(dev, drvdata); in etb_probe()
448 drvdata->base = base; in etb_probe()
450 spin_lock_init(&drvdata->spinlock); in etb_probe()
452 drvdata->clk = adev->pclk; in etb_probe()
453 ret = clk_prepare_enable(drvdata->clk); in etb_probe()
457 drvdata->buffer_depth = etb_get_buffer_depth(drvdata); in etb_probe()
458 clk_disable_unprepare(drvdata->clk); in etb_probe()
460 if (drvdata->buffer_depth < 0) in etb_probe()
463 drvdata->buf = devm_kzalloc(dev, in etb_probe()
464 drvdata->buffer_depth * 4, GFP_KERNEL); in etb_probe()
465 if (!drvdata->buf) in etb_probe()
478 drvdata->csdev = coresight_register(desc); in etb_probe()
479 if (IS_ERR(drvdata->csdev)) in etb_probe()
480 return PTR_ERR(drvdata->csdev); in etb_probe()
482 drvdata->miscdev.name = pdata->name; in etb_probe()
483 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR; in etb_probe()
484 drvdata->miscdev.fops = &etb_fops; in etb_probe()
485 ret = misc_register(&drvdata->miscdev); in etb_probe()
493 coresight_unregister(drvdata->csdev); in etb_probe()
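
Taken together, the probe hits follow the standard AMBA/CoreSight sink pattern: allocate drvdata, map the registers, read the buffer depth with the pclk held, allocate a capture buffer of buffer_depth * 4 bytes, register with the coresight core, then expose the misc device. A condensed sketch is below; the platform-data source, the devm_ioremap_resource() call, and the coresight_desc setup (including the etb_cs_ops name) do not appear in the matches and are assumptions about the omitted lines.

    static int etb_probe(struct amba_device *adev, const struct amba_id *id)
    {
            struct device *dev = &adev->dev;
            struct coresight_platform_data *pdata = adev->dev.platform_data;
            struct etb_drvdata *drvdata;
            struct coresight_desc *desc;
            void __iomem *base;
            int ret;

            drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
            if (!drvdata)
                    return -ENOMEM;

            drvdata->dev = &adev->dev;
            dev_set_drvdata(dev, drvdata);

            base = devm_ioremap_resource(dev, &adev->res);
            if (IS_ERR(base))
                    return PTR_ERR(base);
            drvdata->base = base;

            spin_lock_init(&drvdata->spinlock);

            /* the buffer depth can only be read with the bus clock running */
            drvdata->clk = adev->pclk;
            ret = clk_prepare_enable(drvdata->clk);
            if (ret)
                    return ret;
            drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
            clk_disable_unprepare(drvdata->clk);

            if (drvdata->buffer_depth < 0)
                    return -EINVAL;

            /* one 32-bit word per trace RAM entry */
            drvdata->buf = devm_kzalloc(dev, drvdata->buffer_depth * 4,
                                        GFP_KERNEL);
            if (!drvdata->buf)
                    return -ENOMEM;

            /* register as a CoreSight buffer sink */
            desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
            if (!desc)
                    return -ENOMEM;
            desc->type = CORESIGHT_DEV_TYPE_SINK;
            desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
            desc->ops = &etb_cs_ops;
            desc->pdata = pdata;
            desc->dev = dev;
            drvdata->csdev = coresight_register(desc);
            if (IS_ERR(drvdata->csdev))
                    return PTR_ERR(drvdata->csdev);

            /* expose the capture buffer through a misc character device */
            drvdata->miscdev.name = pdata->name;
            drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
            drvdata->miscdev.fops = &etb_fops;
            ret = misc_register(&drvdata->miscdev);
            if (ret)
                    goto err_misc_register;

            return 0;

    err_misc_register:
            coresight_unregister(drvdata->csdev);
            return ret;
    }
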
499 struct etb_drvdata *drvdata = amba_get_drvdata(adev); in etb_remove() local
501 misc_deregister(&drvdata->miscdev); in etb_remove()
502 coresight_unregister(drvdata->csdev); in etb_remove()