This source file includes the following definitions.
- vpu_cfg_writel
- vpu_cfg_readl
- vpu_running
- vpu_clock_disable
- vpu_clock_enable
- vpu_ipi_register
- vpu_ipi_send
- vpu_wdt_reset_func
- vpu_wdt_reg_handler
- vpu_get_vdec_hw_capa
- vpu_get_venc_hw_capa
- vpu_mapping_dm_addr
- vpu_get_plat_device
- load_requested_vpu
- vpu_load_firmware
- vpu_init_ipi_handler
- vpu_debug_read
- vpu_free_ext_mem
- vpu_alloc_ext_mem
- vpu_ipi_handler
- vpu_ipi_init
- vpu_irq_handler
- mtk_vpu_probe
- mtk_vpu_remove
1
2
3
4
5
6 #include <linux/clk.h>
7 #include <linux/debugfs.h>
8 #include <linux/firmware.h>
9 #include <linux/interrupt.h>
10 #include <linux/iommu.h>
11 #include <linux/module.h>
12 #include <linux/of_address.h>
13 #include <linux/of_irq.h>
14 #include <linux/of_platform.h>
15 #include <linux/of_reserved_mem.h>
16 #include <linux/sched.h>
17 #include <linux/sizes.h>
18 #include <linux/dma-mapping.h>
19
20 #include "mtk_vpu.h"
21
22
23
24
25
26
27
28 #define INIT_TIMEOUT_MS 2000U
29 #define IPI_TIMEOUT_MS 2000U
30 #define VPU_FW_VER_LEN 16
31
32
33 #define VPU_PTCM_SIZE (96 * SZ_1K)
34 #define VPU_DTCM_SIZE (32 * SZ_1K)
35
36 #define VPU_DTCM_OFFSET 0x18000UL
37
38 #define VPU_EXT_P_SIZE SZ_1M
39 #define VPU_EXT_D_SIZE SZ_4M
40
41 #define VPU_P_FW_SIZE (VPU_PTCM_SIZE + VPU_EXT_P_SIZE)
42 #define VPU_D_FW_SIZE (VPU_DTCM_SIZE + VPU_EXT_D_SIZE)
43
44 #define SHARE_BUF_SIZE 48
45
46
47 #define VPU_P_FW "vpu_p.bin"
48 #define VPU_D_FW "vpu_d.bin"
49
50 #define VPU_RESET 0x0
51 #define VPU_TCM_CFG 0x0008
52 #define VPU_PMEM_EXT0_ADDR 0x000C
53 #define VPU_PMEM_EXT1_ADDR 0x0010
54 #define VPU_TO_HOST 0x001C
55 #define VPU_DMEM_EXT0_ADDR 0x0014
56 #define VPU_DMEM_EXT1_ADDR 0x0018
57 #define HOST_TO_VPU 0x0024
58 #define VPU_PC_REG 0x0060
59 #define VPU_WDT_REG 0x0084
60
61
62 #define VPU_IPC_INT BIT(8)
63
64
65
66
67
68
69
70
/**
 * enum vpu_fw_type - VPU firmware image type
 * @P_FW: program firmware
 * @D_FW: data firmware
 */
enum vpu_fw_type {
	P_FW,
	D_FW,
};
75
76
77
78
79
80
81
82
/**
 * struct vpu_mem - VPU extended program/data memory information
 * @va:	kernel virtual address of the extended memory
 * @pa:	DMA (bus) address of the extended memory, programmed into the
 *	VPU's EXT address registers
 */
struct vpu_mem {
	void *va;
	dma_addr_t pa;
};
87
88
89
90
91
92
93
94
/**
 * struct vpu_regs - VPU register mappings
 * @tcm:	ioremapped VPU Tightly-Coupled Memory window
 * @cfg:	ioremapped VPU configuration register block
 * @irq:	VPU interrupt number
 */
struct vpu_regs {
	void __iomem *tcm;
	void __iomem *cfg;
	int irq;
};
100
101
102
103
104
105
106
/**
 * struct vpu_wdt_handler - VPU watchdog reset handler
 * @reset_func:	callback invoked after a watchdog timeout reset
 * @priv:	private data passed back to @reset_func
 */
struct vpu_wdt_handler {
	void (*reset_func)(void *);
	void *priv;
};
111
112
113
114
115
116
117
118
/**
 * struct vpu_wdt - VPU watchdog recovery state
 * @handler:	per-client reset handlers, indexed by enum rst_id
 * @ws:		work item running vpu_wdt_reset_func()
 * @wq:		dedicated workqueue the reset work is queued on
 */
struct vpu_wdt {
	struct vpu_wdt_handler handler[VPU_RST_MAX];
	struct work_struct ws;
	struct workqueue_struct *wq;
};
124
125
126
127
128
129
130
131
132
133
134
135
/**
 * struct vpu_run - VPU boot handshake status (filled by IPI_VPU_INIT)
 * @signaled:		set when the firmware reported initialization done
 * @fw_ver:		firmware version string reported by the VPU
 * @dec_capability:	decoder capability word reported by the VPU
 * @enc_capability:	encoder capability word reported by the VPU
 * @wq:			wait queue vpu_load_firmware() sleeps on until
 *			@signaled becomes true
 */
struct vpu_run {
	u32 signaled;
	char fw_ver[VPU_FW_VER_LEN];
	unsigned int dec_capability;
	unsigned int enc_capability;
	wait_queue_head_t wq;
};
143
144
145
146
147
148
149
150
/**
 * struct vpu_ipi_desc - registered IPI handler descriptor
 * @handler:	callback invoked when a message with this id arrives
 * @name:	human-readable handler name
 * @priv:	private data passed back to @handler
 */
struct vpu_ipi_desc {
	ipi_handler_t handler;
	const char *name;
	void *priv;
};
156
157
158
159
160
161
162
163
164
/**
 * struct share_obj - message buffer shared between the host and the VPU,
 *		      placed in the VPU's DTCM (see vpu_ipi_init())
 * @id:		IPI id of the message
 * @len:	number of valid bytes in @share_buf
 * @share_buf:	message payload
 */
struct share_obj {
	s32 id;
	u32 len;
	unsigned char share_buf[SHARE_BUF_SIZE];
};
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/**
 * struct mtk_vpu - VPU driver data
 * @extmem:	extended program/data memory, indexed by enum vpu_fw_type
 * @reg:	TCM and configuration register mappings
 * @run:	VPU boot handshake status
 * @wdt:	watchdog recovery state
 * @ipi_desc:	registered IPI handlers, indexed by enum ipi_id
 * @recv_buf:	DTCM share buffer for receiving; only read from the
 *		interrupt path (vpu_ipi_handler())
 * @send_buf:	DTCM share buffer for sending (vpu_ipi_send())
 * @dev:	VPU struct device
 * @clk:	VPU "main" clock
 * @fw_loaded:	true once both firmware images were downloaded and the
 *		VPU was released from reset
 * @enable_4GB:	true when more than 2 GiB of RAM is present; a 1 GiB
 *		offset is then added to extended-memory addresses
 * @vpu_mutex:	serializes VPU register/state access between clients
 * @wdt_refcnt:	clock/watchdog reference count; the watchdog is enabled
 *		on 0->1 and disabled on 1->0 transitions
 * @ack_wq:	wait queue for IPI acknowledgements; woken when the
 *		matching @ipi_id_ack entry is set
 * @ipi_id_ack:	per-IPI-id acknowledgement flags set from the interrupt
 *		path
 */
struct mtk_vpu {
	struct vpu_mem extmem[2];
	struct vpu_regs reg;
	struct vpu_run run;
	struct vpu_wdt wdt;
	struct vpu_ipi_desc ipi_desc[IPI_MAX];
	struct share_obj *recv_buf;
	struct share_obj *send_buf;
	struct device *dev;
	struct clk *clk;
	bool fw_loaded;
	bool enable_4GB;
	struct mutex vpu_mutex;
	u32 wdt_refcnt;
	wait_queue_head_t ack_wq;
	bool ipi_id_ack[IPI_MAX];
};
217
218 static inline void vpu_cfg_writel(struct mtk_vpu *vpu, u32 val, u32 offset)
219 {
220 writel(val, vpu->reg.cfg + offset);
221 }
222
223 static inline u32 vpu_cfg_readl(struct mtk_vpu *vpu, u32 offset)
224 {
225 return readl(vpu->reg.cfg + offset);
226 }
227
228 static inline bool vpu_running(struct mtk_vpu *vpu)
229 {
230 return vpu_cfg_readl(vpu, VPU_RESET) & BIT(0);
231 }
232
233 static void vpu_clock_disable(struct mtk_vpu *vpu)
234 {
235
236 mutex_lock(&vpu->vpu_mutex);
237 if (!--vpu->wdt_refcnt)
238 vpu_cfg_writel(vpu,
239 vpu_cfg_readl(vpu, VPU_WDT_REG) & ~(1L << 31),
240 VPU_WDT_REG);
241 mutex_unlock(&vpu->vpu_mutex);
242
243 clk_disable(vpu->clk);
244 }
245
246 static int vpu_clock_enable(struct mtk_vpu *vpu)
247 {
248 int ret;
249
250 ret = clk_enable(vpu->clk);
251 if (ret)
252 return ret;
253
254 mutex_lock(&vpu->vpu_mutex);
255 if (!vpu->wdt_refcnt++)
256 vpu_cfg_writel(vpu,
257 vpu_cfg_readl(vpu, VPU_WDT_REG) | (1L << 31),
258 VPU_WDT_REG);
259 mutex_unlock(&vpu->vpu_mutex);
260
261 return ret;
262 }
263
264 int vpu_ipi_register(struct platform_device *pdev,
265 enum ipi_id id, ipi_handler_t handler,
266 const char *name, void *priv)
267 {
268 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
269 struct vpu_ipi_desc *ipi_desc;
270
271 if (!vpu) {
272 dev_err(&pdev->dev, "vpu device in not ready\n");
273 return -EPROBE_DEFER;
274 }
275
276 if (id >= 0 && id < IPI_MAX && handler) {
277 ipi_desc = vpu->ipi_desc;
278 ipi_desc[id].name = name;
279 ipi_desc[id].handler = handler;
280 ipi_desc[id].priv = priv;
281 return 0;
282 }
283
284 dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n",
285 id);
286 return -EINVAL;
287 }
288 EXPORT_SYMBOL_GPL(vpu_ipi_register);
289
/**
 * vpu_ipi_send - send an IPI message to the VPU and wait for its ack
 * @pdev: VPU platform device
 * @id: IPI id; must be a client id strictly greater than IPI_VPU_INIT
 * @buf: message payload, copied into the DTCM send buffer
 * @len: payload length; at most sizeof(send_obj->share_buf)
 *
 * Return: 0 on success, -EINVAL for bad arguments or a stopped VPU,
 * -EIO when the previous command is never consumed or the ack times
 * out, or a vpu_clock_enable() error code.
 */
int vpu_ipi_send(struct platform_device *pdev,
		 enum ipi_id id, void *buf,
		 unsigned int len)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
	struct share_obj *send_obj = vpu->send_buf;
	unsigned long timeout;
	int ret = 0;

	if (id <= IPI_VPU_INIT || id >= IPI_MAX ||
	    len > sizeof(send_obj->share_buf) || !buf) {
		dev_err(vpu->dev, "failed to send ipi message\n");
		return -EINVAL;
	}

	/* Hold a clock reference for the whole send/ack round trip. */
	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(vpu->dev, "failed to enable vpu clock\n");
		return ret;
	}
	if (!vpu_running(vpu)) {
		dev_err(vpu->dev, "vpu_ipi_send: VPU is not running\n");
		ret = -EINVAL;
		goto clock_disable;
	}

	mutex_lock(&vpu->vpu_mutex);

	/* Busy-wait until the VPU consumed the previous command
	 * (HOST_TO_VPU reads back 0), bounded by IPI_TIMEOUT_MS.
	 */
	timeout = jiffies + msecs_to_jiffies(IPI_TIMEOUT_MS);
	do {
		if (time_after(jiffies, timeout)) {
			dev_err(vpu->dev, "vpu_ipi_send: IPI timeout!\n");
			ret = -EIO;
			goto mut_unlock;
		}
	} while (vpu_cfg_readl(vpu, HOST_TO_VPU));

	/* Fill the shared DTCM buffer, then ring the doorbell register. */
	memcpy((void *)send_obj->share_buf, buf, len);
	send_obj->len = len;
	send_obj->id = id;

	vpu->ipi_id_ack[id] = false;

	vpu_cfg_writel(vpu, 0x1, HOST_TO_VPU);

	mutex_unlock(&vpu->vpu_mutex);

	/* Sleep until vpu_ipi_handler() sets ipi_id_ack[id]. */
	timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
	ret = wait_event_timeout(vpu->ack_wq, vpu->ipi_id_ack[id], timeout);
	vpu->ipi_id_ack[id] = false;
	if (ret == 0) {
		dev_err(vpu->dev, "vpu ipi %d ack time out !", id);
		ret = -EIO;
		goto clock_disable;
	}
	vpu_clock_disable(vpu);

	return 0;

mut_unlock:
	mutex_unlock(&vpu->vpu_mutex);
clock_disable:
	vpu_clock_disable(vpu);

	return ret;
}
EXPORT_SYMBOL_GPL(vpu_ipi_send);
359
360 static void vpu_wdt_reset_func(struct work_struct *ws)
361 {
362 struct vpu_wdt *wdt = container_of(ws, struct vpu_wdt, ws);
363 struct mtk_vpu *vpu = container_of(wdt, struct mtk_vpu, wdt);
364 struct vpu_wdt_handler *handler = wdt->handler;
365 int index, ret;
366
367 dev_info(vpu->dev, "vpu reset\n");
368 ret = vpu_clock_enable(vpu);
369 if (ret) {
370 dev_err(vpu->dev, "[VPU] wdt enables clock failed %d\n", ret);
371 return;
372 }
373 mutex_lock(&vpu->vpu_mutex);
374 vpu_cfg_writel(vpu, 0x0, VPU_RESET);
375 vpu->fw_loaded = false;
376 mutex_unlock(&vpu->vpu_mutex);
377 vpu_clock_disable(vpu);
378
379 for (index = 0; index < VPU_RST_MAX; index++) {
380 if (handler[index].reset_func) {
381 handler[index].reset_func(handler[index].priv);
382 dev_dbg(vpu->dev, "wdt handler func %d\n", index);
383 }
384 }
385 }
386
387 int vpu_wdt_reg_handler(struct platform_device *pdev,
388 void wdt_reset(void *),
389 void *priv, enum rst_id id)
390 {
391 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
392 struct vpu_wdt_handler *handler;
393
394 if (!vpu) {
395 dev_err(&pdev->dev, "vpu device in not ready\n");
396 return -EPROBE_DEFER;
397 }
398
399 handler = vpu->wdt.handler;
400
401 if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
402 dev_dbg(vpu->dev, "wdt register id %d\n", id);
403 mutex_lock(&vpu->vpu_mutex);
404 handler[id].reset_func = wdt_reset;
405 handler[id].priv = priv;
406 mutex_unlock(&vpu->vpu_mutex);
407 return 0;
408 }
409
410 dev_err(vpu->dev, "register vpu wdt handler failed\n");
411 return -EINVAL;
412 }
413 EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
414
415 unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
416 {
417 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
418
419 return vpu->run.dec_capability;
420 }
421 EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
422
423 unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
424 {
425 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
426
427 return vpu->run.enc_capability;
428 }
429 EXPORT_SYMBOL_GPL(vpu_get_venc_hw_capa);
430
/**
 * vpu_mapping_dm_addr - map a VPU data-memory address to a kernel VA
 * @pdev: VPU platform device
 * @dtcm_dmem_addr: VPU-side data address; values below VPU_DTCM_SIZE
 *		    fall in the DTCM, larger ones in the extended memory
 *
 * Return: the kernel virtual address backing @dtcm_dmem_addr, or
 * ERR_PTR(-EINVAL) for zero or out-of-range addresses.
 */
void *vpu_mapping_dm_addr(struct platform_device *pdev,
			  u32 dtcm_dmem_addr)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);

	/*
	 * NOTE(review): the '>' comparison accepts an address equal to
	 * VPU_DTCM_SIZE + VPU_EXT_D_SIZE, which maps one byte past the
	 * extended buffer below - confirm whether '>=' was intended.
	 */
	if (!dtcm_dmem_addr ||
	    (dtcm_dmem_addr > (VPU_DTCM_SIZE + VPU_EXT_D_SIZE))) {
		dev_err(vpu->dev, "invalid virtual data memory address\n");
		return ERR_PTR(-EINVAL);
	}

	/* Addresses inside the DTCM map into the ioremapped TCM window. */
	if (dtcm_dmem_addr < VPU_DTCM_SIZE)
		return (__force void *)(dtcm_dmem_addr + vpu->reg.tcm +
					VPU_DTCM_OFFSET);

	/* Everything else lands in the extended data memory. */
	return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
}
EXPORT_SYMBOL_GPL(vpu_mapping_dm_addr);
449
/**
 * vpu_get_plat_device - resolve the VPU device a client node points to
 * @pdev: the client's platform device; its DT node must carry a
 *	  "mediatek,vpu" phandle
 *
 * Return: the VPU platform device, or NULL when the phandle or the
 * device behind it cannot be resolved. The node reference taken by
 * of_parse_phandle() is dropped here; the device reference taken by
 * of_find_device_by_node() is handed to the caller.
 */
struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *vpu_node;
	struct platform_device *vpu_pdev;

	vpu_node = of_parse_phandle(dev->of_node, "mediatek,vpu", 0);
	if (!vpu_node) {
		dev_err(dev, "can't get vpu node\n");
		return NULL;
	}

	vpu_pdev = of_find_device_by_node(vpu_node);
	/* The node reference is no longer needed once the device is found. */
	of_node_put(vpu_node);
	if (WARN_ON(!vpu_pdev)) {
		dev_err(dev, "vpu pdev failed\n");
		return NULL;
	}

	return vpu_pdev;
}
EXPORT_SYMBOL_GPL(vpu_get_plat_device);
472
473
/*
 * Download one firmware image (P_FW or D_FW) into the VPU.
 *
 * The first tcm_size bytes are copied into the on-chip TCM (data
 * firmware at VPU_DTCM_OFFSET); anything beyond the TCM capacity goes
 * into the extended DMA memory set up by vpu_alloc_ext_mem(). The VPU
 * is held in reset while its memories are rewritten.
 *
 * Returns 0 on success, -EFBIG when the image exceeds the combined
 * TCM + extended capacity, or a request_firmware() error code.
 */
static int load_requested_vpu(struct mtk_vpu *vpu,
			      u8 fw_type)
{
	size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE;
	size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE;
	char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW;
	const struct firmware *vpu_fw;
	size_t dl_size = 0;
	size_t extra_fw_size = 0;
	void *dest;
	int ret;

	ret = request_firmware(&vpu_fw, fw_name, vpu->dev);
	if (ret < 0) {
		dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, ret);
		return ret;
	}
	dl_size = vpu_fw->size;
	if (dl_size > fw_size) {
		dev_err(vpu->dev, "fw %s size %zu is abnormal\n", fw_name,
			dl_size);
		release_firmware(vpu_fw);
		return -EFBIG;
	}
	dev_dbg(vpu->dev, "Downloaded fw %s size: %zu.\n",
		fw_name,
		dl_size);

	/* Reset VPU before rewriting its memories. */
	vpu_cfg_writel(vpu, 0x0, VPU_RESET);

	/* Split the image: TCM first, overflow into extended memory. */
	if (dl_size > tcm_size) {
		dev_dbg(vpu->dev, "fw size %zu > limited fw size %zu\n",
			dl_size, tcm_size);
		extra_fw_size = dl_size - tcm_size;
		dev_dbg(vpu->dev, "extra_fw_size %zu\n", extra_fw_size);
		dl_size = tcm_size;
	}
	dest = (__force void *)vpu->reg.tcm;
	if (fw_type == D_FW)
		dest += VPU_DTCM_OFFSET;
	memcpy(dest, vpu_fw->data, dl_size);
	/* Download the remaining part to the extended memory, if any. */
	if (extra_fw_size > 0) {
		dest = vpu->extmem[fw_type].va;
		dev_dbg(vpu->dev, "download extended memory type %x\n",
			fw_type);
		memcpy(dest, vpu_fw->data + tcm_size, extra_fw_size);
	}

	release_firmware(vpu_fw);

	return 0;
}
528
529 int vpu_load_firmware(struct platform_device *pdev)
530 {
531 struct mtk_vpu *vpu;
532 struct device *dev = &pdev->dev;
533 struct vpu_run *run;
534 int ret;
535
536 if (!pdev) {
537 dev_err(dev, "VPU platform device is invalid\n");
538 return -EINVAL;
539 }
540
541 vpu = platform_get_drvdata(pdev);
542 run = &vpu->run;
543
544 mutex_lock(&vpu->vpu_mutex);
545 if (vpu->fw_loaded) {
546 mutex_unlock(&vpu->vpu_mutex);
547 return 0;
548 }
549 mutex_unlock(&vpu->vpu_mutex);
550
551 ret = vpu_clock_enable(vpu);
552 if (ret) {
553 dev_err(dev, "enable clock failed %d\n", ret);
554 return ret;
555 }
556
557 mutex_lock(&vpu->vpu_mutex);
558
559 run->signaled = false;
560 dev_dbg(vpu->dev, "firmware request\n");
561
562 ret = load_requested_vpu(vpu, P_FW);
563 if (ret < 0) {
564 dev_err(dev, "Failed to request %s, %d\n", VPU_P_FW, ret);
565 goto OUT_LOAD_FW;
566 }
567
568
569 ret = load_requested_vpu(vpu, D_FW);
570 if (ret < 0) {
571 dev_err(dev, "Failed to request %s, %d\n", VPU_D_FW, ret);
572 goto OUT_LOAD_FW;
573 }
574
575 vpu->fw_loaded = true;
576
577 vpu_cfg_writel(vpu, 0x1, VPU_RESET);
578
579 ret = wait_event_interruptible_timeout(run->wq,
580 run->signaled,
581 msecs_to_jiffies(INIT_TIMEOUT_MS)
582 );
583 if (ret == 0) {
584 ret = -ETIME;
585 dev_err(dev, "wait vpu initialization timeout!\n");
586 goto OUT_LOAD_FW;
587 } else if (-ERESTARTSYS == ret) {
588 dev_err(dev, "wait vpu interrupted by a signal!\n");
589 goto OUT_LOAD_FW;
590 }
591
592 ret = 0;
593 dev_info(dev, "vpu is ready. Fw version %s\n", run->fw_ver);
594
595 OUT_LOAD_FW:
596 mutex_unlock(&vpu->vpu_mutex);
597 vpu_clock_disable(vpu);
598
599 return ret;
600 }
601 EXPORT_SYMBOL_GPL(vpu_load_firmware);
602
603 static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
604 {
605 struct mtk_vpu *vpu = (struct mtk_vpu *)priv;
606 struct vpu_run *run = (struct vpu_run *)data;
607
608 vpu->run.signaled = run->signaled;
609 strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver));
610 vpu->run.dec_capability = run->dec_capability;
611 vpu->run.enc_capability = run->enc_capability;
612 wake_up_interruptible(&vpu->run.wq);
613 }
614
#ifdef CONFIG_DEBUG_FS
/*
 * debugfs read handler: snapshot the VPU run state, PC, watchdog and
 * doorbell registers and render them as text. Returns 0 (EOF) when the
 * clock cannot be enabled.
 */
static ssize_t vpu_debug_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	char buf[256];
	unsigned int len;
	unsigned int running, pc, vpu_to_host, host_to_vpu, wdt;
	int ret;
	struct device *dev = file->private_data;
	struct mtk_vpu *vpu = dev_get_drvdata(dev);

	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
		return 0;
	}

	/* Sample all registers before releasing the clock again. */
	running = vpu_running(vpu);
	pc = vpu_cfg_readl(vpu, VPU_PC_REG);
	wdt = vpu_cfg_readl(vpu, VPU_WDT_REG);
	host_to_vpu = vpu_cfg_readl(vpu, HOST_TO_VPU);
	vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
	vpu_clock_disable(vpu);

	if (running) {
		len = snprintf(buf, sizeof(buf), "VPU is running\n\n"
		"FW Version: %s\n"
		"PC: 0x%x\n"
		"WDT: 0x%x\n"
		"Host to VPU: 0x%x\n"
		"VPU to Host: 0x%x\n",
		vpu->run.fw_ver, pc, wdt,
		host_to_vpu, vpu_to_host);
	} else {
		len = snprintf(buf, sizeof(buf), "VPU not running\n");
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations vpu_debug_fops = {
	.open = simple_open,
	.read = vpu_debug_read,
};
#endif
661
662 static void vpu_free_ext_mem(struct mtk_vpu *vpu, u8 fw_type)
663 {
664 struct device *dev = vpu->dev;
665 size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
666
667 dma_free_coherent(dev, fw_ext_size, vpu->extmem[fw_type].va,
668 vpu->extmem[fw_type].pa);
669 }
670
/*
 * Allocate the extended (off-chip) firmware memory for @fw_type and
 * program its bus address into the VPU's EXT0/EXT1 registers.
 *
 * NOTE(review): writing 0x1 to the EXT0 register presumably enables
 * the extended-memory window - confirm against the VPU register
 * manual. In 4GB mode a 1 GiB offset (0x40000000) is added to the
 * address programmed into EXT1.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
{
	struct device *dev = vpu->dev;
	size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
	u32 vpu_ext_mem0 = fw_type ? VPU_DMEM_EXT0_ADDR : VPU_PMEM_EXT0_ADDR;
	u32 vpu_ext_mem1 = fw_type ? VPU_DMEM_EXT1_ADDR : VPU_PMEM_EXT1_ADDR;
	u32 offset_4gb = vpu->enable_4GB ? 0x40000000 : 0;

	vpu->extmem[fw_type].va = dma_alloc_coherent(dev,
						     fw_ext_size,
						     &vpu->extmem[fw_type].pa,
						     GFP_KERNEL);
	if (!vpu->extmem[fw_type].va) {
		dev_err(dev, "Failed to allocate the extended program memory\n");
		return -ENOMEM;
	}

	/* Program the window enable and the (page-aligned) base address. */
	vpu_cfg_writel(vpu, 0x1, vpu_ext_mem0);
	vpu_cfg_writel(vpu, (vpu->extmem[fw_type].pa & 0xFFFFF000) + offset_4gb,
		       vpu_ext_mem1);

	dev_info(dev, "%s extend memory phy=0x%llx virt=0x%p\n",
		 fw_type ? "Data" : "Program",
		 (unsigned long long)vpu->extmem[fw_type].pa,
		 vpu->extmem[fw_type].va);

	return 0;
}
700
701 static void vpu_ipi_handler(struct mtk_vpu *vpu)
702 {
703 struct share_obj *rcv_obj = vpu->recv_buf;
704 struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc;
705
706 if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) {
707 ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
708 rcv_obj->len,
709 ipi_desc[rcv_obj->id].priv);
710 if (rcv_obj->id > IPI_VPU_INIT) {
711 vpu->ipi_id_ack[rcv_obj->id] = true;
712 wake_up(&vpu->ack_wq);
713 }
714 } else {
715 dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id);
716 }
717 }
718
719 static int vpu_ipi_init(struct mtk_vpu *vpu)
720 {
721
722 vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
723
724
725 vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm +
726 VPU_DTCM_OFFSET);
727 vpu->send_buf = vpu->recv_buf + 1;
728 memset(vpu->recv_buf, 0, sizeof(struct share_obj));
729 memset(vpu->send_buf, 0, sizeof(struct share_obj));
730
731 return 0;
732 }
733
/*
 * VPU interrupt handler.
 *
 * Only clk_enable()/clk_disable() are used here (no prepare step,
 * which may sleep) since this runs in interrupt context; the senders
 * in this file hold a vpu_clock_enable() reference for the duration of
 * their request/ack cycle. A set VPU_IPC_INT bit means an IPI message;
 * any other cause is treated as a watchdog timeout and queues the
 * reset work.
 */
static irqreturn_t vpu_irq_handler(int irq, void *priv)
{
	struct mtk_vpu *vpu = priv;
	u32 vpu_to_host;
	int ret;

	ret = clk_enable(vpu->clk);
	if (ret) {
		dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
		return IRQ_NONE;
	}
	vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
	if (vpu_to_host & VPU_IPC_INT) {
		vpu_ipi_handler(vpu);
	} else {
		dev_err(vpu->dev, "vpu watchdog timeout! 0x%x", vpu_to_host);
		queue_work(vpu->wdt.wq, &vpu->wdt.ws);
	}

	/* VPU won't raise another interrupt until this one is acked. */
	vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
	clk_disable(vpu->clk);

	return IRQ_HANDLED;
}
764
#ifdef CONFIG_DEBUG_FS
/* Single debugfs entry shared by all instances of this driver. */
static struct dentry *vpu_debugfs;
#endif
/*
 * Probe: map the TCM and configuration registers, prepare the clock,
 * create the watchdog workqueue, initialize IPI, allocate the extended
 * program/data memories and request the VPU interrupt. The clock is
 * held enabled only for the duration of the register setup.
 */
static int mtk_vpu_probe(struct platform_device *pdev)
{
	struct mtk_vpu *vpu;
	struct device *dev;
	struct resource *res;
	int ret = 0;

	dev_dbg(&pdev->dev, "initialization\n");

	dev = &pdev->dev;
	vpu = devm_kzalloc(dev, sizeof(*vpu), GFP_KERNEL);
	if (!vpu)
		return -ENOMEM;

	vpu->dev = &pdev->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcm");
	vpu->reg.tcm = devm_ioremap_resource(dev, res);
	if (IS_ERR((__force void *)vpu->reg.tcm))
		return PTR_ERR((__force void *)vpu->reg.tcm);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_reg");
	vpu->reg.cfg = devm_ioremap_resource(dev, res);
	if (IS_ERR((__force void *)vpu->reg.cfg))
		return PTR_ERR((__force void *)vpu->reg.cfg);

	/* Get VPU clock. */
	vpu->clk = devm_clk_get(dev, "main");
	if (IS_ERR(vpu->clk)) {
		dev_err(dev, "get vpu clock failed\n");
		return PTR_ERR(vpu->clk);
	}

	platform_set_drvdata(pdev, vpu);

	ret = clk_prepare(vpu->clk);
	if (ret) {
		dev_err(dev, "prepare vpu clock failed\n");
		return ret;
	}

	/* VPU watchdog recovery workqueue. */
	vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
	if (!vpu->wdt.wq) {
		dev_err(dev, "initialize wdt workqueue failed\n");
		return -ENOMEM;
	}
	INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
	mutex_init(&vpu->vpu_mutex);

	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(dev, "enable vpu clock failed\n");
		goto workqueue_destroy;
	}

	dev_dbg(dev, "vpu ipi init\n");
	ret = vpu_ipi_init(vpu);
	if (ret) {
		dev_err(dev, "Failed to init ipi\n");
		goto disable_vpu_clk;
	}

	/* Register the boot-handshake handler before booting the VPU. */
	ret = vpu_ipi_register(pdev, IPI_VPU_INIT, vpu_init_ipi_handler,
			       "vpu_init", vpu);
	if (ret) {
		dev_err(dev, "Failed to register IPI_VPU_INIT\n");
		goto vpu_mutex_destroy;
	}

#ifdef CONFIG_DEBUG_FS
	vpu_debugfs = debugfs_create_file("mtk_vpu", S_IRUGO, NULL, (void *)dev,
					  &vpu_debug_fops);
	if (!vpu_debugfs) {
		ret = -ENOMEM;
		goto cleanup_ipi;
	}
#endif

	/* Configure the TCM. */
	vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG);

	/* 4GB mode kicks in above 2 GiB of installed RAM. */
	vpu->enable_4GB = !!(totalram_pages() > (SZ_2G >> PAGE_SHIFT));
	dev_info(dev, "4GB mode %u\n", vpu->enable_4GB);

	if (vpu->enable_4GB) {
		ret = of_reserved_mem_device_init(dev);
		if (ret)
			dev_info(dev, "init reserved memory failed\n");
		/* Continue to use dynamic allocation if failed. */
	}

	ret = vpu_alloc_ext_mem(vpu, D_FW);
	if (ret) {
		dev_err(dev, "Allocate DM failed\n");
		goto remove_debugfs;
	}

	ret = vpu_alloc_ext_mem(vpu, P_FW);
	if (ret) {
		dev_err(dev, "Allocate PM failed\n");
		goto free_d_mem;
	}

	init_waitqueue_head(&vpu->run.wq);
	init_waitqueue_head(&vpu->ack_wq);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "get IRQ resource failed.\n");
		ret = -ENXIO;
		goto free_p_mem;
	}
	/* NOTE(review): platform_get_irq() can return a negative errno,
	 * which is passed to devm_request_irq() unchecked - confirm the
	 * preceding resource check is sufficient on all platforms.
	 */
	vpu->reg.irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0,
			       pdev->name, vpu);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto free_p_mem;
	}

	vpu_clock_disable(vpu);
	dev_dbg(dev, "initialization completed\n");

	return 0;

free_p_mem:
	vpu_free_ext_mem(vpu, P_FW);
free_d_mem:
	vpu_free_ext_mem(vpu, D_FW);
remove_debugfs:
	/* NOTE(review): released even when enable_4GB was false and
	 * of_reserved_mem_device_init() never ran - verify this is safe.
	 */
	of_reserved_mem_device_release(dev);
#ifdef CONFIG_DEBUG_FS
	debugfs_remove(vpu_debugfs);
cleanup_ipi:
#endif
	memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX);
vpu_mutex_destroy:
	mutex_destroy(&vpu->vpu_mutex);
disable_vpu_clk:
	vpu_clock_disable(vpu);
workqueue_destroy:
	destroy_workqueue(vpu->wdt.wq);

	return ret;
}
914
915 static const struct of_device_id mtk_vpu_match[] = {
916 {
917 .compatible = "mediatek,mt8173-vpu",
918 },
919 {},
920 };
921 MODULE_DEVICE_TABLE(of, mtk_vpu_match);
922
923 static int mtk_vpu_remove(struct platform_device *pdev)
924 {
925 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
926
927 #ifdef CONFIG_DEBUG_FS
928 debugfs_remove(vpu_debugfs);
929 #endif
930 if (vpu->wdt.wq) {
931 flush_workqueue(vpu->wdt.wq);
932 destroy_workqueue(vpu->wdt.wq);
933 }
934 vpu_free_ext_mem(vpu, P_FW);
935 vpu_free_ext_mem(vpu, D_FW);
936 mutex_destroy(&vpu->vpu_mutex);
937 clk_unprepare(vpu->clk);
938
939 return 0;
940 }
941
942 static struct platform_driver mtk_vpu_driver = {
943 .probe = mtk_vpu_probe,
944 .remove = mtk_vpu_remove,
945 .driver = {
946 .name = "mtk_vpu",
947 .of_match_table = mtk_vpu_match,
948 },
949 };
950
951 module_platform_driver(mtk_vpu_driver);
952
953 MODULE_LICENSE("GPL v2");
954 MODULE_DESCRIPTION("Mediatek Video Processor Unit driver");