This source file includes the following definitions:
- venus_helper_check_codec
- venus_helper_queue_dpb_bufs
- venus_helper_free_dpb_bufs
- venus_helper_alloc_dpb_bufs
- intbufs_set_buffer
- intbufs_unset_buffers
- venus_helper_intbufs_alloc
- venus_helper_intbufs_free
- venus_helper_intbufs_realloc
- load_per_instance
- load_per_type
- venus_helper_load_scale_clocks
- fill_buffer_desc
- return_buf_error
- put_ts_metadata
- venus_helper_get_ts_metadata
- session_process_buf
- is_dynamic_bufmode
- venus_helper_unregister_bufs
- session_register_bufs
- to_hfi_raw_fmt
- venus_helper_get_bufreq
- get_framesize_raw_nv12
- get_framesize_raw_nv12_ubwc
- venus_helper_get_framesz_raw
- venus_helper_get_framesz
- venus_helper_set_input_resolution
- venus_helper_set_output_resolution
- venus_helper_set_work_mode
- venus_helper_set_core_usage
- venus_helper_set_num_bufs
- venus_helper_set_raw_format
- venus_helper_set_color_format
- venus_helper_set_multistream
- venus_helper_set_dyn_bufmode
- venus_helper_set_bufsize
- venus_helper_get_opb_size
- delayed_process_buf_func
- venus_helper_release_buf_ref
- venus_helper_acquire_buf_ref
- is_buf_refed
- venus_helper_find_buf
- venus_helper_vb2_buf_init
- venus_helper_vb2_buf_prepare
- venus_helper_vb2_buf_queue
- venus_helper_buffers_done
- venus_helper_vb2_stop_streaming
- venus_helper_process_initial_cap_bufs
- venus_helper_process_initial_out_bufs
- venus_helper_vb2_start_streaming
- venus_helper_m2m_device_run
- venus_helper_m2m_job_abort
- venus_helper_init_instance
- find_fmt_from_caps
- venus_helper_get_out_fmts
- venus_helper_power_enable
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/videobuf2-dma-sg.h>
#include <media/v4l2-mem2mem.h>
#include <asm/div64.h>

#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"
#include "hfi_venus_io.h"

struct intbuf {
	struct list_head list;
	u32 type;
	size_t size;
	void *va;
	dma_addr_t da;
	unsigned long attrs;
};

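/*
 * Check whether the given V4L2 pixel format maps to a codec that the
 * firmware advertised for this session type (encoder or decoder).
 */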
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
{
	struct venus_core *core = inst->core;
	u32 session_type = inst->session_type;
	u32 codec;

	switch (v4l2_pixfmt) {
	case V4L2_PIX_FMT_H264:
		codec = HFI_VIDEO_CODEC_H264;
		break;
	case V4L2_PIX_FMT_H263:
		codec = HFI_VIDEO_CODEC_H263;
		break;
	case V4L2_PIX_FMT_MPEG1:
		codec = HFI_VIDEO_CODEC_MPEG1;
		break;
	case V4L2_PIX_FMT_MPEG2:
		codec = HFI_VIDEO_CODEC_MPEG2;
		break;
	case V4L2_PIX_FMT_MPEG4:
		codec = HFI_VIDEO_CODEC_MPEG4;
		break;
	case V4L2_PIX_FMT_VC1_ANNEX_G:
	case V4L2_PIX_FMT_VC1_ANNEX_L:
		codec = HFI_VIDEO_CODEC_VC1;
		break;
	case V4L2_PIX_FMT_VP8:
		codec = HFI_VIDEO_CODEC_VP8;
		break;
	case V4L2_PIX_FMT_VP9:
		codec = HFI_VIDEO_CODEC_VP9;
		break;
	case V4L2_PIX_FMT_XVID:
		codec = HFI_VIDEO_CODEC_DIVX;
		break;
	case V4L2_PIX_FMT_HEVC:
		codec = HFI_VIDEO_CODEC_HEVC;
		break;
	default:
		return false;
	}

	if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
		return true;

	if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);

int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
{
	struct intbuf *buf;
	int ret = 0;

	list_for_each_entry(buf, &inst->dpbbufs, list) {
		struct hfi_frame_data fdata;

		memset(&fdata, 0, sizeof(fdata));
		fdata.alloc_len = buf->size;
		fdata.device_addr = buf->da;
		fdata.buffer_type = buf->type;

		ret = hfi_session_process_buf(inst, &fdata);
		if (ret)
			goto fail;
	}

fail:
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_queue_dpb_bufs);

int venus_helper_free_dpb_bufs(struct venus_inst *inst)
{
	struct intbuf *buf, *n;

	list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
		list_del_init(&buf->list);
		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
			       buf->attrs);
		kfree(buf);
	}

	INIT_LIST_HEAD(&inst->dpbbufs);

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);

int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	enum hfi_version ver = core->res->hfi_version;
	struct hfi_buffer_requirements bufreq;
	u32 buftype = inst->dpb_buftype;
	unsigned int dpb_size = 0;
	struct intbuf *buf;
	unsigned int i;
	u32 count;
	int ret;

	/* no need to allocate dpb buffers */
	if (!inst->dpb_fmt)
		return 0;

	if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
		dpb_size = inst->output_buf_size;
	else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
		dpb_size = inst->output2_buf_size;

	if (!dpb_size)
		return 0;

	ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
	if (ret)
		return ret;

	count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);

	for (i = 0; i < count; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail;
		}

		buf->type = buftype;
		buf->size = dpb_size;
		buf->attrs = DMA_ATTR_WRITE_COMBINE |
			     DMA_ATTR_NO_KERNEL_MAPPING;
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
					  buf->attrs);
		if (!buf->va) {
			kfree(buf);
			ret = -ENOMEM;
			goto fail;
		}

		list_add_tail(&buf->list, &inst->dpbbufs);
	}

	return 0;

fail:
	venus_helper_free_dpb_bufs(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);

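/*
 * Allocate the firmware-internal (scratch/persist) buffers of the given type
 * according to the reported buffer requirements and hand them to the session;
 * types with a zero reported size are skipped.
 */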
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_requirements bufreq;
	struct hfi_buffer_desc bd;
	struct intbuf *buf;
	unsigned int i;
	int ret;

	ret = venus_helper_get_bufreq(inst, type, &bufreq);
	if (ret)
		return 0;

	if (!bufreq.size)
		return 0;

	for (i = 0; i < bufreq.count_actual; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail;
		}

		buf->type = bufreq.type;
		buf->size = bufreq.size;
		buf->attrs = DMA_ATTR_WRITE_COMBINE |
			     DMA_ATTR_NO_KERNEL_MAPPING;
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
					  buf->attrs);
		if (!buf->va) {
			ret = -ENOMEM;
			goto fail;
		}

		memset(&bd, 0, sizeof(bd));
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;

		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "set session buffers failed\n");
			goto dma_free;
		}

		list_add_tail(&buf->list, &inst->internalbufs);
	}

	return 0;

dma_free:
	dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
fail:
	kfree(buf);
	return ret;
}

static int intbufs_unset_buffers(struct venus_inst *inst)
{
	struct hfi_buffer_desc bd = {0};
	struct intbuf *buf, *n;
	int ret = 0;

	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;
		bd.response_required = true;

		ret = hfi_session_unset_buffers(inst, &bd);

		list_del_init(&buf->list);
		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
			       buf->attrs);
		kfree(buf);
	}

	return ret;
}

static const unsigned int intbuf_types_1xx[] = {
	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};

static const unsigned int intbuf_types_4xx[] = {
	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
	HFI_BUFFER_INTERNAL_PERSIST,
	HFI_BUFFER_INTERNAL_PERSIST_1,
};

int venus_helper_intbufs_alloc(struct venus_inst *inst)
{
	const unsigned int *intbuf;
	size_t arr_sz, i;
	int ret;

	if (IS_V4(inst->core)) {
		arr_sz = ARRAY_SIZE(intbuf_types_4xx);
		intbuf = intbuf_types_4xx;
	} else {
		arr_sz = ARRAY_SIZE(intbuf_types_1xx);
		intbuf = intbuf_types_1xx;
	}

	for (i = 0; i < arr_sz; i++) {
		ret = intbufs_set_buffer(inst, intbuf[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	intbufs_unset_buffers(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_alloc);

int venus_helper_intbufs_free(struct venus_inst *inst)
{
	return intbufs_unset_buffers(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_free);

int venus_helper_intbufs_realloc(struct venus_inst *inst)
{
	enum hfi_version ver = inst->core->res->hfi_version;
	struct hfi_buffer_desc bd;
	struct intbuf *buf, *n;
	int ret;

	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
		if (buf->type == HFI_BUFFER_INTERNAL_PERSIST ||
		    buf->type == HFI_BUFFER_INTERNAL_PERSIST_1)
			continue;

		memset(&bd, 0, sizeof(bd));
		bd.buffer_size = buf->size;
		bd.buffer_type = buf->type;
		bd.num_buffers = 1;
		bd.device_addr = buf->da;
		bd.response_required = true;

		ret = hfi_session_unset_buffers(inst, &bd);

		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
			       buf->attrs);

		list_del_init(&buf->list);
		kfree(buf);
	}

	ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH(ver));
	if (ret)
		goto err;

	ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_1(ver));
	if (ret)
		goto err;

	ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_2(ver));
	if (ret)
		goto err;

	return 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc);

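/*
 * The load of one active instance, expressed in macroblocks per second:
 * the number of 16x16 macroblocks per frame times the frame rate.
 */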
static u32 load_per_instance(struct venus_inst *inst)
{
	u32 mbs;

	if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
		return 0;

	mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);

	return mbs * inst->fps;
}

static u32 load_per_type(struct venus_core *core, u32 session_type)
{
	struct venus_inst *inst = NULL;
	u32 mbs_per_sec = 0;

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->session_type != session_type)
			continue;

		mbs_per_sec += load_per_instance(inst);
	}
	mutex_unlock(&core->lock);

	return mbs_per_sec;
}

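/*
 * Select a rate from the per-SoC frequency table based on the aggregate
 * encoder and decoder load (in macroblocks per second) and program it on the
 * core and both vcodec clocks; an idle core gets the last table entry.
 */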
int venus_helper_load_scale_clocks(struct venus_core *core)
{
	const struct freq_tbl *table = core->res->freq_tbl;
	unsigned int num_rows = core->res->freq_tbl_size;
	unsigned long freq = table[0].freq;
	struct clk *clk = core->clks[0];
	struct device *dev = core->dev;
	u32 mbs_per_sec;
	unsigned int i;
	int ret;

	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
		      load_per_type(core, VIDC_SESSION_TYPE_DEC);

	if (mbs_per_sec > core->res->max_load)
		dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
			 mbs_per_sec, core->res->max_load);

	if (!mbs_per_sec && num_rows > 1) {
		freq = table[num_rows - 1].freq;
		goto set_freq;
	}

	for (i = 0; i < num_rows; i++) {
		if (mbs_per_sec > table[i].load)
			break;
		freq = table[i].freq;
	}

set_freq:

	ret = clk_set_rate(clk, freq);
	if (ret)
		goto err;

	ret = clk_set_rate(core->core0_clk, freq);
	if (ret)
		goto err;

	ret = clk_set_rate(core->core1_clk, freq);
	if (ret)
		goto err;

	return 0;

err:
	dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);

static void fill_buffer_desc(const struct venus_buffer *buf,
			     struct hfi_buffer_desc *bd, bool response)
{
	memset(bd, 0, sizeof(*bd));
	bd->buffer_type = HFI_BUFFER_OUTPUT;
	bd->buffer_size = buf->size;
	bd->num_buffers = 1;
	bd->device_addr = buf->dma_addr;
	bd->response_required = response;
}

static void return_buf_error(struct venus_inst *inst,
			     struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
	else
		v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);

	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}

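/*
 * Stash the timestamp, timecode and flags of a queued OUTPUT buffer in a free
 * slot, keyed by the timestamp in microseconds, so that they can be restored
 * on the matching CAPTURE buffer by venus_helper_get_ts_metadata().
 */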
static void
put_ts_metadata(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct vb2_buffer *vb = &vbuf->vb2_buf;
	unsigned int i;
	int slot = -1;
	u64 ts_us = vb->timestamp;

	for (i = 0; i < ARRAY_SIZE(inst->tss); i++) {
		if (!inst->tss[i].used) {
			slot = i;
			break;
		}
	}

	if (slot == -1) {
		dev_dbg(inst->core->dev, "%s: no free slot\n", __func__);
		return;
	}

	do_div(ts_us, NSEC_PER_USEC);

	inst->tss[slot].used = true;
	inst->tss[slot].flags = vbuf->flags;
	inst->tss[slot].tc = vbuf->timecode;
	inst->tss[slot].ts_us = ts_us;
	inst->tss[slot].ts_ns = vb->timestamp;
}

void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
				  struct vb2_v4l2_buffer *vbuf)
{
	struct vb2_buffer *vb = &vbuf->vb2_buf;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
		if (!inst->tss[i].used)
			continue;

		if (inst->tss[i].ts_us != timestamp_us)
			continue;

		inst->tss[i].used = false;
		vbuf->flags |= inst->tss[i].flags;
		vbuf->timecode = inst->tss[i].tc;
		vb->timestamp = inst->tss[i].ts_ns;
		break;
	}
}
EXPORT_SYMBOL_GPL(venus_helper_get_ts_metadata);

static int
session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);
	struct vb2_buffer *vb = &vbuf->vb2_buf;
	unsigned int type = vb->type;
	struct hfi_frame_data fdata;
	int ret;

	memset(&fdata, 0, sizeof(fdata));
	fdata.alloc_len = buf->size;
	fdata.device_addr = buf->dma_addr;
	fdata.timestamp = vb->timestamp;
	do_div(fdata.timestamp, NSEC_PER_USEC);
	fdata.flags = 0;
	fdata.clnt_data = vbuf->vb2_buf.index;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		fdata.buffer_type = HFI_BUFFER_INPUT;
		fdata.filled_len = vb2_get_plane_payload(vb, 0);
		fdata.offset = vb->planes[0].data_offset;

		if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
			fdata.flags |= HFI_BUFFERFLAG_EOS;

		if (inst->session_type == VIDC_SESSION_TYPE_DEC)
			put_ts_metadata(inst, vbuf);
	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		if (inst->session_type == VIDC_SESSION_TYPE_ENC)
			fdata.buffer_type = HFI_BUFFER_OUTPUT;
		else
			fdata.buffer_type = inst->opb_buftype;
		fdata.filled_len = 0;
		fdata.offset = 0;
	}

	ret = hfi_session_process_buf(inst, &fdata);
	if (ret)
		return ret;

	return 0;
}

static bool is_dynamic_bufmode(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct venus_caps *caps;

	/*
	 * v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports
	 * dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2.
	 */
	if (IS_V4(core))
		return true;

	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
	if (!caps)
		return false;

	return caps->cap_bufs_mode_dynamic;
}

int venus_helper_unregister_bufs(struct venus_inst *inst)
{
	struct venus_buffer *buf, *n;
	struct hfi_buffer_desc bd;
	int ret = 0;

	if (is_dynamic_bufmode(inst))
		return 0;

	list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
		fill_buffer_desc(buf, &bd, true);
		ret = hfi_session_unset_buffers(inst, &bd);
		list_del_init(&buf->reg_list);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_unregister_bufs);

static int session_register_bufs(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	struct device *dev = core->dev;
	struct hfi_buffer_desc bd;
	struct venus_buffer *buf;
	int ret = 0;

	if (is_dynamic_bufmode(inst))
		return 0;

	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
		fill_buffer_desc(buf, &bd, false);
		ret = hfi_session_set_buffers(inst, &bd);
		if (ret) {
			dev_err(dev, "%s: set buffer failed\n", __func__);
			break;
		}
	}

	return ret;
}

static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
{
	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_NV12:
		return HFI_COLOR_FORMAT_NV12;
	case V4L2_PIX_FMT_NV21:
		return HFI_COLOR_FORMAT_NV21;
	default:
		break;
	}

	return 0;
}

int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
			    struct hfi_buffer_requirements *req)
{
	u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
	union hfi_get_property hprop;
	unsigned int i;
	int ret;

	if (req)
		memset(req, 0, sizeof(*req));

	ret = hfi_session_get_property(inst, ptype, &hprop);
	if (ret)
		return ret;

	ret = -EINVAL;

	for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
		if (hprop.bufreq[i].type != type)
			continue;

		if (req)
			memcpy(req, &hprop.bufreq[i], sizeof(*req));
		ret = 0;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);

static u32 get_framesize_raw_nv12(u32 width, u32 height)
{
	u32 y_stride, uv_stride, y_plane;
	u32 y_sclines, uv_sclines, uv_plane;
	u32 size;

	y_stride = ALIGN(width, 128);
	uv_stride = ALIGN(width, 128);
	y_sclines = ALIGN(height, 32);
	uv_sclines = ALIGN(((height + 1) >> 1), 16);

	y_plane = y_stride * y_sclines;
	uv_plane = uv_stride * uv_sclines + SZ_4K;
	size = y_plane + uv_plane + SZ_8K;

	return ALIGN(size, SZ_4K);
}

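/*
 * NV12 UBWC frames carry per-plane compression metadata, so the frame size is
 * the sum of the Y/UV pixel planes and their meta planes, each aligned to 4K,
 * plus room for extradata.
 */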
static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
{
	u32 y_meta_stride, y_meta_plane;
	u32 y_stride, y_plane;
	u32 uv_meta_stride, uv_meta_plane;
	u32 uv_stride, uv_plane;
	u32 extradata = SZ_16K;

	y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
	y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
	y_meta_plane = ALIGN(y_meta_plane, SZ_4K);

	y_stride = ALIGN(width, 128);
	y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);

	uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
	uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
	uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);

	uv_stride = ALIGN(width, 128);
	uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);

	return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
		     max(extradata, y_stride * 48), SZ_4K);
}

u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
{
	switch (hfi_fmt) {
	case HFI_COLOR_FORMAT_NV12:
	case HFI_COLOR_FORMAT_NV21:
		return get_framesize_raw_nv12(width, height);
	case HFI_COLOR_FORMAT_NV12_UBWC:
		return get_framesize_raw_nv12_ubwc(width, height);
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);

u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
{
	u32 hfi_fmt, sz;
	bool compressed;

	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_MPEG:
	case V4L2_PIX_FMT_H264:
	case V4L2_PIX_FMT_H264_NO_SC:
	case V4L2_PIX_FMT_H264_MVC:
	case V4L2_PIX_FMT_H263:
	case V4L2_PIX_FMT_MPEG1:
	case V4L2_PIX_FMT_MPEG2:
	case V4L2_PIX_FMT_MPEG4:
	case V4L2_PIX_FMT_XVID:
	case V4L2_PIX_FMT_VC1_ANNEX_G:
	case V4L2_PIX_FMT_VC1_ANNEX_L:
	case V4L2_PIX_FMT_VP8:
	case V4L2_PIX_FMT_VP9:
	case V4L2_PIX_FMT_HEVC:
		compressed = true;
		break;
	default:
		compressed = false;
		break;
	}

	if (compressed) {
		sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
		return ALIGN(sz, SZ_4K);
	}

	hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
	if (!hfi_fmt)
		return 0;

	return venus_helper_get_framesz_raw(hfi_fmt, width, height);
}
EXPORT_SYMBOL_GPL(venus_helper_get_framesz);

int venus_helper_set_input_resolution(struct venus_inst *inst,
				      unsigned int width, unsigned int height)
{
	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
	struct hfi_framesize fs;

	fs.buffer_type = HFI_BUFFER_INPUT;
	fs.width = width;
	fs.height = height;

	return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);

int venus_helper_set_output_resolution(struct venus_inst *inst,
				       unsigned int width, unsigned int height,
				       u32 buftype)
{
	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
	struct hfi_framesize fs;

	fs.buffer_type = buftype;
	fs.width = width;
	fs.height = height;

	return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);

int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
{
	const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
	struct hfi_video_work_mode wm;

	if (!IS_V4(inst->core))
		return 0;

	wm.video_work_mode = mode;

	return hfi_session_set_property(inst, ptype, &wm);
}
EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);

int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
{
	const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
	struct hfi_videocores_usage_type cu;

	if (!IS_V4(inst->core))
		return 0;

	cu.video_core_enable_mask = usage;

	return hfi_session_set_property(inst, ptype, &cu);
}
EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);

int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
			      unsigned int output_bufs,
			      unsigned int output2_bufs)
{
	u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
	struct hfi_buffer_count_actual buf_count;
	int ret;

	buf_count.type = HFI_BUFFER_INPUT;
	buf_count.count_actual = input_bufs;

	ret = hfi_session_set_property(inst, ptype, &buf_count);
	if (ret)
		return ret;

	buf_count.type = HFI_BUFFER_OUTPUT;
	buf_count.count_actual = output_bufs;

	ret = hfi_session_set_property(inst, ptype, &buf_count);
	if (ret)
		return ret;

	if (output2_bufs) {
		buf_count.type = HFI_BUFFER_OUTPUT2;
		buf_count.count_actual = output2_bufs;

		ret = hfi_session_set_property(inst, ptype, &buf_count);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);

int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
				u32 buftype)
{
	const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
	struct hfi_uncompressed_format_select fmt;

	fmt.buffer_type = buftype;
	fmt.format = hfi_format;

	return hfi_session_set_property(inst, ptype, &fmt);
}
EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);

int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
{
	u32 hfi_format, buftype;

	if (inst->session_type == VIDC_SESSION_TYPE_DEC)
		buftype = HFI_BUFFER_OUTPUT;
	else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
		buftype = HFI_BUFFER_INPUT;
	else
		return -EINVAL;

	hfi_format = to_hfi_raw_fmt(pixfmt);
	if (!hfi_format)
		return -EINVAL;

	return venus_helper_set_raw_format(inst, hfi_format, buftype);
}
EXPORT_SYMBOL_GPL(venus_helper_set_color_format);

int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
				 bool out2_en)
{
	struct hfi_multi_stream multi = {0};
	u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
	int ret;

	multi.buffer_type = HFI_BUFFER_OUTPUT;
	multi.enable = out_en;

	ret = hfi_session_set_property(inst, ptype, &multi);
	if (ret)
		return ret;

	multi.buffer_type = HFI_BUFFER_OUTPUT2;
	multi.enable = out2_en;

	return hfi_session_set_property(inst, ptype, &multi);
}
EXPORT_SYMBOL_GPL(venus_helper_set_multistream);

int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
{
	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
	struct hfi_buffer_alloc_mode mode;
	int ret;

	if (!is_dynamic_bufmode(inst))
		return 0;

	mode.type = HFI_BUFFER_OUTPUT;
	mode.mode = HFI_BUFFER_MODE_DYNAMIC;

	ret = hfi_session_set_property(inst, ptype, &mode);
	if (ret)
		return ret;

	mode.type = HFI_BUFFER_OUTPUT2;

	return hfi_session_set_property(inst, ptype, &mode);
}
EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);

int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
{
	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
	struct hfi_buffer_size_actual bufsz;

	bufsz.type = buftype;
	bufsz.size = bufsize;

	return hfi_session_set_property(inst, ptype, &bufsz);
}
EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);

unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
{
	/* the encoder has only one output */
	if (inst->session_type == VIDC_SESSION_TYPE_ENC)
		return inst->output_buf_size;

	if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
		return inst->output_buf_size;
	else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
		return inst->output2_buf_size;

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);

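/*
 * Worker that requeues delayed capture buffers once the firmware has dropped
 * its read-only reference on them; buffers still marked read-only are left on
 * the delayed_process list.
 */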
static void delayed_process_buf_func(struct work_struct *work)
{
	struct venus_buffer *buf, *n;
	struct venus_inst *inst;
	int ret;

	inst = container_of(work, struct venus_inst, delayed_process_work);

	mutex_lock(&inst->lock);

	if (!(inst->streamon_out & inst->streamon_cap))
		goto unlock;

	list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
		if (buf->flags & HFI_BUFFERFLAG_READONLY)
			continue;

		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);

		list_del_init(&buf->ref_list);
	}
unlock:
	mutex_unlock(&inst->lock);
}

void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
{
	struct venus_buffer *buf;

	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
		if (buf->vb.vb2_buf.index == idx) {
			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
			schedule_work(&inst->delayed_process_work);
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);

void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	buf->flags |= HFI_BUFFERFLAG_READONLY;
}
EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);

static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
	struct venus_buffer *buf = to_venus_buffer(vbuf);

	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
		list_add_tail(&buf->ref_list, &inst->delayed_process);
		schedule_work(&inst->delayed_process_work);
		return 1;
	}

	return 0;
}

struct vb2_v4l2_buffer *
venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
	else
		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);

int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_buffer *buf = to_venus_buffer(vbuf);
	struct sg_table *sgt;

	sgt = vb2_dma_sg_plane_desc(vb, 0);
	if (!sgt)
		return -EFAULT;

	buf->size = vb2_plane_size(vb, 0);
	buf->dma_addr = sg_dma_address(sgt->sgl);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		list_add_tail(&buf->reg_list, &inst->registeredbufs);

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);

int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	unsigned int out_buf_size = venus_helper_get_opb_size(inst);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_err(inst->core->dev, "%s field isn't supported\n",
				__func__);
			return -EINVAL;
		}
	}

	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    vb2_plane_size(vb, 0) < out_buf_size)
		return -EINVAL;
	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
	    vb2_plane_size(vb, 0) < inst->input_buf_size)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);

void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_buf_queue(m2m_ctx, vbuf);

	if (inst->session_type == VIDC_SESSION_TYPE_ENC &&
	    !(inst->streamon_out && inst->streamon_cap))
		goto unlock;

	if (vb2_start_streaming_called(vb->vb2_queue)) {
		ret = is_buf_refed(inst, vbuf);
		if (ret)
			goto unlock;

		ret = session_process_buf(inst, vbuf);
		if (ret)
			return_buf_error(inst, vbuf);
	}

unlock:
	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);

void venus_helper_buffers_done(struct venus_inst *inst,
			       enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
		v4l2_m2m_buf_done(buf, state);
	while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
		v4l2_m2m_buf_done(buf, state);
}
EXPORT_SYMBOL_GPL(venus_helper_buffers_done);

void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
{
	struct venus_inst *inst = vb2_get_drv_priv(q);
	struct venus_core *core = inst->core;
	int ret;

	mutex_lock(&inst->lock);

	if (inst->streamon_out & inst->streamon_cap) {
		ret = hfi_session_stop(inst);
		ret |= hfi_session_unload_res(inst);
		ret |= venus_helper_unregister_bufs(inst);
		ret |= venus_helper_intbufs_free(inst);
		ret |= hfi_session_deinit(inst);

		if (inst->session_error || core->sys_error)
			ret = -EIO;

		if (ret)
			hfi_session_abort(inst);

		venus_helper_free_dpb_bufs(inst);

		venus_helper_load_scale_clocks(core);
		INIT_LIST_HEAD(&inst->registeredbufs);
	}

	venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);

	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		inst->streamon_out = 0;
	else
		inst->streamon_cap = 0;

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);

int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct v4l2_m2m_buffer *buf, *n;
	int ret;

	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret) {
			return_buf_error(inst, &buf->vb);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_process_initial_cap_bufs);

int venus_helper_process_initial_out_bufs(struct venus_inst *inst)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct v4l2_m2m_buffer *buf, *n;
	int ret;

	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret) {
			return_buf_error(inst, &buf->vb);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);

int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
	struct venus_core *core = inst->core;
	int ret;

	ret = venus_helper_intbufs_alloc(inst);
	if (ret)
		return ret;

	ret = session_register_bufs(inst);
	if (ret)
		goto err_bufs_free;

	venus_helper_load_scale_clocks(core);

	ret = hfi_session_load_res(inst);
	if (ret)
		goto err_unreg_bufs;

	ret = hfi_session_start(inst);
	if (ret)
		goto err_unload_res;

	return 0;

err_unload_res:
	hfi_session_unload_res(inst);
err_unreg_bufs:
	venus_helper_unregister_bufs(inst);
err_bufs_free:
	venus_helper_intbufs_free(inst);
	return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);

void venus_helper_m2m_device_run(void *priv)
{
	struct venus_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct v4l2_m2m_buffer *buf, *n;
	int ret;

	mutex_lock(&inst->lock);

	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);
	}

	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
		ret = session_process_buf(inst, &buf->vb);
		if (ret)
			return_buf_error(inst, &buf->vb);
	}

	mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);

void venus_helper_m2m_job_abort(void *priv)
{
	struct venus_inst *inst = priv;

	v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);

void venus_helper_init_instance(struct venus_inst *inst)
{
	if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
		INIT_LIST_HEAD(&inst->delayed_process);
		INIT_WORK(&inst->delayed_process_work,
			  delayed_process_buf_func);
	}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);

static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
{
	unsigned int i;

	for (i = 0; i < caps->num_fmts; i++) {
		if (caps->fmts[i].buftype == buftype &&
		    caps->fmts[i].fmt == fmt)
			return true;
	}

	return false;
}

int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
			      u32 *out_fmt, u32 *out2_fmt, bool ubwc)
{
	struct venus_core *core = inst->core;
	struct venus_caps *caps;
	u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
	bool found, found_ubwc;

	*out_fmt = *out2_fmt = 0;

	if (!fmt)
		return -EINVAL;

	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
	if (!caps)
		return -EINVAL;

	if (ubwc) {
		ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
		found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
						ubwc_fmt);
		found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);

		if (found_ubwc && found) {
			*out_fmt = ubwc_fmt;
			*out2_fmt = fmt;
			return 0;
		}
	}

	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
	if (found) {
		*out_fmt = fmt;
		*out2_fmt = 0;
		return 0;
	}

	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
	if (found) {
		*out_fmt = 0;
		*out2_fmt = fmt;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);

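/*
 * Toggle the video codec power collapse control in the wrapper registers.
 * On v3 the register write is fire-and-forget; on v4 the matching MMCC power
 * status bit is polled until the hardware acknowledges the new state.
 */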
int venus_helper_power_enable(struct venus_core *core, u32 session_type,
			      bool enable)
{
	void __iomem *ctrl, *stat;
	u32 val;
	int ret;

	if (!IS_V3(core) && !IS_V4(core))
		return 0;

	if (IS_V3(core)) {
		if (session_type == VIDC_SESSION_TYPE_DEC)
			ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
		else
			ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
		if (enable)
			writel(0, ctrl);
		else
			writel(1, ctrl);

		return 0;
	}

	if (session_type == VIDC_SESSION_TYPE_DEC) {
		ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
		stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
	} else {
		ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
		stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
	}

	if (enable) {
		writel(0, ctrl);

		ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
		if (ret)
			return ret;
	} else {
		writel(1, ctrl);

		ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_power_enable);