This source file includes the following definitions:
- nv50_wndw_ctxdma_del
- nv50_wndw_ctxdma_new
- nv50_wndw_wait_armed
- nv50_wndw_flush_clr
- nv50_wndw_flush_set
- nv50_wndw_ntfy_enable
- nv50_wndw_atomic_check_release
- nv50_wndw_atomic_check_acquire_yuv
- nv50_wndw_atomic_check_acquire_rgb
- nv50_wndw_atomic_check_acquire
- nv50_wndw_atomic_check_lut
- nv50_wndw_atomic_check
- nv50_wndw_cleanup_fb
- nv50_wndw_prepare_fb
- nv50_wndw_atomic_destroy_state
- nv50_wndw_atomic_duplicate_state
- nv50_wndw_zpos_default
- nv50_wndw_reset
- nv50_wndw_destroy
- nv50_wndw_notify
- nv50_wndw_fini
- nv50_wndw_init
- nv50_wndw_new_
- nv50_wndw_new
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>

#include "nouveau_bo.h"

static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
        nvif_object_fini(&ctxdma->object);
        list_del(&ctxdma->head);
        kfree(ctxdma);
}

static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
        struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
        struct nv50_wndw_ctxdma *ctxdma;
        const u8 kind = fb->nvbo->kind;
        const u32 handle = 0xfb000000 | kind;
        struct {
                struct nv_dma_v0 base;
                union {
                        struct nv50_dma_v0 nv50;
                        struct gf100_dma_v0 gf100;
                        struct gf119_dma_v0 gf119;
                };
        } args = {};
        u32 argc = sizeof(args.base);
        int ret;

        list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
                if (ctxdma->object.handle == handle)
                        return ctxdma;
        }

        if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
                return ERR_PTR(-ENOMEM);
        list_add(&ctxdma->head, &wndw->ctxdma.list);

        args.base.target = NV_DMA_V0_TARGET_VRAM;
        args.base.access = NV_DMA_V0_ACCESS_RDWR;
        args.base.start = 0;
        args.base.limit = drm->client.device.info.ram_user - 1;

        if (drm->client.device.info.chipset < 0x80) {
                args.nv50.part = NV50_DMA_V0_PART_256;
                argc += sizeof(args.nv50);
        } else
        if (drm->client.device.info.chipset < 0xc0) {
                args.nv50.part = NV50_DMA_V0_PART_256;
                args.nv50.kind = kind;
                argc += sizeof(args.nv50);
        } else
        if (drm->client.device.info.chipset < 0xd0) {
                args.gf100.kind = kind;
                argc += sizeof(args.gf100);
        } else {
                args.gf119.page = GF119_DMA_V0_PAGE_LP;
                args.gf119.kind = kind;
                argc += sizeof(args.gf119);
        }

        ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
                               &args, argc, &ctxdma->object);
        if (ret) {
                nv50_wndw_ctxdma_del(ctxdma);
                return ERR_PTR(ret);
        }

        return ctxdma;
}
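
/* Usage sketch (illustrative, not part of the driver): the handle encodes
 * the buffer's memory "kind" in the low bits of 0xfb000000, so at most one
 * DMA object per kind is created and then reused for every framebuffer of
 * the same kind.  This mirrors the call made from nv50_wndw_prepare_fb():
 *
 *	struct nv50_wndw_ctxdma *ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
 *	if (IS_ERR(ctxdma))
 *		return PTR_ERR(ctxdma);
 *	asyw->image.handle[0] = ctxdma->object.handle;
 */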

int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
        if (asyw->set.ntfy) {
                return wndw->func->ntfy_wait_begun(disp->sync,
                                                   asyw->ntfy.offset,
                                                   wndw->wndw.base.device);
        }
        return 0;
}

void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
                    struct nv50_wndw_atom *asyw)
{
        union nv50_wndw_atom_mask clr = {
                .mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
        };
        if (clr.sema ) wndw->func-> sema_clr(wndw);
        if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
        if (clr.xlut ) wndw->func-> xlut_clr(wndw);
        if (clr.csc  ) wndw->func->  csc_clr(wndw);
        if (clr.image) wndw->func->image_clr(wndw);

        interlock[wndw->interlock.type] |= wndw->interlock.data;
}

void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
                    struct nv50_wndw_atom *asyw)
{
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                asyw->image.mode = 0;
                asyw->image.interval = 1;
        }

        if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
        if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
        if (asyw->set.image) wndw->func->image_set(wndw, asyw);

        if (asyw->set.xlut ) {
                if (asyw->ilut) {
                        asyw->xlut.i.offset =
                                nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
                                              asyw->ilut, asyw->xlut.i.load);
                }
                wndw->func->xlut_set(wndw, asyw);
        }

        if (asyw->set.csc  ) wndw->func->csc_set  (wndw, asyw);
        if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
        if (asyw->set.blend) wndw->func->blend_set(wndw, asyw);
        if (asyw->set.point) {
                if (asyw->set.point = false, asyw->set.mask)
                        interlock[wndw->interlock.type] |= wndw->interlock.data;
                interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;

                wndw->immd->point(wndw, asyw);
                wndw->immd->update(wndw, interlock);
        } else {
                interlock[wndw->interlock.type] |= wndw->interlock.data;
        }
}
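
/* Note on the comma expression in nv50_wndw_flush_set() above:
 * "asyw->set.point = false, asyw->set.mask" clears the point bit first and
 * then tests whether any other dirty bits remain.  A de-sugared equivalent:
 *
 *	asyw->set.point = false;
 *	if (asyw->set.mask)
 *		interlock[wndw->interlock.type] |= wndw->interlock.data;
 *
 * i.e. the window channel interlock is only requested when something other
 * than the position update (which goes via the immediate channel) is pending.
 */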

void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
        struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

        asyw->ntfy.handle = wndw->wndw.sync.handle;
        asyw->ntfy.offset = wndw->ntfy;
        asyw->ntfy.awaken = false;
        asyw->set.ntfy = true;

        wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
        wndw->ntfy ^= 0x10;
}

static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
                               struct nv50_wndw_atom *asyw,
                               struct nv50_head_atom *asyh)
{
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
        NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
        wndw->func->release(wndw, asyw, asyh);
        asyw->ntfy.handle = 0;
        asyw->sema.handle = 0;
}

static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
        switch (asyw->state.fb->format->format) {
        case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
        case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }
        asyw->image.colorspace = 1;
        return 0;
}

static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
        switch (asyw->state.fb->format->format) {
        case DRM_FORMAT_C8           : asyw->image.format = 0x1e; break;
        case DRM_FORMAT_XRGB8888     :
        case DRM_FORMAT_ARGB8888     : asyw->image.format = 0xcf; break;
        case DRM_FORMAT_RGB565       : asyw->image.format = 0xe8; break;
        case DRM_FORMAT_XRGB1555     :
        case DRM_FORMAT_ARGB1555     : asyw->image.format = 0xe9; break;
        case DRM_FORMAT_XBGR2101010  :
        case DRM_FORMAT_ABGR2101010  : asyw->image.format = 0xd1; break;
        case DRM_FORMAT_XBGR8888     :
        case DRM_FORMAT_ABGR8888     : asyw->image.format = 0xd5; break;
        case DRM_FORMAT_XRGB2101010  :
        case DRM_FORMAT_ARGB2101010  : asyw->image.format = 0xdf; break;
        case DRM_FORMAT_XBGR16161616F:
        case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
        default:
                return -EINVAL;
        }
        asyw->image.colorspace = 0;
        return 0;
}
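
/* The hex values programmed above correspond to the hardware's surface
 * format enumerants (e.g. 0xcf for A8R8G8B8); colorspace is 0 for RGB and
 * 1 for YUV.  Callers try the RGB table first and fall back to YUV, as in
 * this sketch of the acquire path:
 *
 *	ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
 *	if (ret)
 *		ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
 *	if (ret)
 *		return ret;
 */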

static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                               struct nv50_wndw_atom *armw,
                               struct nv50_wndw_atom *asyw,
                               struct nv50_head_atom *asyh)
{
        struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
        struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
        int ret;

        NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

        if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
                asyw->image.w = fb->base.width;
                asyw->image.h = fb->base.height;
                asyw->image.kind = fb->nvbo->kind;

                ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
                if (ret) {
                        ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
                        if (ret)
                                return ret;
                }

                if (asyw->image.kind) {
                        asyw->image.layout = 0;
                        if (drm->client.device.info.chipset >= 0xc0)
                                asyw->image.blockh = fb->nvbo->mode >> 4;
                        else
                                asyw->image.blockh = fb->nvbo->mode;
                        asyw->image.blocks[0] = fb->base.pitches[0] / 64;
                        asyw->image.pitch[0] = 0;
                } else {
                        asyw->image.layout = 1;
                        asyw->image.blockh = 0;
                        asyw->image.blocks[0] = 0;
                        asyw->image.pitch[0] = fb->base.pitches[0];
                }

                if (!asyh->state.async_flip)
                        asyw->image.interval = 1;
                else
                        asyw->image.interval = 0;
                asyw->image.mode = asyw->image.interval ? 0 : 1;
                asyw->set.image = wndw->func->image_set != NULL;
        }

        if (wndw->func->scale_set) {
                asyw->scale.sx = asyw->state.src_x >> 16;
                asyw->scale.sy = asyw->state.src_y >> 16;
                asyw->scale.sw = asyw->state.src_w >> 16;
                asyw->scale.sh = asyw->state.src_h >> 16;
                asyw->scale.dw = asyw->state.crtc_w;
                asyw->scale.dh = asyw->state.crtc_h;
                if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
                        asyw->set.scale = true;
        }

        if (wndw->func->blend_set) {
                asyw->blend.depth = 255 - asyw->state.normalized_zpos;
                asyw->blend.k1 = asyw->state.alpha >> 8;
                switch (asyw->state.pixel_blend_mode) {
                case DRM_MODE_BLEND_PREMULTI:
                        asyw->blend.src_color = 2; /* K1 */
                        asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
                        break;
                case DRM_MODE_BLEND_COVERAGE:
                        asyw->blend.src_color = 5; /* K1_TIMES_SRC */
                        asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
                        break;
                case DRM_MODE_BLEND_PIXEL_NONE:
                default:
                        asyw->blend.src_color = 2; /* K1 */
                        asyw->blend.dst_color = 4; /* NEG_K1 */
                        break;
                }
                if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
                        asyw->set.blend = true;
        }

        if (wndw->immd) {
                asyw->point.x = asyw->state.crtc_x;
                asyw->point.y = asyw->state.crtc_y;
                if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
                        asyw->set.point = true;
        }

        return wndw->func->acquire(wndw, asyw, asyh);
}
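
/* The acquire path computes each piece of assembly ("asyw") state and
 * memcmp()s it against the armed ("armw") copy, marking a dirty bit only on
 * change so nv50_wndw_flush_set() pushes the minimum set of methods.
 * Producer side:
 *
 *	if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
 *		asyw->set.scale = true;
 *
 * Consumer side, in nv50_wndw_flush_set():
 *
 *	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
 */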

static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
                           struct nv50_wndw_atom *armw,
                           struct nv50_wndw_atom *asyw,
                           struct nv50_head_atom *asyh)
{
        struct drm_property_blob *ilut = asyh->state.degamma_lut;

        /* I8 format without an input LUT makes no sense, and the
         * HW error-checks for this.
         *
         * In order to handle legacy gamma, when there's no input
         * LUT we need to steal the output LUT and use it instead.
         */
        if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
                /* This should be an error, but there's legacy clients
                 * that do a modeset before providing a gamma table.
                 *
                 * We keep the window disabled to avoid angering HW.
                 */
                if (!(ilut = asyh->state.gamma_lut)) {
                        asyw->visible = false;
                        return;
                }

                if (wndw->func->ilut)
                        asyh->wndw.olut |= BIT(wndw->id);
        } else {
                asyh->wndw.olut &= ~BIT(wndw->id);
        }

        if (!ilut && wndw->func->ilut_identity &&
            asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
            asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
                static struct drm_property_blob dummy = {};
                ilut = &dummy;
        }

        /* Recalculate LUT state. */
        memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
        if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
                wndw->func->ilut(wndw, asyw);
                asyw->xlut.handle = wndw->wndw.vram.handle;
                asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
                asyw->set.xlut = true;
        } else {
                asyw->clr.xlut = armw->xlut.handle != 0;
        }

        /* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
        if (wndw->func->olut_core &&
            (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
                asyw->set.xlut = true;

        if (wndw->func->csc && asyh->state.ctm) {
                const struct drm_color_ctm *ctm = asyh->state.ctm->data;
                wndw->func->csc(wndw, asyw, ctm);
                asyw->csc.valid = true;
                asyw->set.csc = true;
        } else {
                asyw->csc.valid = false;
                asyw->clr.csc = armw->csc.valid;
        }

        /* Can't do an immediate flip while changing the LUT. */
        asyh->state.async_flip = false;
}
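
/* Note: "asyw->xlut.i.buffer = !asyw->xlut.i.buffer" above appears to
 * ping-pong between two copies of the input LUT, so a new table can be
 * uploaded while the hardware still scans out from the old one;
 * nv50_wndw_flush_set() later loads the blob into the chosen buffer:
 *
 *	asyw->xlut.i.offset =
 *		nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
 *			      asyw->ilut, asyw->xlut.i.load);
 */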

static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
        struct nouveau_drm *drm = nouveau_drm(plane->dev);
        struct nv50_wndw *wndw = nv50_wndw(plane);
        struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        struct nv50_head_atom *harm = NULL, *asyh = NULL;
        bool modeset = false;
        int ret;

        NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

        /* Fetch the assembly state for the head the window will belong to,
         * and determine whether the window will be visible.
         */
        if (asyw->state.crtc) {
                asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
                if (IS_ERR(asyh))
                        return PTR_ERR(asyh);
                modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
                asyw->visible = asyh->state.active;
        } else {
                asyw->visible = false;
        }

        /* Fetch assembly state for the head the window used to belong to. */
        if (armw->state.crtc) {
                harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
                if (IS_ERR(harm))
                        return PTR_ERR(harm);
        }

        /* LUT configuration can potentially cause the window to be disabled. */
        if (asyw->visible && wndw->func->xlut_set &&
            (!armw->visible ||
             asyh->state.color_mgmt_changed ||
             asyw->state.fb->format->format !=
             armw->state.fb->format->format))
                nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

        /* Calculate new window state. */
        if (asyw->visible) {
                ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
                                                     armw, asyw, asyh);
                if (ret)
                        return ret;

                asyh->wndw.mask |= BIT(wndw->id);
        } else
        if (armw->visible) {
                nv50_wndw_atomic_check_release(wndw, asyw, harm);
                harm->wndw.mask &= ~BIT(wndw->id);
        } else {
                return 0;
        }

        /* Aside from the obvious case where the window is actively changing
         * between enabled and disabled, we might need to temporarily disable
         * the window due to an atomic operation we're unable to complete
         * in a single update. */
        if (!asyw->visible || modeset) {
                asyw->clr.ntfy = armw->ntfy.handle != 0;
                asyw->clr.sema = armw->sema.handle != 0;
                asyw->clr.xlut = armw->xlut.handle != 0;
                if (asyw->clr.xlut && asyw->visible)
                        asyw->set.xlut = asyw->xlut.handle != 0;
                asyw->clr.csc = armw->csc.valid;
                if (wndw->func->image_clr)
                        asyw->clr.image = armw->image.handle[0] != 0;
        }

        return 0;
}
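
/* Naming used throughout this file: "armw"/"harm" refer to the state
 * currently armed on the hardware, "asyw"/"asyh" to the assembly state being
 * built for the next flush.  The rough lifecycle of a window update:
 *
 *	nv50_wndw_atomic_check(plane, state);		computes set/clr masks
 *	nv50_wndw_flush_clr(wndw, interlock, flush, asyw);
 *	nv50_wndw_flush_set(wndw, interlock, asyw);
 *	nv50_wndw_wait_armed(wndw, asyw);		waits for hw to latch
 */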

static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
        struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
        struct nouveau_drm *drm = nouveau_drm(plane->dev);

        NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
        if (!old_state->fb)
                return;

        nouveau_bo_unpin(fb->nvbo);
}

static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
        struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
        struct nouveau_drm *drm = nouveau_drm(plane->dev);
        struct nv50_wndw *wndw = nv50_wndw(plane);
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        struct nv50_head_atom *asyh;
        struct nv50_wndw_ctxdma *ctxdma;
        int ret;

        NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
        if (!asyw->state.fb)
                return 0;

        ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
        if (ret)
                return ret;

        if (wndw->ctxdma.parent) {
                ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
                if (IS_ERR(ctxdma)) {
                        nouveau_bo_unpin(fb->nvbo);
                        return PTR_ERR(ctxdma);
                }

                asyw->image.handle[0] = ctxdma->object.handle;
        }

        asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
        asyw->image.offset[0] = fb->nvbo->bo.offset;

        if (wndw->func->prepare) {
                asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
                if (IS_ERR(asyh))
                        return PTR_ERR(asyh);

                wndw->func->prepare(wndw, asyh, asyw);
        }

        return 0;
}
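
/* prepare_fb/cleanup_fb form a pin/unpin pair driven by the atomic helpers:
 * the buffer object is pinned into VRAM before the commit touches it, and
 * unpinned once the old framebuffer is no longer scanned out:
 *
 *	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);	(prepare)
 *	...
 *	nouveau_bo_unpin(fb->nvbo);				(cleanup)
 */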

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
        .prepare_fb = nv50_wndw_prepare_fb,
        .cleanup_fb = nv50_wndw_cleanup_fb,
        .atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
                               struct drm_plane_state *state)
{
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        __drm_atomic_helper_plane_destroy_state(&asyw->state);
        kfree(asyw);
}

static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
        struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
        struct nv50_wndw_atom *asyw;
        if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
                return NULL;
        __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
        asyw->sema = armw->sema;
        asyw->ntfy = armw->ntfy;
        asyw->ilut = NULL;
        asyw->xlut = armw->xlut;
        asyw->csc = armw->csc;
        asyw->image = armw->image;
        asyw->point = armw->point;
        asyw->clr.mask = 0;
        asyw->set.mask = 0;
        return &asyw->state;
}
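
/* Duplication carries the hardware bookkeeping (sema/ntfy/xlut/image/point)
 * over from the armed state, but resets both dirty masks so a duplicated
 * state starts with nothing to do until atomic_check marks changes:
 *
 *	asyw->clr.mask = 0;
 *	asyw->set.mask = 0;
 *
 * asyw->ilut is deliberately not copied; it is re-derived from the head
 * state in nv50_wndw_atomic_check_lut().
 */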

static int
nv50_wndw_zpos_default(struct drm_plane *plane)
{
        return (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 :
               (plane->type == DRM_PLANE_TYPE_OVERLAY) ? 1 : 255;
}

static void
nv50_wndw_reset(struct drm_plane *plane)
{
        struct nv50_wndw_atom *asyw;

        if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
                return;

        if (plane->state)
                plane->funcs->atomic_destroy_state(plane, plane->state);

        __drm_atomic_helper_plane_reset(plane, &asyw->state);
        plane->state->zpos = nv50_wndw_zpos_default(plane);
        plane->state->normalized_zpos = nv50_wndw_zpos_default(plane);
}

static void
nv50_wndw_destroy(struct drm_plane *plane)
{
        struct nv50_wndw *wndw = nv50_wndw(plane);
        struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

        list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
                nv50_wndw_ctxdma_del(ctxdma);
        }

        nvif_notify_fini(&wndw->notify);
        nv50_dmac_destroy(&wndw->wimm);
        nv50_dmac_destroy(&wndw->wndw);

        nv50_lut_fini(&wndw->ilut);

        drm_plane_cleanup(&wndw->plane);
        kfree(wndw);
}

const struct drm_plane_funcs
nv50_wndw = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = nv50_wndw_destroy,
        .reset = nv50_wndw_reset,
        .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
        .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

static int
nv50_wndw_notify(struct nvif_notify *notify)
{
        return NVIF_NOTIFY_KEEP;
}

void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
        nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
        nvif_notify_get(&wndw->notify);
}

int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
               enum drm_plane_type type, const char *name, int index,
               const u32 *format, u32 heads,
               enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
               struct nv50_wndw **pwndw)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct nv50_disp *disp = nv50_disp(dev);
        struct nv50_wndw *wndw;
        int nformat;
        int ret;

        if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
                return -ENOMEM;
        wndw->func = func;
        wndw->id = index;
        wndw->interlock.type = interlock_type;
        wndw->interlock.data = interlock_data;

        wndw->ctxdma.parent = &wndw->wndw.base.user;
        INIT_LIST_HEAD(&wndw->ctxdma.list);

        /* format[] is a zero-terminated list; count its entries. */
        for (nformat = 0; format[nformat]; nformat++);

        ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
                                       format, nformat, NULL,
                                       type, "%s-%d", name, index);
        if (ret) {
                kfree(*pwndw);
                *pwndw = NULL;
                return ret;
        }

        drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

        if (wndw->func->ilut) {
                ret = nv50_lut_init(disp, mmu, &wndw->ilut);
                if (ret)
                        return ret;
        }

        wndw->notify.func = nv50_wndw_notify;

        if (wndw->func->blend_set) {
                ret = drm_plane_create_zpos_property(&wndw->plane,
                                nv50_wndw_zpos_default(&wndw->plane), 0, 254);
                if (ret)
                        return ret;

                ret = drm_plane_create_alpha_property(&wndw->plane);
                if (ret)
                        return ret;

                ret = drm_plane_create_blend_mode_property(&wndw->plane,
                                BIT(DRM_MODE_BLEND_PIXEL_NONE) |
                                BIT(DRM_MODE_BLEND_PREMULTI) |
                                BIT(DRM_MODE_BLEND_COVERAGE));
                if (ret)
                        return ret;
        } else {
                ret = drm_plane_create_zpos_immutable_property(&wndw->plane,
                                nv50_wndw_zpos_default(&wndw->plane));
                if (ret)
                        return ret;
        }

        return 0;
}
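
/* A window backend registers itself through nv50_wndw_new_(); a hypothetical
 * caller (loosely mirroring the wndwc37e/wndwc57e constructors; names and
 * argument values here are illustrative) might look like:
 *
 *	ret = nv50_wndw_new_(&wndwc37e, drm->dev, type, "wndw", index,
 *			     formats, heads, NV50_DISP_INTERLOCK_WNDW,
 *			     BIT(index), &wndw);
 */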

int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
              struct nv50_wndw **pwndw)
{
        struct {
                s32 oclass;
                int version;
                int (*new)(struct nouveau_drm *, enum drm_plane_type,
                           int, s32, struct nv50_wndw **);
        } wndws[] = {
                { TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
                { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
                {}
        };
        struct nv50_disp *disp = nv50_disp(drm->dev);
        int cid, ret;

        cid = nvif_mclass(&disp->disp->object, wndws);
        if (cid < 0) {
                NV_ERROR(drm, "No supported window class\n");
                return cid;
        }

        ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
        if (ret)
                return ret;

        return nv50_wimm_init(drm, *pwndw);
}
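
/* nvif_mclass() selects the first entry of wndws[] whose class the display
 * object supports, so the table is ordered newest-first: a Turing display
 * matches TU102_DISP_WINDOW_CHANNEL_DMA before the Volta class is tried.
 * Support for a newer chip would be added by prepending an entry, e.g.
 * (hypothetical class/constructor names):
 *
 *	{ NEWCHIP_DISP_WINDOW_CHANNEL_DMA, 0, wndwcXXe_new },
 */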