/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/host1x.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

struct tegra_drm_file {
	struct list_head contexts;
};

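/* Hand the swapped atomic state to the commit worker for asynchronous completion. */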
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}

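/*
 * Program the new atomic state into the hardware and release it. This runs
 * either synchronously from tegra_atomic_commit() or from the commit worker.
 */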
static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update, which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}

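/* Worker callback that completes the most recently scheduled asynchronous commit. */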
static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}

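/*
 * .atomic_commit() implementation: prepare the planes, serialize against any
 * outstanding asynchronous commit, swap in the new state and then complete it
 * either directly or via the commit worker.
 */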
static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool async)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding asynchronous commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - nothing below can fail, short of
	 * the hardware becoming hopelessly wedged. That means we can commit
	 * the new state on the software side now.
	 */

	drm_atomic_helper_swap_state(drm, state);

	if (async)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_TEGRA_FBDEV
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};

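/*
 * Driver .load() callback: allocate the per-device tegra_drm structure, set
 * up the (optional) IOMMU domain, mode setting, framebuffer and vblank
 * support, and initialize the host1x sub-devices.
 */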
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		DRM_DEBUG("IOMMU context initialized\n");
		drm_mm_init(&tegra->mm, 0, SZ_2G);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	drm_mode_config_reset(drm);

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto vblank;

	return 0;

vblank:
	drm_vblank_cleanup(drm);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
	}
free:
	kfree(tegra);
	return err;
}

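/* Driver .unload() callback: tear down everything set up in tegra_drm_load(). */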
static int tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return err;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
	}

	kfree(tegra);

	return 0;
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	INIT_LIST_HEAD(&fpriv->contexts);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

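/*
 * Resolve a GEM handle to the underlying host1x buffer object. The reference
 * taken by the lookup is dropped again immediately; the object stays alive as
 * long as userspace holds the handle.
 */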
static struct host1x_bo *
host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem)
		return NULL;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&drm->struct_mutex);

	bo = to_tegra_bo(gem);
	return &bo->base;
}

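/*
 * Copy a single relocation descriptor from userspace and resolve its command
 * buffer and target handles to host1x buffer objects.
 */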
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(drm, file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

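/*
 * Build a host1x job from a userspace submission: copy in the command buffer
 * gathers, relocations, wait checks and syncpoint request, then pin and
 * submit the job to the client's channel.
 */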
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x_job *job;
	int err;

	/* We currently support exactly one syncpt_incr struct per submit. */
	if (args->num_syncpts != 1)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;
	}

	if (copy_from_user(job->waitchk, waitchks,
			   sizeof(*waitchks) * num_waitchks)) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
{
	return (struct tegra_drm_context *)(uintptr_t)context;
}

static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
					struct tegra_drm_context *context)
{
	struct tegra_drm_context *ctx;

	list_for_each_entry(ctx, &file->contexts, list)
		if (ctx == context)
			return true;

	return false;
}

static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
				  &args->value);
}

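/*
 * Open a channel to the client (engine) matching the requested class and
 * track the resulting context in the per-file context list.
 */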
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = client->ops->open_channel(client, context);
			if (err)
				break;

			list_add(&context->list, &fpriv->contexts);
			args->context = (uintptr_t)context;
			context->client = client;
			return 0;
		}

	kfree(context);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;

	context = tegra_drm_get_context(args->context);

	if (!tegra_drm_file_owns_context(fpriv, context))
		return -EINVAL;

	list_del(&context->list);
	tegra_drm_context_free(context);

	return 0;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;

	context = tegra_drm_get_context(args->context);

	if (!tegra_drm_file_owns_context(fpriv, context))
		return -ENODEV;

	if (args->index >= context->client->base.num_syncpts)
		return -EINVAL;

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

	return 0;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;

	context = tegra_drm_get_context(args->context);

	if (!tegra_drm_file_owns_context(fpriv, context))
		return -ENODEV;

	return context->client->ops->submit(context, args, drm, file);
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;

	context = tegra_drm_get_context(args->context);

	if (!tegra_drm_file_owns_context(fpriv, context))
		return -ENODEV;

	if (args->syncpt >= context->client->base.num_syncpts)
		return -EINVAL;

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base)
		return -ENXIO;

	args->id = host1x_syncpt_base_id(base);

	return 0;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(drm, file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_unreference(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(drm, file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_unreference(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(drm, file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_unreference(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_unreference(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

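/* Map a DRM pipe index back to the corresponding CRTC. */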
static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
					     unsigned int pipe)
{
	struct drm_crtc *crtc;

	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
		if (pipe == drm_crtc_index(crtc))
			return crtc;
	}

	return NULL;
}

static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
	struct tegra_dc *dc = to_tegra_dc(crtc);

	if (!crtc)
		return 0;

	return tegra_dc_get_vblank_counter(dc);
}

static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
{
	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
	struct tegra_dc *dc = to_tegra_dc(crtc);

	if (!crtc)
		return -ENODEV;

	tegra_dc_enable_vblank(dc);

	return 0;
}

static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
{
	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
	struct tegra_dc *dc = to_tegra_dc(crtc);

	if (crtc)
		tegra_dc_disable_vblank(dc);
}

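/*
 * Per-file cleanup: cancel pending page flips owned by this file and close
 * any channels (contexts) it still has open.
 */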
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm_context *context, *tmp;
	struct drm_crtc *crtc;

	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
		tegra_dc_cancel_page_flip(crtc, file);

	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
		tegra_drm_context_free(context);

	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height, fb->depth,
			   fb->bits_per_pixel,
			   atomic_read(&fb->refcount.refcount));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;

	return drm_mm_dump_table(s, &tegra->mm);
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}

static void tegra_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list), minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.preclose = tegra_drm_preclose,
	.lastclose = tegra_drm_lastclose,

	.get_vblank_counter = tegra_drm_get_vblank_counter,
	.enable_vblank = tegra_drm_enable_vblank,
	.disable_vblank = tegra_drm_disable_vblank,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
	.debugfs_cleanup = tegra_debugfs_cleanup,
#endif

	.gem_free_object = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,
	.dumb_map_offset = tegra_bo_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

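/*
 * host1x bus .probe() callback: allocate and register the DRM device that
 * ties the Tegra display and engine sub-devices together.
 */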
static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (!drm)
		return -ENOMEM;

	drm_dev_set_unique(drm, dev_name(&dev->dev));
	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
		 driver->major, driver->minor, driver->patchlevel,
		 driver->date, drm->primary->index);

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(drm);

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static const struct dev_pm_ops host1x_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
};

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_driver_register(&tegra_dc_driver);
	if (err < 0)
		goto unregister_host1x;

	err = platform_driver_register(&tegra_dsi_driver);
	if (err < 0)
		goto unregister_dc;

	err = platform_driver_register(&tegra_sor_driver);
	if (err < 0)
		goto unregister_dsi;

	err = platform_driver_register(&tegra_hdmi_driver);
	if (err < 0)
		goto unregister_sor;

	err = platform_driver_register(&tegra_dpaux_driver);
	if (err < 0)
		goto unregister_hdmi;

	err = platform_driver_register(&tegra_gr2d_driver);
	if (err < 0)
		goto unregister_dpaux;

	err = platform_driver_register(&tegra_gr3d_driver);
	if (err < 0)
		goto unregister_gr2d;

	return 0;

unregister_gr2d:
	platform_driver_unregister(&tegra_gr2d_driver);
unregister_dpaux:
	platform_driver_unregister(&tegra_dpaux_driver);
unregister_hdmi:
	platform_driver_unregister(&tegra_hdmi_driver);
unregister_sor:
	platform_driver_unregister(&tegra_sor_driver);
unregister_dsi:
	platform_driver_unregister(&tegra_dsi_driver);
unregister_dc:
	platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_driver_unregister(&tegra_gr3d_driver);
	platform_driver_unregister(&tegra_gr2d_driver);
	platform_driver_unregister(&tegra_dpaux_driver);
	platform_driver_unregister(&tegra_hdmi_driver);
	platform_driver_unregister(&tegra_sor_driver);
	platform_driver_unregister(&tegra_dsi_driver);
	platform_driver_unregister(&tegra_dc_driver);
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");