/*
 * Copyright (C) STMicroelectronics SA 2014
 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <drm/drmP.h>

#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>

#include "sti_drm_drv.h"
#include "sti_drm_crtc.h"

#define DRIVER_NAME	"sti"
#define DRIVER_DESC	"STMicroelectronics SoC DRM"
#define DRIVER_DATE	"20140601"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

#define STI_MAX_FB_HEIGHT	4096
#define STI_MAX_FB_WIDTH	4096

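/*
 * Queue an asynchronous atomic commit: stash the state in the driver private
 * data and kick the commit worker, which completes it from process context.
 */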
static void sti_drm_atomic_schedule(struct sti_drm_private *private,
				    struct drm_atomic_state *state)
{
	private->commit.state = state;
	schedule_work(&private->commit.work);
}

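/*
 * Apply an atomic state to the hardware: disable, update planes, enable,
 * then wait for vblanks before cleaning up the old framebuffers.
 */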
static void sti_drm_atomic_complete(struct sti_drm_private *private,
				    struct drm_atomic_state *state)
{
	struct drm_device *drm = private->drm_dev;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update, which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}

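/* Worker used to complete commits scheduled by sti_drm_atomic_schedule(). */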
static void sti_drm_atomic_work(struct work_struct *work)
{
	struct sti_drm_private *private = container_of(work,
			struct sti_drm_private, commit.work);

	sti_drm_atomic_complete(private, private->commit.state);
}

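/*
 * Main atomic commit entry point: prepare the planes, serialize against any
 * outstanding asynchronous commit, swap in the new state, then either
 * schedule its completion or run it synchronously.
 */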
static int sti_drm_atomic_commit(struct drm_device *drm,
				 struct drm_atomic_state *state, bool async)
{
	struct sti_drm_private *private = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding asynchronous commits */
	mutex_lock(&private->commit.lock);
	flush_work(&private->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hardware misbehaves. Which means we can commit the new
	 * state on the software side now.
	 */

	drm_atomic_helper_swap_state(drm, state);

	if (async)
		sti_drm_atomic_schedule(private, state);
	else
		sti_drm_atomic_complete(private, state);

	mutex_unlock(&private->commit.lock);
	return 0;
}


static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = sti_drm_atomic_commit,
};

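/* Initialize the mode config size limits and hook up the atomic helpers. */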
static void sti_drm_mode_config_init(struct drm_device *dev)
{
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * Set the default maximum framebuffer width and height; these values
	 * are used to check the framebuffer size limits in drm_mode_addfb().
	 */
	dev->mode_config.max_width = STI_MAX_FB_WIDTH;
	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;

	dev->mode_config.funcs = &sti_drm_mode_config_funcs;
}

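/*
 * DRM driver .load callback: allocate the driver private data, set up the
 * mode configuration, bind all sub-components and, if enabled, the CMA fbdev
 * emulation.
 */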
static int sti_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct sti_drm_private *private;
	int ret;

	private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
	if (!private) {
		DRM_ERROR("Failed to allocate private\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)private;
	private->drm_dev = dev;

	mutex_init(&private->commit.lock);
	INIT_WORK(&private->commit.work, sti_drm_atomic_work);

	drm_mode_config_init(dev);
	drm_kms_helper_poll_init(dev);

	sti_drm_mode_config_init(dev);

	ret = component_bind_all(dev->dev, dev);
	if (ret) {
		drm_kms_helper_poll_fini(dev);
		drm_mode_config_cleanup(dev);
		kfree(private);
		return ret;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_STI_FBDEV
	drm_fbdev_cma_init(dev, 32,
			   dev->mode_config.num_crtc,
			   dev->mode_config.num_connector);
#endif
	return 0;
}

static const struct file_operations sti_drm_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = drm_gem_cma_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.release = drm_release,
};

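/*
 * Wrapper around drm_gem_prime_export() which forces O_RDWR so that the
 * exported dma-buf can be mmapped for writing by the importer.
 */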
static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
						struct drm_gem_object *obj,
						int flags)
{
	/* we want to be able to write to the mmapped buffer */
	flags |= O_RDWR;
	return drm_gem_prime_export(dev, obj, flags);
}

static struct drm_driver sti_drm_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
	    DRIVER_GEM | DRIVER_PRIME,
	.load = sti_drm_load,
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.fops = &sti_drm_driver_fops,

	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = sti_drm_crtc_enable_vblank,
	.disable_vblank = sti_drm_crtc_disable_vblank,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = sti_drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

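/* Component framework match callback: match a device against its DT node. */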
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

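/* Component master bind/unbind: create and tear down the DRM device. */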
static int sti_drm_bind(struct device *dev)
{
	return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
}

static void sti_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops sti_drm_ops = {
	.bind = sti_drm_bind,
	.unbind = sti_drm_unbind,
};

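/*
 * Master probe: build a component match list from the available children of
 * the parent display-subsystem node and register the component master.
 */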
static int sti_drm_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->parent->of_node;
	struct device_node *child_np;
	struct component_match *match = NULL;

	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

	child_np = of_get_next_available_child(node, NULL);

	while (child_np) {
		component_match_add(dev, &match, compare_of, child_np);
		of_node_put(child_np);
		child_np = of_get_next_available_child(node, child_np);
	}

	return component_master_add_with_match(dev, &sti_drm_ops, match);
}

static int sti_drm_master_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &sti_drm_ops);
	return 0;
}

static struct platform_driver sti_drm_master_driver = {
	.probe = sti_drm_master_probe,
	.remove = sti_drm_master_remove,
	.driver = {
		.name = DRIVER_NAME "__master",
	},
};

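/*
 * Top-level platform probe: populate the child devices described in the
 * device tree and register the "__master" device that triggers the component
 * master probe above.
 */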
static int sti_drm_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct platform_device *master;

	of_platform_populate(node, NULL, NULL, dev);

	platform_driver_register(&sti_drm_master_driver);
	master = platform_device_register_resndata(dev,
			DRIVER_NAME "__master", -1,
			NULL, 0, NULL, 0);
	if (IS_ERR(master)) {
		/* undo the registrations done above on failure */
		platform_driver_unregister(&sti_drm_master_driver);
		of_platform_depopulate(dev);
		return PTR_ERR(master);
	}

	platform_set_drvdata(pdev, master);
	return 0;
}

static int sti_drm_platform_remove(struct platform_device *pdev)
{
	struct platform_device *master = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);
	platform_device_unregister(master);
	platform_driver_unregister(&sti_drm_master_driver);
	return 0;
}

static const struct of_device_id sti_drm_dt_ids[] = {
	{ .compatible = "st,sti-display-subsystem", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);

static struct platform_driver sti_drm_platform_driver = {
	.probe = sti_drm_platform_probe,
	.remove = sti_drm_platform_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = sti_drm_dt_ids,
	},
};

module_platform_driver(sti_drm_platform_driver);

MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");