/*
 * Copyright 2010 Matt Turner.
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Matt Turner
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/fb.h>

#include "mgag200_drv.h"

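/*
 * Copy the damaged rectangle from the system-memory shadow buffer
 * (mfbdev->sysram) into the VRAM object that the CRTC scans out.
 * If the BO cannot be reserved right now, the damage is merged into
 * mfbdev->x1/x2/y1/y2 and flushed on a later update.
 */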
static void mga_dirty_update(struct mga_fbdev *mfbdev,
			     int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct mgag200_bo *bo;
	int src_offset, dst_offset;
	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7) / 8;
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = mfbdev->mfb.obj;
	bo = gem_to_mga_bo(obj);

	/*
	 * Try to reserve the BO. If that fails with -EBUSY the BO is
	 * being moved, so store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = mgag200_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;

		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&mfbdev->dirty_lock, flags);

	/* grow the rectangle to cover any previously stored damage */
	if (mfbdev->y1 < y)
		y = mfbdev->y1;
	if (mfbdev->y2 > y2)
		y2 = mfbdev->y2;
	if (mfbdev->x1 < x)
		x = mfbdev->x1;
	if (mfbdev->x2 > x2)
		x2 = mfbdev->x2;

	if (store_for_later) {
		mfbdev->x1 = x;
		mfbdev->x2 = x2;
		mfbdev->y1 = y;
		mfbdev->y2 = y2;
		spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
		return;
	}

	mfbdev->x1 = mfbdev->y1 = INT_MAX;
	mfbdev->x2 = mfbdev->y2 = 0;
	spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			mgag200_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}
	/* copy the damaged scanlines from the shadow buffer into VRAM */
	for (i = y; i <= y2; i++) {
		/* assume equal stride for now */
		src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
	}
	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	mgag200_bo_unreserve(bo);
}

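/*
 * fbdev drawing entry points: render into the system-memory shadow
 * with the generic sys helpers, then push the touched rectangle to VRAM.
 */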
static void mga_fillrect(struct fb_info *info,
			 const struct fb_fillrect *rect)
{
	struct mga_fbdev *mfbdev = info->par;
	drm_fb_helper_sys_fillrect(info, rect);
	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
			 rect->height);
}

static void mga_copyarea(struct fb_info *info,
			 const struct fb_copyarea *area)
{
	struct mga_fbdev *mfbdev = info->par;
	drm_fb_helper_sys_copyarea(info, area);
	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
			 area->height);
}

static void mga_imageblit(struct fb_info *info,
			  const struct fb_image *image)
{
	struct mga_fbdev *mfbdev = info->par;
	drm_fb_helper_sys_imageblit(info, image);
	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
			 image->height);
}

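/* fb_info operations for the emulated fbdev device */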
static struct fb_ops mgag200fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = mga_fillrect,
	.fb_copyarea = mga_copyarea,
	.fb_imageblit = mga_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

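/* Allocate the GEM object that backs the fbdev framebuffer */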
static int mgag200fb_create_object(struct mga_fbdev *afbdev,
				   struct drm_mode_fb_cmd2 *mode_cmd,
				   struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	u32 size;
	struct drm_gem_object *gobj;
	int ret = 0;

	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = mgag200_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}

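/*
 * .fb_probe callback: create the backing object, a vmalloc'ed shadow
 * copy for CPU rendering, and the fb_info that exposes it to fbcon.
 */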
static int mgag200fb_create(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct mga_fbdev *mfbdev =
		container_of(helper, struct mga_fbdev, helper);
	struct drm_device *dev = mfbdev->helper.dev;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct mga_device *mdev = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_gem_object *gobj = NULL;
	int ret;
	void *sysram;
	int size;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	sysram = vmalloc(size);
	if (!sysram) {
		ret = -ENOMEM;
		goto err_sysram;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_alloc_fbi;
	}

	info->par = mfbdev;

	ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
	if (ret)
		goto err_framebuffer_init;

	mfbdev->sysram = sysram;
	mfbdev->size = size;

	fb = &mfbdev->mfb.base;

	/* setup helper */
	mfbdev->helper.fb = fb;

	strcpy(info->fix.id, "mgadrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &mgag200fb_ops;

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = mdev->mc.vram_size;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	info->screen_base = sysram;
	info->screen_size = size;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	DRM_DEBUG_KMS("allocated %dx%d\n",
		      fb->width, fb->height);

	return 0;

err_framebuffer_init:
	drm_fb_helper_release_fbi(helper);
err_alloc_fbi:
	vfree(sysram);
err_sysram:
	drm_gem_object_unreference_unlocked(gobj);

	return ret;
}

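/* Tear down the fbdev emulation state created by mgag200fb_create() */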
static int mga_fbdev_destroy(struct drm_device *dev,
			     struct mga_fbdev *mfbdev)
{
	struct mga_framebuffer *mfb = &mfbdev->mfb;

	drm_fb_helper_unregister_fbi(&mfbdev->helper);
	drm_fb_helper_release_fbi(&mfbdev->helper);

	if (mfb->obj) {
		drm_gem_object_unreference_unlocked(mfb->obj);
		mfb->obj = NULL;
	}
	drm_fb_helper_fini(&mfbdev->helper);
	vfree(mfbdev->sysram);
	drm_framebuffer_unregister_private(&mfb->base);
	drm_framebuffer_cleanup(&mfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
	.gamma_set = mga_crtc_fb_gamma_set,
	.gamma_get = mga_crtc_fb_gamma_get,
	.fb_probe = mgag200fb_create,
};

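/* Set up fbdev emulation (fbcon) during driver initialization */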
int mgag200_fbdev_init(struct mga_device *mdev)
{
	struct mga_fbdev *mfbdev;
	int ret;
	int bpp_sel = 32;

	/* prefer 16bpp on low end gpus with limited VRAM */
	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
		bpp_sel = 16;

	mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
	if (!mfbdev)
		return -ENOMEM;

	mdev->mfbdev = mfbdev;
	spin_lock_init(&mfbdev->dirty_lock);

	drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);

	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
	if (ret)
		goto err_fb_helper;

	ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
	if (ret)
		goto err_fb_setup;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(mdev->dev);

	ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
	if (ret)
		goto err_fb_setup;

	return 0;

err_fb_setup:
	drm_fb_helper_fini(&mfbdev->helper);
err_fb_helper:
	mdev->mfbdev = NULL;

	return ret;
}

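/* Tear down fbdev emulation; safe to call when init never completed */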
void mgag200_fbdev_fini(struct mga_device *mdev)
{
	if (!mdev->mfbdev)
		return;

	mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
}