/*
 * Copyright © 2013 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
#include <linux/module.h>
#include <linux/fb.h>

#include "drmP.h"
#include "drm/drm.h"
#include "drm/drm_crtc.h"
#include "drm/drm_crtc_helper.h"
#include "qxl_drv.h"

#include "qxl_object.h"
#include "drm_fb_helper.h"

#define QXL_DIRTY_DELAY (HZ / 30)

#define QXL_FB_OP_FILLRECT 1
#define QXL_FB_OP_COPYAREA 2
#define QXL_FB_OP_IMAGEBLIT 3

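/*
 * A deferred framebuffer operation (fillrect/copyarea/imageblit) queued
 * from atomic context and replayed later by qxl_fb_work().
 */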
struct qxl_fb_op {
	struct list_head head;
	int op_type;
	union {
		struct fb_fillrect fr;
		struct fb_copyarea ca;
		struct fb_image ib;
	} op;
	void *img_data;
};

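/* Per-device fbdev emulation state: shadow buffer, delayed ops and dirty region. */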
struct qxl_fbdev {
	struct drm_fb_helper helper;
	struct qxl_framebuffer	qfb;
	struct list_head	fbdev_list;
	struct qxl_device	*qdev;

	spinlock_t delayed_ops_lock;
	struct list_head delayed_ops;
	void *shadow;
	int size;

	/* dirty memory logging */
	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

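/*
 * Fill in a struct qxl_fb_image from an fb_image, taking the visual and
 * pseudo palette from @info when it is available.
 */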
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
			      struct qxl_device *qdev, struct fb_info *info,
			      const struct fb_image *image)
{
	qxl_fb_image->qdev = qdev;
	if (info) {
		qxl_fb_image->visual = info->fix.visual;
		if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
		    qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
			memcpy(&qxl_fb_image->pseudo_palette,
			       info->pseudo_palette,
			       sizeof(qxl_fb_image->pseudo_palette));
	} else {
		/* fallback */
		if (image->depth == 1)
			qxl_fb_image->visual = FB_VISUAL_MONO10;
		else
			qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
	}
	if (image) {
		memcpy(&qxl_fb_image->fb_image, image,
		       sizeof(qxl_fb_image->fb_image));
	}
}

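/*
 * Send the dirty rectangle of the shadow buffer to the device as an
 * opaque image blit, then reset the dirty region.
 */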
static void qxl_fb_dirty_flush(struct fb_info *info)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;
	struct qxl_fb_image qxl_fb_image;
	struct fb_image *image = &qxl_fb_image.fb_image;
	u32 x1, x2, y1, y2;

	/* TODO: hard coding 32 bpp */
	int stride = qfbdev->qfb.base.pitches[0];

	x1 = qfbdev->dirty.x1;
	x2 = qfbdev->dirty.x2;
	y1 = qfbdev->dirty.y1;
	y2 = qfbdev->dirty.y2;
	/*
	 * we draw into a shadow buffer (qfbdev->shadow) and blit the
	 * dirty region from there to the device
	 */
	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
	image->dx = x1;
	image->dy = y1;
	image->width = x2 - x1;
	image->height = y2 - y1;
	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
					 warnings */
	image->bg_color = 0;
	image->depth = 32;	     /* TODO: take from somewhere? */
	image->cmap.start = 0;
	image->cmap.len = 0;
	image->cmap.red = NULL;
	image->cmap.green = NULL;
	image->cmap.blue = NULL;
	image->cmap.transp = NULL;
	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);

	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
	qxl_draw_opaque_fb(&qxl_fb_image, stride);
	qfbdev->dirty.x1 = 0;
	qfbdev->dirty.x2 = 0;
	qfbdev->dirty.y1 = 0;
	qfbdev->dirty.y2 = 0;
}

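/*
 * Deferred-io callback: translate the list of touched pages into a dirty
 * scanline range and flush it to the device.
 */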
static void qxl_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct qxl_fbdev *qfbdev = info->par;
	unsigned long start, end, min, max;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		/* TODO: add spin lock? */
		/* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
		qfbdev->dirty.x1 = 0;
		qfbdev->dirty.y1 = y1;
		qfbdev->dirty.x2 = info->var.xres;
		qfbdev->dirty.y2 = y2;
		/* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
	}

	qxl_fb_dirty_flush(info);
}

static struct fb_deferred_io qxl_defio = {
	.delay		= QXL_DIRTY_DELAY,
	.deferred_io	= qxl_deferred_io,
};

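/*
 * The qxl_fb_delayed_*() helpers queue a copy of the requested operation
 * on qfbdev->delayed_ops; they are used when the caller cannot sleep and
 * the work is replayed later from qxl_fb_work().  Allocation failures are
 * silently dropped (GFP_ATOMIC | __GFP_NOWARN).
 */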
static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
				    const struct fb_fillrect *fb_rect)
{
	struct qxl_fb_op *op;
	unsigned long flags;

	op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
	if (!op)
		return;

	op->op.fr = *fb_rect;
	op->img_data = NULL;
	op->op_type = QXL_FB_OP_FILLRECT;

	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
				    const struct fb_copyarea *fb_copy)
{
	struct qxl_fb_op *op;
	unsigned long flags;

	op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
	if (!op)
		return;

	op->op.ca = *fb_copy;
	op->img_data = NULL;
	op->op_type = QXL_FB_OP_COPYAREA;

	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
				     const struct fb_image *fb_image)
{
	struct qxl_fb_op *op;
	unsigned long flags;
	uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);

	op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
	if (!op)
		return;

	op->op.ib = *fb_image;
	op->img_data = (void *)(op + 1);
	op->op_type = QXL_FB_OP_IMAGEBLIT;

	memcpy(op->img_data, fb_image->data, size);

	op->op.ib.data = op->img_data;
	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_add_tail(&op->head, &qfbdev->delayed_ops);
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

static void qxl_fb_fillrect_internal(struct fb_info *info,
				     const struct fb_fillrect *fb_rect)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;
	struct qxl_rect rect;
	uint32_t color;
	int x = fb_rect->dx;
	int y = fb_rect->dy;
	int width = fb_rect->width;
	int height = fb_rect->height;
	uint16_t rop;
	struct qxl_draw_fill qxl_draw_fill_rec;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
		color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
	else
		color = fb_rect->color;
	rect.left = x;
	rect.right = x + width;
	rect.top = y;
	rect.bottom = y + height;
	switch (fb_rect->rop) {
	case ROP_XOR:
		rop = SPICE_ROPD_OP_XOR;
		break;
	case ROP_COPY:
		rop = SPICE_ROPD_OP_PUT;
		break;
	default:
		pr_err("qxl_fb_fillrect(): unknown rop, "
		       "defaulting to SPICE_ROPD_OP_PUT\n");
		rop = SPICE_ROPD_OP_PUT;
	}
	qxl_draw_fill_rec.qdev = qdev;
	qxl_draw_fill_rec.rect = rect;
	qxl_draw_fill_rec.color = color;
	qxl_draw_fill_rec.rop = rop;

	qxl_draw_fill(&qxl_draw_fill_rec);
}

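/*
 * fb_ops entry point.  In atomic context the operation is queued and the
 * fb work item is scheduled; otherwise any pending queued work is flushed
 * first so operations are rendered in order.
 */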
static void qxl_fb_fillrect(struct fb_info *info,
			    const struct fb_fillrect *fb_rect)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		qxl_fb_delayed_fillrect(qfbdev, fb_rect);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* make sure any previous work is done */
	flush_work(&qdev->fb_work);
	qxl_fb_fillrect_internal(info, fb_rect);
}

static void qxl_fb_copyarea_internal(struct fb_info *info,
				     const struct fb_copyarea *region)
{
	struct qxl_fbdev *qfbdev = info->par;

	qxl_draw_copyarea(qfbdev->qdev,
			  region->width, region->height,
			  region->sx, region->sy,
			  region->dx, region->dy);
}

static void qxl_fb_copyarea(struct fb_info *info,
			    const struct fb_copyarea *region)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		qxl_fb_delayed_copyarea(qfbdev, region);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* make sure any previous work is done */
	flush_work(&qdev->fb_work);
	qxl_fb_copyarea_internal(info, region);
}

static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
	qxl_draw_opaque_fb(qxl_fb_image, 0);
}

static void qxl_fb_imageblit_internal(struct fb_info *info,
				      const struct fb_image *image)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_fb_image qxl_fb_image;

	/* ensure proper ordering of rendering operations - TODO: must do
	 * this for everything. */
	qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
	qxl_fb_imageblit_safe(&qxl_fb_image);
}

static void qxl_fb_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		qxl_fb_delayed_imageblit(qfbdev, image);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* make sure any previous work is done */
	flush_work(&qdev->fb_work);
	qxl_fb_imageblit_internal(info, image);
}

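/*
 * Work item: drain the delayed-ops list and replay each queued fillrect,
 * copyarea or imageblit from process context.
 */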
static void qxl_fb_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
	unsigned long flags;
	struct qxl_fb_op *entry, *tmp;
	struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;

	/* since the irq context only adds entries to the end of the
	   list, dropping the lock while processing an entry is fine, as
	   the entry isn't modified by the operation code */
	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
	list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
		spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
		switch (entry->op_type) {
		case QXL_FB_OP_FILLRECT:
			qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
			break;
		case QXL_FB_OP_COPYAREA:
			qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
			break;
		case QXL_FB_OP_IMAGEBLIT:
			qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
			break;
		}
		spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
		list_del(&entry->head);
		kfree(entry);
	}
	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}

int qxl_fb_init(struct qxl_device *qdev)
{
	INIT_WORK(&qdev->fb_work, qxl_fb_work);
	return 0;
}

static struct fb_ops qxlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
	.fb_fillrect = qxl_fb_fillrect,
	.fb_copyarea = qxl_fb_copyarea,
	.fb_imageblit = qxl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

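/* Unmap, unpin and drop the reference on a framebuffer GEM object. */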
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
	int ret;

	ret = qxl_bo_reserve(qbo, false);
	if (likely(ret == 0)) {
		qxl_bo_kunmap(qbo);
		qxl_bo_unpin(qbo);
		qxl_bo_unreserve(qbo);
	}
	drm_gem_object_unreference_unlocked(gobj);
}

int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
				  struct drm_file *file_priv,
				  uint32_t *handle)
{
	int r;
	struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;

	BUG_ON(!gobj);
	/* drm_gem_handle_create adds a reference - good */
	r = drm_gem_handle_create(file_priv, gobj, handle);
	if (r)
		return r;
	return 0;
}

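/*
 * Allocate a GEM object in the surface domain for the fbdev framebuffer,
 * then pin and kmap it so it can be drawn to and mmapped.
 */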
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
				      struct drm_mode_fb_cmd2 *mode_cmd,
				      struct drm_gem_object **gobj_p)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	int bpp;
	int depth;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);

	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	/* TODO: deallocate and reallocate surface0 for real. Hack to just
	 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
	ret = qxl_gem_object_create(qdev, aligned_size, 0,
				    QXL_GEM_DOMAIN_SURFACE,
				    false, /* is discardable */
				    false, /* is kernel (false means device) */
				    NULL,
				    &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n",
		       aligned_size);
		return -ENOMEM;
	}
	qbo = gem_to_qxl_bo(gobj);

	qbo->surf.width = mode_cmd->width;
	qbo->surf.height = mode_cmd->height;
	qbo->surf.stride = mode_cmd->pitches[0];
	qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
	ret = qxl_bo_reserve(qbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
	if (ret) {
		qxl_bo_unreserve(qbo);
		goto out_unref;
	}
	ret = qxl_bo_kmap(qbo, NULL);
	qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;
out_unref:
	qxlfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}

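/*
 * Create the fbdev framebuffer: allocate the pinned backing object and a
 * vmalloc'ed shadow buffer, register an fb_info around them and hook up
 * deferred io.
 */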
static int qxlfb_create(struct qxl_fbdev *qfbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	struct device *device = &qdev->pdev->dev;
	int ret;
	int size;
	int bpp = sizes->surface_bpp;
	int depth = sizes->surface_depth;
	void *shadow;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
	qbo = gem_to_qxl_bo(gobj);
	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
		 mode_cmd.height, mode_cmd.pitches[0]);

	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
	/* TODO: what's the usual response to memory allocation errors? */
	BUG_ON(!shadow);
	QXL_INFO(qdev,
	"surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
		 qxl_bo_gpu_offset(qbo),
		 qxl_bo_mmap_offset(qbo),
		 qbo->kptr,
		 shadow);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	info = framebuffer_alloc(0, device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = qfbdev;

	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);

	fb = &qfbdev->qfb.base;

	/* setup helper with fb data */
	qfbdev->helper.fb = fb;
	qfbdev->helper.fbdev = info;
	qfbdev->shadow = shadow;
	strcpy(info->fix.id, "qxldrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
	info->fbops = &qxlfb_ops;

	/*
	 * TODO: using gobj->size in various places in this function. Not sure
	 * what the difference between the different sizes is.
	 */
	info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
	info->fix.smem_len = gobj->size;
	info->screen_base = qfbdev->shadow;
	info->screen_size = gobj->size;

	drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}
	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = qdev->vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->fbdefio = &qxl_defio;
	fb_deferred_io_init(info);

	qdev->fbdev_info = info;
	qdev->fbdev_qfb = &qfbdev->qfb;
	DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
	return 0;

out_unref:
	if (qbo) {
		ret = qxl_bo_reserve(qbo, false);
		if (likely(ret == 0)) {
			qxl_bo_kunmap(qbo);
			qxl_bo_unpin(qbo);
			qxl_bo_unreserve(qbo);
		}
	}
	if (fb && ret) {
		/* fb is embedded in qfbdev and gobj is unreferenced once
		 * below, so only the framebuffer needs cleaning up here */
		drm_framebuffer_cleanup(fb);
	}
	drm_gem_object_unreference(gobj);
	return ret;
}

static int qxl_fb_find_or_create_single(
		struct drm_fb_helper *helper,
		struct drm_fb_helper_surface_size *sizes)
{
	struct qxl_fbdev *qfbdev =
		container_of(helper, struct qxl_fbdev, helper);
	int new_fb = 0;
	int ret;

	if (!helper->fb) {
		ret = qxlfb_create(qfbdev, sizes);
		if (ret)
			return ret;
		new_fb = 1;
	}
	return new_fb;
}

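/* Tear down the fbdev emulation: unregister the fb_info, release the
 * pinned object and free the shadow buffer. */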
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
	struct fb_info *info;
	struct qxl_framebuffer *qfb = &qfbdev->qfb;

	if (qfbdev->helper.fbdev) {
		info = qfbdev->helper.fbdev;

		unregister_framebuffer(info);
		framebuffer_release(info);
	}
	if (qfb->obj) {
		qxlfb_destroy_pinned_object(qfb->obj);
		qfb->obj = NULL;
	}
	drm_fb_helper_fini(&qfbdev->helper);
	vfree(qfbdev->shadow);
	drm_framebuffer_cleanup(&qfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
	.fb_probe = qxl_fb_find_or_create_single,
};

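/* Set up the drm_fb_helper based fbdev emulation for this device. */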
int qxl_fbdev_init(struct qxl_device *qdev)
{
	struct qxl_fbdev *qfbdev;
	int bpp_sel = 32; /* TODO: parameter from somewhere? */
	int ret;

	qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
	if (!qfbdev)
		return -ENOMEM;

	qfbdev->qdev = qdev;
	qdev->mode_info.qfbdev = qfbdev;
	spin_lock_init(&qfbdev->delayed_ops_lock);
	INIT_LIST_HEAD(&qfbdev->delayed_ops);

	drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
			      &qxl_fb_helper_funcs);

	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
				 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
				 QXLFB_CONN_LIMIT);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
	if (ret)
		goto fini;

	ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&qfbdev->helper);
free:
	kfree(qfbdev);
	return ret;
}

void qxl_fbdev_fini(struct qxl_device *qdev)
{
	if (!qdev->mode_info.qfbdev)
		return;

	qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
	kfree(qdev->mode_info.qfbdev);
	qdev->mode_info.qfbdev = NULL;
}

void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
{
	fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
}

bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
{
	if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
		return true;
	return false;
}