/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"

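/*
 * drm_flip_work release callback: drop the reference held on the
 * framebuffer (if any) and free the flip object. The flip work core
 * frees the task itself once this callback has run.
 */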
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
	struct atmel_hlcdc_layer_fb_flip *flip = val;

	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip);
}

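/*
 * Immediate destruction path, used when a flip has not been handed to
 * the flip work queue yet: release the framebuffer reference and free
 * both the flip work task and the flip object.
 */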
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip->task);
	kfree(flip);
}

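/*
 * Release the DMA descriptors still attached to a flip, then queue the
 * flip for deferred destruction on the layer's flip work queue.
 */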
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
					struct atmel_hlcdc_layer_fb_flip *flip)
{
	int i;

	if (!flip)
		return;

	for (i = 0; i < layer->max_planes; i++) {
		if (!flip->dscrs[i])
			break;

		flip->dscrs[i]->status = 0;
		flip->dscrs[i] = NULL;
	}

	drm_flip_work_queue_task(&layer->gc, flip->task);
	drm_flip_work_commit(&layer->gc, layer->wq);
}

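/*
 * Reset an update slot: clear the updated-config bitmap and the config
 * cache, and release the framebuffer flip attached to the slot (if any).
 */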
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
					   int id)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (id < 0 || id > 1)
		return;

	slot = &upd->slots[id];
	bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
	memset(slot->configs, 0,
	       sizeof(*slot->configs) * layer->desc->nconfigs);

	if (slot->fb_flip) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
		slot->fb_flip = NULL;
	}
}

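/*
 * Apply the pending update slot: write back the modified config
 * registers, program the per-plane DMA descriptors (starting the
 * channel if it was disabled, chaining an add-to-queue transfer
 * otherwise) and trigger the resulting channel actions.
 * Called with layer->lock held.
 */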
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	unsigned int cfg;
	u32 action = 0;
	int i = 0;

	if (upd->pending < 0 || upd->pending > 1)
		return;

	slot = &upd->slots[upd->pending];

	for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
		regmap_write(regmap,
			     desc->regs_offset +
			     ATMEL_HLCDC_LAYER_CFG(layer, cfg),
			     slot->configs[cfg]);
		action |= ATMEL_HLCDC_LAYER_UPDATE;
	}

	fb_flip = slot->fb_flip;

	if (!fb_flip->fb)
		goto apply;

	if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_ADD_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
				     dscr->addr);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
				     dscr->ctrl);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
		dma->status = ATMEL_HLCDC_LAYER_ENABLED;
	} else {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_DSCR_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_A2Q;
	}

	/* Release unneeded descriptors */
	for (i = fb_flip->ngems; i < layer->max_planes; i++) {
		fb_flip->dscrs[i]->status = 0;
		fb_flip->dscrs[i] = NULL;
	}

	dma->queue = fb_flip;
	slot->fb_flip = NULL;

apply:
	if (action)
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
			     action);

	atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = -1;
}

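/*
 * Layer interrupt handler: collect the LOADED/DONE/OVERRUN state of
 * each memory plane, release flips whose transfer has completed, reset
 * the channel on overrun, and apply the pending update (if any) once
 * the add-to-queue slot is free.
 */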
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *flip;
	unsigned long flags;
	unsigned int isr, imr;
	unsigned int status;
	unsigned int plane_status;
	u32 flip_status;
	int i;

	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
	status = imr & isr;
	if (!status)
		return;

	spin_lock_irqsave(&layer->lock, flags);

	flip = dma->queue ? dma->queue : dma->cur;

	if (!flip) {
		spin_unlock_irqrestore(&layer->lock, flags);
		return;
	}

	/*
	 * Set LOADED and DONE flags: they'll be cleared if at least one
	 * memory plane is not LOADED or DONE.
	 */
	flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
	for (i = 0; i < flip->ngems; i++) {
		plane_status = (status >> (8 * i));

		if (plane_status &
		    (ATMEL_HLCDC_LAYER_ADD_IRQ |
		     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_ADD_IRQ |
					ATMEL_HLCDC_LAYER_DSCR_IRQ;
		}

		if (plane_status &
		    ATMEL_HLCDC_LAYER_DONE_IRQ &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_DONE_IRQ;
		}

		if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

		/*
		 * Clear LOADED and DONE flags if the memory plane is either
		 * not LOADED or not DONE.
		 */
		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

		/*
		 * An overrun on one memory plane impacts the whole
		 * framebuffer transfer, hence we set the OVERRUN flag as
		 * soon as one memory plane reports such an overrun.
		 */
		flip_status |= flip->dscrs[i]->status &
			       ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
	}

	/* Get changed bits */
	flip_status ^= flip->status;
	flip->status |= flip_status;

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = dma->queue;
		dma->queue = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
			     ATMEL_HLCDC_LAYER_RST);
		if (dma->queue)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->queue);

		if (dma->cur)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->cur);

		dma->cur = NULL;
		dma->queue = NULL;
	}

	if (!dma->queue) {
		atmel_hlcdc_layer_update_apply(layer);

		if (!dma->cur)
			dma->status = ATMEL_HLCDC_LAYER_DISABLED;
	}

	spin_unlock_irqrestore(&layer->lock, flags);
}

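/*
 * Disable the layer: reset the DMA channel, clear pending interrupts
 * and discard the in-flight and pending framebuffer transfers so that
 * the interrupt handler cannot restart the channel.
 */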
void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	unsigned long flags;
	unsigned int isr;

	spin_lock_irqsave(&layer->lock, flags);

	/* Disable the layer */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
		     ATMEL_HLCDC_LAYER_UPDATE);

	/* Clear all pending interrupts */
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

	/* Discard current and queued framebuffer transfers. */
	if (dma->cur) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (dma->queue) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
		dma->queue = NULL;
	}

	/*
	 * Then discard the pending update request (if any) to prevent
	 * the DMA irq handler from restarting the DMA channel after it
	 * has been disabled.
	 */
	if (upd->pending >= 0) {
		atmel_hlcdc_layer_update_reset(layer, upd->pending);
		upd->pending = -1;
	}

	dma->status = ATMEL_HLCDC_LAYER_DISABLED;

	spin_unlock_irqrestore(&layer->lock, flags);
}

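/*
 * Start an update transaction: allocate a flip object and its deferred
 * release task, reserve one free DMA descriptor per memory plane, and
 * initialize the next update slot from the pending one (or from the
 * hardware config registers when no update is pending).
 * Returns 0 on success, -ENOMEM on allocation failure or -EBUSY when
 * not enough DMA descriptors are available.
 */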
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	unsigned long flags;
	int i, j = 0;

	fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
	if (!fb_flip)
		return -ENOMEM;

	fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
	if (!fb_flip->task) {
		kfree(fb_flip);
		return -ENOMEM;
	}

	spin_lock_irqsave(&layer->lock, flags);

	upd->next = upd->pending ? 0 : 1;

	slot = &upd->slots[upd->next];

	for (i = 0; i < layer->max_planes * 4; i++) {
		if (!dma->dscrs[i].status) {
			fb_flip->dscrs[j++] = &dma->dscrs[i];
			dma->dscrs[i].status =
				ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
			if (j == layer->max_planes)
				break;
		}
	}

	if (j < layer->max_planes) {
		for (i = 0; i < j; i++)
			fb_flip->dscrs[i]->status = 0;

		spin_unlock_irqrestore(&layer->lock, flags);
		atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
		return -EBUSY;
	}

	slot->fb_flip = fb_flip;

	if (upd->pending >= 0) {
		memcpy(slot->configs,
		       upd->slots[upd->pending].configs,
		       layer->desc->nconfigs * sizeof(u32));
		memcpy(slot->updated_configs,
		       upd->slots[upd->pending].updated_configs,
		       DIV_ROUND_UP(layer->desc->nconfigs,
				    BITS_PER_BYTE * sizeof(unsigned long)) *
		       sizeof(unsigned long));
		if (upd->slots[upd->pending].fb_flip->fb) {
			slot->fb_flip->fb =
				upd->slots[upd->pending].fb_flip->fb;
			slot->fb_flip->ngems =
				upd->slots[upd->pending].fb_flip->ngems;
			drm_framebuffer_reference(slot->fb_flip->fb);
		}
	} else {
		regmap_bulk_read(regmap,
				 layer->desc->regs_offset +
				 ATMEL_HLCDC_LAYER_CFG(layer, 0),
				 upd->slots[upd->next].configs,
				 layer->desc->nconfigs);
	}

	spin_unlock_irqrestore(&layer->lock, flags);

	return 0;
}

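/*
 * Abort the transaction started by atmel_hlcdc_layer_update_start()
 * and release the resources attached to the next update slot.
 */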
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;

	atmel_hlcdc_layer_update_reset(layer, upd->next);
	upd->next = -1;
}

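/*
 * Attach a framebuffer to the next update slot: store each memory
 * plane's DMA address (GEM object base address plus the plane offset)
 * in the reserved descriptors and take a reference on the new
 * framebuffer, dropping the one held on the old framebuffer.
 */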
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
				     struct drm_framebuffer *fb,
				     unsigned int *offsets)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	struct drm_framebuffer *old_fb;
	int nplanes = 0;
	int i;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (fb)
		nplanes = drm_format_num_planes(fb->pixel_format);

	if (nplanes > layer->max_planes)
		return;

	slot = &upd->slots[upd->next];

	fb_flip = slot->fb_flip;
	old_fb = slot->fb_flip->fb;

	for (i = 0; i < nplanes; i++) {
		struct drm_gem_cma_object *gem;

		dscr = slot->fb_flip->dscrs[i];
		gem = drm_fb_cma_get_gem_obj(fb, i);
		dscr->addr = gem->paddr + offsets[i];
	}

	fb_flip->ngems = nplanes;
	fb_flip->fb = fb;

	if (fb)
		drm_framebuffer_reference(fb);

	if (old_fb)
		drm_framebuffer_unreference(old_fb);
}

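/*
 * Update the masked bits of a cached config register and mark it dirty
 * so it is written back to the hardware on the next commit.
 */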
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
				  u32 mask, u32 val)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (cfg >= layer->desc->nconfigs)
		return;

	slot = &upd->slots[upd->next];
	slot->configs[cfg] &= ~mask;
	slot->configs[cfg] |= (val & mask);
	set_bit(cfg, slot->updated_configs);
}

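/*
 * Commit the prepared update: promote the next slot to pending
 * (replacing any previously pending update) and apply it immediately
 * when no framebuffer transfer is queued; otherwise the interrupt
 * handler applies it once the queued transfer completes.
 */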
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	unsigned long flags;

	if (upd->next < 0 || upd->next > 1)
		return;

	spin_lock_irqsave(&layer->lock, flags);

	/*
	 * Release the pending update request and replace it with the
	 * new one.
	 */
	if (upd->pending >= 0)
		atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = upd->next;
	upd->next = -1;

	if (!dma->queue)
		atmel_hlcdc_layer_update_apply(layer);

	spin_unlock_irqrestore(&layer->lock, flags);
}

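/*
 * Allocate the pool of DMA descriptors (4 per memory plane) from
 * coherent memory and store each descriptor's own DMA address in its
 * next field, so that an idle descriptor points to itself.
 */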
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
				      struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	dma_addr_t dma_addr;
	int i;

	dma->dscrs = dma_alloc_coherent(dev->dev,
					layer->max_planes * 4 *
					sizeof(*dma->dscrs),
					&dma_addr, GFP_KERNEL);
	if (!dma->dscrs)
		return -ENOMEM;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->next = dma_addr + (i * sizeof(*dscr));
	}

	return 0;
}

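/*
 * Release the descriptor pool. dscrs[0].next holds the DMA address of
 * the coherent buffer (see atmel_hlcdc_layer_dma_init()).
 */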
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
					  struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	int i;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->status = 0;
	}

	dma_free_coherent(dev->dev, layer->max_planes * 4 *
			  sizeof(*dma->dscrs), dma->dscrs,
			  dma->dscrs[0].next);
}

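/*
 * Allocate both update slots (config caches and updated-config bitmaps)
 * from a single buffer and mark the pending and next slots as unused.
 */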
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
				struct atmel_hlcdc_layer *layer,
				const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	int updated_size;
	void *buffer;
	int i;

	updated_size = DIV_ROUND_UP(desc->nconfigs,
				    BITS_PER_BYTE *
				    sizeof(unsigned long));

	buffer = devm_kzalloc(dev->dev,
			      ((desc->nconfigs * sizeof(u32)) +
				(updated_size * sizeof(unsigned long))) * 2,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i = 0; i < 2; i++) {
		upd->slots[i].updated_configs = buffer;
		buffer += updated_size * sizeof(unsigned long);
		upd->slots[i].configs = buffer;
		buffer += desc->nconfigs * sizeof(u32);
	}

	upd->pending = -1;
	upd->next = -1;

	return 0;
}

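/*
 * Initialize a layer: reset the DMA channel, derive the maximum number
 * of memory planes from the supported formats, set up the flip work
 * queue, the DMA descriptor pool and the update slots, then flush and
 * enable the layer interrupts.
 */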
int atmel_hlcdc_layer_init(struct drm_device *dev,
			   struct atmel_hlcdc_layer *layer,
			   const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct regmap *regmap = dc->hlcdc->regmap;
	unsigned int tmp;
	int ret;
	int i;

	layer->hlcdc = dc->hlcdc;
	layer->wq = dc->wq;
	layer->desc = desc;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);
	for (i = 0; i < desc->formats->nformats; i++) {
		int nplanes = drm_format_num_planes(desc->formats->formats[i]);

		if (nplanes > layer->max_planes)
			layer->max_planes = nplanes;
	}

	spin_lock_init(&layer->lock);
	drm_flip_work_init(&layer->gc, desc->name,
			   atmel_hlcdc_layer_fb_flip_release);
	ret = atmel_hlcdc_layer_dma_init(dev, layer);
	if (ret)
		return ret;

	ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
	if (ret)
		return ret;

	/* Flush Status Register */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
		    &tmp);

	tmp = 0;
	for (i = 0; i < layer->max_planes; i++)
		tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
			ATMEL_HLCDC_LAYER_DSCR_IRQ |
			ATMEL_HLCDC_LAYER_ADD_IRQ |
			ATMEL_HLCDC_LAYER_DONE_IRQ |
			ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

	return 0;
}

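/*
 * Counterpart of atmel_hlcdc_layer_init(): mask the layer interrupts,
 * reset the DMA channel and release the descriptor pool and the flip
 * work queue.
 */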
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
			       struct atmel_hlcdc_layer *layer)
{
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);

	atmel_hlcdc_layer_dma_cleanup(dev, layer);
	drm_flip_work_cleanup(&layer->gc);
}