/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

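/*
 * Driver state for the firmware DMA path: the DesignWare DMA controller
 * instance embedded in the ADSP and the channel used to move firmware
 * images to and from DSP memory.
 */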
struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

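/*
 * Copy to DSP I/O memory as 32-bit words; any trailing bytes beyond a
 * multiple of 4 are not copied.
 */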
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	/* __iowrite32_copy() counts in 32-bit words, so divide by 4 */
	__iowrite32_copy((void *)dest, src, bytes / 4);
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

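/*
 * Submit a single memcpy descriptor on the firmware DMA channel and wait
 * for the transfer to complete before returning.
 */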
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
		src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

static struct dw_dma_platform_data dw_pdata = {
	.is_private = 1,
	.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
	.chan_priority = CHAN_PRIORITY_ASCENDING,
};

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;
	err = dw_dma_probe(chip, &dw_pdata);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

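/* match only channels belonging to the DMA controller attached to this DSP */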
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

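/*
 * Request a memcpy-capable channel from the DSP's DMA controller and
 * configure it for 4-byte bus width and the driver's maximum burst size.
 * chan_id is currently unused; the filter accepts any channel provided
 * by the controller.
 */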
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

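/*
 * Register the DMA controller embedded in the ADSP memory window so that
 * firmware images can be copied by DMA instead of PIO. This is a harmless
 * no-op on platforms that don't reserve a DMA resource.
 */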
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register the DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
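
/*
 * Typical caller flow (a sketch, not lifted from a specific driver; the
 * firmware file name and error handling here are illustrative only):
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *
 *	if (request_firmware(&fw, "intel-sst-fw.bin", dsp->dev) == 0) {
 *		sst_fw = sst_fw_new(dsp, fw, NULL);
 *		release_firmware(fw);	// sst_fw_new() keeps its own copy
 *		if (sst_fw == NULL)
 *			return -ENOMEM;
 *	}
 */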

int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free on the same device the buffer was allocated from */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

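/*
 * Create a per-instance runtime context for a module; a module may have
 * several runtimes, each with its own persistent memory blocks.
 */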
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

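/* find a free block matching the requested type and exact start offset */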
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/*
 * Allocate a contiguous run of blocks; the request offset must lie on a
 * block boundary. On failure all blocks are returned to the free list.
 */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find a single free block that can hold the whole module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then try a run of multiple free blocks that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* does the request span more than one block? */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module? */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section span beyond this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to the next block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does the section span beyond this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to the next block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

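/*
 * Allocate persistent DRAM for a module runtime, either at a caller-fixed
 * offset or wherever the allocator finds space.
 */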
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* free any blocks allocated for this runtime */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

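/*
 * Snapshot a runtime's persistent memory into a freshly allocated
 * DMA-coherent buffer so it can be restored after a DSP power cycle.
 */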
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

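/*
 * Copy a previously saved context back into the runtime's persistent
 * memory and release the save buffer.
 */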
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at a fixed offset? */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same scratch offset to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

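/* get a module runtime instance from its unique ID */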
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);